[blender.git] / intern / memutil / MEM_CacheLimiter.h
/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Peter Schlaile <peter@schlaile.de> 2005
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file memutil/MEM_CacheLimiter.h
 *  \ingroup memutil
 */


#ifndef __MEM_CACHELIMITER_H__
#define __MEM_CACHELIMITER_H__

/**
 * \section MEM_CacheLimiter
 * This class defines a generic memory cache management system
 * to limit memory usage to a fixed global maximum.
 *
 * Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
 *
 * Usage example:
 *
 * class BigFatImage {
 * public:
 *       ~BigFatImage() { tell_everyone_we_are_gone(this); }
 * };
 *
 * void doit() {
 *     MEM_CacheLimiter<BigFatImage> BigFatImages(NULL);
 *
 *     MEM_CacheLimiterHandle<BigFatImage> *h = BigFatImages.insert(new BigFatImage);
 *
 *     BigFatImages.enforce_limits();
 *     h->ref();
 *
 *     ... work with the image ...
 *
 *     h->unref();
 *
 *     ... and leave the image in the cache.
 * }
 */
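/**
 * A second, more complete sketch (added for illustration, not part of the
 * original example): it shows how the optional data-size and item-priority
 * callbacks declared by MEM_CacheLimiter below can be wired in. The names
 * my_image, my_image_size() and my_image_priority() are placeholders.
 *
 * Note that whatever T::get_data() returns is what gets passed to both
 * callbacks, so T has to provide such a method when callbacks are used.
 *
 * class my_image {
 * public:
 *     void *get_data() { return this; }
 *     size_t size_in_bytes;
 * };
 *
 * size_t my_image_size(void *data)
 * {
 *     return ((my_image *) data)->size_in_bytes;
 * }
 *
 * int my_image_priority(void *data, int default_priority)
 * {
 *     // Elements with the lowest priority value are destroyed first;
 *     // returning the default keeps plain least-recently-used behavior.
 *     return default_priority;
 * }
 *
 * void doit_with_callbacks()
 * {
 *     MEM_CacheLimiter<my_image> cache(my_image_size);
 *     cache.set_item_priority_func(my_image_priority);
 *
 *     MEM_CacheLimiterHandle<my_image> *h = cache.insert(new my_image);
 *
 *     h->ref();
 *     // ... work with the image; a referenced element is never destroyed ...
 *     h->unref();
 *
 *     cache.enforce_limits();
 * }
 */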

#include <list>
#include <queue>
#include <vector>
#include "MEM_Allocator.h"

template<class T>
class MEM_CacheLimiter;

#ifndef __MEM_CACHELIMITERC_API_H__
extern "C" {
	void MEM_CacheLimiter_set_maximum(size_t m);
	size_t MEM_CacheLimiter_get_maximum();
};
#endif

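/**
 * Handle wrapping a single cached element. Handles are created and owned by
 * MEM_CacheLimiter; their reference count keeps elements that are currently
 * in use from being destroyed when the cache limits are enforced.
 */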
template<class T>
class MEM_CacheLimiterHandle {
public:
	explicit MEM_CacheLimiterHandle(T *data_, MEM_CacheLimiter<T> *parent_) :
		data(data_),
		refcount(0),
		priority(0),
		parent(parent_)
	{ }

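	/** Pin the element: while the reference count is non-zero,
	 *  destroy_if_possible() will not free the data. */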
	void ref() {
		refcount++;
	}

	void unref() {
		refcount--;
	}

	T *get() {
		return data;
	}

	const T *get() const {
		return data;
	}

	int get_refcount() const {
		return refcount;
	}

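	/** An element may only be destroyed when it is unreferenced (or its data is
	 *  already gone); on success destroy_if_possible() frees the data and
	 *  removes the handle from the cache. */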
	bool can_destroy() const {
		return !data || !refcount;
	}

	bool destroy_if_possible() {
		if (can_destroy()) {
			delete data;
			data = 0;
			unmanage();
			return true;
		}
		return false;
	}

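	/** Remove this handle from the parent cache and delete it;
	 *  the cached data itself is not freed here. */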
	void unmanage() {
		parent->unmanage(this);
	}

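	/** Move the element to the most-recently-used end of the cache queue. */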
	void touch() {
		parent->touch(this);
	}

	void set_priority(int priority) {
		this->priority = priority;
	}

	int get_priority(void) {
		return this->priority;
	}

private:
	friend class MEM_CacheLimiter<T>;

	T *data;
	int refcount;
	int priority;
	typename std::list<MEM_CacheLimiterHandle<T> *, MEM_Allocator<MEM_CacheLimiterHandle<T> *> >::iterator me;
	MEM_CacheLimiter<T> *parent;
};

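/**
 * The cache manager itself. Elements are kept in least-recently-used order and
 * enforce_limits() destroys unreferenced elements, lowest priority first, until
 * memory usage drops below the global maximum configured through
 * MEM_CacheLimiter_set_maximum().
 *
 * Two optional callbacks refine the behavior:
 * - MEM_CacheLimiter_DataSize_Func returns the size of a single element;
 *   without it the global MEM_get_memory_in_use() statistics are used,
 * - MEM_CacheLimiter_ItemPriority_Func may override the default LRU-based
 *   priority of an element.
 */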
template<class T>
class MEM_CacheLimiter {
public:
	typedef size_t (*MEM_CacheLimiter_DataSize_Func) (void *data);
	typedef int    (*MEM_CacheLimiter_ItemPriority_Func) (void *item, int default_priority);

	MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func getDataSize_)
		: getDataSize(getDataSize_),
		  getItemPriority(0) {
	}

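	/** Delete all handles still in the cache; the cached data the handles
	 *  point to is not freed here. */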
	~MEM_CacheLimiter() {
		for (iterator it = queue.begin(); it != queue.end(); it++) {
			delete *it;
		}
	}

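	/** Insert an element as the most recently used one and return its handle.
	 *  The handle remains owned by the cache limiter. */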
	MEM_CacheLimiterHandle<T> *insert(T *elem) {
		queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
		iterator it = queue.end();
		--it;
		queue.back()->me = it;
		return queue.back();
	}

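	/** Remove a handle from the cache and delete it; the data it refers to is
	 *  left untouched. */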
	void unmanage(MEM_CacheLimiterHandle<T> *handle) {
		queue.erase(handle->me);
		delete handle;
	}

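	/** Destroy unreferenced elements, lowest priority first, until memory usage
	 *  drops below the configured maximum. A maximum of 0 disables enforcement. */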
	void enforce_limits() {
		MEM_CachePriorityQueue priority_queue;
		size_t max = MEM_CacheLimiter_get_maximum();
		size_t mem_in_use, cur_size;

		if (max == 0) {
			return;
		}

		/* Memory usage is either the sum of the per-element sizes reported by
		 * the callback or the global allocator statistics. */
		if (getDataSize) {
			mem_in_use = total_size();
		} else {
			mem_in_use = MEM_get_memory_in_use();
		}

		if (mem_in_use <= max) {
			return;
		}

		priority_queue = get_priority_queue();

		while (!priority_queue.empty() && mem_in_use > max) {
			MEM_CacheElementPtr elem = priority_queue.top();

			priority_queue.pop();

			if (getDataSize) {
				cur_size = getDataSize(elem->get()->get_data());
			} else {
				cur_size = mem_in_use;
			}

			if (elem->destroy_if_possible()) {
				if (getDataSize) {
					mem_in_use -= cur_size;
				} else {
					/* Without a size callback, compare the usage recorded before
					 * the destruction with the current allocator statistics to
					 * account for what was actually freed. */
					mem_in_use -= cur_size - MEM_get_memory_in_use();
				}
			}
		}
	}

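	/** Move a handle to the most-recently-used end of the queue. */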
	void touch(MEM_CacheLimiterHandle<T> *handle) {
		queue.push_back(handle);
		queue.erase(handle->me);
		iterator it = queue.end();
		--it;
		handle->me = it;
	}

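	/** Install an optional callback that overrides the default LRU-based
	 *  priority of each element. */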
	void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func) {
		getItemPriority = item_priority_func;
	}

private:
	typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
	typedef std::list<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
	typedef typename MEM_CacheQueue::iterator iterator;

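	/* With this comparison the priority queue behaves as a min-heap: the element
	 * with the lowest priority value is on top and gets destroyed first. */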
	struct compare_element_priority : public std::binary_function<MEM_CacheElementPtr, MEM_CacheElementPtr, bool> {
		bool operator()(const MEM_CacheElementPtr left_elem, const MEM_CacheElementPtr right_elem) const {
			return left_elem->get_priority() > right_elem->get_priority();
		}
	};

	typedef std::priority_queue<MEM_CacheElementPtr, std::vector<MEM_CacheElementPtr>, compare_element_priority > MEM_CachePriorityQueue;

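	/** Sum of the sizes reported by the data size callback for all elements. */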
	size_t total_size() {
		size_t size = 0;
		for (iterator it = queue.begin(); it != queue.end(); it++) {
			size += getDataSize((*it)->get()->get_data());
		}
		return size;
	}

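	/** Build a priority queue over all elements, assigning LRU-based default
	 *  priorities which the item priority callback may override. */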
	MEM_CachePriorityQueue get_priority_queue(void) {
		MEM_CachePriorityQueue priority_queue;
		iterator it;
		int i;

		for (it = queue.begin(), i = 0; it != queue.end(); it++, i++) {
			MEM_CacheElementPtr elem = *it;
			int priority;

			/* By default the most recently used element gets priority 0, the
			 * highest priority; older elements get increasingly negative values
			 * so they are destroyed first. */
			priority = -(int)(queue.size() - i - 1);

			if (getItemPriority) {
				priority = getItemPriority(elem->get()->get_data(), priority);
			}

			elem->set_priority(priority);

			priority_queue.push(elem);
		}

		return priority_queue;
	}

	MEM_CacheQueue queue;
	MEM_CacheLimiter_DataSize_Func getDataSize;
	MEM_CacheLimiter_ItemPriority_Func getItemPriority;
};

#endif // __MEM_CACHELIMITER_H__