/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Peter Schlaile <peter@schlaile.de> 2005
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file memutil/MEM_CacheLimiter.h
 *  \ingroup memutil
 */

#ifndef __MEM_CACHELIMITER_H__
#define __MEM_CACHELIMITER_H__

/**
 * \section MEM_CacheLimiter
 * This class defines a generic memory cache management system
 * to limit memory usage to a fixed global maximum.
 *
 * \note Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
 *
 * Usage example:
 *
 * \code{.cpp}
 * class BigFatImage {
 * public:
 *     ~BigFatImage() { tell_everyone_we_are_gone(this); }
 * };
 *
 * void doit()
 * {
 *     MEM_CacheLimiter<BigFatImage> BigFatImages(NULL);  // NULL: no per-item size callback.
 *
 *     MEM_CacheLimiterHandle<BigFatImage> *h = BigFatImages.insert(new BigFatImage);
 *
 *     h->ref();
 *     BigFatImages.enforce_limits();
 *
 *     // work with image...
 *
 *     h->unref();
 *
 *     // leave image in cache.
 * }
 * \endcode
 */
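
/* The limit itself is global. A typical setup (the value here is purely
 * illustrative) configures it once before caches are filled:
 *
 *     MEM_CacheLimiter_set_maximum(512 * 1024 * 1024);  // e.g. a 512 MiB budget
 *
 * enforce_limits() does nothing while the maximum is 0 or the limiter has been
 * disabled via MEM_CacheLimiter_set_disabled(true).
 */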

#include <list>
#include <queue>
#include <vector>
#include "MEM_Allocator.h"

template<class T>
class MEM_CacheLimiter;

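/* Global limit state, shared with the C API; the guard avoids duplicating
 * these declarations when MEM_CacheLimiterC-Api.h is included as well. */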
#ifndef __MEM_CACHELIMITERC_API_H__
extern "C" {
	void MEM_CacheLimiter_set_maximum(size_t m);
	size_t MEM_CacheLimiter_get_maximum();
	void MEM_CacheLimiter_set_disabled(bool disabled);
	bool MEM_CacheLimiter_is_disabled(void);
};
#endif

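/**
 * Handle returned by MEM_CacheLimiter::insert(). It wraps a cached element
 * and carries a reference count; while the count is non-zero, the element is
 * skipped when cache limits are enforced.
 */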
template<class T>
class MEM_CacheLimiterHandle {
public:
	explicit MEM_CacheLimiterHandle(T *data_, MEM_CacheLimiter<T> *parent_) :
		data(data_),
		refcount(0),
		parent(parent_)
	{ }

	void ref() {
		refcount++;
	}

	void unref() {
		refcount--;
	}

	T *get() {
		return data;
	}

	const T *get() const {
		return data;
	}

	int get_refcount() const {
		return refcount;
	}

	bool can_destroy() const {
		return !data || !refcount;
	}

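	/* Delete the cached data (when unreferenced) and remove this handle from
	 * the parent limiter. Note that unmanage() deletes the handle itself, so
	 * it must not be used again once this returns true. */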
	bool destroy_if_possible() {
		if (can_destroy()) {
			delete data;
			data = NULL;
			unmanage();
			return true;
		}
		return false;
	}

	void unmanage() {
		parent->unmanage(this);
	}

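	/* Mark this element as most recently used in the parent limiter's queue. */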
	void touch() {
		parent->touch(this);
	}

private:
	friend class MEM_CacheLimiter<T>;

	T *data;
	int refcount;
	int pos;
	MEM_CacheLimiter<T> *parent;
};

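/**
 * The cache limiter itself: keeps a queue of handles and, when the total
 * memory in use exceeds the global maximum, destroys destroyable elements
 * starting with the least recently used or lowest priority one.
 */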
template<class T>
class MEM_CacheLimiter {
public:
	typedef size_t (*MEM_CacheLimiter_DataSize_Func) (void *data);
	typedef int    (*MEM_CacheLimiter_ItemPriority_Func) (void *item, int default_priority);
	typedef bool   (*MEM_CacheLimiter_ItemDestroyable_Func) (void *item);

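	/* data_size_func may be NULL; the limiter then falls back to the global
	 * MEM_get_memory_in_use() statistics instead of per-item sizes. */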
	MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func data_size_func)
		: data_size_func(data_size_func),
		  item_priority_func(NULL),
		  item_destroyable_func(NULL) {
	}

	~MEM_CacheLimiter() {
		for (size_t i = 0; i < queue.size(); i++) {
			delete queue[i];
		}
	}

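	/* Add an element to the cache. The returned handle is owned by the
	 * limiter and is used for reference counting, touching and removal. */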
	MEM_CacheLimiterHandle<T> *insert(T *elem) {
		queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
		queue.back()->pos = queue.size() - 1;
		return queue.back();
	}

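	/* Remove a handle from the queue and delete it; the cached data itself
	 * is not freed here. */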
	void unmanage(MEM_CacheLimiterHandle<T> *handle) {
		int pos = handle->pos;
		queue[pos] = queue.back();
		queue[pos]->pos = pos;
		queue.pop_back();
		delete handle;
	}

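	/* Total memory used by the cache: the sum of data_size_func() over all
	 * elements, or MEM_get_memory_in_use() when no size callback is set. */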
	size_t get_memory_in_use() {
		size_t size = 0;
		if (data_size_func) {
			for (size_t i = 0; i < queue.size(); i++) {
				size += data_size_func(queue[i]->get()->get_data());
			}
		}
		else {
			size = MEM_get_memory_in_use();
		}
		return size;
	}

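	/* Destroy destroyable elements, best eviction candidate first, until
	 * memory use drops below the configured maximum. Does nothing when the
	 * limiter is disabled or no maximum is set. */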
	void enforce_limits() {
		size_t max = MEM_CacheLimiter_get_maximum();
		bool is_disabled = MEM_CacheLimiter_is_disabled();
		size_t mem_in_use, cur_size;

		if (is_disabled) {
			return;
		}

		if (max == 0) {
			return;
		}

		mem_in_use = get_memory_in_use();

		if (mem_in_use <= max) {
			return;
		}

		while (!queue.empty() && mem_in_use > max) {
			MEM_CacheElementPtr elem = get_least_priority_destroyable_element();

			if (!elem)
				break;

			if (data_size_func) {
				cur_size = data_size_func(elem->get()->get_data());
			}
			else {
				cur_size = mem_in_use;
			}

			if (elem->destroy_if_possible()) {
				if (data_size_func) {
					mem_in_use -= cur_size;
				}
				else {
					mem_in_use -= cur_size - MEM_get_memory_in_use();
				}
			}
		}
	}

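	/* Move a handle to the back of the queue, making it the most recently
	 * used element (a no-op when a custom priority callback is used). */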
	void touch(MEM_CacheLimiterHandle<T> *handle) {
		/* If we're using a custom priority callback, re-arranging the queue
		 * doesn't make much sense because we'll iterate over all of it to
		 * find the least-priority element anyway.
		 */
		if (item_priority_func == NULL) {
			queue[handle->pos] = queue.back();
			queue[handle->pos]->pos = handle->pos;
			queue.pop_back();
			queue.push_back(handle);
			handle->pos = queue.size() - 1;
		}
	}

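	/* Optional priority callback: lower return values are evicted first.
	 * The default_priority passed to the callback ranks elements earlier in
	 * the queue lower. */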
	void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func) {
		this->item_priority_func = item_priority_func;
	}

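	/* Optional veto callback: elements for which it returns false are never
	 * destroyed while enforcing limits. */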
	void set_item_destroyable_func(MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func) {
		this->item_destroyable_func = item_destroyable_func;
	}

private:
	typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
	typedef std::vector<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
	typedef typename MEM_CacheQueue::iterator iterator;

	/* Check whether an element can be destroyed when enforcing cache limits. */
	bool can_destroy_element(MEM_CacheElementPtr &elem) {
		if (!elem->can_destroy()) {
			/* Element is still referenced. */
			return false;
		}
		if (item_destroyable_func) {
			if (!item_destroyable_func(elem->get()->get_data()))
				return false;
		}
		return true;
	}

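	/* Find the best eviction candidate: without a priority callback this is
	 * the first destroyable element in the queue; with one, it is the
	 * destroyable element with the lowest priority. */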
	MEM_CacheElementPtr get_least_priority_destroyable_element(void) {
		if (queue.empty())
			return NULL;

		MEM_CacheElementPtr best_match_elem = NULL;

		if (!item_priority_func) {
			for (iterator it = queue.begin(); it != queue.end(); ++it) {
				MEM_CacheElementPtr elem = *it;
				if (!can_destroy_element(elem))
					continue;
				best_match_elem = elem;
				break;
			}
		}
		else {
			int best_match_priority = 0;
			int i;

			for (i = 0; i < (int)queue.size(); i++) {
				MEM_CacheElementPtr elem = queue[i];

				if (!can_destroy_element(elem))
					continue;

				/* By default 0 means the highest priority element.
				 * Casting a size type to int is questionable,
				 * but unlikely to cause problems. */
				int priority = -((int)(queue.size()) - i - 1);
				priority = item_priority_func(elem->get()->get_data(), priority);

				if (priority < best_match_priority || best_match_elem == NULL) {
					best_match_priority = priority;
					best_match_elem = elem;
				}
			}
		}

		return best_match_elem;
	}

	MEM_CacheQueue queue;
	MEM_CacheLimiter_DataSize_Func data_size_func;
	MEM_CacheLimiter_ItemPriority_Func item_priority_func;
	MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func;
};

#endif  // __MEM_CACHELIMITER_H__