/* blender.git: intern/memutil/MEM_CacheLimiter.h
 * (doxygen: add newline after \file) */
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  */
16
17 /** \file
18  * \ingroup memutil
19  */
20
21
22 #ifndef __MEM_CACHELIMITER_H__
23 #define __MEM_CACHELIMITER_H__
24
25 /**
26  * \section MEM_CacheLimiter
27  * This class defines a generic memory cache management system
28  * to limit memory usage to a fixed global maximum.
29  *
30  * \note Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
31  *
32  * Usage example:
33  *
34  * \code{.cpp}
35  * class BigFatImage {
36  * public:
37  *       ~BigFatImage() { tell_everyone_we_are_gone(this); }
38  * };
39  *
40  * void doit()
41  * {
42  *     MEM_Cache<BigFatImage> BigFatImages;
43  *
44  *     MEM_Cache_Handle<BigFatImage>* h = BigFatImages.insert(new BigFatImage);
45  *
46  *     BigFatImages.enforce_limits();
47  *     h->ref();
48  *
49  *     // work with image...
50  *
51  *     h->unref();
52  *
53  *     // leave image in cache.
54  * \endcode
55  */
56
57 #include <list>
58 #include <queue>
59 #include <vector>
60 #include "MEM_Allocator.h"
61
template<class T>
class MEM_CacheLimiter;

#ifndef __MEM_CACHELIMITERC_API_H__
extern "C" {
	void MEM_CacheLimiter_set_maximum(size_t m);
	size_t MEM_CacheLimiter_get_maximum();
	void MEM_CacheLimiter_set_disabled(bool disabled);
	bool MEM_CacheLimiter_is_disabled(void);
};
#endif

/**
 * Reference-counted wrapper around one cached element.
 *
 * The handle is created and owned by a MEM_CacheLimiter<T>; users pin the
 * element with ref()/unref() so that enforce_limits() will not destroy it
 * while it is in use.
 */
template<class T>
class MEM_CacheLimiterHandle {
public:
	explicit MEM_CacheLimiterHandle(T * data_, MEM_CacheLimiter<T> *parent_)
		: data(data_),
		  refcount(0),
		  parent(parent_)
	{
	}

	/** Pin the element so cache enforcement will not free it. */
	void ref()
	{
		++refcount;
	}

	/** Release a pin taken with ref(). */
	void unref()
	{
		--refcount;
	}

	/** Access the wrapped element (NULL once destroyed). */
	T *get()
	{
		return data;
	}

	/** Read-only access to the wrapped element. */
	const T *get() const
	{
		return data;
	}

	/** Current number of outstanding ref() pins. */
	int get_refcount() const
	{
		return refcount;
	}

	/** Destroyable when the data is already gone or nobody holds a pin. */
	bool can_destroy() const
	{
		return (data == NULL) || (refcount == 0);
	}

	/**
	 * Delete the wrapped data and detach this handle from its parent cache.
	 * \return true when destruction actually took place.
	 */
	bool destroy_if_possible()
	{
		if (!can_destroy()) {
			return false;
		}
		delete data;
		data = NULL;
		unmanage();
		return true;
	}

	/** Remove this handle from the parent cache (the parent deletes it). */
	void unmanage()
	{
		parent->unmanage(this);
	}

	/** Mark this element as most recently used in the parent cache. */
	void touch()
	{
		parent->touch(this);
	}

private:
	friend class MEM_CacheLimiter<T>;

	T * data;
	int refcount;
	int pos;
	MEM_CacheLimiter<T> * parent;
};
133
134 template<class T>
135 class MEM_CacheLimiter {
136 public:
137         typedef size_t (*MEM_CacheLimiter_DataSize_Func) (void *data);
138         typedef int    (*MEM_CacheLimiter_ItemPriority_Func) (void *item, int default_priority);
139         typedef bool   (*MEM_CacheLimiter_ItemDestroyable_Func) (void *item);
140
141         MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func data_size_func)
142                 : data_size_func(data_size_func) {
143         }
144
145         ~MEM_CacheLimiter() {
146                 int i;
147                 for (i = 0; i < queue.size(); i++) {
148                         delete queue[i];
149                 }
150         }
151
152         MEM_CacheLimiterHandle<T> *insert(T * elem) {
153                 queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
154                 queue.back()->pos = queue.size() - 1;
155                 return queue.back();
156         }
157
158         void unmanage(MEM_CacheLimiterHandle<T> *handle) {
159                 int pos = handle->pos;
160                 queue[pos] = queue.back();
161                 queue[pos]->pos = pos;
162                 queue.pop_back();
163                 delete handle;
164         }
165
166         size_t get_memory_in_use() {
167                 size_t size = 0;
168                 if (data_size_func) {
169                         int i;
170                         for (i = 0; i < queue.size(); i++) {
171                                 size += data_size_func(queue[i]->get()->get_data());
172                         }
173                 }
174                 else {
175                         size = MEM_get_memory_in_use();
176                 }
177                 return size;
178         }
179
180         void enforce_limits() {
181                 size_t max = MEM_CacheLimiter_get_maximum();
182                 bool is_disabled = MEM_CacheLimiter_is_disabled();
183                 size_t mem_in_use, cur_size;
184
185                 if (is_disabled) {
186                         return;
187                 }
188
189                 if (max == 0) {
190                         return;
191                 }
192
193                 mem_in_use = get_memory_in_use();
194
195                 if (mem_in_use <= max) {
196                         return;
197                 }
198
199                 while (!queue.empty() && mem_in_use > max) {
200                         MEM_CacheElementPtr elem = get_least_priority_destroyable_element();
201
202                         if (!elem)
203                                 break;
204
205                         if (data_size_func) {
206                                 cur_size = data_size_func(elem->get()->get_data());
207                         }
208                         else {
209                                 cur_size = mem_in_use;
210                         }
211
212                         if (elem->destroy_if_possible()) {
213                                 if (data_size_func) {
214                                         mem_in_use -= cur_size;
215                                 }
216                                 else {
217                                         mem_in_use -= cur_size - MEM_get_memory_in_use();
218                                 }
219                         }
220                 }
221         }
222
223         void touch(MEM_CacheLimiterHandle<T> * handle) {
224                 /* If we're using custom priority callback re-arranging the queue
225                  * doesn't make much sense because we'll iterate it all to get
226                  * least priority element anyway.
227                  */
228                 if (item_priority_func == NULL) {
229                         queue[handle->pos] = queue.back();
230                         queue[handle->pos]->pos = handle->pos;
231                         queue.pop_back();
232                         queue.push_back(handle);
233                         handle->pos = queue.size() - 1;
234                 }
235         }
236
237         void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func) {
238                 this->item_priority_func = item_priority_func;
239         }
240
241         void set_item_destroyable_func(MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func) {
242                 this->item_destroyable_func = item_destroyable_func;
243         }
244
245 private:
246         typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
247         typedef std::vector<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
248         typedef typename MEM_CacheQueue::iterator iterator;
249
250         /* Check whether element can be destroyed when enforcing cache limits */
251         bool can_destroy_element(MEM_CacheElementPtr &elem) {
252                 if (!elem->can_destroy()) {
253                         /* Element is referenced */
254                         return false;
255                 }
256                 if (item_destroyable_func) {
257                         if (!item_destroyable_func(elem->get()->get_data()))
258                                 return false;
259                 }
260                 return true;
261         }
262
263         MEM_CacheElementPtr get_least_priority_destroyable_element(void) {
264                 if (queue.empty())
265                         return NULL;
266
267                 MEM_CacheElementPtr best_match_elem = NULL;
268
269                 if (!item_priority_func) {
270                         for (iterator it = queue.begin(); it != queue.end(); it++) {
271                                 MEM_CacheElementPtr elem = *it;
272                                 if (!can_destroy_element(elem))
273                                         continue;
274                                 best_match_elem = elem;
275                                 break;
276                         }
277                 }
278                 else {
279                         int best_match_priority = 0;
280                         int i;
281
282                         for (i = 0; i < queue.size(); i++) {
283                                 MEM_CacheElementPtr elem = queue[i];
284
285                                 if (!can_destroy_element(elem))
286                                         continue;
287
288                                 /* by default 0 means highest priority element */
289                                 /* casting a size type to int is questionable,
290                                    but unlikely to cause problems */
291                                 int priority = -((int)(queue.size()) - i - 1);
292                                 priority = item_priority_func(elem->get()->get_data(), priority);
293
294                                 if (priority < best_match_priority || best_match_elem == NULL) {
295                                         best_match_priority = priority;
296                                         best_match_elem = elem;
297                                 }
298                         }
299                 }
300
301                 return best_match_elem;
302         }
303
304         MEM_CacheQueue queue;
305         MEM_CacheLimiter_DataSize_Func data_size_func;
306         MEM_CacheLimiter_ItemPriority_Func item_priority_func;
307         MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func;
308 };
309
310 #endif  // __MEM_CACHELIMITER_H__