Cleanup: remove redundant doxygen \file argument
[blender.git] intern/memutil/MEM_CacheLimiter.h
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  */
16
/** \file
 * \ingroup memutil
 */
19
20
21 #ifndef __MEM_CACHELIMITER_H__
22 #define __MEM_CACHELIMITER_H__
23
24 /**
25  * \section MEM_CacheLimiter
26  * This class defines a generic memory cache management system
27  * to limit memory usage to a fixed global maximum.
28  *
29  * \note Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
30  *
31  * Usage example:
32  *
33  * \code{.cpp}
34  * class BigFatImage {
35  * public:
36  *       ~BigFatImage() { tell_everyone_we_are_gone(this); }
37  * };
38  *
39  * void doit()
40  * {
41  *     MEM_Cache<BigFatImage> BigFatImages;
42  *
43  *     MEM_Cache_Handle<BigFatImage>* h = BigFatImages.insert(new BigFatImage);
44  *
45  *     BigFatImages.enforce_limits();
46  *     h->ref();
47  *
48  *     // work with image...
49  *
50  *     h->unref();
51  *
 *     // leave image in cache.
 * }
 * \endcode
 */
55
56 #include <list>
57 #include <queue>
58 #include <vector>
59 #include "MEM_Allocator.h"
60
/* Forward declaration: MEM_CacheLimiterHandle stores a pointer to its owning
 * limiter, so the full class definition is only needed further below. */
template<class T>
class MEM_CacheLimiter;

/* Accessors for the process-wide cache limit, shared with the C-API.
 * Guarded so they are not redeclared when MEM_CacheLimiterC-Api.h (which is
 * assumed to provide the same declarations) is included alongside this
 * header. */
#ifndef __MEM_CACHELIMITERC_API_H__
extern "C" {
	void MEM_CacheLimiter_set_maximum(size_t m);
	size_t MEM_CacheLimiter_get_maximum();
	void MEM_CacheLimiter_set_disabled(bool disabled);
	bool MEM_CacheLimiter_is_disabled(void);
};
#endif
72
73 template<class T>
74 class MEM_CacheLimiterHandle {
75 public:
76         explicit MEM_CacheLimiterHandle(T * data_,MEM_CacheLimiter<T> *parent_) :
77                 data(data_),
78                 refcount(0),
79                 parent(parent_)
80         { }
81
82         void ref() {
83                 refcount++;
84         }
85
86         void unref() {
87                 refcount--;
88         }
89
90         T *get() {
91                 return data;
92         }
93
94         const T *get() const {
95                 return data;
96         }
97
98         int get_refcount() const {
99                 return refcount;
100         }
101
102         bool can_destroy() const {
103                 return !data || !refcount;
104         }
105
106         bool destroy_if_possible() {
107                 if (can_destroy()) {
108                         delete data;
109                         data = NULL;
110                         unmanage();
111                         return true;
112                 }
113                 return false;
114         }
115
116         void unmanage() {
117                 parent->unmanage(this);
118         }
119
120         void touch() {
121                 parent->touch(this);
122         }
123
124 private:
125         friend class MEM_CacheLimiter<T>;
126
127         T * data;
128         int refcount;
129         int pos;
130         MEM_CacheLimiter<T> * parent;
131 };
132
133 template<class T>
134 class MEM_CacheLimiter {
135 public:
136         typedef size_t (*MEM_CacheLimiter_DataSize_Func) (void *data);
137         typedef int    (*MEM_CacheLimiter_ItemPriority_Func) (void *item, int default_priority);
138         typedef bool   (*MEM_CacheLimiter_ItemDestroyable_Func) (void *item);
139
140         MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func data_size_func)
141                 : data_size_func(data_size_func) {
142         }
143
144         ~MEM_CacheLimiter() {
145                 int i;
146                 for (i = 0; i < queue.size(); i++) {
147                         delete queue[i];
148                 }
149         }
150
151         MEM_CacheLimiterHandle<T> *insert(T * elem) {
152                 queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
153                 queue.back()->pos = queue.size() - 1;
154                 return queue.back();
155         }
156
157         void unmanage(MEM_CacheLimiterHandle<T> *handle) {
158                 int pos = handle->pos;
159                 queue[pos] = queue.back();
160                 queue[pos]->pos = pos;
161                 queue.pop_back();
162                 delete handle;
163         }
164
165         size_t get_memory_in_use() {
166                 size_t size = 0;
167                 if (data_size_func) {
168                         int i;
169                         for (i = 0; i < queue.size(); i++) {
170                                 size += data_size_func(queue[i]->get()->get_data());
171                         }
172                 }
173                 else {
174                         size = MEM_get_memory_in_use();
175                 }
176                 return size;
177         }
178
179         void enforce_limits() {
180                 size_t max = MEM_CacheLimiter_get_maximum();
181                 bool is_disabled = MEM_CacheLimiter_is_disabled();
182                 size_t mem_in_use, cur_size;
183
184                 if (is_disabled) {
185                         return;
186                 }
187
188                 if (max == 0) {
189                         return;
190                 }
191
192                 mem_in_use = get_memory_in_use();
193
194                 if (mem_in_use <= max) {
195                         return;
196                 }
197
198                 while (!queue.empty() && mem_in_use > max) {
199                         MEM_CacheElementPtr elem = get_least_priority_destroyable_element();
200
201                         if (!elem)
202                                 break;
203
204                         if (data_size_func) {
205                                 cur_size = data_size_func(elem->get()->get_data());
206                         }
207                         else {
208                                 cur_size = mem_in_use;
209                         }
210
211                         if (elem->destroy_if_possible()) {
212                                 if (data_size_func) {
213                                         mem_in_use -= cur_size;
214                                 }
215                                 else {
216                                         mem_in_use -= cur_size - MEM_get_memory_in_use();
217                                 }
218                         }
219                 }
220         }
221
222         void touch(MEM_CacheLimiterHandle<T> * handle) {
223                 /* If we're using custom priority callback re-arranging the queue
224                  * doesn't make much sense because we'll iterate it all to get
225                  * least priority element anyway.
226                  */
227                 if (item_priority_func == NULL) {
228                         queue[handle->pos] = queue.back();
229                         queue[handle->pos]->pos = handle->pos;
230                         queue.pop_back();
231                         queue.push_back(handle);
232                         handle->pos = queue.size() - 1;
233                 }
234         }
235
236         void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func) {
237                 this->item_priority_func = item_priority_func;
238         }
239
240         void set_item_destroyable_func(MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func) {
241                 this->item_destroyable_func = item_destroyable_func;
242         }
243
244 private:
245         typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
246         typedef std::vector<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
247         typedef typename MEM_CacheQueue::iterator iterator;
248
249         /* Check whether element can be destroyed when enforcing cache limits */
250         bool can_destroy_element(MEM_CacheElementPtr &elem) {
251                 if (!elem->can_destroy()) {
252                         /* Element is referenced */
253                         return false;
254                 }
255                 if (item_destroyable_func) {
256                         if (!item_destroyable_func(elem->get()->get_data()))
257                                 return false;
258                 }
259                 return true;
260         }
261
262         MEM_CacheElementPtr get_least_priority_destroyable_element(void) {
263                 if (queue.empty())
264                         return NULL;
265
266                 MEM_CacheElementPtr best_match_elem = NULL;
267
268                 if (!item_priority_func) {
269                         for (iterator it = queue.begin(); it != queue.end(); it++) {
270                                 MEM_CacheElementPtr elem = *it;
271                                 if (!can_destroy_element(elem))
272                                         continue;
273                                 best_match_elem = elem;
274                                 break;
275                         }
276                 }
277                 else {
278                         int best_match_priority = 0;
279                         int i;
280
281                         for (i = 0; i < queue.size(); i++) {
282                                 MEM_CacheElementPtr elem = queue[i];
283
284                                 if (!can_destroy_element(elem))
285                                         continue;
286
287                                 /* by default 0 means highest priority element */
288                                 /* casting a size type to int is questionable,
289                                    but unlikely to cause problems */
290                                 int priority = -((int)(queue.size()) - i - 1);
291                                 priority = item_priority_func(elem->get()->get_data(), priority);
292
293                                 if (priority < best_match_priority || best_match_elem == NULL) {
294                                         best_match_priority = priority;
295                                         best_match_elem = elem;
296                                 }
297                         }
298                 }
299
300                 return best_match_elem;
301         }
302
303         MEM_CacheQueue queue;
304         MEM_CacheLimiter_DataSize_Func data_size_func;
305         MEM_CacheLimiter_ItemPriority_Func item_priority_func;
306         MEM_CacheLimiter_ItemDestroyable_Func item_destroyable_func;
307 };
308
309 #endif  // __MEM_CACHELIMITER_H__