Tomato: improved cache management for movie clips
[blender.git] / intern / memutil / MEM_CacheLimiter.h
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Peter Schlaile <peter@schlaile.de> 2005
19  *
20  * ***** END GPL LICENSE BLOCK *****
21  */
22
23 /** \file memutil/MEM_CacheLimiter.h
24  *  \ingroup memutil
25  */
26
27
28 #ifndef __MEM_CACHELIMITER_H__
29 #define __MEM_CACHELIMITER_H__
30
31 /**
32  * @section MEM_CacheLimiter
33  * This class defines a generic memory cache management system
34  * to limit memory usage to a fixed global maximum.
35  * 
36  * Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
37  *
38  * Usage example:
39  *
 * class BigFatImage {
 * public:
 *       ~BigFatImage() { tell_everyone_we_are_gone(this); }
 * };
 *
 * void doit() {
 *     MEM_CacheLimiter<BigFatImage> BigFatImages(NULL);
 *
 *     MEM_CacheLimiterHandle<BigFatImage>* h = BigFatImages.insert(new BigFatImage);
 *
 *     BigFatImages.enforce_limits();
 *     h->ref();
 *
 *     ... work with image ...
 *
 *     h->unref();
 *
 *     ... leave image in cache ...
 * }
 */
59
60 #include <list>
61 #include <queue>
62 #include <vector>
63 #include "MEM_Allocator.h"
64
65 template<class T>
66 class MEM_CacheLimiter;
67
68 #ifndef __MEM_CACHELIMITERC_API_H__
69 extern "C" {
70         extern void MEM_CacheLimiter_set_maximum(size_t m);
71         extern size_t MEM_CacheLimiter_get_maximum();
72 };
73 #endif
74
75 template<class T>
76 class MEM_CacheLimiterHandle {
77 public:
78         explicit MEM_CacheLimiterHandle(T * data_, 
79                                          MEM_CacheLimiter<T> * parent_) 
80                 : data(data_), refcount(0), parent(parent_) { }
81
82         void ref() { 
83                 refcount++; 
84         }
85         void unref() { 
86                 refcount--; 
87         }
88         T * get() { 
89                 return data; 
90         }
91         const T * get() const { 
92                 return data; 
93         }
94         int get_refcount() const { 
95                 return refcount; 
96         }
97         bool can_destroy() const { 
98                 return !data || !refcount; 
99         }
100         bool destroy_if_possible() {
101                 if (can_destroy()) {
102                         delete data;
103                         data = 0;
104                         unmanage();
105                         return true;
106                 }
107                 return false;
108         }
109         void unmanage() {
110                 parent->unmanage(this);
111         }
112         void touch() {
113                 parent->touch(this);
114         }
115         void set_priority(int priority) {
116                 this->priority = priority;
117         }
118         int get_priority(void) {
119                 return this->priority;
120         }
121 private:
122         friend class MEM_CacheLimiter<T>;
123
124         T * data;
125         int refcount;
126         int priority;
127         typename std::list<MEM_CacheLimiterHandle<T> *,
128           MEM_Allocator<MEM_CacheLimiterHandle<T> *> >::iterator me;
129         MEM_CacheLimiter<T> * parent;
130 };
131
132 template<class T>
133 class MEM_CacheLimiter {
134 public:
135         typedef size_t (*MEM_CacheLimiter_DataSize_Func) (void *data);
136         typedef int (*MEM_CacheLimiter_ItemPriority_Func) (void *item, int default_priority);
137         MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func getDataSize_)
138                 : getDataSize(getDataSize_) {
139         }
140         ~MEM_CacheLimiter() {
141                 for (iterator it = queue.begin(); it != queue.end(); it++) {
142                         delete *it;
143                 }
144         }
145         MEM_CacheLimiterHandle<T> * insert(T * elem) {
146                 queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
147                 iterator it = queue.end();
148                 --it;
149                 queue.back()->me = it;
150                 return queue.back();
151         }
152         void unmanage(MEM_CacheLimiterHandle<T> * handle) {
153                 queue.erase(handle->me);
154                 delete handle;
155         }
156         void enforce_limits() {
157                 MEM_CachePriorityQueue priority_queue;
158                 size_t max = MEM_CacheLimiter_get_maximum();
159                 size_t mem_in_use, cur_size;
160
161                 if (max == 0) {
162                         return;
163                 }
164
165                 if(getDataSize) {
166                         mem_in_use = total_size();
167                 } else {
168                         mem_in_use = MEM_get_memory_in_use();
169                 }
170
171                 if (mem_in_use <= max) {
172                         return;
173                 }
174
175                 priority_queue = get_priority_queue();
176
177                 while (!priority_queue.empty() && mem_in_use > max) {
178                         MEM_CacheElementPtr elem = priority_queue.top();
179
180                         if(getDataSize) {
181                                 cur_size = getDataSize(elem->get()->get_data());
182                         } else {
183                                 cur_size = mem_in_use;
184                         }
185
186                         elem->destroy_if_possible();
187
188                         priority_queue.pop();
189
190                         if (getDataSize) {
191                                 mem_in_use -= cur_size;
192                         } else {
193                                 mem_in_use -= cur_size - MEM_get_memory_in_use();
194                         }
195                 }
196         }
197         void touch(MEM_CacheLimiterHandle<T> * handle) {
198                 queue.push_back(handle);
199                 queue.erase(handle->me);
200                 iterator it = queue.end();
201                 --it;
202                 handle->me = it;
203         }
204         void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func) {
205                 getItemPriority = item_priority_func;
206         }
207 private:
208         typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
209         typedef std::list<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
210         typedef typename MEM_CacheQueue::iterator iterator;
211
212         struct compare_element_priority : public std::binary_function<MEM_CacheElementPtr, MEM_CacheElementPtr, bool> {
213                 bool operator()(const MEM_CacheElementPtr left_elem, const MEM_CacheElementPtr right_elem) const {
214                         return left_elem->get_priority() > right_elem->get_priority();
215                 }
216         };
217
218         typedef std::priority_queue<MEM_CacheElementPtr, std::vector<MEM_CacheElementPtr>, compare_element_priority > MEM_CachePriorityQueue;
219
220         size_t total_size() {
221                 size_t size = 0;
222                 for (iterator it = queue.begin(); it != queue.end(); it++) {
223                         size+= getDataSize((*it)->get()->get_data());
224                 }
225                 return size;
226         }
227
228         MEM_CachePriorityQueue get_priority_queue(void) {
229                 MEM_CachePriorityQueue priority_queue;
230                 iterator it;
231                 int i;
232
233                 for (it = queue.begin(), i = 0; it != queue.end(); it++, i++) {
234                         MEM_CacheElementPtr elem = *it;
235                         int priority;
236
237                         priority = i;
238
239                         if (getItemPriority) {
240                                 priority = getItemPriority(elem->get()->get_data(), priority);
241                         }
242
243                         elem->set_priority(priority);
244
245                         priority_queue.push(elem);
246                 }
247
248                 return priority_queue;
249         }
250
251         MEM_CacheQueue queue;
252         MEM_CacheLimiter_DataSize_Func getDataSize;
253         MEM_CacheLimiter_ItemPriority_Func getItemPriority;
254 };
255
256 #endif // __MEM_CACHELIMITER_H__