/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Peter Schlaile <peter@schlaile.de> 2005
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file memutil/MEM_CacheLimiter.h
 *  \ingroup memutil
 */

#ifndef __MEM_CACHELIMITER_H__
#define __MEM_CACHELIMITER_H__

/**
 * @section MEM_CacheLimiter
 * This class defines a generic memory cache management system
 * to limit memory usage to a fixed global maximum.
 *
 * Please use the C-API in MEM_CacheLimiterC-Api.h for code written in C.
 *
 * Usage example:
 *
 *   class BigFatImage {
 *   public:
 *       ~BigFatImage() { tell_everyone_we_are_gone(this); }
 *   };
 *
 *   MEM_Cache<BigFatImage> BigFatImages;
 *   MEM_Cache_Handle<BigFatImage> *h = BigFatImages.insert(new BigFatImage);
 *   BigFatImages.enforce_limits();
 *   h->ref();
 *   ... work with the image ...
 *   h->unref();   // leave image in cache
 */

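/*
 * A fuller sketch of wiring the actual templates in this header together
 * (illustrative only; BigFatImage, its members and image_size_func are
 * hypothetical).  When a data-size or item-priority callback is installed,
 * the cached type is expected to provide a get_data() method, because the
 * callbacks are invoked with elem->get()->get_data():
 *
 *   class BigFatImage {
 *   public:
 *       void *get_data() { return (void *) this; }
 *       size_t size_in_bytes;
 *       bool is_pinned;
 *   };
 *
 *   static size_t image_size_func(void *data)
 *   {
 *       return ((BigFatImage *) data)->size_in_bytes;
 *   }
 *
 *   MEM_CacheLimiter<BigFatImage> cache(image_size_func);
 *   MEM_CacheLimiterHandle<BigFatImage> *h = cache.insert(new BigFatImage);
 *
 *   MEM_CacheLimiter_set_maximum(512 * 1024 * 1024);  // global cap in bytes
 *   cache.enforce_limits();                           // evict until under the cap
 *
 *   h->ref();             // pin while working with the image
 *   // ... use h->get() ...
 *   h->unref();           // unpin; a later enforce_limits() may evict it
 */
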
#include <list>

#include "MEM_Allocator.h"
#include "MEM_guardedalloc.h"  /* MEM_get_memory_in_use() */

template<class T>
class MEM_CacheLimiter;

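/* The maximum is a single global byte count shared by all limiter instances;
 * these prototypes mirror MEM_CacheLimiterC-Api.h for C++-only includes. */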
#ifndef __MEM_CACHELIMITERC_API_H__
extern "C" {
	void MEM_CacheLimiter_set_maximum(size_t m);
	size_t MEM_CacheLimiter_get_maximum();
}
#endif

template<class T>
class MEM_CacheLimiterHandle {
public:
	explicit MEM_CacheLimiterHandle(T *data_, MEM_CacheLimiter<T> *parent_)
		: data(data_), refcount(0), parent(parent_)
	{
	}

	void ref()   { refcount++; }
	void unref() { refcount--; }

	T *get()             { return data; }
	const T *get() const { return data; }

	int get_refcount() const { return refcount; }

	/* a handle may only be destroyed when nobody holds a reference */
	bool can_destroy() const {
		return !data || !refcount;
	}

	bool destroy_if_possible() {
		if (!can_destroy()) {
			return false;
		}
		delete data;
		data = NULL;
		unmanage();  /* removes this handle from the parent queue, which also deletes it */
		return true;
	}

	void unmanage() {
		parent->unmanage(this);
	}

	void touch() {
		parent->touch(this);
	}

private:
	friend class MEM_CacheLimiter<T>;

	T *data;
	int refcount;
	typename std::list<MEM_CacheLimiterHandle<T> *, MEM_Allocator<MEM_CacheLimiterHandle<T> *> >::iterator me;
	MEM_CacheLimiter<T> *parent;
};

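/*
 * The limiter keeps its handles in a list ordered from oldest (front) to most
 * recently used (back): insert() appends, touch() moves a handle to the back,
 * and enforce_limits() evicts destroyable handles, lowest priority first (by
 * default the oldest), until memory use drops below the global maximum.
 */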
template<class T>
class MEM_CacheLimiter {
public:
	typedef size_t (*MEM_CacheLimiter_DataSize_Func) (void *data);
	typedef int (*MEM_CacheLimiter_ItemPriority_Func) (void *item, int default_priority);

	MEM_CacheLimiter(MEM_CacheLimiter_DataSize_Func getDataSize_)
		: getDataSize(getDataSize_),
		  getItemPriority(NULL)  /* must be initialized: it is tested before use */
	{
	}

	~MEM_CacheLimiter() {
		for (iterator it = queue.begin(); it != queue.end(); it++) {
			delete *it;
		}
	}

	MEM_CacheLimiterHandle<T> *insert(T *elem) {
		queue.push_back(new MEM_CacheLimiterHandle<T>(elem, this));
		iterator it = queue.end();
		--it;
		queue.back()->me = it;
		return queue.back();
	}

	void unmanage(MEM_CacheLimiterHandle<T> *handle) {
		queue.erase(handle->me);
		delete handle;
	}

	void enforce_limits() {
		size_t max = MEM_CacheLimiter_get_maximum();
		size_t mem_in_use, cur_size;

		if (max == 0) {
			return;
		}

		if (getDataSize) {
			mem_in_use = total_size();
		}
		else {
			mem_in_use = MEM_get_memory_in_use();
		}

		if (mem_in_use <= max) {
			return;
		}

		/* evict the least-priority destroyable element until we drop below the limit */
		while (!queue.empty() && mem_in_use > max) {
			MEM_CacheElementPtr elem = get_least_priority_destroyable_element();

			if (!elem) {
				break;
			}

			if (getDataSize) {
				cur_size = getDataSize(elem->get()->get_data());
			}
			else {
				cur_size = mem_in_use;
			}

			if (elem->destroy_if_possible()) {
				if (getDataSize) {
					mem_in_use -= cur_size;
				}
				else {
					/* no size callback: re-read the allocator's global counter */
					mem_in_use -= cur_size - MEM_get_memory_in_use();
				}
			}
			else {
				break;  /* nothing more can be freed right now */
			}
		}
	}

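	/* Mark a handle as most recently used by moving it to the back of the queue. */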
	void touch(MEM_CacheLimiterHandle<T> *handle) {
		queue.push_back(handle);
		queue.erase(handle->me);
		iterator it = queue.end();
		--it;
		handle->me = it;
	}

	void set_item_priority_func(MEM_CacheLimiter_ItemPriority_Func item_priority_func) {
		getItemPriority = item_priority_func;
	}

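	/*
	 * Illustrative item-priority callback (hypothetical, not part of this
	 * header): 'item' is whatever T::get_data() returned, 'default_priority'
	 * encodes the LRU position (0 for the newest element, more negative for
	 * older ones), and the destroyable element with the lowest returned value
	 * is evicted first, so returning a larger number keeps an item cached
	 * longer.
	 *
	 *   static int image_priority_func(void *item, int default_priority)
	 *   {
	 *       BigFatImage *img = (BigFatImage *) item;  // hypothetical type
	 *       return img->is_pinned ? default_priority + 100 : default_priority;
	 *   }
	 *
	 *   cache.set_item_priority_func(image_priority_func);
	 */
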
private:
	typedef MEM_CacheLimiterHandle<T> *MEM_CacheElementPtr;
	typedef std::list<MEM_CacheElementPtr, MEM_Allocator<MEM_CacheElementPtr> > MEM_CacheQueue;
	typedef typename MEM_CacheQueue::iterator iterator;

	size_t total_size() {
		size_t size = 0;
		for (iterator it = queue.begin(); it != queue.end(); it++) {
			size += getDataSize((*it)->get()->get_data());
		}
		return size;
	}

	MEM_CacheElementPtr get_least_priority_destroyable_element(void) {
		if (queue.empty())
			return NULL;

		if (!getItemPriority)
			return *queue.begin();

		MEM_CacheElementPtr best_match_elem = NULL;
		int best_match_priority = 0;
		iterator it;
		int i;

		for (it = queue.begin(), i = 0; it != queue.end(); it++, i++) {
			MEM_CacheElementPtr elem = *it;

			if (!elem->can_destroy())
				continue;

			/* by default 0 means highest priority element */
			/* casting a size type to int is questionable,
			 * but unlikely to cause problems */
			int priority = -((int)(queue.size()) - i - 1);
			priority = getItemPriority(elem->get()->get_data(), priority);

			if (priority < best_match_priority || best_match_elem == NULL) {
				best_match_priority = priority;
				best_match_elem = elem;
			}
		}

		return best_match_elem;
	}

	MEM_CacheQueue queue;
	MEM_CacheLimiter_DataSize_Func getDataSize;
	MEM_CacheLimiter_ItemPriority_Func getItemPriority;
};

#endif // __MEM_CACHELIMITER_H__