/*
Copyright (C) 1998-99 Paul Barton-Davis
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
Pool::Pool (string n, unsigned long item_size, unsigned long nitems)
: free_list (nitems)
, _name (n)
+#ifndef NDEBUG
+ , max_usage (0)
+#endif
{
_name = n;
/* since some overloaded ::operator new() might use this,
its important that we use a "lower level" allocator to
- get more space.
+ get more space.
*/
block = malloc (nitems * item_size);
Pool::~Pool ()
{
#ifndef NDEBUG
	// TODO: after collecting some stats, use DEBUG::PoolStats here
	cerr << "Pool: '" << _name << "' max: " << max_usage << " / " << total() << endmsg;
#endif
	/* storage was obtained with malloc() in the constructor, so release it with free() */
	free (block);
}
{
void *ptr;
+#ifndef NDEBUG
+ if (used () > max_usage) {
+ max_usage = used () + 1;
+ }
+#endif
+
if (free_list.read (&ptr, 1) < 1) {
fatal << "CRITICAL: " << _name << " POOL OUT OF MEMORY - RECOMPILE WITH LARGER SIZE!!" << endmsg;
- /*NOTREACHED*/
+ abort(); /*NOTREACHED*/
return 0;
} else {
return ptr;
}
/** Release an item's memory by writing its location to the free list */
-void
+void
Pool::release (void *ptr)
{
free_list.write (&ptr, 1);
/*---------------------------------------------*/
-MultiAllocSingleReleasePool::MultiAllocSingleReleasePool (string n, unsigned long isize, unsigned long nitems)
+MultiAllocSingleReleasePool::MultiAllocSingleReleasePool (string n, unsigned long isize, unsigned long nitems)
: Pool (n, isize, nitems)
{
}
{
}
-SingleAllocMultiReleasePool::SingleAllocMultiReleasePool (string n, unsigned long isize, unsigned long nitems)
+SingleAllocMultiReleasePool::SingleAllocMultiReleasePool (string n, unsigned long isize, unsigned long nitems)
: Pool (n, isize, nitems)
{
}
/*-------------------------------------------------------*/
-static void
+static void
free_per_thread_pool (void* ptr)
{
/* Rather than deleting the CrossThreadPool now, we add it to our trash buffer.
cp->parent()->add_to_trash (cp);
}
}
PerThreadPool::PerThreadPool ()
: _key (free_per_thread_pool)
, _trash (0)
_key.set (new CrossThreadPool (n, isize, nitems, this));
}
+/** @return True if CrossThreadPool for the current thread exists,
+ * False otherwise
+ */
+bool
+PerThreadPool::has_per_thread_pool ()
+{
+ CrossThreadPool* p = _key.get();
+ if (p) {
+ return true;
+ }
+ return false;
+}
+
+
/** @return CrossThreadPool for the current thread, which must previously have been created by
 *  calling create_per_thread_pool in the current thread.
 *
 *  @param must_exist if true, a missing per-thread pool is a fatal programming error;
 *  if false, 0 is returned when no pool exists for this thread.
 *  (presumably defaulted to true in the header — confirm)
 */
CrossThreadPool*
PerThreadPool::per_thread_pool (bool must_exist)
{
	CrossThreadPool* p = _key.get();

	if (!p && must_exist) {
		fatal << "programming error: no per-thread pool \"" << _name << "\" for thread " << pthread_name() << endmsg;
		abort(); /*NOTREACHED*/
	}

	return p;
}
PerThreadPool::add_to_trash (CrossThreadPool* p)
{
Glib::Threads::Mutex::Lock lm (_trash_mutex);
-
+
if (!_trash) {
warning << "Pool " << p->name() << " has no trash collector; a memory leak has therefore occurred" << endmsg;
return;
/* we have a lock here so that multiple threads can safely call add_to_trash (even though there
can only be one writer to the _trash RingBuffer)
*/
-
+
_trash->write (&p, 1);
}
, pending (nitems)
, _parent (p)
{
-
+
}
-void*
-CrossThreadPool::alloc ()
+void
+CrossThreadPool::flush_pending_with_ev (void *ptr)
+{
+ push (ptr);
+ flush_pending ();
+}
+
+void
+CrossThreadPool::flush_pending ()
{
void* ptr;
+ bool did_release = false;
+
+ DEBUG_TRACE (DEBUG::Pool, string_compose ("%1 %2 has %3 pending free entries waiting, status size %4 free %5 used %6\n", pthread_name(), name(), pending.read_space(),
+ total(), available(), used()));
- DEBUG_TRACE (DEBUG::Pool, string_compose ("%1 %2 has %3 pending free entries waiting\n", pthread_name(), name(), pending.read_space()));
while (pending.read (&ptr, 1) == 1) {
DEBUG_TRACE (DEBUG::Pool, string_compose ("%1 %2 pushes back a pending free list entry before allocating\n", pthread_name(), name()));
free_list.write (&ptr, 1);
+ did_release = true;
+ }
+
+ if (did_release) {
+ DEBUG_TRACE (DEBUG::Pool, string_compose ("Pool size: %1 free %2 used %3 pending now %4\n", total(), available(), used(), pending_size()));
}
+}
+
+void*
+CrossThreadPool::alloc ()
+{
+ /* process anything waiting to be deleted (i.e. moved back to the free list) */
+ flush_pending ();
+ /* now allocate from the potentially larger free list */
return Pool::alloc ();
}
void
-CrossThreadPool::push (void* t)
+CrossThreadPool::push (void* t)
{
pending.write (&t, 1);
}