forked from pool/glibc
--- nscd/mem.c~	2008-11-23 12:59:15.000000000 +0100
+++ nscd/mem.c	2008-11-23 13:52:01.000000000 +0100
@@ -392,6 +392,7 @@
               moves = moves->next = new_move;
             }
 
+          ref_t off_alloc_start = off_alloc;
           /* The following loop will prepare to move this much data.  */
           off_free += off_allocend - off_alloc;
 
@@ -410,23 +411,60 @@
               else
                 {
-                  assert (next_data < &he_data[db->head->nentries]);
-                  assert ((*next_data)->packet == off_alloc);
-
-                  struct datahead *dh = (struct datahead *) (db->data + off_alloc);
-                  do
-                    {
-                      assert ((*next_data)->key >= (*next_data)->packet);
-                      assert ((*next_data)->key + (*next_data)->len
-                              <= (*next_data)->packet + dh->allocsize);
-
-                      (*next_data)->packet -= disp;
-                      (*next_data)->key -= disp;
-                      ++next_data;
+                  if (next_data < &he_data[db->head->nentries] && (*next_data)->packet == off_alloc)
+                    {
+                      struct datahead *dh = (struct datahead *) (db->data + off_alloc);
+                      do
+                        {
+                          assert ((*next_data)->key >= (*next_data)->packet);
+                          assert ((*next_data)->key + (*next_data)->len
+                                  <= (*next_data)->packet + dh->allocsize);
+
+                          (*next_data)->packet -= disp;
+                          (*next_data)->key -= disp;
+                          ++next_data;
+                        }
+                      while (next_data < &he_data[db->head->nentries]
+                             && (*next_data)->packet == off_alloc);
+
+                      off_alloc += (dh->allocsize + BLOCK_ALIGN_M1) & ~BLOCK_ALIGN_M1;
                     }
-                  while (next_data < &he_data[db->head->nentries]
-                         && (*next_data)->packet == off_alloc);
+                  else
+                    {
+                      /* This is not yet a hashed element but an in-flight
+                         mempool allocation.  We cannot displace it, so we
+                         reset the shake-down at this point; typically the
+                         in-flight allocations sit at the top of the pool,
+                         so nothing lies above them and they will be shaken
+                         down in the next gc run.  In theory the pool could
+                         grow indefinitely if gc always ran while an
+                         allocation was in flight, but that does not seem
+                         to be a realistic scenario.  */
+                      nscd_ssize_t blocklen = 0;
+                      struct mem_in_flight *mrunp = mem_in_flight_list;
+
+                      /* See the first mem_in_flight_list loop above
+                         for correctness considerations.  */
+                      /* Typically there is only a tiny number of
+                         in-flight records, so we need not bother
+                         pre-sorting the list.  */
+                      while (!blocklen && mrunp != NULL)
+                        {
+                          for (enum in_flight idx = IDX_result_data;
+                               idx < IDX_last && mrunp->block[idx].dbidx == db - dbs; ++idx)
+                            if (mrunp->block[idx].blockoff == off_alloc)
+                              blocklen = mrunp->block[idx].blocklen;
+
+                          mrunp = mrunp->next;
+                        }
+                      assert (blocklen > 0);
+
+                      moves->size = off_alloc - off_alloc_start;
+                      off_free = off_allocend;
+                      disp = 0;
 
-                  off_alloc += (dh->allocsize + BLOCK_ALIGN_M1) & ~BLOCK_ALIGN_M1;
+                      off_alloc += blocklen;
+                    }
                 }
             }
           assert (off_alloc == off_allocend);
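
For readers without the surrounding mem.c context, here is a compact standalone sketch of the lookup the new else branch performs when the gc compaction scan hits an in-flight allocation. The general shape of struct mem_in_flight and the in_flight indices follows nscd, but the concrete typedefs, field layout, and the find_in_flight_len helper are simplifications assumed for illustration only, not the actual nscd declarations.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins (assumed for this sketch): in nscd these are
   offset/size types into the mapped database file.  */
typedef uint32_t ref_t;
typedef int32_t nscd_ssize_t;

enum in_flight { IDX_result_data = 0, IDX_record_data, IDX_last };

/* Assumed shape of an in-flight record: a thread may hold up to
   IDX_last pending mempool allocations that are not yet hashed.  */
struct mem_in_flight
{
  struct
  {
    int dbidx;               /* Database the block was carved from.  */
    ref_t blockoff;          /* Offset of the allocation in the pool.  */
    nscd_ssize_t blocklen;   /* Length of the allocation in bytes.  */
  } block[IDX_last];
  struct mem_in_flight *next;
};

/* Hypothetical helper mirroring the scan in the patch: walk the
   in-flight list and return the length of the block starting at
   OFF_ALLOC in database DBIDX, or 0 if no record covers it (the
   patch asserts this never happens).  */
static nscd_ssize_t
find_in_flight_len (struct mem_in_flight *list, int dbidx, ref_t off_alloc)
{
  nscd_ssize_t blocklen = 0;

  for (struct mem_in_flight *mrunp = list;
       blocklen == 0 && mrunp != NULL; mrunp = mrunp->next)
    for (enum in_flight idx = IDX_result_data;
         idx < IDX_last && mrunp->block[idx].dbidx == dbidx; ++idx)
      if (mrunp->block[idx].blockoff == off_alloc)
        blocklen = mrunp->block[idx].blocklen;

  return blocklen;
}

int
main (void)
{
  /* One record: a 48-byte result block at pool offset 128 in db 0.  */
  struct mem_in_flight m = {
    .block = { [IDX_result_data] = { .dbidx = 0, .blockoff = 128,
                                     .blocklen = 48 },
               [IDX_record_data] = { .dbidx = 0, .blockoff = 0,
                                     .blocklen = 0 } },
    .next = NULL,
  };

  nscd_ssize_t len = find_in_flight_len (&m, 0, 128);
  assert (len == 48);
  printf ("in-flight block at offset 128: %d bytes\n", (int) len);
  return 0;
}

As in the patch, the dbidx test is part of the inner loop condition, so the scan of a record stops at the first block belonging to a different database. On a hit, the patched gc truncates the pending move record (moves->size = off_alloc - off_alloc_start), zeroes the displacement, and skips past the block, deferring its compaction to the next gc run.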