src/server/pool.c

/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 *
 * THE BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of Sun Microsystems, Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Generic pool handling routines.
 *
 * These routines reduce contention on the heap and guard against
 * memory leaks.
 *
 * Thread warning:
 * The allocator's global state is thread safe, but individual pools
 * are not: simultaneous mallocs/frees against the same pool are
 * unsafe. Do not share a pool across multiple threads without
 * providing your own synchronization.
 *
 * Mike Belshe
 * 11-20-95
 */
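
/*
 * Usage sketch (illustrative, not part of the original source). All
 * allocations from a pool are released together when the pool is
 * destroyed, so per-allocation frees are unnecessary:
 *
 *   pool_handle_t *pool = pool_create();
 *   if (pool != NULL) {
 *       char *buf = (char *)pool_malloc(pool, 256);
 *       char *copy = pool_strdup(pool, "hello");
 *       ...
 *       pool_destroy(pool);    // releases buf and copy together
 *   }
 */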

//include "netsite.h"
//include "systems.h"
#include "systhr.h"
#include "pool_pvt.h"
#include "ereport.h"
//include "base/session.h"
//include "frame/req.h"
//include "frame/http.h"
#include "util.h"
//include "base/crit.h"

//include "base/dbtbase.h"

#include <stdlib.h>
#include <string.h>

/*
 * The PERM_* allocator wrappers normally come from the server headers
 * that are disabled above. Fall back to the C library so this file
 * stands alone; this assumes no separate "permanent" allocator is in
 * play.
 */
#ifndef PERM_MALLOC
#define PERM_MALLOC malloc
#define PERM_FREE free
#define PERM_REALLOC realloc
#define PERM_CALLOC calloc
#define PERM_STRDUP strdup
#endif

/* Pool configuration parameters */
static pool_config_t pool_config = POOL_CONFIG_INIT;

/* Pool global statistics */
static pool_global_stats_t pool_global_stats;

static int
pool_internal_init()
{
    /*
     * Lazily create the global lock. The check itself is not
     * synchronized; it relies on the first pool being created before
     * additional threads start up.
     */
    if (pool_global_stats.lock == NULL) {
        pool_global_stats.lock = PR_NewLock();
        if (pool_global_stats.lock == NULL)
            return -1;
    }

    if (pool_config.block_size == 0) {
        //ereport(LOG_INFORM, XP_GetAdminStr(DBT_poolInitInternalAllocatorDisabled_));
    }

    return 0;
}
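
/*
 * pool_init() historically implemented the NSAPI "pool-init" Init SAF,
 * configured from magnus.conf along these lines (illustrative; the
 * pblock lookups below are stubbed out in this port, so the hardcoded
 * defaults apply):
 *
 *   Init fn="pool-init" block-size="16384" disable="false"
 *
 * "block-size" sets the granularity of the blocks a pool carves
 * allocations from; "disable" routes every pool_malloc() straight to
 * PERM_MALLOC().
 */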

NSAPI_PUBLIC int
pool_init(pblock *pb, Session *sn, Request *rq)
{
    //char *str_block_size = pblock_findval("block-size", pb);
    //char *str_pool_disable = pblock_findval("disable", pb);
    /* pblock lookups are stubbed out in this port; use fixed defaults */
    char *str_block_size = "16384";
    char *str_pool_disable = "false";
    int n;

    if (str_block_size != NULL) {
        n = atoi(str_block_size);
        if (n > 0)
            pool_config.block_size = n;
    }

    if (str_pool_disable && util_getboolean(str_pool_disable, PR_TRUE)) {
        /* Pooling disabled: we'll call PERM_MALLOC() on each pool_malloc() call */
        pool_config.block_size = 0;
        pool_config.retain_size = 0;
        pool_config.retain_num = 0;
    }

    pool_internal_init();

    return REQ_PROCEED;
}

static block_t *
_create_block(pool_t *pool, int size)
{
    block_t *newblock;
    char *newdata;
    block_t **blk_ptr;
    long blen;

    /* Does the pool have any retained blocks on its free list? */
    for (blk_ptr = &pool->free_blocks;
         (newblock = *blk_ptr) != NULL; blk_ptr = &newblock->next) {

        /* Yes, is this block large enough? */
        blen = newblock->end - newblock->data;
        if (blen >= size) {

            /* Yes, take it off the free list */
            *blk_ptr = newblock->next;
            pool->free_size -= blen;
            --pool->free_num;

            /* Give the block to the caller */
            newblock->start = newblock->data;
            goto done;
        }
    }

    newblock = (block_t *)PERM_MALLOC(sizeof(block_t));
    newdata = (char *)PERM_MALLOC(size);
    if (newblock == NULL || (newdata == NULL && size != 0)) {
        //ereport(LOG_CATASTROPHE,
        //        XP_GetAdminStr(DBT_poolCreateBlockOutOfMemory_));
        PERM_FREE(newblock);
        PERM_FREE(newdata);
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
    }
    newblock->data = newdata;
    newblock->start = newblock->data;
    newblock->end = newblock->data + size;
    newblock->next = NULL;

#ifdef POOL_GLOBAL_STATISTICS
    PR_AtomicIncrement((PRInt32 *)&pool_global_stats.blkAlloc);
#endif /* POOL_GLOBAL_STATISTICS */

done:

#ifdef PER_POOL_STATISTICS
    ++pool->stats.blkAlloc;
#endif /* PER_POOL_STATISTICS */

    return newblock;
}

static void
_free_block(block_t *block)
{
#ifdef POOL_ZERO_DEBUG
    long blen = block->end - block->data;

    memset(block->data, POOL_ZERO_DEBUG, blen);
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(block->data);

#ifdef POOL_ZERO_DEBUG
    /* sizeof(*block), not sizeof(block): scribble the struct, not the pointer */
    memset(block, POOL_ZERO_DEBUG, sizeof(*block));
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(block);

#ifdef POOL_GLOBAL_STATISTICS
    PR_AtomicIncrement((PRInt32 *)&pool_global_stats.blkFree);
#endif /* POOL_GLOBAL_STATISTICS */
}

/* _ptr_in_pool()
 * Checks to see if the given pointer is in the given pool.
 * If true, returns a ptr to the block_t containing the ptr;
 * otherwise returns NULL.
 */
block_t *
_ptr_in_pool(pool_t *pool, const void *ptr)
{
    block_t *block_ptr = NULL;

    /* Try to find a block which contains this ptr */

    if (POOL_PTR_IN_BLOCK(pool->curr_block, ptr)) {
        block_ptr = pool->curr_block;
    }
    else {
        for (block_ptr = pool->used_blocks; block_ptr; block_ptr = block_ptr->next) {
            if (POOL_PTR_IN_BLOCK(block_ptr, ptr))
                break;
        }
    }
    return block_ptr;
}


NSAPI_PUBLIC pool_handle_t *
pool_create()
{
    pool_t *newpool;

    newpool = (pool_t *)PERM_MALLOC(sizeof(pool_t));

    if (newpool) {
        /* Have to initialize now, as pools sometimes get created
         * before pool_init can be called...
         */
        if (pool_global_stats.lock == NULL) {
            if (pool_internal_init() != 0) {
                PERM_FREE(newpool);
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }
        }

        newpool->used_blocks = NULL;
        newpool->free_blocks = NULL;
        newpool->free_size = 0;
        newpool->free_num = 0;
        newpool->size = 0;
        newpool->next = NULL;

#ifdef PER_POOL_STATISTICS
        /* Initialize per pool statistics */
        memset((void *)(&newpool->stats), 0, sizeof(newpool->stats));
        newpool->stats.thread = PR_GetCurrentThread();
        newpool->stats.created = PR_Now();
#endif /* PER_POOL_STATISTICS */

        /* No need to lock, since pool has not been exposed yet */
        newpool->curr_block = _create_block(newpool, pool_config.block_size);
        if (newpool->curr_block == NULL) {
            //ereport(LOG_CATASTROPHE, XP_GetAdminStr(DBT_poolCreateOutOfMemory_));
            pool_destroy((pool_handle_t *)newpool);
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return NULL;
        }

        /* Add to known pools list */
        PR_Lock(pool_global_stats.lock);
        newpool->next = pool_global_stats.poolList;
        pool_global_stats.poolList = newpool;
        ++pool_global_stats.createCnt;
#ifdef PER_POOL_STATISTICS
        newpool->stats.poolId = pool_global_stats.createCnt;
#endif /* PER_POOL_STATISTICS */
        PR_Unlock(pool_global_stats.lock);
    }
    else {
        //ereport(LOG_CATASTROPHE, XP_GetAdminStr(DBT_poolCreateOutOfMemory_1));
        PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
    }

    return (pool_handle_t *)newpool;
}
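
/*
 * Failure-handling sketch (illustrative): pool_create() reports
 * out-of-memory through NSPR rather than aborting, so callers can do:
 *
 *   pool_handle_t *pool = pool_create();
 *   if (pool == NULL) {
 *       PR_ASSERT(PR_GetError() == PR_OUT_OF_MEMORY_ERROR);
 *       return REQ_ABORTED;    // assumes an NSAPI-style caller
 *   }
 */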

/*
 * pool_mark - get mark for subsequent recycle
 *
 * This function returns a value that can be used to free all pool
 * memory which is subsequently allocated, without freeing memory
 * that has already been allocated when pool_mark() is called.
 * The pool_recycle() function is used to free the memory allocated
 * since pool_mark() was called.
 *
 * This function may be called several times before pool_recycle()
 * is called, but some care must be taken not to pass an invalid
 * mark value to pool_recycle(), which would cause all pool memory
 * to be freed. A mark value becomes invalid when pool_recycle() is
 * called with a previously returned mark value.
 */
NSAPI_PUBLIC void *
pool_mark(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return NULL;

#ifdef PER_POOL_STATISTICS
    pool->stats.thread = PR_GetCurrentThread();
#endif /* PER_POOL_STATISTICS */

    /* Never return end, as it points outside the block; return the
     * block itself instead (pool_recycle() checks for this case) */
    if (pool->curr_block->start == pool->curr_block->end)
        return pool->curr_block;

    return (void *)(pool->curr_block->start);
}

/*
 * pool_recycle - recycle memory in a pool
 *
 * This function returns all the allocated memory for a pool back to
 * a free list associated with the pool. It is like pool_destroy() in
 * the sense that all data structures previously allocated from the
 * pool are freed, but it keeps the memory associated with the pool,
 * and doesn't actually destroy the pool.
 *
 * The "mark" argument can be a value previously returned by
 * pool_mark(), in which case the pool is returned to the state it
 * was in when pool_mark() was called, or it can be NULL, in which
 * case the pool is completely recycled.
 */
NSAPI_PUBLIC void
pool_recycle(pool_handle_t *pool_handle, void *mark)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *tmp_blk;
    long blen;

    PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return;

    /* Fix up curr_block. There should always be a curr_block. */
    tmp_blk = pool->curr_block;
    PR_ASSERT(tmp_blk != NULL);

    /* Start with curr_block, then scan blocks on used_blocks list */
    for (;;) {

        /* Check if the mark is the block itself (see pool_mark()) */
        if (tmp_blk == mark) {
            pool->curr_block = tmp_blk;
            break;
        }

        /* Look for a block containing the mark */
        if (POOL_PTR_IN_BLOCK(tmp_blk, mark)) {

            /* Reset block start pointer to marked spot */
            if (tmp_blk == pool->curr_block) {
                blen = tmp_blk->start - (char *)mark;
            } else {
                blen = tmp_blk->end - (char *)mark;
            }
            pool->size -= blen;
            PR_ASSERT(pool->size >= 0);
            tmp_blk->start = (char *)mark;
            pool->curr_block = tmp_blk;
            break;
        }

        /* Reset block start pointer to base of block */
        if (tmp_blk == pool->curr_block) {
            /* Count just the allocated length in the current block */
            blen = tmp_blk->start - tmp_blk->data;
        }
        else {
            /* Count the entire size of a used_block */
            blen = tmp_blk->end - tmp_blk->data;
        }
        tmp_blk->start = tmp_blk->data;
        pool->size -= blen;
        PR_ASSERT(pool->size >= 0);

        /*
         * If there are no more used blocks after this one, then set
         * this block up as the current block and return.
         */
        if (pool->used_blocks == NULL) {
            PR_ASSERT(mark == NULL);
            pool->curr_block = tmp_blk;
            break;
        }

        /* Otherwise free this block one way or another */

        /* Add block length to total retained length and check limit */
        if ((pool->free_size + blen) <= pool_config.retain_size &&
            pool->free_num < pool_config.retain_num) {

            /* Retain block on pool free list */
            /*
             * XXX hep - could sort blocks on free list in order of
             * ascending size to get "best fit" allocation in
             * _create_block(), but the default block size is large
             * enough that fit should rarely be an issue.
             */
            tmp_blk->next = pool->free_blocks;
            pool->free_blocks = tmp_blk;
            pool->free_size += blen;
            ++pool->free_num;
        }
        else {
            /* Limit exceeded - free the block */
            _free_block(tmp_blk);
        }

#ifdef PER_POOL_STATISTICS
        ++pool->stats.blkFree;
#endif /* PER_POOL_STATISTICS */

        /* Remove next block from used blocks list */
        tmp_blk = pool->used_blocks;
        pool->used_blocks = tmp_blk->next;
    }
}
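
/*
 * Mark/recycle sketch (illustrative): reclaim per-iteration scratch
 * memory without destroying the pool.
 *
 *   int i;
 *   for (i = 0; i < n; i++) {
 *       void *mark = pool_mark(pool);
 *       char *scratch = (char *)pool_malloc(pool, 4096);
 *       ...use scratch...
 *       pool_recycle(pool, mark);   // scratch is released here
 *   }
 */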

NSAPI_PUBLIC void
pool_destroy(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *tmp_blk;

    PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return;

    if (pool->curr_block)
        _free_block(pool->curr_block);

    while (pool->used_blocks) {
        tmp_blk = pool->used_blocks;
        pool->used_blocks = tmp_blk->next;
        _free_block(tmp_blk);
    }

    while (pool->free_blocks) {
        tmp_blk = pool->free_blocks;
        pool->free_blocks = tmp_blk->next;
        _free_block(tmp_blk);
    }

    {
        pool_t **ppool;

        /* Remove from the known pools list */
        PR_Lock(pool_global_stats.lock);
        for (ppool = &pool_global_stats.poolList;
             *ppool; ppool = &(*ppool)->next) {
            if (*ppool == pool) {
                ++pool_global_stats.destroyCnt;
                *ppool = pool->next;
                break;
            }
        }
        PR_Unlock(pool_global_stats.lock);
    }

#ifdef POOL_ZERO_DEBUG
    /* sizeof(*pool), not sizeof(pool): scribble the struct, not the pointer */
    memset(pool, POOL_ZERO_DEBUG, sizeof(*pool));
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(pool);
}


NSAPI_PUBLIC void *
pool_malloc(pool_handle_t *pool_handle, size_t size)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *curr_block;
    long reqsize, blocksize;
    char *ptr;

    if (pool == NULL)
        return PERM_MALLOC(size);

    reqsize = ALIGN(size);
    if (reqsize == 0) {
        /* Assign a unique address to each 0-byte allocation */
        reqsize = WORD_SIZE;
    }

    curr_block = pool->curr_block;

    /* Does this fit into the current block? (Checked before bumping
     * start so we never form a pointer past the end of the block.) */
    if (curr_block->end - curr_block->start < reqsize) {

        /* Did not fit; time to allocate a new block */

        /* Count unallocated bytes in current block in pool size */
        pool->size += curr_block->end - curr_block->start;
        PR_ASSERT(pool->size >= 0);
#ifdef PER_POOL_STATISTICS
        if (pool->size > pool->stats.maxAlloc) {
            pool->stats.maxAlloc = pool->size;
        }
#endif /* PER_POOL_STATISTICS */

        /* Move current block to used block list */
        curr_block->next = pool->used_blocks;
        pool->used_blocks = curr_block;

        /* Allocate a chunk of memory which is at least block_size bytes */
        blocksize = reqsize;
        if (blocksize < pool_config.block_size)
            blocksize = pool_config.block_size;

        curr_block = _create_block(pool, blocksize);
        pool->curr_block = curr_block;

        if (curr_block == NULL) {
            //ereport(LOG_CATASTROPHE,
            //        XP_GetAdminStr(DBT_poolMallocOutOfMemory_));
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return NULL;
        }
    }

    /* Carve the allocation out of the current block */
    ptr = curr_block->start;
    curr_block->start += reqsize;

    pool->size += reqsize;
    PR_ASSERT(pool->size >= 0);

#ifdef PER_POOL_STATISTICS
    if (pool->size > pool->stats.maxAlloc) {
        pool->stats.maxAlloc = pool->size;
    }
    ++pool->stats.allocCnt;
    pool->stats.thread = PR_GetCurrentThread();
#endif /* PER_POOL_STATISTICS */

    return ptr;
}

NSAPI_PUBLIC void
pool_free(pool_handle_t *pool_handle, void *ptr)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (ptr == NULL)
        return;

    if (pool == NULL) {
        PERM_FREE(ptr);
        return;
    }

    PR_ASSERT(_ptr_in_pool(pool, ptr));

    /*
     * Pooled memory is deliberately not reclaimed here; it is returned
     * en masse by pool_recycle() or pool_destroy().
     */

#ifdef PER_POOL_STATISTICS
    ++pool->stats.freeCnt;
    pool->stats.thread = PR_GetCurrentThread();
#endif /* PER_POOL_STATISTICS */
}

NSAPI_PUBLIC void *
pool_calloc(pool_handle_t *pool_handle, size_t nelem, size_t elsize)
{
    void *ptr;

    if (pool_handle == NULL)
        return PERM_CALLOC(nelem, elsize);

    /* Guard against nelem * elsize overflowing size_t */
    if (elsize != 0 && nelem > (size_t)-1 / elsize)
        return NULL;

    ptr = pool_malloc(pool_handle, elsize * nelem);
    if (ptr)
        memset(ptr, 0, elsize * nelem);
    return ptr;
}

NSAPI_PUBLIC void *
pool_realloc(pool_handle_t *pool_handle, void *ptr, size_t size)
{
    pool_t *pool = (pool_t *)pool_handle;
    void *newptr;
    block_t *block_ptr;
    size_t oldsize;

    if (pool == NULL)
        return PERM_REALLOC(ptr, size);

    if ((newptr = pool_malloc(pool_handle, size)) == NULL)
        return NULL;

    /* With our structure we don't know exactly where the end
     * of the original allocation is. But we do know an upper bound
     * which is a valid ptr. Search the outstanding blocks
     * for the block which contains this ptr, and copy...
     */
    if (!(block_ptr = _ptr_in_pool(pool, ptr))) {
        /* User is trying to realloc non-pool-malloc'd space! */
        return newptr;
    }

    oldsize = block_ptr->end - (char *)ptr;
    if (oldsize > size)
        oldsize = size;
    memmove((char *)newptr, (char *)ptr, oldsize);

    return newptr;
}
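
/*
 * Note on pool_realloc() (follows from the code above): the old
 * allocation is not reclaimed, so repeated growth costs pool space
 * until the next pool_recycle()/pool_destroy(). Sketch (illustrative):
 *
 *   size_t cap = 64;
 *   char *vec = (char *)pool_malloc(pool, cap);
 *   ...
 *   vec = (char *)pool_realloc(pool, vec, cap * 2);  // old 64 bytes stay
 *                                                    // in the pool
 */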

NSAPI_PUBLIC char *
pool_strdup(pool_handle_t *pool_handle, const char *orig_str)
{
    char *new_str;
    size_t len = strlen(orig_str);

    if (pool_handle == NULL)
        return PERM_STRDUP(orig_str);

    new_str = (char *)pool_malloc(pool_handle, len + 1);

    if (new_str)
        memcpy(new_str, orig_str, len + 1);

    return new_str;
}

NSAPI_PUBLIC long
pool_space(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return 0;

    return pool->size;
}

NSAPI_PUBLIC int pool_enabled()
{
    /* Pooling is on only if the calling thread has a pool installed
     * under the thread malloc key (see systhr.h) */
    if (getThreadMallocKey() == -1)
        return 0;

    if (!systhread_getdata(getThreadMallocKey()))
        return 0;

    return 1;
}
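
/*
 * Sketch of the per-thread pattern pool_enabled() supports
 * (illustrative; assumes systhread_setdata() from systhr.h and a key
 * allocated by the server's memory layer):
 *
 *   systhread_setdata(getThreadMallocKey(), (void *)pool);
 *   if (pool_enabled()) {
 *       ... pooled allocation is active on this thread ...
 *   }
 */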

#ifdef DEBUG
NSAPI_PUBLIC void INTpool_assert(pool_handle_t *pool_handle, const void *ptr)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (pool == NULL)
        return;

    PR_ASSERT(_ptr_in_pool(pool, ptr));
}
#endif

NSAPI_PUBLIC pool_config_t *pool_getConfig(void)
{
    return &pool_config;
}

#ifdef POOL_GLOBAL_STATISTICS
NSAPI_PUBLIC pool_global_stats_t *pool_getGlobalStats(void)
{
    return &pool_global_stats;
}
#endif /* POOL_GLOBAL_STATISTICS */

#ifdef PER_POOL_STATISTICS
NSAPI_PUBLIC pool_stats_t *pool_getPoolStats(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (pool == NULL)
        return NULL;

    return &pool->stats;
}
#endif /* PER_POOL_STATISTICS */
