/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 *
 * THE BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Generic pool handling routines.
 *
 * These routines reduce contention on the heap and guard against
 * memory leaks.
 *
 * Thread warning:
 *     This implementation is thread safe. However, simultaneous
 *     mallocs/frees to the same "pool" are not safe. Do not share
 *     pools across multiple threads without providing your own
 *     synchronization.
 *
 * Mike Belshe
 * 11-20-95
 *
 */
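/*
 * Example (sketch, not part of the original file): the basic lifecycle
 * of a pool, assuming the entry points defined below.
 *
 *     pool_handle_t *pool = pool_create();
 *     if (pool != NULL) {
 *         char *buf = (char *)pool_malloc(pool, 256);
 *         // ... use buf; pool_free() is effectively a no-op for
 *         // pooled memory ...
 *         pool_destroy(pool);   // releases buf and all other pool memory
 *     }
 */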
//include "netsite.h"
//include "systems.h"
#include "systhr.h"
#include "pool_pvt.h"
//include "ereport.h"
//include "base/session.h"
//include "frame/req.h"
//include "frame/http.h"
#include "util.h"
//include "base/crit.h"

//include "base/dbtbase.h"


#include <stdlib.h>
#include <string.h>
//define PERM_MALLOC malloc
//define PERM_FREE free
//define PERM_REALLOC realloc
//define PERM_CALLOC calloc
//define PERM_STRDUP strdup

/* Pool configuration parameters */
static pool_config_t pool_config = POOL_CONFIG_INIT;

/* Pool global statistics */
static pool_global_stats_t pool_global_stats;

static int
pool_internal_init()
{
    //if (pool_global_stats.lock == NULL) {
    //    pool_global_stats.lock = PR_NewLock(); // TODO: remove
    //}

    if (pool_config.block_size == 0) {
        //ereport(LOG_INFORM, XP_GetAdminStr(DBT_poolInitInternalAllocatorDisabled_));
    }

    return 0;
}

NSAPI_PUBLIC int
pool_init(pblock *pb, Session *sn, Request *rq)
{
    //char *str_block_size = pblock_findval("block-size", pb);
    //char *str_pool_disable = pblock_findval("disable", pb);
    char *str_block_size = "16384";
    char *str_pool_disable = "false";
    int n;

    //printf("standard block size: %d\n", pool_config.block_size);

    if (str_block_size != NULL) {
        n = atoi(str_block_size);
        if (n > 0)
            pool_config.block_size = n;
    }

    if (str_pool_disable && util_getboolean(str_pool_disable, PR_TRUE)) {
        /* We'll call PERM_MALLOC() on each pool_malloc() call */
        pool_config.block_size = 0;
        pool_config.retain_size = 0;
        pool_config.retain_num = 0;
    }

    pool_internal_init();

    return REQ_PROCEED;
}

static block_t *
_create_block(pool_t *pool, int size)
{
    block_t *newblock;
    char *newdata;
    block_t **blk_ptr;
    long blen;

    /* Does the pool have any retained blocks on its free list? */
    for (blk_ptr = &pool->free_blocks;
         (newblock = *blk_ptr) != NULL; blk_ptr = &newblock->next) {

        /* Yes, is this block large enough? */
        blen = newblock->end - newblock->data;
        if (blen >= size) {

            /* Yes, take it off the free list */
            *blk_ptr = newblock->next;
            pool->free_size -= blen;
            --pool->free_num;

            /* Give the block to the caller */
            newblock->start = newblock->data;
            goto done;
        }
    }

    newblock = (block_t *)PERM_MALLOC(sizeof(block_t));
    newdata = (char *)PERM_MALLOC(size);
    if (newblock == NULL || (newdata == NULL && size != 0)) {
        //ereport(LOG_CATASTROPHE,
        //        XP_GetAdminStr(DBT_poolCreateBlockOutOfMemory_));
        PERM_FREE(newblock);
        PERM_FREE(newdata);
        //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
    }
    newblock->data = newdata;
    newblock->start = newblock->data;
    newblock->end = newblock->data + size;
    newblock->next = NULL;
    blen = size;

#ifdef POOL_GLOBAL_STATISTICS
    PR_AtomicIncrement((PRInt32 *)&pool_global_stats.blkAlloc);
#endif /* POOL_GLOBAL_STATISTICS */

done:

#ifdef PER_POOL_STATISTICS
    ++pool->stats.blkAlloc;
#endif /* PER_POOL_STATISTICS */

    return newblock;
}

static void
_free_block(block_t *block)
{
#ifdef POOL_ZERO_DEBUG
    long blen = block->end - block->data;
    memset(block->data, POOL_ZERO_DEBUG, blen);
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(block->data);

#ifdef POOL_ZERO_DEBUG
    /* sizeof(*block), not sizeof(block): zero the struct, not the pointer */
    memset(block, POOL_ZERO_DEBUG, sizeof(*block));
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(block);

#ifdef POOL_GLOBAL_STATISTICS
    PR_AtomicIncrement((PRInt32 *)&pool_global_stats.blkFree);
#endif /* POOL_GLOBAL_STATISTICS */
}

/* ptr_in_pool()
 * Checks to see if the given pointer is in the given pool.
 * If true, returns a ptr to the block_t containing the ptr;
 * otherwise returns NULL
 */
block_t *
_ptr_in_pool(pool_t *pool, const void *ptr)
{
    block_t *block_ptr = NULL;

    /* try to find a block which contains this ptr */

    if (POOL_PTR_IN_BLOCK(pool->curr_block, ptr)) {
        block_ptr = pool->curr_block;
    }
    else {
        for (block_ptr = pool->used_blocks; block_ptr; block_ptr = block_ptr->next) {
            if (POOL_PTR_IN_BLOCK(block_ptr, ptr))
                break;
        }
    }
    return block_ptr;
}

NSAPI_PUBLIC pool_handle_t *
pool_create()
{
    pool_t *newpool;

    newpool = (pool_t *)PERM_MALLOC(sizeof(pool_t));

    if (newpool) {
        /* Have to initialize now, as pools get created sometimes
         * before pool_init can be called...
         */
        //if (pool_global_stats.lock == NULL) { // TODO: remove
        //    pool_internal_init();
        //}

        newpool->used_blocks = NULL;
        newpool->free_blocks = NULL;
        newpool->free_size = 0;
        newpool->free_num = 0;
        newpool->size = 0;
        newpool->next = NULL;

#ifdef PER_POOL_STATISTICS
        /* Initial per pool statistics */
        memset((void *)(&newpool->stats), 0, sizeof(newpool->stats));
        newpool->stats.thread = PR_GetCurrentThread();
        newpool->stats.created = PR_Now();
#endif /* PER_POOL_STATISTICS */

        /* No need to lock, since pool has not been exposed yet */
        newpool->curr_block = _create_block(newpool, pool_config.block_size);
        if (newpool->curr_block == NULL) {
            //ereport(LOG_CATASTROPHE, XP_GetAdminStr(DBT_poolCreateOutOfMemory_));
            pool_destroy((pool_handle_t *)newpool);
            //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return NULL;
        }

        /* Add to known pools list */

        // NOTICE:
        // known pools list removed

        //PR_Lock(pool_global_stats.lock);
        //newpool->next = pool_global_stats.poolList;
        //pool_global_stats.poolList = newpool;
        //++pool_global_stats.createCnt;
#ifdef PER_POOL_STATISTICS
        newpool->stats.poolId = pool_global_stats.createCnt;
#endif /* PER_POOL_STATISTICS */
        //PR_Unlock(pool_global_stats.lock);

    }
    else {
        //ereport(LOG_CATASTROPHE, XP_GetAdminStr(DBT_poolCreateOutOfMemory_1));
        //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
    }

    return (pool_handle_t *)newpool;
}

/*
 * pool_mark - get mark for subsequent recycle
 *
 * This function returns a value that can be used to free all pool
 * memory which is subsequently allocated, without freeing memory
 * that has already been allocated when pool_mark() is called.
 * The pool_recycle() function is used to free the memory allocated
 * since pool_mark() was called.
 *
 * This function may be called several times before pool_recycle()
 * is called, but some care must be taken not to pass an invalid
 * mark value to pool_recycle(), which would cause all pool memory
 * to be freed. A mark value becomes invalid when pool_recycle() is
 * called with a previously returned mark value.
 */
NSAPI_PUBLIC void *
pool_mark(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    //PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return NULL;

#ifdef PER_POOL_STATISTICS
    pool->stats.thread = PR_GetCurrentThread();
#endif /* PER_POOL_STATISTICS */

    /* Never return end as it points outside the block */
    if (pool->curr_block->start == pool->curr_block->end)
        return pool->curr_block;

    return (void *)(pool->curr_block->start);
}

/*
 * pool_recycle - recycle memory in a pool
 *
 * This function returns all the allocated memory for a pool back to
 * a free list associated with the pool. It is like pool_destroy() in
 * the sense that all data structures previously allocated from the
 * pool are freed, but it keeps the memory associated with the pool,
 * and doesn't actually destroy the pool.
 *
 * The "mark" argument can be a value previously returned by
 * pool_mark(), in which case the pool is returned to the state it
 * was in when pool_mark() was called, or it can be NULL, in which
 * case the pool is completely recycled.
 */
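/*
 * Example (sketch, not part of the original file): a typical
 * mark/recycle cycle, e.g. per-request scratch memory, using the
 * entry points declared in this file.
 *
 *     void *mark = pool_mark(pool);
 *     char *scratch = (char *)pool_malloc(pool, 1024);
 *     // ... use scratch while handling the request ...
 *     pool_recycle(pool, mark);   // frees scratch, keeps earlier data
 *     pool_recycle(pool, NULL);   // or: recycle the whole pool
 */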
NSAPI_PUBLIC void
pool_recycle(pool_handle_t *pool_handle, void *mark)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *tmp_blk;
    unsigned long blen;

    //PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return;

    /* Fix up curr_block. There should always be a curr_block. */
    tmp_blk = pool->curr_block;
    //PR_ASSERT(tmp_blk != NULL);

    /* Start with curr_block, then scan blocks on used_blocks list */
    for (;;) {

        /* Check if the mark is at the end of this block */
        if (tmp_blk == mark) {
            pool->curr_block = tmp_blk;
            break;
        }

        /* Look for a block containing the mark */
        if (POOL_PTR_IN_BLOCK(tmp_blk, mark)) {

            /* Reset block start pointer to marked spot */
            if (tmp_blk == pool->curr_block) {
                blen = tmp_blk->start - (char *)mark;
            } else {
                blen = tmp_blk->end - (char *)mark;
            }
            pool->size -= blen;
            //PR_ASSERT(pool->size >= 0);
            tmp_blk->start = (char *)mark;
            pool->curr_block = tmp_blk;
            break;
        }

        /* Reset block start pointer to base of block */
        if (tmp_blk == pool->curr_block) {
            /* Count just the allocated length in the current block */
            blen = tmp_blk->start - tmp_blk->data;
        }
        else {
            /* Count the entire size of a used_block */
            blen = tmp_blk->end - tmp_blk->data;
        }
        tmp_blk->start = tmp_blk->data;
        pool->size -= blen;
        //PR_ASSERT(pool->size >= 0);

        /*
         * If there are no more used blocks after this one, then set
         * this block up as the current block and return.
         */
        if (pool->used_blocks == NULL) {
            //PR_ASSERT(mark == NULL);
            pool->curr_block = tmp_blk;
            break;
        }

        /* Otherwise free this block one way or another */

        /* Add block length to total retained length and check limit */
        if ((pool->free_size + blen) <= pool_config.retain_size &&
            pool->free_num < pool_config.retain_num) {

            /* Retain block on pool free list */
            /*
             * XXX hep - could sort blocks on free list in order of
             * ascending size to get "best fit" allocation in
             * _create_block(), but the default block size is large
             * enough that fit should rarely be an issue.
             */
            tmp_blk->next = pool->free_blocks;
            pool->free_blocks = tmp_blk;
            pool->free_size += blen;
            ++pool->free_num;
        }
        else {
            /* Limit exceeded - free block */
            _free_block(tmp_blk);
        }

#ifdef PER_POOL_STATISTICS
        //++pool->stats.blkFree;
#endif /* PER_POOL_STATISTICS */

        /* Remove next block from used blocks list */
        tmp_blk = pool->used_blocks;
        pool->used_blocks = tmp_blk->next;
    }
}

NSAPI_PUBLIC void
pool_destroy(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *tmp_blk;

    //PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return;

    if (pool->curr_block)
        _free_block(pool->curr_block);

    while (pool->used_blocks) {
        tmp_blk = pool->used_blocks;
        pool->used_blocks = tmp_blk->next;
        _free_block(tmp_blk);
    }

    while (pool->free_blocks) {
        tmp_blk = pool->free_blocks;
        pool->free_blocks = tmp_blk->next;
        _free_block(tmp_blk);
    }

    {
        //pool_t **ppool;

        /* Remove from the known pools list */
        // NOTICE: known pools list removed
        /*
        PR_Lock(pool_global_stats.lock);
        for (ppool = &pool_global_stats.poolList;
             *ppool; ppool = &(*ppool)->next) {
            if (*ppool == pool) {
                ++pool_global_stats.destroyCnt;
                *ppool = pool->next;
                break;
            }
        }
        PR_Unlock(pool_global_stats.lock);
        */
    }

#ifdef POOL_ZERO_DEBUG
    /* sizeof(*pool), not sizeof(pool): zero the struct, not the pointer */
    memset(pool, POOL_ZERO_DEBUG, sizeof(*pool));
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(pool);

    return;
}

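/*
 * Sketch of the bump-pointer allocation in pool_malloc() below
 * (illustrative only): assuming ALIGN() from pool_pvt.h rounds a size
 * up to a multiple of WORD_SIZE, a request is carved out of curr_block
 * simply by advancing its start pointer; only when the block would
 * overflow is a new block created.
 *
 *     reqsize = ALIGN(13);          // e.g. 16 with 8-byte words
 *     ptr = curr_block->start;      // caller's memory
 *     curr_block->start += reqsize; // past end? -> _create_block()
 */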
NSAPI_PUBLIC void *
pool_malloc(pool_handle_t *pool_handle, size_t size)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *curr_block;
    long reqsize, blocksize;
    char *ptr;

    if (pool == NULL)
        return PERM_MALLOC(size);

    reqsize = ALIGN(size);
    if (reqsize == 0) {
        /* Assign a unique address to each 0-byte allocation */
        reqsize = WORD_SIZE;
    }

    curr_block = pool->curr_block;
    ptr = curr_block->start;
    curr_block->start += reqsize;

    /* does this fit into the last allocated block? */
    if (curr_block->start > curr_block->end) {

        /* Did not fit; time to allocate a new block */

        curr_block->start -= reqsize;  /* keep structs intact */

        /* Count unallocated bytes in current block in pool size */
        pool->size += curr_block->end - curr_block->start;
        //PR_ASSERT(pool->size >= 0);
#ifdef PER_POOL_STATISTICS
        if (pool->size > pool->stats.maxAlloc) {
            pool->stats.maxAlloc = pool->size;
        }
#endif /* PER_POOL_STATISTICS */

        /* Move current block to used block list */
        curr_block->next = pool->used_blocks;
        pool->used_blocks = curr_block;

        /* Allocate a chunk of memory which is at least block_size bytes */
        blocksize = reqsize;
        if (blocksize < pool_config.block_size)
            blocksize = pool_config.block_size;

        curr_block = _create_block(pool, blocksize);
        pool->curr_block = curr_block;

        if (curr_block == NULL) {
            //ereport(LOG_CATASTROPHE,
            //        XP_GetAdminStr(DBT_poolMallocOutOfMemory_));
            //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return NULL;
        }

        ptr = curr_block->start;
        curr_block->start += reqsize;
    }

    pool->size += reqsize;
    //PR_ASSERT(pool->size >= 0);

#ifdef PER_POOL_STATISTICS
    if (pool->size > pool->stats.maxAlloc) {
        pool->stats.maxAlloc = pool->size;
    }
    ++pool->stats.allocCnt;
    pool->stats.thread = PR_GetCurrentThread();
#endif /* PER_POOL_STATISTICS */

    return ptr;
}

NSAPI_PUBLIC void
pool_free(pool_handle_t *pool_handle, void *ptr)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (ptr == NULL)
        return;

    if (pool == NULL) {
        PERM_FREE(ptr);
        return;
    }

    //PR_ASSERT(_ptr_in_pool(pool, ptr));

#ifdef PER_POOL_STATISTICS

    ++pool->stats.freeCnt;
    pool->stats.thread = PR_GetCurrentThread();

#endif /* PER_POOL_STATISTICS */

    return;
}

NSAPI_PUBLIC void *
pool_calloc(pool_handle_t *pool_handle, size_t nelem, size_t elsize)
{
    void *ptr;

    if (pool_handle == NULL)
        return calloc(1, elsize * nelem);

    ptr = pool_malloc(pool_handle, elsize * nelem);
    if (ptr)
        memset(ptr, 0, elsize * nelem);
    return ptr;
}

NSAPI_PUBLIC void *
pool_realloc(pool_handle_t *pool_handle, void *ptr, size_t size)
{
    pool_t *pool = (pool_t *)pool_handle;
    void *newptr;
    block_t *block_ptr;
    size_t oldsize;

    if (pool == NULL)
        return PERM_REALLOC(ptr, size);

    if ((newptr = pool_malloc(pool_handle, size)) == NULL)
        return NULL;

    /* With our structure we don't know exactly where the end
     * of the original block is. But we do know an upper bound
     * which is a valid ptr. Search the outstanding blocks
     * for the block which contains this ptr, and copy...
     */

    if (!(block_ptr = _ptr_in_pool(pool, ptr))) {
        /* User is trying to realloc nonmalloc'd space! */
        return newptr;
    }

    oldsize = block_ptr->end - (char *)ptr;
    if (oldsize > size)
        oldsize = size;
    memmove((char *)newptr, (char *)ptr, oldsize);

    return newptr;
}

NSAPI_PUBLIC char *
pool_strdup(pool_handle_t *pool_handle, const char *orig_str)
{
    char *new_str;
    int len = strlen(orig_str);

    if (pool_handle == NULL)
        return PERM_STRDUP(orig_str);

    new_str = (char *)pool_malloc(pool_handle, len + 1);

    if (new_str)
        memcpy(new_str, orig_str, len + 1);

    return new_str;
}

NSAPI_PUBLIC long
pool_space(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    return pool->size;
}

NSAPI_PUBLIC int pool_enabled()
{
    if (getThreadMallocKey() == -1)
        return 0;

    if (!systhread_getdata(getThreadMallocKey()))
        return 0;

    return 1;
}

#ifdef DEBUG
NSAPI_PUBLIC void INTpool_assert(pool_handle_t *pool_handle, const void *ptr)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (pool == NULL)
        return;

    //PR_ASSERT(_ptr_in_pool(pool, ptr));
}
#endif

NSAPI_PUBLIC pool_config_t *pool_getConfig(void)
{
    return &pool_config;
}

#ifdef POOL_GLOBAL_STATISTICS
NSAPI_PUBLIC pool_global_stats_t *pool_getGlobalStats(void)
{
    return &pool_global_stats;
}
#endif /* POOL_GLOBAL_STATISTICS */

#ifdef PER_POOL_STATISTICS
NSAPI_PUBLIC pool_stats_t *pool_getPoolStats(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (pool == NULL)
        return NULL;

    return &pool->stats;
}
#endif /* PER_POOL_STATISTICS */

// new: pool-backed duplication of a length-delimited string (sstr_t)
sstr_t sstrdup_pool(pool_handle_t *pool, sstr_t s) {
    sstr_t newstring;
    newstring.length = 0;  /* keep the result well-defined if allocation fails */
    newstring.ptr = (char *)pool_malloc(pool, s.length + 1);
    if (newstring.ptr != NULL) {
        newstring.length = s.length;
        newstring.ptr[newstring.length] = 0;

        memcpy(newstring.ptr, s.ptr, s.length);
    }

    return newstring;
}
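/*
 * Example (sketch, not part of the original file): duplicating a
 * length-delimited string into a pool. Assumes a UCX-style sstr_t
 * (ptr/length pair) and a constructor like sstrn(); both names are
 * assumptions here, not confirmed by this file.
 *
 *     sstr_t name = sstrn("index.html", 10);
 *     sstr_t copy = sstrdup_pool(pool, name);
 *     // copy.ptr is NUL-terminated and lives until the pool is
 *     // recycled or destroyed; no explicit pool_free() is needed
 */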