
/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
 *
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 *
 * THE BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of the  nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Generic pool handling routines.
 *
 * These routines reduce contention on the heap and guard against
 * memory leaks.
 *
 * Thread warning:
 *     This implementation is thread safe. However, simultaneous
 *     mallocs/frees to the same "pool" are not safe. Do not share
 *     pools across multiple threads without providing your own
 *     synchronization.
 *
 * Mike Belshe
 * 11-20-95
 *
 */
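/*
 * Typical lifecycle (illustrative sketch, not part of the original file):
 * a pool is created once, serves many small allocations, and is torn down
 * in a single step, so the individual allocations never need to be freed
 * one by one.
 *
 *     pool_handle_t *pool = pool_create();
 *     if (pool != NULL) {
 *         char *buf  = (char *)pool_malloc(pool, 256);
 *         char *copy = pool_strdup(pool, "example");
 *         // ... use buf and copy ...
 *         pool_destroy(pool); // releases buf, copy and all other pool memory
 *     }
 */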
//#include "netsite.h"
//#include "systems.h"
#include "systhr.h"
#include "pool_pvt.h"
//#include "ereport.h"
//#include "base/session.h"
//#include "frame/req.h"
//#include "frame/http.h"
#include "util.h"
//#include "base/crit.h"

//#include "base/dbtbase.h"


#include <stdlib.h>
#include <string.h>
#include <limits.h>

/* Permanent (non-pooled) allocations go straight to the system allocator */
#define PERM_MALLOC  malloc
#define PERM_FREE    free
#define PERM_REALLOC realloc
#define PERM_CALLOC  calloc
#define PERM_STRDUP  strdup

/* Pool configuration parameters */
static pool_config_t pool_config = POOL_CONFIG_INIT;

/* Pool global statistics */
static pool_global_stats_t pool_global_stats;

/* ucx allocator pool class */
static cx_allocator_class pool_allocator_class = {
    (void *(*)(void *, size_t)) pool_malloc,
    (void *(*)(void *, void *, size_t)) pool_realloc,
    (void *(*)(void *, size_t, size_t)) pool_calloc,
    (void (*)(void *, void *)) pool_free
};

static int
pool_internal_init()
{
    //if (pool_global_stats.lock == NULL) {
    //    pool_global_stats.lock = PR_NewLock(); // TODO: remove
    //}

    if (pool_config.block_size == 0) {
        //ereport(LOG_INFORM, XP_GetAdminStr(DBT_poolInitInternalAllocatorDisabled_));
    }

    return 0;
}

#define POOL_MIN_BLOCKSIZE 128

NSAPI_PUBLIC int
pool_init(pblock *pb, Session *sn, Request *rq)
{
    //char *str_block_size = pblock_findval("block-size", pb);
    //char *str_pool_disable = pblock_findval("disable", pb);
    char *str_block_size = "16384";
    char *str_pool_disable = "false";

    //printf("standard block size: %d\n", pool_config.block_size);

    if (str_block_size != NULL) {
        int64_t value;
        if (!util_strtoint(str_block_size, &value)) {
            log_ereport(LOG_MISCONFIG, "pool-init: param 'block-size' is not an integer");
            return REQ_ABORTED;
        }
        if (value > INT_MAX) {
            log_ereport(LOG_MISCONFIG, "pool-init: block-size is too big");
            return REQ_ABORTED;
        }
        if (value < POOL_MIN_BLOCKSIZE) {
            log_ereport(LOG_MISCONFIG, "pool-init: block-size is too small");
            return REQ_ABORTED;
        }
        pool_config.block_size = value;
    }

    if (str_pool_disable && util_getboolean(str_pool_disable, PR_TRUE)) {
        /* We'll call PERM_MALLOC() on each pool_malloc() call */
        pool_config.block_size = 0;
        pool_config.retain_size = 0;
        pool_config.retain_num = 0;
    }

    pool_internal_init();

    return REQ_PROCEED;
}

CxAllocator* pool_allocator(pool_handle_t *pool) {
    return &((pool_t *)pool)->allocator;
}
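/*
 * The CxAllocator returned by pool_allocator() lets UCX-based code allocate
 * from a pool through the generic allocator interface. Minimal sketch,
 * assuming the UCX cxMalloc()/cxFree() entry points (not part of this file):
 *
 *     pool_handle_t *pool = pool_create();
 *     CxAllocator *a = pool_allocator(pool);
 *     void *mem = cxMalloc(a, 128); // dispatches to pool_malloc(pool, 128)
 *     cxFree(a, mem);               // dispatches to pool_free(pool, mem)
 *     pool_destroy(pool);
 */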
static block_t *
_create_block(pool_t *pool, int size)
{
    block_t *newblock;
    char *newdata;
    block_t **blk_ptr;
    long blen;

    /* Does the pool have any retained blocks on its free list? */
    for (blk_ptr = &pool->free_blocks;
         (newblock = *blk_ptr) != NULL; blk_ptr = &newblock->next) {

        /* Yes, is this block large enough? */
        blen = newblock->end - newblock->data;
        if (blen >= size) {

            /* Yes, take it off the free list */
            *blk_ptr = newblock->next;
            pool->free_size -= blen;
            --pool->free_num;

            /* Give the block to the caller */
            newblock->start = newblock->data;
            goto done;
        }
    }

    newblock = (block_t *)PERM_MALLOC(sizeof(block_t));
    newdata = (char *)PERM_MALLOC(size);
    if (newblock == NULL || (newdata == NULL && size != 0)) {
        //ereport(LOG_CATASTROPHE,
        //        XP_GetAdminStr(DBT_poolCreateBlockOutOfMemory_));
        PERM_FREE(newblock);
        PERM_FREE(newdata);
        //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
        return NULL;
    }
    newblock->data = newdata;
    newblock->start = newblock->data;
    newblock->end = newblock->data + size;
    newblock->next = NULL;
    blen = size;

#ifdef POOL_GLOBAL_STATISTICS
    PR_AtomicIncrement((PRInt32 *)&pool_global_stats.blkAlloc);
#endif /* POOL_GLOBAL_STATISTICS */

done:

#ifdef PER_POOL_STATISTICS
    ++pool->stats.blkAlloc;
#endif /* PER_POOL_STATISTICS */

    return newblock;
}

static void
_free_block(block_t *block)
{
#ifdef POOL_ZERO_DEBUG
    long blen = block->end - block->data;
    memset(block->data, POOL_ZERO_DEBUG, blen);
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(block->data);

#ifdef POOL_ZERO_DEBUG
    /* note: sizeof(*block), not sizeof(block), to scrub the whole struct */
    memset(block, POOL_ZERO_DEBUG, sizeof(*block));
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(block);

#ifdef POOL_GLOBAL_STATISTICS
    PR_AtomicIncrement((PRInt32 *)&pool_global_stats.blkFree);
#endif /* POOL_GLOBAL_STATISTICS */
}

/* ptr_in_pool()
 * Checks to see if the given pointer is in the given pool.
 * If true, returns a ptr to the block_t containing the ptr;
 * otherwise returns NULL
 */
block_t *
_ptr_in_pool(pool_t *pool, const void *ptr)
{
    block_t *block_ptr = NULL;

    /* try to find a block which contains this ptr */

    if (POOL_PTR_IN_BLOCK(pool->curr_block, ptr)) {
        block_ptr = pool->curr_block;
    }
    else {
        for (block_ptr = pool->used_blocks; block_ptr; block_ptr = block_ptr->next) {
            if (POOL_PTR_IN_BLOCK(block_ptr, ptr))
                break;
        }
    }
    return block_ptr;
}
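/*
 * Block layout used throughout this file (descriptive diagram, added for
 * clarity): "data" is the base of a block, "start" is the bump pointer that
 * advances with each allocation, and "end" points one past the last usable
 * byte. POOL_PTR_IN_BLOCK() (from pool_pvt.h) tests whether a pointer falls
 * inside a block's data range.
 *
 *     data                     start                  end
 *      |    allocated bytes      |     free bytes      |
 *      v                         v                     v
 *      +-------------------------+---------------------+
 */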
NSAPI_PUBLIC pool_handle_t *
pool_create()
{
    pool_t *newpool;

    newpool = (pool_t *)PERM_MALLOC(sizeof(pool_t));

    if (newpool) {
        /* Have to initialize now, as pools get created sometimes
         * before pool_init can be called...
         */
        //if (pool_global_stats.lock == NULL) { // TODO: remove
        //    pool_internal_init();
        //}

        newpool->allocator.cl = &pool_allocator_class;
        newpool->allocator.data = newpool;

        newpool->used_blocks = NULL;
        newpool->free_blocks = NULL;
        newpool->free_size = 0;
        newpool->free_num = 0;
        newpool->size = 0;
        newpool->next = NULL;

#ifdef PER_POOL_STATISTICS
        /* Initial per pool statistics */
        memset((void *)(&newpool->stats), 0, sizeof(newpool->stats));
        newpool->stats.thread = PR_GetCurrentThread();
        newpool->stats.created = PR_Now();
#endif /* PER_POOL_STATISTICS */

        /* No need to lock, since pool has not been exposed yet */
        newpool->curr_block = _create_block(newpool, pool_config.block_size);
        if (newpool->curr_block == NULL) {
            //ereport(LOG_CATASTROPHE, XP_GetAdminStr(DBT_poolCreateOutOfMemory_));
            pool_destroy((pool_handle_t *)newpool);
            //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return NULL;
        }

        /* Add to known pools list */

        // NOTICE:
        // known pools list removed

        //PR_Lock(pool_global_stats.lock);
        //newpool->next = pool_global_stats.poolList;
        //pool_global_stats.poolList = newpool;
        //++pool_global_stats.createCnt;
#ifdef PER_POOL_STATISTICS
        newpool->stats.poolId = pool_global_stats.createCnt;
#endif /* PER_POOL_STATISTICS */
        //PR_Unlock(pool_global_stats.lock);

    }
    else {
        //ereport(LOG_CATASTROPHE, XP_GetAdminStr(DBT_poolCreateOutOfMemory_1));
        //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
    }

    return (pool_handle_t *)newpool;
}

/*
 * pool_mark - get mark for subsequent recycle
 *
 * This function returns a value that can be used to free all pool
 * memory which is subsequently allocated, without freeing memory
 * that has already been allocated when pool_mark() is called.
 * The pool_recycle() function is used to free the memory allocated
 * since pool_mark() was called.
 *
 * This function may be called several times before pool_recycle()
 * is called, but some care must be taken not to pass an invalid
 * mark value to pool_recycle(), which would cause all pool memory
 * to be freed. A mark value becomes invalid when pool_recycle() is
 * called with a previously returned mark value.
 */
NSAPI_PUBLIC void *
pool_mark(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    //PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return NULL;

#ifdef PER_POOL_STATISTICS
    pool->stats.thread = PR_GetCurrentThread();
#endif /* PER_POOL_STATISTICS */

    /* Never return end as it points outside the block */
    if (pool->curr_block->start == pool->curr_block->end)
        return pool->curr_block;

    return (void *)(pool->curr_block->start);
}
/*
 * pool_recycle - recycle memory in a pool
 *
 * This function returns all the allocated memory for a pool back to
 * a free list associated with the pool. It is like pool_destroy() in
 * the sense that all data structures previously allocated from the
 * pool are freed, but it keeps the memory associated with the pool,
 * and doesn't actually destroy the pool.
 *
 * The "mark" argument can be a value previously returned by
 * pool_mark(), in which case the pool is returned to the state it
 * was in when pool_mark() was called, or it can be NULL, in which
 * case the pool is completely recycled.
 */
NSAPI_PUBLIC void
pool_recycle(pool_handle_t *pool_handle, void *mark)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *tmp_blk;
    unsigned long blen;

    //PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return;

    /* Fix up curr_block. There should always be a curr_block. */
    tmp_blk = pool->curr_block;
    //PR_ASSERT(tmp_blk != NULL);

    /* Start with curr_block, then scan blocks on used_blocks list */
    for (;;) {

        /* Check if the mark is at the end of this block */
        if (tmp_blk == mark) {
            pool->curr_block = tmp_blk;
            break;
        }

        /* Look for a block containing the mark */
        if (POOL_PTR_IN_BLOCK(tmp_blk, mark)) {

            /* Reset block start pointer to marked spot */
            if (tmp_blk == pool->curr_block) {
                blen = tmp_blk->start - (char *)mark;
            } else {
                blen = tmp_blk->end - (char *)mark;
            }
            pool->size -= blen;
            //PR_ASSERT(pool->size >= 0);
            tmp_blk->start = (char *)mark;
            pool->curr_block = tmp_blk;
            break;
        }

        /* Reset block start pointer to base of block */
        if (tmp_blk == pool->curr_block) {
            /* Count just the allocated length in the current block */
            blen = tmp_blk->start - tmp_blk->data;
        }
        else {
            /* Count the entire size of a used_block */
            blen = tmp_blk->end - tmp_blk->data;
        }
        tmp_blk->start = tmp_blk->data;
        pool->size -= blen;
        //PR_ASSERT(pool->size >= 0);

        /*
         * If there are no more used blocks after this one, then set
         * this block up as the current block and return.
         */
        if (pool->used_blocks == NULL) {
            //PR_ASSERT(mark == NULL);
            pool->curr_block = tmp_blk;
            break;
        }

        /* Otherwise free this block one way or another */

        /* Add block length to total retained length and check limit */
        if ((pool->free_size + blen) <= pool_config.retain_size &&
            pool->free_num < pool_config.retain_num) {

            /* Retain block on pool free list */
            /*
             * XXX hep - could sort blocks on free list in order of
             * ascending size to get "best fit" allocation in
             * _create_block(), but the default block size is large
             * enough that fit should rarely be an issue.
             */
            tmp_blk->next = pool->free_blocks;
            pool->free_blocks = tmp_blk;
            pool->free_size += blen;
            ++pool->free_num;
        }
        else {
            /* Limit exceeded - free block */
            _free_block(tmp_blk);
        }

#ifdef PER_POOL_STATISTICS
        //++pool->stats.blkFree;
#endif /* PER_POOL_STATISTICS */

        /* Remove next block from used blocks list */
        tmp_blk = pool->used_blocks;
        pool->used_blocks = tmp_blk->next;
    }
}
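/*
 * Mark/recycle usage sketch (illustrative only): memory allocated after the
 * mark is released, memory allocated before it survives.
 *
 *     void *mark = pool_mark(pool);
 *     char *tmp = (char *)pool_malloc(pool, 1024); // scratch space
 *     // ... use tmp ...
 *     pool_recycle(pool, mark); // tmp is gone; earlier allocations remain
 *     pool_recycle(pool, NULL); // or: release everything in the pool
 */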
NSAPI_PUBLIC void
pool_destroy(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *tmp_blk;

    //PR_ASSERT(pool != NULL);

    if (pool == NULL)
        return;

    if (pool->curr_block)
        _free_block(pool->curr_block);

    while (pool->used_blocks) {
        tmp_blk = pool->used_blocks;
        pool->used_blocks = tmp_blk->next;
        _free_block(tmp_blk);
    }

    while (pool->free_blocks) {
        tmp_blk = pool->free_blocks;
        pool->free_blocks = tmp_blk->next;
        _free_block(tmp_blk);
    }

    {
        //pool_t **ppool;

        /* Remove from the known pools list */
        // NOTICE: known pools list removed
        /*
        PR_Lock(pool_global_stats.lock);
        for (ppool = &pool_global_stats.poolList;
             *ppool; ppool = &(*ppool)->next) {
            if (*ppool == pool) {
                ++pool_global_stats.destroyCnt;
                *ppool = pool->next;
                break;
            }
        }
        PR_Unlock(pool_global_stats.lock);
        */
    }

#ifdef POOL_ZERO_DEBUG
    /* note: sizeof(*pool), not sizeof(pool), to scrub the whole struct */
    memset(pool, POOL_ZERO_DEBUG, sizeof(*pool));
#endif /* POOL_ZERO_DEBUG */

    PERM_FREE(pool);

    return;
}


NSAPI_PUBLIC void *
pool_malloc(pool_handle_t *pool_handle, size_t size)
{
    pool_t *pool = (pool_t *)pool_handle;
    block_t *curr_block;
    long reqsize, blocksize;
    char *ptr;

    if (pool == NULL)
        return PERM_MALLOC(size);

    reqsize = ALIGN(size);
    if (reqsize == 0) {
        /* Assign a unique address to each 0-byte allocation */
        reqsize = WORD_SIZE;
    }

    curr_block = pool->curr_block;
    ptr = curr_block->start;
    curr_block->start += reqsize;

    /* Does this fit into the last allocated block? */
    if (curr_block->start > curr_block->end) {

        /* Did not fit; time to allocate a new block */

        curr_block->start -= reqsize; /* keep structs intact */

        /* Count unallocated bytes in current block in pool size */
        pool->size += curr_block->end - curr_block->start;
        //PR_ASSERT(pool->size >= 0);
#ifdef PER_POOL_STATISTICS
        if (pool->size > pool->stats.maxAlloc) {
            pool->stats.maxAlloc = pool->size;
        }
#endif /* PER_POOL_STATISTICS */

        /* Move current block to used block list */
        curr_block->next = pool->used_blocks;
        pool->used_blocks = curr_block;

        /* Allocate a chunk of memory which is at least block_size bytes */
        blocksize = reqsize;
        if (blocksize < pool_config.block_size)
            blocksize = pool_config.block_size;

        curr_block = _create_block(pool, blocksize);
        pool->curr_block = curr_block;

        if (curr_block == NULL) {
            //ereport(LOG_CATASTROPHE,
            //        XP_GetAdminStr(DBT_poolMallocOutOfMemory_));
            //PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            return NULL;
        }

        ptr = curr_block->start;
        curr_block->start += reqsize;
    }

    pool->size += reqsize;
    //PR_ASSERT(pool->size >= 0);

#ifdef PER_POOL_STATISTICS
    if (pool->size > pool->stats.maxAlloc) {
        pool->stats.maxAlloc = pool->size;
    }
    ++pool->stats.allocCnt;
    pool->stats.thread = PR_GetCurrentThread();
#endif /* PER_POOL_STATISTICS */

    return ptr;
}
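/*
 * Fast-path arithmetic (worked example added for clarity; ALIGN() and
 * WORD_SIZE come from pool_pvt.h): assuming 8-byte word alignment,
 * pool_malloc(pool, 5) reserves ALIGN(5) == 8 bytes, so "start" advances
 * by 8 and the caller gets the previous value of "start". Only when
 * "start" would move past "end" does the slow path run and a fresh block
 * of at least block_size bytes get allocated.
 */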
NSAPI_PUBLIC void
pool_free(pool_handle_t *pool_handle, void *ptr)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (ptr == NULL)
        return;

    if (pool == NULL) {
        PERM_FREE(ptr);
        return;
    }

    //PR_ASSERT(_ptr_in_pool(pool, ptr));

#ifdef PER_POOL_STATISTICS

    ++pool->stats.freeCnt;
    pool->stats.thread = PR_GetCurrentThread();

#endif /* PER_POOL_STATISTICS */

    return;
}

NSAPI_PUBLIC void *
pool_calloc(pool_handle_t *pool_handle, size_t nelem, size_t elsize)
{
    void *ptr;

    if (pool_handle == NULL)
        return calloc(nelem, elsize);

    ptr = pool_malloc(pool_handle, elsize * nelem);
    if (ptr)
        memset(ptr, 0, elsize * nelem);
    return ptr;
}

NSAPI_PUBLIC void *
pool_realloc(pool_handle_t *pool_handle, void *ptr, size_t size)
{
    pool_t *pool = (pool_t *)pool_handle;
    void *newptr;
    block_t *block_ptr;
    size_t oldsize;

    if (pool == NULL)
        return PERM_REALLOC(ptr, size);

    if ((newptr = pool_malloc(pool_handle, size)) == NULL)
        return NULL;

    /* With our structure we don't know exactly where the end
     * of the original block is. But we do know an upper bound
     * which is a valid ptr. Search the outstanding blocks
     * for the block which contains this ptr, and copy...
     */

    if (!(block_ptr = _ptr_in_pool(pool, ptr))) {
        /* User is trying to realloc nonmalloc'd space! */
        return newptr;
    }

    oldsize = block_ptr->end - (char *)ptr;
    if (oldsize > size)
        oldsize = size;
    memmove((char *)newptr, (char *)ptr, oldsize);

    return newptr;
}
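/*
 * pool_realloc() sketch (illustrative only): the pool does not record the
 * exact size of each allocation, so the copy length is capped by the end of
 * the containing block and by the new size.
 *
 *     char *buf = (char *)pool_malloc(pool, 16);
 *     buf = (char *)pool_realloc(pool, buf, 64); // new 64-byte allocation;
 *                                                // the old 16 bytes are not
 *                                                // freed, just abandoned
 */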
NSAPI_PUBLIC char *
pool_strdup(pool_handle_t *pool_handle, const char *orig_str)
{
    char *new_str;
    size_t len = strlen(orig_str);

    if (pool_handle == NULL)
        return PERM_STRDUP(orig_str);

    new_str = (char *)pool_malloc(pool_handle, len + 1);

    if (new_str)
        memcpy(new_str, orig_str, len + 1);

    return new_str;
}

NSAPI_PUBLIC long
pool_space(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    return pool->size;
}

NSAPI_PUBLIC int pool_enabled()
{
    if (getThreadMallocKey() == -1)
        return 0;

    if (!systhread_getdata(getThreadMallocKey()))
        return 0;

    return 1;
}

#ifdef DEBUG
NSAPI_PUBLIC void INTpool_assert(pool_handle_t *pool_handle, const void *ptr)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (pool == NULL)
        return;

    //PR_ASSERT(_ptr_in_pool(pool, ptr));
}
#endif

NSAPI_PUBLIC pool_config_t *pool_getConfig(void)
{
    return &pool_config;
}

#ifdef POOL_GLOBAL_STATISTICS
NSAPI_PUBLIC pool_global_stats_t *pool_getGlobalStats(void)
{
    return &pool_global_stats;
}
#endif /* POOL_GLOBAL_STATISTICS */

#ifdef PER_POOL_STATISTICS
NSAPI_PUBLIC pool_stats_t *pool_getPoolStats(pool_handle_t *pool_handle)
{
    pool_t *pool = (pool_t *)pool_handle;

    if (pool == NULL)
        return NULL;

    return &pool->stats;
}
#endif /* PER_POOL_STATISTICS */

// new
cxmutstr cx_strdup_pool(pool_handle_t *pool, cxmutstr s) {
    cxmutstr newstring;
    newstring.ptr = (char *)pool_malloc(pool, s.length + 1);
    if (newstring.ptr != NULL) {
        newstring.length = s.length;
        newstring.ptr[newstring.length] = 0;

        memcpy(newstring.ptr, s.ptr, s.length);
    } else {
        /* keep the length consistent when the allocation fails */
        newstring.length = 0;
    }

    return newstring;
}
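/*
 * cx_strdup_pool() usage sketch (illustrative only; cx_mutstr() is assumed
 * to be the UCX helper that wraps a zero-terminated char* in a cxmutstr):
 *
 *     cxmutstr src = cx_mutstr("hello");
 *     cxmutstr dup = cx_strdup_pool(pool, src);
 *     if (dup.ptr != NULL) {
 *         // dup.ptr is a zero-terminated copy living in the pool
 *     }
 */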