author     joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>   2013-11-06 23:03:08 +0000
committer  joseph <joseph@7b3dc134-2b1b-0410-93df-9e9f96275f8d>   2013-11-06 23:03:08 +0000
commit     036dfd44bcc8ed8e97dcd9c763f61d3b59796941 (patch)
tree       27fe47c83800c73d61262ea4e70b211f6753e516 /libc/malloc/malloc.c
parent     fe2ed5aaa408e1ab996a9fe1595a05634208a79c (diff)
Merge changes between r24305 and r24468 from /fsf/trunk.
git-svn-id: svn://svn.eglibc.org/trunk@24469 7b3dc134-2b1b-0410-93df-9e9f96275f8d
Diffstat (limited to 'libc/malloc/malloc.c')
-rw-r--r--  libc/malloc/malloc.c | 51
1 file changed, 23 insertions(+), 28 deletions(-)
diff --git a/libc/malloc/malloc.c b/libc/malloc/malloc.c
index 1a18c3f5f..897c43a39 100644
--- a/libc/malloc/malloc.c
+++ b/libc/malloc/malloc.c
@@ -2253,7 +2253,6 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
mchunkptr remainder; /* remainder from allocation */
unsigned long remainder_size; /* its size */
- unsigned long sum; /* for updating stats */
size_t pagemask = GLRO(dl_pagesize) - 1;
bool tried_mmap = false;
@@ -2325,12 +2324,12 @@ static void* sysmalloc(INTERNAL_SIZE_T nb, mstate av)
/* update statistics */
- if (++mp_.n_mmaps > mp_.max_n_mmaps)
- mp_.max_n_mmaps = mp_.n_mmaps;
+ int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
+ atomic_max (&mp_.max_n_mmaps, new);
- sum = mp_.mmapped_mem += size;
- if (sum > (unsigned long)(mp_.max_mmapped_mem))
- mp_.max_mmapped_mem = sum;
+ unsigned long sum;
+ sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
+ atomic_max (&mp_.max_mmapped_mem, sum);
check_chunk(av, p);
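
Note: atomic_max here is an internal glibc macro (from its include/atomic.h) that raises a stored maximum without taking a lock. A rough C11 equivalent of the compare-and-swap loop it performs might look like the sketch below; stat_max and the use of <stdatomic.h> are illustrative, not glibc's actual implementation.

/* Sketch: a CAS loop with the same effect as atomic_max, written
   against C11 <stdatomic.h>.  stat_max is a hypothetical name.  */
#include <stdatomic.h>

static void
stat_max (atomic_ulong *mem, unsigned long value)
{
  unsigned long old = atomic_load (mem);
  /* Keep retrying until the stored value is already >= VALUE or we
     manage to install VALUE; a failed compare-exchange reloads the
     current value into OLD.  */
  while (old < value
         && !atomic_compare_exchange_weak (mem, &old, value))
    ;
}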
@@ -2780,8 +2779,8 @@ munmap_chunk(mchunkptr p)
return;
}
- mp_.n_mmaps--;
- mp_.mmapped_mem -= total_size;
+ atomic_decrement (&mp_.n_mmaps);
+ atomic_add (&mp_.mmapped_mem, -total_size);
/* If munmap failed the process virtual memory address space is in a
bad shape. Just leave the block hanging around, the process will
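
Note: atomic_decrement and atomic_add are likewise internal glibc macros; passing -total_size to atomic_add is well defined because the unsigned counter wraps modulo 2^N. Expressed with C11 atomics the two updates amount to fetch-and-sub operations, as in this sketch (the struct and function names are hypothetical):

#include <stdatomic.h>
#include <stddef.h>

struct mmap_stats
{
  atomic_int n_mmaps;        /* count of live mmap'd chunks */
  atomic_ulong mmapped_mem;  /* bytes currently mmap'd */
};

static void
unmap_stats (struct mmap_stats *stats, size_t total_size)
{
  atomic_fetch_sub (&stats->n_mmaps, 1);              /* atomic_decrement */
  atomic_fetch_sub (&stats->mmapped_mem, total_size); /* atomic_add of -total_size */
}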
@@ -2822,10 +2821,10 @@ mremap_chunk(mchunkptr p, size_t new_size)
assert((p->prev_size == offset));
set_head(p, (new_size - offset)|IS_MMAPPED);
- mp_.mmapped_mem -= size + offset;
- mp_.mmapped_mem += new_size;
- if ((unsigned long)mp_.mmapped_mem > (unsigned long)mp_.max_mmapped_mem)
- mp_.max_mmapped_mem = mp_.mmapped_mem;
+ INTERNAL_SIZE_T new;
+ new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
+ + new_size - size - offset;
+ atomic_max (&mp_.max_mmapped_mem, new);
return p;
}
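
Note: atomic_exchange_and_add returns the value the counter held before the addition, so the post-update total has to be reconstructed by applying the same delta again; that is why new_size - size - offset appears twice above. A minimal C11 sketch of the pattern (add_and_fetch is a hypothetical helper):

#include <stdatomic.h>

/* Fetch-and-add returns the PRE-add value; re-apply the delta to
   recover the value now stored, without a second atomic load that
   could observe other threads' updates in between.  A negative
   adjustment is passed as its two's-complement unsigned value, and
   modular arithmetic keeps the result correct.  */
static unsigned long
add_and_fetch (atomic_ulong *mem, unsigned long delta)
{
  return atomic_fetch_add (mem, delta) + delta;
}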
@@ -3017,6 +3016,14 @@ __libc_memalign(size_t alignment, size_t bytes)
/* Otherwise, ensure that it is at least a minimum chunk size */
if (alignment < MINSIZE) alignment = MINSIZE;
+ /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
+ power of 2 and will cause overflow in the check below. */
+ if (alignment > SIZE_MAX / 2 + 1)
+ {
+ __set_errno (EINVAL);
+ return 0;
+ }
+
/* Check for overflow. */
if (bytes > SIZE_MAX - alignment - MINSIZE)
{
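
Note: SIZE_MAX / 2 + 1 is the largest power of two a size_t can represent (0x80000000 with a 32-bit size_t), so any larger alignment necessarily has more than one bit set; rejecting it early also keeps SIZE_MAX - alignment - MINSIZE in the following check from wrapping. A small self-contained illustration, not part of the patch:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* One past the largest representable power of two.  */
  size_t align = (SIZE_MAX / 2 + 1) + 1;
  /* A nonzero value is a power of two iff exactly one bit is set.  */
  printf ("power of two: %s\n",
          (align & (align - 1)) == 0 ? "yes" : "no");  /* prints "no" */
  return 0;
}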
@@ -5042,23 +5049,11 @@ malloc_info (int options, FILE *fp)
sizes[i].total = sizes[i].count * sizes[i].to;
}
- mbinptr bin = bin_at (ar_ptr, 1);
- struct malloc_chunk *r = bin->fd;
- if (r != NULL)
- {
- while (r != bin)
- {
- ++sizes[NFASTBINS].count;
- sizes[NFASTBINS].total += r->size;
- sizes[NFASTBINS].from = MIN (sizes[NFASTBINS].from, r->size);
- sizes[NFASTBINS].to = MAX (sizes[NFASTBINS].to, r->size);
- r = r->fd;
- }
- nblocks += sizes[NFASTBINS].count;
- avail += sizes[NFASTBINS].total;
- }
- for (size_t i = 2; i < NBINS; ++i)
+ mbinptr bin;
+ struct malloc_chunk *r;
+
+ for (size_t i = 1; i < NBINS; ++i)
{
bin = bin_at (ar_ptr, i);
r = bin->fd;