// Path: dlmalloc/dlmalloc.rs

// This is a version of dlmalloc.c ported to Rust. You can find the original
// source at ftp://g.oswego.edu/pub/misc/malloc.c
//
// The original source was written by Doug Lea and released to the public domain
// Shadows the prelude's `debug_assert!` within this module: the allocator's
// (potentially expensive) self-consistency checks only run when the crate is
// built with the "debug" feature *and* debug assertions are enabled.
macro_rules! debug_assert {
    ($($arg:tt)*) => {
        if cfg!(all(feature = "debug", debug_assertions)) {
            assert!($($arg)*);
        }
    };
}
13
// Same as the `debug_assert!` shadow above, but for equality checks.
macro_rules! debug_assert_eq {
    ($($arg:tt)*) => {
        if cfg!(all(feature = "debug", debug_assertions)) {
            assert_eq!($($arg)*);
        }
    };
}
21
22use core::cmp;
23use core::mem;
24use core::ptr;
25
26use crate::Allocator;
27
/// Allocator state — a Rust port of dlmalloc's `malloc_state`.
///
/// NOTE(review): there is no interior synchronization and no `Sync` impl in
/// view; callers presumably must serialize access externally — confirm.
pub struct Dlmalloc<A> {
    // Bitmap: bit `i` is set when small bin `i` is non-empty.
    smallmap: u32,
    // Bitmap: bit `i` is set when tree bin `i` is non-empty.
    treemap: u32,
    // Headers of the doubly-linked small-bin free lists; two pointer slots
    // per bin (see `smallbin_at` usage), hence `(NSMALLBINS + 1) * 2`.
    smallbins: [*mut Chunk; (NSMALLBINS + 1) * 2],
    // Roots of the trees holding large free chunks.
    treebins: [*mut TreeChunk; NTREEBINS],
    // Size of the "designated victim" chunk `dv`; 0 when `dv` is null.
    dvsize: usize,
    // Size of the `top` chunk; 0 when `top` is null.
    topsize: usize,
    // "Designated victim": preferred chunk for servicing small requests
    // (refreshed via `replace_dv` when chunks are split).
    dv: *mut Chunk,
    // Topmost chunk, bordering the end of the most recently used segment.
    top: *mut Chunk,
    // Total bytes currently obtained from the system allocator.
    footprint: usize,
    // High-water mark of `footprint`.
    max_footprint: usize,
    // Head of the embedded singly-linked list of memory segments.
    seg: Segment,
    // Threshold used when deciding whether to trim (initialized from
    // DEFAULT_TRIM_THRESHOLD in `init_top`).
    trim_check: usize,
    // Lowest address ever obtained from the system (used by debug checks).
    least_addr: *mut u8,
    // Countdown of operations before scanning for releasable segments.
    release_checks: usize,
    // Underlying source of memory pages.
    system_allocator: A,
}
45unsafe impl<A: Send> Send for Dlmalloc<A> {}
46
// Number of small-chunk bins and large-chunk tree bins, matching C dlmalloc.
const NSMALLBINS: usize = 32;
const NTREEBINS: usize = 32;
// Small bins are spaced `1 << SMALLBIN_SHIFT` (8) bytes apart.
const SMALLBIN_SHIFT: usize = 3;
// Chunks of at least `1 << TREEBIN_SHIFT` (256) bytes go to the tree bins.
const TREEBIN_SHIFT: usize = 8;

const NSMALLBINS_U32: u32 = NSMALLBINS as u32;
const NTREEBINS_U32: u32 = NTREEBINS as u32;

// Unit in which memory is requested from the system allocator (`sys_alloc`
// rounds requests up to a multiple of this).
// TODO: runtime configurable?
const DEFAULT_GRANULARITY: usize = 64 * 1024;
// Initial value for `trim_check` (set in `init_top`).
const DEFAULT_TRIM_THRESHOLD: usize = 2 * 1024 * 1024;
// Initial value for `release_checks`: operations between scans for
// releasable segments.
const MAX_RELEASE_CHECK_RATE: usize = 4095;
60
/// Header of a chunk, laid out exactly as in C dlmalloc (`malloc_chunk`).
///
/// `prev_foot`/`head` are always present; `prev`/`next` are only meaningful
/// while the chunk is free and linked into a small-bin list.
#[repr(C)]
struct Chunk {
    // Size of the previous chunk when that chunk is free; for mmapped chunks
    // this holds the offset back to the start of the mapping (see
    // `mmap_resize`).
    prev_foot: usize,
    // Chunk size with the low bits used as PINUSE/CINUSE flags.
    head: usize,
    prev: *mut Chunk,
    next: *mut Chunk,
}
68
/// A large free chunk stored in the tree bins: an ordinary `Chunk` header
/// extended with tree links. Only valid while the chunk is free.
#[repr(C)]
struct TreeChunk {
    chunk: Chunk,
    // Left/right children in the size-keyed tree.
    child: [*mut TreeChunk; 2],
    parent: *mut TreeChunk,
    // Index of the tree bin this chunk lives in.
    index: u32,
}
76
/// Descriptor for one contiguous region obtained from the system allocator.
/// Segments form a singly-linked list headed by `Dlmalloc::seg`; records are
/// embedded near the top of their own segment (see `add_segment`).
#[repr(C)]
#[derive(Clone, Copy)]
struct Segment {
    base: *mut u8,
    size: usize,
    next: *mut Segment,
    // Flags returned by the system allocator's `alloc` (queried through
    // `Segment::sys_flags` / `Segment::is_extern`).
    flags: u32,
}
85
/// Rounds `a` up to the next multiple of `alignment`.
///
/// `alignment` must be a power of two; `a` plus the alignment must not
/// overflow.
fn align_up(a: usize, alignment: usize) -> usize {
    debug_assert!(alignment.is_power_of_two());
    let mask = alignment - 1;
    (a + mask) & !mask
}
90
/// Mask of every bit strictly more significant than the lowest set bit of
/// `x` (i.e. all bits at or above the lowest set bit of `x << 1`); yields 0
/// when `x << 1` shifts out to zero.
fn left_bits(x: u32) -> u32 {
    let shifted = x << 1;
    shifted | shifted.wrapping_neg()
}
94
/// Isolates the lowest set bit of `x` (returns 0 for `x == 0`).
///
/// Uses `wrapping_neg` instead of `!x + 1`: the two are identical for every
/// nonzero input, but `!x + 1` overflows — and panics under debug
/// assertions — when `x == 0`, so this form is well-defined for all inputs.
fn least_bit(x: u32) -> u32 {
    x & x.wrapping_neg()
}
98
99fn leftshift_for_tree_index(x: u32) -> u32 {
100    let x = usize::try_from(x).unwrap();
101    if x == NTREEBINS - 1 {
102        0
103    } else {
104        (mem::size_of::<usize>() * 8 - 1 - ((x >> 1) + TREEBIN_SHIFT - 2)) as u32
105    }
106}
107
impl<A> Dlmalloc<A> {
    /// Creates an empty allocator backed by `system_allocator`.
    ///
    /// `const`, so it can initialize a `static`; no memory is requested from
    /// the system until the first allocation.
    pub const fn new(system_allocator: A) -> Dlmalloc<A> {
        Dlmalloc {
            smallmap: 0,
            treemap: 0,
            smallbins: [ptr::null_mut(); (NSMALLBINS + 1) * 2],
            treebins: [ptr::null_mut(); NTREEBINS],
            dvsize: 0,
            topsize: 0,
            dv: ptr::null_mut(),
            top: ptr::null_mut(),
            footprint: 0,
            max_footprint: 0,
            seg: Segment {
                base: ptr::null_mut(),
                size: 0,
                next: ptr::null_mut(),
                flags: 0,
            },
            trim_check: 0,
            least_addr: ptr::null_mut(),
            release_checks: 0,
            system_allocator,
        }
    }

    /// Shared access to the underlying system allocator.
    pub fn allocator(&self) -> &A {
        &self.system_allocator
    }
}
138
139impl<A: Allocator> Dlmalloc<A> {
    /// Alignment guaranteed for every returned allocation: two machine words.
    // TODO: can we get rid of this?
    pub fn malloc_alignment(&self) -> usize {
        mem::size_of::<usize>() * 2
    }
144
    /// Bookkeeping overhead of an ordinary (non-mmapped) chunk: one header
    /// word.
    fn chunk_overhead(&self) -> usize {
        mem::size_of::<usize>()
    }
149
    /// Bookkeeping overhead of a directly-mmapped chunk: two words.
    fn mmap_chunk_overhead(&self) -> usize {
        2 * mem::size_of::<usize>()
    }
153
    /// Smallest chunk size handled by the tree bins (256 bytes).
    fn min_large_size(&self) -> usize {
        1 << TREEBIN_SHIFT
    }
158
    /// Largest chunk size handled by the small bins.
    fn max_small_size(&self) -> usize {
        self.min_large_size() - 1
    }
163
    /// Largest user request guaranteed to land in the small bins once
    /// padding and alignment have been applied.
    fn max_small_request(&self) -> usize {
        self.max_small_size() - (self.malloc_alignment() - 1) - self.chunk_overhead()
    }
168
    /// Smallest legal chunk: the full `Chunk` header rounded up to the
    /// malloc alignment.
    fn min_chunk_size(&self) -> usize {
        align_up(mem::size_of::<Chunk>(), self.malloc_alignment())
    }
173
    /// Requests smaller than this are rounded up to `min_chunk_size` by
    /// `request2size`.
    fn min_request(&self) -> usize {
        self.min_chunk_size() - self.chunk_overhead() - 1
    }
178
    /// Largest request this allocator will attempt: `malloc`/`realloc`
    /// return null immediately for anything at or above this value.
    fn max_request(&self) -> usize {
        // min_sys_alloc_space: the largest `X` such that
        //   pad_request(X - 1)        -- minus 1, because requests of exactly
        //                                `max_request` will not be honored
        //   + self.top_foot_size()
        //   + self.malloc_alignment()
        //   + DEFAULT_GRANULARITY
        // ==
        //   usize::MAX
        let min_sys_alloc_space =
            ((!0 - (DEFAULT_GRANULARITY + self.top_foot_size() + self.malloc_alignment()) + 1)
                & !self.malloc_alignment())
                - self.chunk_overhead()
                + 1;

        // Also cap at `-MIN_CHUNK_SIZE << 2`, mirroring C dlmalloc's
        // MAX_REQUEST definition.
        cmp::min((!self.min_chunk_size() + 1) << 2, min_sys_alloc_space)
    }
197
    /// Converts a user request into a chunk size: add the header overhead
    /// and round up to the malloc alignment.
    fn pad_request(&self, amt: usize) -> usize {
        align_up(amt + self.chunk_overhead(), self.malloc_alignment())
    }
201
    /// Small-bin index for a (padded) chunk size.
    fn small_index(&self, size: usize) -> u32 {
        (size >> SMALLBIN_SHIFT) as u32
    }
205
    /// Inverse of `small_index`: the chunk size stored in small bin `idx`.
    fn small_index2size(&self, idx: u32) -> usize {
        usize::try_from(idx).unwrap() << SMALLBIN_SHIFT
    }
209
    /// Whether a chunk of `s` bytes belongs in the small bins.
    fn is_small(&self, s: usize) -> bool {
        s >> SMALLBIN_SHIFT < NSMALLBINS
    }
213
    /// Whether `a` is aligned to `malloc_alignment`.
    fn is_aligned(&self, a: usize) -> bool {
        a & (self.malloc_alignment() - 1) == 0
    }
217
    /// Bytes to add to `addr` to reach the next `malloc_alignment` boundary.
    fn align_offset(&self, addr: *mut u8) -> usize {
        addr.align_offset(self.malloc_alignment())
    }
221
    /// Same as `align_offset`, but for an address given as a `usize`.
    fn align_offset_usize(&self, addr: usize) -> usize {
        align_up(addr, self.malloc_alignment()) - addr
    }
225
    /// Bytes of overhead reserved at the top of each segment: alignment
    /// slack, an embedded `Segment` record, and a minimal fencepost chunk.
    fn top_foot_size(&self) -> usize {
        self.align_offset_usize(Chunk::mem_offset())
            + self.pad_request(mem::size_of::<Segment>())
            + self.min_chunk_size()
    }
231
    /// Trailing padding (four words) accounted for at the end of
    /// directly-mmapped chunks.
    fn mmap_foot_pad(&self) -> usize {
        4 * mem::size_of::<usize>()
    }
235
    /// First chunk position at or after `ptr` whose *user memory* (not the
    /// header itself) is `malloc_alignment`-aligned.
    fn align_as_chunk(&self, ptr: *mut u8) -> *mut Chunk {
        unsafe {
            // Align the would-be payload address, then shift the chunk by
            // the same amount.
            let chunk = Chunk::to_mem(ptr.cast());
            ptr.add(self.align_offset(chunk)).cast()
        }
    }
242
    /// Chunk size needed to satisfy a `req`-byte user request; never less
    /// than `min_chunk_size`.
    fn request2size(&self, req: usize) -> usize {
        if req < self.min_request() {
            self.min_chunk_size()
        } else {
            self.pad_request(req)
        }
    }
250
    /// Header overhead of chunk `p`, which depends on whether it was
    /// directly mmapped.
    unsafe fn overhead_for(&self, p: *mut Chunk) -> usize {
        if Chunk::mmapped(p) {
            self.mmap_chunk_overhead()
        } else {
            self.chunk_overhead()
        }
    }
258
    /// Whether `calloc` must zero the memory at `ptr`: pages that came
    /// straight from an allocator that hands out zeroed memory are already
    /// clear when the chunk is mmapped.
    pub unsafe fn calloc_must_clear(&self, ptr: *mut u8) -> bool {
        !self.system_allocator.allocates_zeros() || !Chunk::mmapped(Chunk::from_mem(ptr))
    }
262
    /// Allocates `size` bytes, returning null on failure.
    ///
    /// Search order: exact/adjacent small bin, next-larger small bin, tree
    /// bins (for small requests only when the designated victim `dv` can't
    /// serve), then `dv`, then the `top` chunk, and finally fresh system
    /// memory via `sys_alloc`.
    pub unsafe fn malloc(&mut self, size: usize) -> *mut u8 {
        self.check_malloc_state();

        let nb;
        if size <= self.max_small_request() {
            nb = self.request2size(size);
            let mut idx = self.small_index(nb);
            let smallbits = self.smallmap >> idx;

            // Check the bin for `idx` (the lowest bit) but also check the next
            // bin up to use that to satisfy our request, if needed.
            if smallbits & 0b11 != 0 {
                // If the lowest bit, our `idx`, is unset then bump up the
                // index as we'll be using the next bucket up.
                idx += !smallbits & 1;

                let b = self.smallbin_at(idx);
                let p = (*b).prev;
                self.unlink_first_small_chunk(b, p, idx);
                let smallsize = self.small_index2size(idx);
                Chunk::set_inuse_and_pinuse(p, smallsize);
                let ret = Chunk::to_mem(p);
                self.check_malloced_chunk(ret, nb);
                return ret;
            }

            if nb > self.dvsize {
                // If there's some other bin with some memory, then we just use
                // the next smallest bin
                if smallbits != 0 {
                    let leftbits = (smallbits << idx) & left_bits(1 << idx);
                    let leastbit = least_bit(leftbits);
                    let i = leastbit.trailing_zeros();
                    let b = self.smallbin_at(i);
                    let p = (*b).prev;
                    debug_assert_eq!(Chunk::size(p), self.small_index2size(i));
                    self.unlink_first_small_chunk(b, p, i);
                    let smallsize = self.small_index2size(i);
                    let rsize = smallsize - nb;
                    // Split off the remainder unless it's too small to stand
                    // alone. NOTE(review): the `!= 4` mirrors C dlmalloc's
                    // SIZE_T_SIZE special case for 32-bit targets — confirm.
                    if mem::size_of::<usize>() != 4 && rsize < self.min_chunk_size() {
                        Chunk::set_inuse_and_pinuse(p, smallsize);
                    } else {
                        Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
                        let r = Chunk::plus_offset(p, nb);
                        Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
                        // Remainder becomes the new designated victim.
                        self.replace_dv(r, rsize);
                    }
                    let ret = Chunk::to_mem(p);
                    self.check_malloced_chunk(ret, nb);
                    return ret;
                } else if self.treemap != 0 {
                    let mem = self.tmalloc_small(nb);
                    if !mem.is_null() {
                        self.check_malloced_chunk(mem, nb);
                        self.check_malloc_state();
                        return mem;
                    }
                }
            }
        } else if size >= self.max_request() {
            // TODO: translate this to unsupported
            return ptr::null_mut();
        } else {
            // Large request: try the tree bins first.
            nb = self.pad_request(size);
            if self.treemap != 0 {
                let mem = self.tmalloc_large(nb);
                if !mem.is_null() {
                    self.check_malloced_chunk(mem, nb);
                    self.check_malloc_state();
                    return mem;
                }
            }
        }

        // use the `dv` node if we can, splitting it if necessary or otherwise
        // exhausting the entire chunk
        if nb <= self.dvsize {
            let rsize = self.dvsize - nb;
            let p = self.dv;
            if rsize >= self.min_chunk_size() {
                self.dv = Chunk::plus_offset(p, nb);
                self.dvsize = rsize;
                let r = self.dv;
                Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
                Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
            } else {
                let dvs = self.dvsize;
                self.dvsize = 0;
                self.dv = ptr::null_mut();
                Chunk::set_inuse_and_pinuse(p, dvs);
            }
            let ret = Chunk::to_mem(p);
            self.check_malloced_chunk(ret, nb);
            self.check_malloc_state();
            return ret;
        }

        // Split the top node if we can
        if nb < self.topsize {
            self.topsize -= nb;
            let rsize = self.topsize;
            let p = self.top;
            self.top = Chunk::plus_offset(p, nb);
            let r = self.top;
            (*r).head = rsize | PINUSE;
            Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
            self.check_top_chunk(self.top);
            let ret = Chunk::to_mem(p);
            self.check_malloced_chunk(ret, nb);
            self.check_malloc_state();
            return ret;
        }

        // Nothing in the heap can satisfy this; ask the system for more.
        self.sys_alloc(nb)
    }
378
    /// Obtains more memory from the system allocator for an (already padded)
    /// request of `size` bytes, folds it into the segment list, and services
    /// the request from the enlarged `top` chunk. Returns null on failure.
    unsafe fn sys_alloc(&mut self, size: usize) -> *mut u8 {
        self.check_malloc_state();
        // keep in sync with max_request
        let asize = align_up(
            size + self.top_foot_size() + self.malloc_alignment(),
            DEFAULT_GRANULARITY,
        );

        let (tbase, tsize, flags) = self.system_allocator.alloc(asize);
        if tbase.is_null() {
            return tbase;
        }

        self.footprint += tsize;
        self.max_footprint = cmp::max(self.max_footprint, self.footprint);

        if self.top.is_null() {
            // First allocation ever: initialize bins and the first segment.
            if self.least_addr.is_null() || tbase < self.least_addr {
                self.least_addr = tbase;
            }
            self.seg.base = tbase;
            self.seg.size = tsize;
            self.seg.flags = flags;
            self.release_checks = MAX_RELEASE_CHECK_RATE;
            self.init_bins();
            let tsize = tsize - self.top_foot_size();
            self.init_top(tbase.cast(), tsize);
        } else {
            // Look for an existing segment that ends exactly where the new
            // block begins; if it also contains `top`, grow both in place.
            let mut sp: *mut Segment = &mut self.seg;
            while !sp.is_null() && tbase != Segment::top(sp) {
                sp = (*sp).next;
            }
            if !sp.is_null()
                && !Segment::is_extern(sp)
                && Segment::sys_flags(sp) == flags
                && Segment::holds(sp, self.top.cast())
            {
                (*sp).size += tsize;
                let ptr = self.top;
                let size = self.topsize + tsize;
                self.init_top(ptr, size);
            } else {
                self.least_addr = cmp::min(tbase, self.least_addr);
                // Otherwise, see whether the new block ends exactly where an
                // existing segment begins and prepend to it.
                let mut sp: *mut Segment = &mut self.seg;
                while !sp.is_null() && (*sp).base != tbase.add(tsize) {
                    sp = (*sp).next;
                }
                if !sp.is_null() && !Segment::is_extern(sp) && Segment::sys_flags(sp) == flags {
                    let oldbase = (*sp).base;
                    (*sp).base = tbase;
                    (*sp).size += tsize;
                    return self.prepend_alloc(tbase, oldbase, size);
                } else {
                    // Not adjacent to anything we know: track a new segment.
                    self.add_segment(tbase, tsize, flags);
                }
            }
        }

        // Service the original request from the (now larger) top chunk.
        if size < self.topsize {
            self.topsize -= size;
            let rsize = self.topsize;
            let p = self.top;
            self.top = Chunk::plus_offset(p, size);
            let r = self.top;
            (*r).head = rsize | PINUSE;
            Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);
            let ret = Chunk::to_mem(p);
            self.check_top_chunk(self.top);
            self.check_malloced_chunk(ret, size);
            self.check_malloc_state();
            return ret;
        }

        return ptr::null_mut();
    }
458
    /// Resizes the allocation at `oldmem` to at least `bytes` bytes.
    ///
    /// Tries an in-place grow/shrink first; otherwise allocates new memory,
    /// copies the old contents, and frees the old block. Returns null
    /// (leaving `oldmem` intact) if the request is too large or memory is
    /// exhausted.
    pub unsafe fn realloc(&mut self, oldmem: *mut u8, bytes: usize) -> *mut u8 {
        if bytes >= self.max_request() {
            return ptr::null_mut();
        }
        let nb = self.request2size(bytes);
        let oldp = Chunk::from_mem(oldmem);
        let newp = self.try_realloc_chunk(oldp, nb, true);
        if !newp.is_null() {
            self.check_inuse_chunk(newp);
            return Chunk::to_mem(newp);
        }
        // In-place resize failed: malloc + copy + free.
        let ptr = self.malloc(bytes);
        if !ptr.is_null() {
            let oc = Chunk::size(oldp) - self.overhead_for(oldp);
            ptr::copy_nonoverlapping(oldmem, ptr, cmp::min(oc, bytes));
            self.free(oldmem);
        }
        return ptr;
    }
478
    /// Attempts to resize chunk `p` to `nb` bytes without moving the user
    /// data, returning the chunk on success or null when the caller must
    /// fall back to malloc+copy. `can_move` is only consulted for mmapped
    /// chunks, where the system may relocate the mapping.
    unsafe fn try_realloc_chunk(&mut self, p: *mut Chunk, nb: usize, can_move: bool) -> *mut Chunk {
        let oldsize = Chunk::size(p);
        let next = Chunk::plus_offset(p, oldsize);

        if Chunk::mmapped(p) {
            self.mmap_resize(p, nb, can_move)
        } else if oldsize >= nb {
            // Shrinking: split off the tail if it can stand on its own.
            let rsize = oldsize - nb;
            if rsize >= self.min_chunk_size() {
                let r = Chunk::plus_offset(p, nb);
                Chunk::set_inuse(p, nb);
                Chunk::set_inuse(r, rsize);
                self.dispose_chunk(r, rsize);
            }
            p
        } else if next == self.top {
            // extend into top
            if oldsize + self.topsize <= nb {
                return ptr::null_mut();
            }
            let newsize = oldsize + self.topsize;
            let newtopsize = newsize - nb;
            let newtop = Chunk::plus_offset(p, nb);
            Chunk::set_inuse(p, nb);
            (*newtop).head = newtopsize | PINUSE;
            self.top = newtop;
            self.topsize = newtopsize;
            p
        } else if next == self.dv {
            // extend into dv
            let dvs = self.dvsize;
            if oldsize + dvs < nb {
                return ptr::null_mut();
            }
            let dsize = oldsize + dvs - nb;
            if dsize >= self.min_chunk_size() {
                // Keep a remainder as the new (smaller) dv.
                let r = Chunk::plus_offset(p, nb);
                let n = Chunk::plus_offset(r, dsize);
                Chunk::set_inuse(p, nb);
                Chunk::set_size_and_pinuse_of_free_chunk(r, dsize);
                Chunk::clear_pinuse(n);
                self.dvsize = dsize;
                self.dv = r;
            } else {
                // exhaust dv
                let newsize = oldsize + dvs;
                Chunk::set_inuse(p, newsize);
                self.dvsize = 0;
                self.dv = ptr::null_mut();
            }
            return p;
        } else if !Chunk::cinuse(next) {
            // extend into the next free chunk
            let nextsize = Chunk::size(next);
            if oldsize + nextsize < nb {
                return ptr::null_mut();
            }
            let rsize = oldsize + nextsize - nb;
            self.unlink_chunk(next, nextsize);
            if rsize < self.min_chunk_size() {
                // Remainder too small: absorb the whole neighbor.
                let newsize = oldsize + nextsize;
                Chunk::set_inuse(p, newsize);
            } else {
                let r = Chunk::plus_offset(p, nb);
                Chunk::set_inuse(p, nb);
                Chunk::set_inuse(r, rsize);
                self.dispose_chunk(r, rsize);
            }
            p
        } else {
            ptr::null_mut()
        }
    }
552
    /// Resizes a directly-mmapped chunk via the system allocator's `remap`,
    /// returning the (possibly moved) chunk or null when resizing is not
    /// possible.
    unsafe fn mmap_resize(&mut self, oldp: *mut Chunk, nb: usize, can_move: bool) -> *mut Chunk {
        let oldsize = Chunk::size(oldp);
        // Can't shrink mmap regions below a small size
        if self.is_small(nb) {
            return ptr::null_mut();
        }

        // Keep the old chunk if it's big enough but not too big
        if oldsize >= nb + mem::size_of::<usize>() && (oldsize - nb) <= (DEFAULT_GRANULARITY << 1) {
            return oldp;
        }

        // `prev_foot` of an mmapped chunk records the offset back to the
        // start of the mapping.
        let offset = (*oldp).prev_foot;
        let oldmmsize = oldsize + offset + self.mmap_foot_pad();
        let newmmsize =
            self.mmap_align(nb + 6 * mem::size_of::<usize>() + self.malloc_alignment() - 1);
        let ptr = self.system_allocator.remap(
            oldp.cast::<u8>().sub(offset),
            oldmmsize,
            newmmsize,
            can_move,
        );
        if ptr.is_null() {
            return ptr::null_mut();
        }
        // Rebuild the chunk header and trailing fenceposts in the new mapping.
        let newp = ptr.add(offset).cast::<Chunk>();
        let psize = newmmsize - offset - self.mmap_foot_pad();
        (*newp).head = psize;
        (*Chunk::plus_offset(newp, psize)).head = Chunk::fencepost_head();
        (*Chunk::plus_offset(newp, psize + mem::size_of::<usize>())).head = 0;
        self.least_addr = cmp::min(ptr, self.least_addr);
        self.footprint = self.footprint + newmmsize - oldmmsize;
        self.max_footprint = cmp::max(self.max_footprint, self.footprint);
        self.check_mmapped_chunk(newp);
        return newp;
    }
589
    /// Rounds `a` up to a multiple of the system page size.
    fn mmap_align(&self, a: usize) -> usize {
        align_up(a, self.system_allocator.page_size())
    }
593
    /// Allocates `bytes` bytes whose address is a multiple of `alignment`.
    ///
    /// Only call this with power-of-two alignment and alignment >
    /// `self.malloc_alignment()`.
    pub unsafe fn memalign(&mut self, mut alignment: usize, bytes: usize) -> *mut u8 {
        if alignment < self.min_chunk_size() {
            alignment = self.min_chunk_size();
        }
        if bytes >= self.max_request() - alignment {
            return ptr::null_mut();
        }
        // Over-allocate so that an aligned chunk of `nb` bytes must fit
        // somewhere inside the block malloc returns.
        let nb = self.request2size(bytes);
        let req = nb + alignment + self.min_chunk_size() - self.chunk_overhead();
        let mem = self.malloc(req);
        if mem.is_null() {
            return mem;
        }
        let mut p = Chunk::from_mem(mem);
        if mem as usize & (alignment - 1) != 0 {
            // Here we find an aligned spot inside the chunk. Since we need to
            // give back leading space in a chunk of at least `min_chunk_size`,
            // if the first calculation places us at a spot with less than
            // `min_chunk_size` leader we can move to the next aligned spot.
            // we've allocated enough total room so that this is always possible
            // (`!alignment + 1` is `-alignment`, i.e. the alignment mask).
            let br =
                Chunk::from_mem(((mem as usize + alignment - 1) & (!alignment + 1)) as *mut u8);
            let pos = if (br as usize - p as usize) > self.min_chunk_size() {
                br.cast::<u8>()
            } else {
                br.cast::<u8>().add(alignment)
            };
            let newp = pos.cast::<Chunk>();
            let leadsize = pos as usize - p as usize;
            let newsize = Chunk::size(p) - leadsize;

            // for mmapped chunks just adjust the offset
            if Chunk::mmapped(p) {
                (*newp).prev_foot = (*p).prev_foot + leadsize;
                (*newp).head = newsize;
            } else {
                // give back the leader, use the rest
                Chunk::set_inuse(newp, newsize);
                Chunk::set_inuse(p, leadsize);
                self.dispose_chunk(p, leadsize);
            }
            p = newp;
        }

        // give back spare room at the end
        if !Chunk::mmapped(p) {
            let size = Chunk::size(p);
            if size > nb + self.min_chunk_size() {
                let remainder_size = size - nb;
                let remainder = Chunk::plus_offset(p, nb);
                Chunk::set_inuse(p, nb);
                Chunk::set_inuse(remainder, remainder_size);
                self.dispose_chunk(remainder, remainder_size);
            }
        }

        let mem = Chunk::to_mem(p);
        debug_assert!(Chunk::size(p) >= nb);
        debug_assert_eq!(align_up(mem as usize, alignment), mem as usize);
        self.check_inuse_chunk(p);
        return mem;
    }
658
    // consolidate and bin a chunk, differs from exported versions of free
    // mainly in that the chunk need not be marked as inuse
    unsafe fn dispose_chunk(&mut self, mut p: *mut Chunk, mut psize: usize) {
        let next = Chunk::plus_offset(p, psize);
        // Coalesce backwards with the previous chunk when it is free.
        if !Chunk::pinuse(p) {
            let prevsize = (*p).prev_foot;
            if Chunk::mmapped(p) {
                // Directly-mmapped chunk: return the whole mapping to the
                // system rather than binning it.
                psize += prevsize + self.mmap_foot_pad();
                if self
                    .system_allocator
                    .free(p.cast::<u8>().sub(prevsize), psize)
                {
                    self.footprint -= psize;
                }
                return;
            }
            let prev = Chunk::minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if p != self.dv {
                self.unlink_chunk(p, prevsize);
            } else if (*next).head & INUSE == INUSE {
                // Previous chunk is `dv` and there is nothing to coalesce
                // forward: just grow `dv` in place.
                self.dvsize = psize;
                Chunk::set_free_with_pinuse(p, psize, next);
                return;
            }
        }

        if !Chunk::cinuse(next) {
            // consolidate forward
            if next == self.top {
                // Merge into the top chunk.
                self.topsize += psize;
                let tsize = self.topsize;
                self.top = p;
                (*p).head = tsize | PINUSE;
                if p == self.dv {
                    self.dv = ptr::null_mut();
                    self.dvsize = 0;
                }
                return;
            } else if next == self.dv {
                // Merge into the designated victim.
                self.dvsize += psize;
                let dsize = self.dvsize;
                self.dv = p;
                Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);
                return;
            } else {
                // Merge with an ordinary free neighbor.
                let nsize = Chunk::size(next);
                psize += nsize;
                self.unlink_chunk(next, nsize);
                Chunk::set_size_and_pinuse_of_free_chunk(p, psize);
                if p == self.dv {
                    self.dvsize = psize;
                    return;
                }
            }
        } else {
            Chunk::set_free_with_pinuse(p, psize, next);
        }
        self.insert_chunk(p, psize);
    }
720
    /// Installs `ptr` (spanning `size` bytes) as the new `top` chunk:
    /// aligns its user memory, writes its head, and drops a fencepost head
    /// just past it.
    unsafe fn init_top(&mut self, ptr: *mut Chunk, size: usize) {
        let offset = self.align_offset(Chunk::to_mem(ptr));
        let p = Chunk::plus_offset(ptr, offset);
        let size = size - offset;

        self.top = p;
        self.topsize = size;
        (*p).head = size | PINUSE;
        // Fencepost past the end of top.
        (*Chunk::plus_offset(p, size)).head = self.top_foot_size();
        self.trim_check = DEFAULT_TRIM_THRESHOLD;
    }
732
    /// Marks every small bin empty by pointing each bin header at itself
    /// (circular doubly-linked list with only the sentinel).
    unsafe fn init_bins(&mut self) {
        for i in 0..NSMALLBINS_U32 {
            let bin = self.smallbin_at(i);
            (*bin).next = bin;
            (*bin).prev = bin;
        }
    }
740
    /// Allocates `size` bytes from the front of a newly acquired block
    /// (`newbase`) that sits immediately before an existing segment
    /// (`oldbase`), folding the gap between them into `top`, `dv`, or a bin.
    unsafe fn prepend_alloc(&mut self, newbase: *mut u8, oldbase: *mut u8, size: usize) -> *mut u8 {
        let p = self.align_as_chunk(newbase);
        let mut oldfirst = self.align_as_chunk(oldbase);
        let psize = oldfirst as usize - p as usize;
        let q = Chunk::plus_offset(p, size);
        let mut qsize = psize - size;
        Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);

        debug_assert!(oldfirst > q);
        debug_assert!(Chunk::pinuse(oldfirst));
        debug_assert!(qsize >= self.min_chunk_size());

        // consolidate the remainder with the first chunk of the old base
        if oldfirst == self.top {
            self.topsize += qsize;
            let tsize = self.topsize;
            self.top = q;
            (*q).head = tsize | PINUSE;
            self.check_top_chunk(q);
        } else if oldfirst == self.dv {
            self.dvsize += qsize;
            let dsize = self.dvsize;
            self.dv = q;
            Chunk::set_size_and_pinuse_of_free_chunk(q, dsize);
        } else {
            // Absorb `oldfirst` too when it is free, then bin the remainder.
            if !Chunk::inuse(oldfirst) {
                let nsize = Chunk::size(oldfirst);
                self.unlink_chunk(oldfirst, nsize);
                oldfirst = Chunk::plus_offset(oldfirst, nsize);
                qsize += nsize;
            }
            Chunk::set_free_with_pinuse(q, qsize, oldfirst);
            self.insert_chunk(q, qsize);
            self.check_free_chunk(q);
        }

        let ret = Chunk::to_mem(p);
        self.check_malloced_chunk(ret, size);
        self.check_malloc_state();
        return ret;
    }
782
    // Add a segment to hold a new noncontiguous region: move `top` into the
    // new block, carve an embedded `Segment` record (plus trailing
    // fenceposts) out of the tail of the *old* top space, push that record
    // onto the segment list, and free whatever usable part of the old top
    // remains.
    unsafe fn add_segment(&mut self, tbase: *mut u8, tsize: usize, flags: u32) {
        // Determine locations and sizes of segment, fenceposts, and the old top
        let old_top = self.top.cast::<u8>();
        let oldsp = self.segment_holding(old_top);
        let old_end = Segment::top(oldsp);
        let ssize = self.pad_request(mem::size_of::<Segment>());
        let offset = ssize + mem::size_of::<usize>() * 4 + self.malloc_alignment() - 1;
        let rawsp = old_end.sub(offset);
        let offset = self.align_offset(Chunk::to_mem(rawsp.cast()));
        let asp = rawsp.add(offset);
        // If the aligned spot would leave less than a minimal chunk of old
        // top, place the record at the old top itself.
        let csp = if asp < old_top.add(self.min_chunk_size()) {
            old_top
        } else {
            asp
        };
        let sp = csp.cast::<Chunk>();
        let ss = Chunk::to_mem(sp).cast::<Segment>();
        let tnext = Chunk::plus_offset(sp, ssize);
        let mut p = tnext;
        let mut nfences = 0;

        // reset the top to our new space
        let size = tsize - self.top_foot_size();
        self.init_top(tbase.cast(), size);

        // set up our segment record
        debug_assert!(self.is_aligned(ss as usize));
        Chunk::set_size_and_pinuse_of_inuse_chunk(sp, ssize);
        *ss = self.seg; // push our current record
        self.seg.base = tbase;
        self.seg.size = tsize;
        self.seg.flags = flags;
        self.seg.next = ss;

        // insert trailing fences
        loop {
            let nextp = Chunk::plus_offset(p, mem::size_of::<usize>());
            (*p).head = Chunk::fencepost_head();
            nfences += 1;
            if ptr::addr_of!((*nextp).head).cast::<u8>() < old_end {
                p = nextp;
            } else {
                break;
            }
        }
        debug_assert!(nfences >= 2);

        // insert the rest of the old top into a bin as an ordinary free chunk
        if csp != old_top {
            let q = old_top.cast::<Chunk>();
            let psize = csp as usize - old_top as usize;
            let tn = Chunk::plus_offset(q, psize);
            Chunk::set_free_with_pinuse(q, psize, tn);
            self.insert_chunk(q, psize);
        }

        self.check_top_chunk(self.top);
        self.check_malloc_state();
    }
845
    /// Returns the segment whose range contains `ptr`, or null if none does.
    unsafe fn segment_holding(&self, ptr: *mut u8) -> *mut Segment {
        let mut sp = &self.seg as *const Segment as *mut Segment;
        while !sp.is_null() {
            if (*sp).base <= ptr && ptr < Segment::top(sp) {
                return sp;
            }
            sp = (*sp).next;
        }
        ptr::null_mut()
    }
856
    /// Services a *small* request `size` from the tree bins when no small
    /// bin can: finds the smallest tree chunk in the lowest non-empty tree
    /// bin, splits it, and makes any remainder the new designated victim.
    /// Caller must ensure `self.treemap != 0`.
    unsafe fn tmalloc_small(&mut self, size: usize) -> *mut u8 {
        let leastbit = least_bit(self.treemap);
        let i = leastbit.trailing_zeros();
        let mut v = *self.treebin_at(i);
        let mut t = v;
        let mut rsize = Chunk::size(TreeChunk::chunk(t)) - size;

        // Walk down the leftmost path to find the best (smallest) fit.
        loop {
            t = TreeChunk::leftmost_child(t);
            if t.is_null() {
                break;
            }
            let trem = Chunk::size(TreeChunk::chunk(t)) - size;
            if trem < rsize {
                rsize = trem;
                v = t;
            }
        }

        let vc = TreeChunk::chunk(v);
        let r = Chunk::plus_offset(vc, size).cast::<TreeChunk>();
        debug_assert_eq!(Chunk::size(vc), rsize + size);
        self.unlink_large_chunk(v);
        if rsize < self.min_chunk_size() {
            // Remainder too small to split off: hand out the whole chunk.
            Chunk::set_inuse_and_pinuse(vc, rsize + size);
        } else {
            let rc = TreeChunk::chunk(r);
            Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
            Chunk::set_size_and_pinuse_of_free_chunk(rc, rsize);
            self.replace_dv(rc, rsize);
        }
        Chunk::to_mem(vc)
    }
890
    /// Allocates `size` bytes from the treebins, looking for the chunk
    /// with the smallest remainder. Returns null when no tree chunk fits
    /// or when the designated victim would be at least as good a fit, so
    /// the caller falls back to using `dv`.
    unsafe fn tmalloc_large(&mut self, size: usize) -> *mut u8 {
        // Best candidate so far, and its remainder after splitting;
        // `!size + 1` is the wrapping negation, a huge "no fit yet" value.
        let mut v = ptr::null_mut();
        let mut rsize = !size + 1;
        let idx = self.compute_tree_index(size);
        let mut t = *self.treebin_at(idx);
        if !t.is_null() {
            // Traverse the tree for this bin looking for a node with size
            // equal to the `size` above.
            let mut sizebits = size << leftshift_for_tree_index(idx);
            // Keep track of the deepest untaken right subtree
            let mut rst = ptr::null_mut();
            loop {
                let csize = Chunk::size(TreeChunk::chunk(t));
                if csize >= size && csize - size < rsize {
                    v = t;
                    rsize = csize - size;
                    if rsize == 0 {
                        // Exact fit: no point searching further.
                        break;
                    }
                }
                // Steer left/right by the current top bit of `sizebits`.
                let rt = (*t).child[1];
                t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];
                if !rt.is_null() && rt != t {
                    rst = rt;
                }
                if t.is_null() {
                    // Reset `t` to the least subtree holding sizes greater than
                    // the `size` above, breaking out
                    t = rst;
                    break;
                }
                sizebits <<= 1;
            }
        }

        // Set t to the root of the next non-empty treebin
        if t.is_null() && v.is_null() {
            let leftbits = left_bits(1 << idx) & self.treemap;
            if leftbits != 0 {
                let leastbit = least_bit(leftbits);
                let i = leastbit.trailing_zeros();
                t = *self.treebin_at(i);
            }
        }

        // Find the smallest of this tree or subtree
        while !t.is_null() {
            let csize = Chunk::size(TreeChunk::chunk(t));
            if csize >= size && csize - size < rsize {
                rsize = csize - size;
                v = t;
            }
            t = TreeChunk::leftmost_child(t);
        }

        // If dv is a better fit, then return null so malloc will use it
        if v.is_null() || (self.dvsize >= size && !(rsize < self.dvsize - size)) {
            return ptr::null_mut();
        }

        let vc = TreeChunk::chunk(v);
        let r = Chunk::plus_offset(vc, size);
        debug_assert_eq!(Chunk::size(vc), rsize + size);
        self.unlink_large_chunk(v);
        if rsize < self.min_chunk_size() {
            // Remainder too small to split off: give out the whole chunk.
            Chunk::set_inuse_and_pinuse(vc, rsize + size);
        } else {
            // Split off the tail remainder and return it to a bin.
            Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
            Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
            self.insert_chunk(r, rsize);
        }
        Chunk::to_mem(vc)
    }
964
    /// Returns the sentinel "chunk" for smallbin `idx`. Each bin owns two
    /// consecutive slots of `self.smallbins`, and the returned pointer
    /// reinterprets the array at that position as a `Chunk` — the classic
    /// dlmalloc overlay where only the faked chunk's `prev`/`next` fields
    /// land on this bin's usable storage. NOTE(review): the `prev_foot`/
    /// `head` fields of the faked chunk alias neighbouring slots and must
    /// never be read or written through this pointer — confirm callers
    /// only touch `prev`/`next`.
    unsafe fn smallbin_at(&mut self, idx: u32) -> *mut Chunk {
        let idx = usize::try_from(idx * 2).unwrap();
        debug_assert!(idx < self.smallbins.len());
        self.smallbins.as_mut_ptr().add(idx).cast()
    }
970
    /// Returns a pointer to the root slot of treebin `idx`; the slot holds
    /// null when the bin is empty.
    unsafe fn treebin_at(&mut self, idx: u32) -> *mut *mut TreeChunk {
        let idx = usize::try_from(idx).unwrap();
        debug_assert!(idx < self.treebins.len());
        self.treebins.as_mut_ptr().add(idx)
    }
976
977    fn compute_tree_index(&self, size: usize) -> u32 {
978        let x = size >> TREEBIN_SHIFT;
979        if x == 0 {
980            0
981        } else if x > 0xffff {
982            NTREEBINS_U32 - 1
983        } else {
984            let k = mem::size_of_val(&x) * 8 - 1 - (x.leading_zeros() as usize);
985            ((k << 1) + (size >> (k + TREEBIN_SHIFT - 1) & 1)) as u32
986        }
987    }
988
989    unsafe fn unlink_first_small_chunk(&mut self, head: *mut Chunk, next: *mut Chunk, idx: u32) {
990        let ptr = (*next).prev;
991        debug_assert!(next != head);
992        debug_assert!(next != ptr);
993        debug_assert_eq!(Chunk::size(next), self.small_index2size(idx));
994        if head == ptr {
995            self.clear_smallmap(idx);
996        } else {
997            (*ptr).next = head;
998            (*head).prev = ptr;
999        }
1000    }
1001
1002    unsafe fn replace_dv(&mut self, chunk: *mut Chunk, size: usize) {
1003        let dvs = self.dvsize;
1004        debug_assert!(self.is_small(dvs));
1005        if dvs != 0 {
1006            let dv = self.dv;
1007            self.insert_small_chunk(dv, dvs);
1008        }
1009        self.dvsize = size;
1010        self.dv = chunk;
1011    }
1012
1013    unsafe fn insert_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1014        if self.is_small(size) {
1015            self.insert_small_chunk(chunk, size);
1016        } else {
1017            self.insert_large_chunk(chunk.cast(), size);
1018        }
1019    }
1020
1021    unsafe fn insert_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1022        let idx = self.small_index(size);
1023        let head = self.smallbin_at(idx);
1024        let mut f = head;
1025        debug_assert!(size >= self.min_chunk_size());
1026        if !self.smallmap_is_marked(idx) {
1027            self.mark_smallmap(idx);
1028        } else {
1029            f = (*head).prev;
1030        }
1031
1032        (*head).prev = chunk;
1033        (*f).next = chunk;
1034        (*chunk).prev = f;
1035        (*chunk).next = head;
1036    }
1037
    /// Inserts a free chunk into the treebin for `size`: either as a new
    /// tree node, or onto the circular list of an existing node with the
    /// exact same size.
    unsafe fn insert_large_chunk(&mut self, chunk: *mut TreeChunk, size: usize) {
        let idx = self.compute_tree_index(size);
        let h = self.treebin_at(idx);
        (*chunk).index = idx;
        (*chunk).child[0] = ptr::null_mut();
        (*chunk).child[1] = ptr::null_mut();
        let chunkc = TreeChunk::chunk(chunk);
        if !self.treemap_is_marked(idx) {
            // Empty bin: this chunk becomes the root of a fresh tree,
            // forming a one-element ring with itself.
            *h = chunk;
            (*chunk).parent = h.cast(); // TODO: dubious?
            (*chunkc).next = chunkc;
            (*chunkc).prev = chunkc;
            self.mark_treemap(idx);
        } else {
            // Walk down the tree, steering by successive bits of `size`.
            let mut t = *h;
            let mut k = size << leftshift_for_tree_index(idx);
            loop {
                if Chunk::size(TreeChunk::chunk(t)) != size {
                    let c = &mut (*t).child[(k >> mem::size_of::<usize>() * 8 - 1) & 1];
                    k <<= 1;
                    if !c.is_null() {
                        t = *c;
                    } else {
                        // Empty slot: link in as a new leaf node.
                        *c = chunk;
                        (*chunk).parent = t;
                        (*chunkc).next = chunkc;
                        (*chunkc).prev = chunkc;
                        break;
                    }
                } else {
                    // A node of identical size already exists: join its
                    // ring instead of extending the tree. A null parent
                    // marks ring-only (non-tree) members.
                    let tc = TreeChunk::chunk(t);
                    let f = (*tc).prev;
                    (*f).next = chunkc;
                    (*tc).prev = chunkc;
                    (*chunkc).prev = f;
                    (*chunkc).next = tc;
                    (*chunk).parent = ptr::null_mut();
                    break;
                }
            }
        }
    }
1080
1081    unsafe fn smallmap_is_marked(&self, idx: u32) -> bool {
1082        self.smallmap & (1 << idx) != 0
1083    }
1084
1085    unsafe fn mark_smallmap(&mut self, idx: u32) {
1086        self.smallmap |= 1 << idx;
1087    }
1088
1089    unsafe fn clear_smallmap(&mut self, idx: u32) {
1090        self.smallmap &= !(1 << idx);
1091    }
1092
1093    unsafe fn treemap_is_marked(&self, idx: u32) -> bool {
1094        self.treemap & (1 << idx) != 0
1095    }
1096
1097    unsafe fn mark_treemap(&mut self, idx: u32) {
1098        self.treemap |= 1 << idx;
1099    }
1100
1101    unsafe fn clear_treemap(&mut self, idx: u32) {
1102        self.treemap &= !(1 << idx);
1103    }
1104
1105    unsafe fn unlink_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1106        if self.is_small(size) {
1107            self.unlink_small_chunk(chunk, size)
1108        } else {
1109            self.unlink_large_chunk(chunk.cast());
1110        }
1111    }
1112
1113    unsafe fn unlink_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1114        let f = (*chunk).prev;
1115        let b = (*chunk).next;
1116        let idx = self.small_index(size);
1117        debug_assert!(chunk != b);
1118        debug_assert!(chunk != f);
1119        debug_assert_eq!(Chunk::size(chunk), self.small_index2size(idx));
1120        if b == f {
1121            self.clear_smallmap(idx);
1122        } else {
1123            (*f).next = b;
1124            (*b).prev = f;
1125        }
1126    }
1127
    /// Removes `chunk` from its treebin. If other chunks share its exact
    /// size it is simply unlinked from their ring (promoting the next ring
    /// member into the tree); otherwise its tree position is filled by its
    /// rightmost descendant.
    unsafe fn unlink_large_chunk(&mut self, chunk: *mut TreeChunk) {
        let xp = (*chunk).parent;
        let mut r;
        if TreeChunk::next(chunk) != chunk {
            // Same-size ring has other members: splice out of the ring and
            // let the next member (`r`) take our tree position.
            let f = TreeChunk::prev(chunk);
            r = TreeChunk::next(chunk);
            (*f).chunk.next = TreeChunk::chunk(r);
            (*r).chunk.prev = TreeChunk::chunk(f);
        } else {
            // Sole member of its size class: find the rightmost leaf of
            // our subtree to take our place. Note `rp.is_null()` checks
            // the *pointed-to* pointer via auto-deref.
            let mut rp = &mut (*chunk).child[1];
            if rp.is_null() {
                rp = &mut (*chunk).child[0];
            }
            r = *rp;
            if !rp.is_null() {
                loop {
                    let mut cp = &mut (**rp).child[1];
                    if cp.is_null() {
                        cp = &mut (**rp).child[0];
                    }
                    if cp.is_null() {
                        break;
                    }
                    rp = cp;
                }
                r = *rp;
                *rp = ptr::null_mut();
            }
        }

        // A null parent means `chunk` was a ring-only member, never in the
        // tree itself; nothing more to fix up.
        if xp.is_null() {
            return;
        }

        // Replace `chunk` with `r` in its parent (or as the bin root).
        let h = self.treebin_at((*chunk).index);
        if chunk == *h {
            *h = r;
            if r.is_null() {
                self.clear_treemap((*chunk).index);
            }
        } else {
            if (*xp).child[0] == chunk {
                (*xp).child[0] = r;
            } else {
                (*xp).child[1] = r;
            }
        }

        // Hand `chunk`'s children over to the replacement node.
        if !r.is_null() {
            (*r).parent = xp;
            let c0 = (*chunk).child[0];
            if !c0.is_null() {
                (*r).child[0] = c0;
                (*c0).parent = r;
            }
            let c1 = (*chunk).child[1];
            if !c1.is_null() {
                (*r).child[1] = c1;
                (*c1).parent = r;
            }
        }
    }
1190
1191    pub unsafe fn validate_size(&mut self, ptr: *mut u8, size: usize) {
1192        let p = Chunk::from_mem(ptr);
1193        let psize = Chunk::size(p);
1194
1195        let min_overhead = self.overhead_for(p);
1196        assert!(psize >= size + min_overhead);
1197
1198        if !Chunk::mmapped(p) {
1199            let max_overhead =
1200                min_overhead + self.min_chunk_size() * 2 + mem::align_of::<usize>() - 1;
1201
1202            assert!(psize <= size + max_overhead);
1203        }
1204    }
1205
    /// Returns the allocation at `mem` to the allocator, coalescing with
    /// free neighbours, updating `dv`/`top` as needed, and occasionally
    /// trimming or releasing whole segments back to the system allocator.
    pub unsafe fn free(&mut self, mem: *mut u8) {
        self.check_malloc_state();

        let mut p = Chunk::from_mem(mem);
        let mut psize = Chunk::size(p);
        let next = Chunk::plus_offset(p, psize);
        if !Chunk::pinuse(p) {
            // Previous chunk is not in use (or this chunk is mmapped).
            let prevsize = (*p).prev_foot;

            if Chunk::mmapped(p) {
                // Mmapped chunks go straight back to the system allocator;
                // `prev_foot` holds the leading offset of the mapping.
                psize += prevsize + self.mmap_foot_pad();
                if self
                    .system_allocator
                    .free(p.cast::<u8>().sub(prevsize), psize)
                {
                    self.footprint -= psize;
                }
                return;
            }

            // Coalesce backwards with the free previous chunk.
            let prev = Chunk::minus_offset(p, prevsize);
            psize += prevsize;
            p = prev;
            if p != self.dv {
                // Take the previous chunk out of its bin before merging.
                self.unlink_chunk(p, prevsize);
            } else if (*next).head & INUSE == INUSE {
                // Merged into dv and the next chunk is in use: just grow
                // dv in place and finish.
                self.dvsize = psize;
                Chunk::set_free_with_pinuse(p, psize, next);
                return;
            }
        }

        // Consolidate forward if we can
        if !Chunk::cinuse(next) {
            if next == self.top {
                // Merge into the top chunk, which may then be trimmed.
                self.topsize += psize;
                let tsize = self.topsize;
                self.top = p;
                (*p).head = tsize | PINUSE;
                if p == self.dv {
                    self.dv = ptr::null_mut();
                    self.dvsize = 0;
                }
                if self.should_trim(tsize) {
                    self.sys_trim(0);
                }
                return;
            } else if next == self.dv {
                // Merge into the designated victim.
                self.dvsize += psize;
                let dsize = self.dvsize;
                self.dv = p;
                Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);
                return;
            } else {
                // Merge with an ordinary free chunk from a bin.
                let nsize = Chunk::size(next);
                psize += nsize;
                self.unlink_chunk(next, nsize);
                Chunk::set_size_and_pinuse_of_free_chunk(p, psize);
                if p == self.dv {
                    self.dvsize = psize;
                    return;
                }
            }
        } else {
            Chunk::set_free_with_pinuse(p, psize, next);
        }

        // Bin the coalesced chunk; large frees periodically trigger a scan
        // for segments that can be released entirely.
        if self.is_small(psize) {
            self.insert_small_chunk(p, psize);
            self.check_free_chunk(p);
        } else {
            self.insert_large_chunk(p.cast(), psize);
            self.check_free_chunk(p);
            self.release_checks -= 1;
            if self.release_checks == 0 {
                self.release_unused_segments();
            }
        }
    }
1285
1286    fn should_trim(&self, size: usize) -> bool {
1287        size > self.trim_check
1288    }
1289
    /// Attempts to shrink the segment containing the top chunk, keeping at
    /// least `pad` bytes (plus bookkeeping overhead) available in top.
    /// Also releases any fully-unused segments. Returns `true` if any
    /// memory was given back to the system allocator.
    unsafe fn sys_trim(&mut self, mut pad: usize) -> bool {
        let mut released = 0;
        if pad < self.max_request() && !self.top.is_null() {
            pad += self.top_foot_size();
            if self.topsize > pad {
                // Release as many whole granularity units as possible while
                // still keeping `pad` bytes in top.
                let unit = DEFAULT_GRANULARITY;
                let extra = ((self.topsize - pad + unit - 1) / unit - 1) * unit;
                let sp = self.segment_holding(self.top.cast());
                debug_assert!(!sp.is_null());

                if !Segment::is_extern(sp) {
                    if Segment::can_release_part(&self.system_allocator, sp) {
                        // Only shrink segments that contain no other
                        // segment records.
                        if (*sp).size >= extra && !self.has_segment_link(sp) {
                            let newsize = (*sp).size - extra;
                            if self
                                .system_allocator
                                .free_part((*sp).base, (*sp).size, newsize)
                            {
                                released = extra;
                            }
                        }
                    }
                }

                if released != 0 {
                    // Fix up bookkeeping and rebuild the now-smaller top.
                    (*sp).size -= released;
                    self.footprint -= released;
                    let top = self.top;
                    let topsize = self.topsize - released;
                    self.init_top(top, topsize);
                    self.check_top_chunk(self.top);
                }
            }

            released += self.release_unused_segments();

            // Nothing could be released despite a large top: disable the
            // trim trigger until the threshold is reset.
            if released == 0 && self.topsize > self.trim_check {
                self.trim_check = usize::max_value();
            }
        }

        released != 0
    }
1333
1334    unsafe fn has_segment_link(&self, ptr: *mut Segment) -> bool {
1335        let mut sp = &self.seg as *const Segment as *mut Segment;
1336        while !sp.is_null() {
1337            if Segment::holds(ptr, sp.cast()) {
1338                return true;
1339            }
1340            sp = (*sp).next;
1341        }
1342        false
1343    }
1344
1345    /// Unmap and unlink any mapped segments that don't contain used chunks
1346    unsafe fn release_unused_segments(&mut self) -> usize {
1347        let mut released = 0;
1348        let mut nsegs = 0;
1349        let mut pred: *mut Segment = &mut self.seg;
1350        let mut sp = (*pred).next;
1351        while !sp.is_null() {
1352            let base = (*sp).base;
1353            let size = (*sp).size;
1354            let next = (*sp).next;
1355            nsegs += 1;
1356
1357            if Segment::can_release_part(&self.system_allocator, sp) && !Segment::is_extern(sp) {
1358                let p = self.align_as_chunk(base);
1359                let psize = Chunk::size(p);
1360                // We can unmap if the first chunk holds the entire segment and
1361                // isn't pinned.
1362                let chunk_top = p.cast::<u8>().add(psize);
1363                let top = base.add(size - self.top_foot_size());
1364                if !Chunk::inuse(p) && chunk_top >= top {
1365                    let tp = p.cast::<TreeChunk>();
1366                    debug_assert!(Segment::holds(sp, sp.cast()));
1367                    if p == self.dv {
1368                        self.dv = ptr::null_mut();
1369                        self.dvsize = 0;
1370                    } else {
1371                        self.unlink_large_chunk(tp);
1372                    }
1373                    if self.system_allocator.free(base, size) {
1374                        released += size;
1375                        self.footprint -= size;
1376                        // unlink our obsolete record
1377                        sp = pred;
1378                        (*sp).next = next;
1379                    } else {
1380                        // back out if we can't unmap
1381                        self.insert_large_chunk(tp, psize);
1382                    }
1383                }
1384            }
1385            pred = sp;
1386            sp = next;
1387        }
1388        self.release_checks = if nsegs > MAX_RELEASE_CHECK_RATE {
1389            nsegs
1390        } else {
1391            MAX_RELEASE_CHECK_RATE
1392        };
1393        return released;
1394    }
1395
1396    // Sanity checks
1397
1398    unsafe fn check_any_chunk(&self, p: *mut Chunk) {
1399        if !cfg!(all(feature = "debug", debug_assertions)) {
1400            return;
1401        }
1402        debug_assert!(
1403            self.is_aligned(Chunk::to_mem(p) as usize) || (*p).head == Chunk::fencepost_head()
1404        );
1405        debug_assert!(p as *mut u8 >= self.least_addr);
1406    }
1407
    /// Debug-only validation of the top chunk: size bookkeeping matches
    /// `self.topsize`, alignment holds, and the chunk sits flush with the
    /// end of its segment.
    unsafe fn check_top_chunk(&self, p: *mut Chunk) {
        if !cfg!(all(feature = "debug", debug_assertions)) {
            return;
        }
        let sp = self.segment_holding(p.cast());
        let sz = (*p).head & !INUSE;
        debug_assert!(!sp.is_null());
        debug_assert!(
            self.is_aligned(Chunk::to_mem(p) as usize) || (*p).head == Chunk::fencepost_head()
        );
        debug_assert!(p as *mut u8 >= self.least_addr);
        debug_assert_eq!(sz, self.topsize);
        debug_assert!(sz > 0);
        // Top must extend exactly to the end of its segment, minus the
        // reserved footer space.
        debug_assert_eq!(
            sz,
            (*sp).base as usize + (*sp).size - p as usize - self.top_foot_size()
        );
        debug_assert!(Chunk::pinuse(p));
        debug_assert!(!Chunk::pinuse(Chunk::plus_offset(p, sz)));
    }
1428
1429    unsafe fn check_malloced_chunk(&self, mem: *mut u8, s: usize) {
1430        if !cfg!(all(feature = "debug", debug_assertions)) {
1431            return;
1432        }
1433        if mem.is_null() {
1434            return;
1435        }
1436        let p = Chunk::from_mem(mem);
1437        let sz = (*p).head & !INUSE;
1438        self.check_inuse_chunk(p);
1439        debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
1440        debug_assert!(sz >= self.min_chunk_size());
1441        debug_assert!(sz >= s);
1442        debug_assert!(Chunk::mmapped(p) || sz < (s + self.min_chunk_size()));
1443    }
1444
    /// Debug-only validation of an in-use chunk and its relation to its
    /// neighbours.
    unsafe fn check_inuse_chunk(&self, p: *mut Chunk) {
        self.check_any_chunk(p);
        debug_assert!(Chunk::inuse(p));
        debug_assert!(Chunk::pinuse(Chunk::next(p)));
        // Either the previous chunk is in use, or it must link back to us
        // via its recorded size (`prev_foot`).
        debug_assert!(Chunk::mmapped(p) || Chunk::pinuse(p) || Chunk::next(Chunk::prev(p)) == p);
        if Chunk::mmapped(p) {
            self.check_mmapped_chunk(p);
        }
    }
1454
    /// Debug-only validation of an mmapped chunk: page-aligned total
    /// mapping length and intact trailing fencepost markers.
    unsafe fn check_mmapped_chunk(&self, p: *mut Chunk) {
        if !cfg!(all(feature = "debug", debug_assertions)) {
            return;
        }
        let sz = Chunk::size(p);
        // Total mapping = leading offset (stored in prev_foot) + chunk +
        // footer padding.
        let len = sz + (*p).prev_foot + self.mmap_foot_pad();
        debug_assert!(Chunk::mmapped(p));
        debug_assert!(
            self.is_aligned(Chunk::to_mem(p) as usize) || (*p).head == Chunk::fencepost_head()
        );
        debug_assert!(p as *mut u8 >= self.least_addr);
        debug_assert!(!self.is_small(sz));
        debug_assert_eq!(align_up(len, self.system_allocator.page_size()), len);
        debug_assert_eq!((*Chunk::plus_offset(p, sz)).head, Chunk::fencepost_head());
        debug_assert_eq!(
            (*Chunk::plus_offset(p, sz + mem::size_of::<usize>())).head,
            0
        );
    }
1474
    /// Debug-only validation of a free chunk: flag invariants, footer
    /// consistency, and (for binned chunks) intact list links.
    unsafe fn check_free_chunk(&self, p: *mut Chunk) {
        if !cfg!(all(feature = "debug", debug_assertions)) {
            return;
        }
        let sz = Chunk::size(p);
        let next = Chunk::plus_offset(p, sz);
        self.check_any_chunk(p);
        debug_assert!(!Chunk::inuse(p));
        debug_assert!(!Chunk::pinuse(Chunk::next(p)));
        debug_assert!(!Chunk::mmapped(p));
        // `dv` and `top` are free but deliberately never binned, so only
        // ordinary free chunks carry the full invariants below.
        if p != self.dv && p != self.top {
            if sz >= self.min_chunk_size() {
                debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
                debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize));
                debug_assert_eq!((*next).prev_foot, sz);
                debug_assert!(Chunk::pinuse(p));
                debug_assert!(next == self.top || Chunk::inuse(next));
                debug_assert_eq!((*(*p).next).prev, p);
                debug_assert_eq!((*(*p).prev).next, p);
            } else {
                // Sub-minimum free regions match the fencepost word size.
                debug_assert_eq!(sz, mem::size_of::<usize>());
            }
        }
    }
1499
    /// Debug-only whole-allocator consistency check: every small and tree
    /// bin, the `dv` and `top` chunks, and the footprint accounting.
    unsafe fn check_malloc_state(&mut self) {
        if !cfg!(all(feature = "debug", debug_assertions)) {
            return;
        }
        for i in 0..NSMALLBINS_U32 {
            self.check_smallbin(i);
        }
        for i in 0..NTREEBINS_U32 {
            self.check_treebin(i);
        }
        if self.dvsize != 0 {
            // The designated victim must be a valid free chunk that is not
            // also linked into any bin.
            self.check_any_chunk(self.dv);
            debug_assert_eq!(self.dvsize, Chunk::size(self.dv));
            debug_assert!(self.dvsize >= self.min_chunk_size());
            let dv = self.dv;
            debug_assert!(!self.bin_find(dv));
        }
        if !self.top.is_null() {
            // Likewise the top chunk is free but never binned.
            self.check_top_chunk(self.top);
            debug_assert!(self.topsize > 0);
            let top = self.top;
            debug_assert!(!self.bin_find(top));
        }
        let total = self.traverse_and_check();
        debug_assert!(total <= self.footprint);
        debug_assert!(self.footprint <= self.max_footprint);
    }
1527
    /// Debug-only validation of smallbin `idx`: its occupancy bit matches
    /// the list contents, and every member is a well-formed free chunk of
    /// this bin's size class.
    unsafe fn check_smallbin(&mut self, idx: u32) {
        if !cfg!(all(feature = "debug", debug_assertions)) {
            return;
        }
        let b = self.smallbin_at(idx);
        let mut p = (*b).next;
        let empty = self.smallmap & (1 << idx) == 0;
        if p == b {
            debug_assert!(empty)
        }
        if !empty {
            while p != b {
                let size = Chunk::size(p);
                self.check_free_chunk(p);
                debug_assert_eq!(self.small_index(size), idx);
                debug_assert!((*p).next == b || Chunk::size((*p).next) == Chunk::size(p));
                // The following chunk should be in use (adjacent free
                // chunks would have been coalesced) unless it is a
                // fencepost marker.
                let q = Chunk::next(p);
                if (*q).head != Chunk::fencepost_head() {
                    self.check_inuse_chunk(q);
                }
                p = (*p).next;
            }
        }
    }
1552
1553    unsafe fn check_treebin(&mut self, idx: u32) {
1554        if !cfg!(all(feature = "debug", debug_assertions)) {
1555            return;
1556        }
1557        let t = *self.treebin_at(idx);
1558        let empty = self.treemap & (1 << idx) == 0;
1559        if t.is_null() {
1560            debug_assert!(empty);
1561        }
1562        if !empty {
1563            self.check_tree(t);
1564        }
1565    }
1566
    /// Debug-only recursive validation of a treebin subtree rooted at `t`:
    /// index/size-class invariants, same-size ring links, and parent/child
    /// consistency.
    unsafe fn check_tree(&mut self, t: *mut TreeChunk) {
        if !cfg!(all(feature = "debug", debug_assertions)) {
            return;
        }
        let tc = TreeChunk::chunk(t);
        let tindex = (*t).index;
        let tsize = Chunk::size(tc);
        let idx = self.compute_tree_index(tsize);
        debug_assert_eq!(tindex, idx);
        debug_assert!(tsize >= self.min_large_size());
        debug_assert!(tsize >= self.min_size_for_tree_index(idx));
        debug_assert!(idx == NTREEBINS_U32 - 1 || tsize < self.min_size_for_tree_index(idx + 1));

        // Walk the same-size ring; exactly one member (`head`) should be a
        // real tree node, marked by a non-null parent.
        let mut u = t;
        let mut head = ptr::null_mut::<TreeChunk>();
        loop {
            let uc = TreeChunk::chunk(u);
            self.check_any_chunk(uc);
            debug_assert_eq!((*u).index, tindex);
            debug_assert_eq!(Chunk::size(uc), tsize);
            debug_assert!(!Chunk::inuse(uc));
            debug_assert!(!Chunk::pinuse(Chunk::next(uc)));
            debug_assert_eq!((*(*uc).next).prev, uc);
            debug_assert_eq!((*(*uc).prev).next, uc);
            let left = (*u).child[0];
            let right = (*u).child[1];
            if (*u).parent.is_null() {
                // Ring-only members carry no children.
                debug_assert!(left.is_null());
                debug_assert!(right.is_null());
            } else {
                debug_assert!(head.is_null());
                head = u;
                debug_assert!((*u).parent != u);
                // TODO: unsure why this triggers UB in stacked borrows in MIRI
                // (works in tree borrows though)
                #[cfg(not(miri))]
                debug_assert!(
                    (*(*u).parent).child[0] == u
                        || (*(*u).parent).child[1] == u
                        || *((*u).parent as *mut *mut TreeChunk) == u
                );
                if !left.is_null() {
                    debug_assert_eq!((*left).parent, u);
                    debug_assert!(left != u);
                    self.check_tree(left);
                }
                if !right.is_null() {
                    debug_assert_eq!((*right).parent, u);
                    debug_assert!(right != u);
                    self.check_tree(right);
                }
                if !left.is_null() && !right.is_null() {
                    debug_assert!(
                        Chunk::size(TreeChunk::chunk(left)) < Chunk::size(TreeChunk::chunk(right))
                    );
                }
            }

            u = TreeChunk::prev(u);
            if u == t {
                break;
            }
        }
        debug_assert!(!head.is_null());
    }
1632
1633    fn min_size_for_tree_index(&self, idx: u32) -> usize {
1634        let idx = usize::try_from(idx).unwrap();
1635        (1 << ((idx >> 1) + TREEBIN_SHIFT)) | ((idx & 1) << ((idx >> 1) + TREEBIN_SHIFT - 1))
1636    }
1637
    /// Debug helper: returns `true` if `chunk` is currently linked into a
    /// small bin or tree bin. Used by `check_malloc_state` to assert that
    /// `dv` and `top` are never binned.
    unsafe fn bin_find(&mut self, chunk: *mut Chunk) -> bool {
        let size = Chunk::size(chunk);
        if self.is_small(size) {
            // Scan the circular list of the one smallbin this size maps to.
            let sidx = self.small_index(size);
            let b = self.smallbin_at(sidx);
            if !self.smallmap_is_marked(sidx) {
                return false;
            }
            let mut p = b;
            loop {
                if p == chunk {
                    return true;
                }
                p = (*p).prev;
                if p == b {
                    return false;
                }
            }
        } else {
            let tidx = self.compute_tree_index(size);
            if !self.treemap_is_marked(tidx) {
                return false;
            }
            // Descend the tree towards nodes of exactly `size` bytes,
            // steering by successive bits of the size.
            let mut t = *self.treebin_at(tidx);
            let mut sizebits = size << leftshift_for_tree_index(tidx);
            while !t.is_null() && Chunk::size(TreeChunk::chunk(t)) != size {
                t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];
                sizebits <<= 1;
            }
            if t.is_null() {
                return false;
            }
            // Then walk that node's same-size ring.
            let mut u = t;
            let chunk = chunk.cast();
            loop {
                if u == chunk {
                    return true;
                }
                u = TreeChunk::prev(u);
                if u == t {
                    return false;
                }
            }
        }
    }
1683
    /// Heap-wide traversal accounting is not implemented in this port;
    /// always returns 0, so the `total <= footprint` assertion in
    /// `check_malloc_state` is trivially satisfied.
    unsafe fn traverse_and_check(&self) -> usize {
        0
    }
1687
    /// Attempts to return unused memory at the top of the heap to the
    /// system allocator, keeping at least `pad` extra bytes available.
    /// Returns `true` if any memory was released.
    pub unsafe fn trim(&mut self, pad: usize) -> bool {
        self.sys_trim(pad)
    }
1691
    /// Frees every segment owned by this allocator back to the system
    /// allocator, consuming `self`. Returns the total number of bytes
    /// freed. Segments with a null base, extern segments, and segments the
    /// system allocator refuses to free are skipped.
    pub unsafe fn destroy(mut self) -> usize {
        let mut freed = 0;
        let mut sp: *mut Segment = &mut self.seg;
        while !sp.is_null() {
            let base = (*sp).base;
            let size = (*sp).size;
            // Advance to `next` before freeing: non-head segment records
            // are stored inside the memory about to be released.
            let can_free = !base.is_null() && !Segment::is_extern(sp);
            sp = (*sp).next;

            if can_free && self.system_allocator.free(base, size) {
                freed += size;
            }
        }
        freed
    }
1707}
1708
// Chunk header flag bits, stored in the low bits of `Chunk::head`.
// PINUSE: the *previous* contiguous chunk is in use.
const PINUSE: usize = 1 << 0;
// CINUSE: this chunk itself is in use.
const CINUSE: usize = 1 << 1;
// FLAG4: reserved flag bit; masked out of sizes via FLAG_BITS.
const FLAG4: usize = 1 << 2;
const INUSE: usize = PINUSE | CINUSE;
const FLAG_BITS: usize = PINUSE | CINUSE | FLAG4;
1714
1715impl Chunk {
1716    unsafe fn fencepost_head() -> usize {
1717        INUSE | mem::size_of::<usize>()
1718    }
1719
1720    unsafe fn size(me: *mut Chunk) -> usize {
1721        (*me).head & !FLAG_BITS
1722    }
1723
1724    unsafe fn next(me: *mut Chunk) -> *mut Chunk {
1725        me.cast::<u8>().add((*me).head & !FLAG_BITS).cast()
1726    }
1727
1728    unsafe fn prev(me: *mut Chunk) -> *mut Chunk {
1729        me.cast::<u8>().sub((*me).prev_foot).cast()
1730    }
1731
1732    unsafe fn cinuse(me: *mut Chunk) -> bool {
1733        (*me).head & CINUSE != 0
1734    }
1735
1736    unsafe fn pinuse(me: *mut Chunk) -> bool {
1737        (*me).head & PINUSE != 0
1738    }
1739
1740    unsafe fn clear_pinuse(me: *mut Chunk) {
1741        (*me).head &= !PINUSE;
1742    }
1743
1744    unsafe fn inuse(me: *mut Chunk) -> bool {
1745        (*me).head & INUSE != PINUSE
1746    }
1747
1748    unsafe fn mmapped(me: *mut Chunk) -> bool {
1749        (*me).head & INUSE == 0
1750    }
1751
1752    unsafe fn set_inuse(me: *mut Chunk, size: usize) {
1753        (*me).head = ((*me).head & PINUSE) | size | CINUSE;
1754        let next = Chunk::plus_offset(me, size);
1755        (*next).head |= PINUSE;
1756    }
1757
1758    unsafe fn set_inuse_and_pinuse(me: *mut Chunk, size: usize) {
1759        (*me).head = PINUSE | size | CINUSE;
1760        let next = Chunk::plus_offset(me, size);
1761        (*next).head |= PINUSE;
1762    }
1763
1764    unsafe fn set_size_and_pinuse_of_inuse_chunk(me: *mut Chunk, size: usize) {
1765        (*me).head = size | PINUSE | CINUSE;
1766    }
1767
1768    unsafe fn set_size_and_pinuse_of_free_chunk(me: *mut Chunk, size: usize) {
1769        (*me).head = size | PINUSE;
1770        Chunk::set_foot(me, size);
1771    }
1772
1773    unsafe fn set_free_with_pinuse(p: *mut Chunk, size: usize, n: *mut Chunk) {
1774        Chunk::clear_pinuse(n);
1775        Chunk::set_size_and_pinuse_of_free_chunk(p, size);
1776    }
1777
1778    unsafe fn set_foot(me: *mut Chunk, size: usize) {
1779        let next = Chunk::plus_offset(me, size);
1780        (*next).prev_foot = size;
1781    }
1782
1783    unsafe fn plus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
1784        me.cast::<u8>().add(offset).cast()
1785    }
1786
1787    unsafe fn minus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
1788        me.cast::<u8>().sub(offset).cast()
1789    }
1790
1791    unsafe fn to_mem(me: *mut Chunk) -> *mut u8 {
1792        me.cast::<u8>().add(Chunk::mem_offset())
1793    }
1794
1795    fn mem_offset() -> usize {
1796        2 * mem::size_of::<usize>()
1797    }
1798
1799    unsafe fn from_mem(mem: *mut u8) -> *mut Chunk {
1800        mem.sub(2 * mem::size_of::<usize>()).cast()
1801    }
1802}
1803
1804impl TreeChunk {
1805    unsafe fn leftmost_child(me: *mut TreeChunk) -> *mut TreeChunk {
1806        let left = (*me).child[0];
1807        if left.is_null() {
1808            (*me).child[1]
1809        } else {
1810            left
1811        }
1812    }
1813
1814    unsafe fn chunk(me: *mut TreeChunk) -> *mut Chunk {
1815        ptr::addr_of_mut!((*me).chunk)
1816    }
1817
1818    unsafe fn next(me: *mut TreeChunk) -> *mut TreeChunk {
1819        (*TreeChunk::chunk(me)).next.cast()
1820    }
1821
1822    unsafe fn prev(me: *mut TreeChunk) -> *mut TreeChunk {
1823        (*TreeChunk::chunk(me)).prev.cast()
1824    }
1825}
1826
1827const EXTERN: u32 = 1 << 0;
1828
1829impl Segment {
1830    unsafe fn is_extern(seg: *mut Segment) -> bool {
1831        (*seg).flags & EXTERN != 0
1832    }
1833
1834    unsafe fn can_release_part<A: Allocator>(system_allocator: &A, seg: *mut Segment) -> bool {
1835        system_allocator.can_release_part((*seg).flags >> 1)
1836    }
1837
1838    unsafe fn sys_flags(seg: *mut Segment) -> u32 {
1839        (*seg).flags >> 1
1840    }
1841
1842    unsafe fn holds(seg: *mut Segment, addr: *mut u8) -> bool {
1843        (*seg).base <= addr && addr < Segment::top(seg)
1844    }
1845
1846    unsafe fn top(seg: *mut Segment) -> *mut u8 {
1847        (*seg).base.add((*seg).size)
1848    }
1849}
1850
#[cfg(test)]
mod tests {
    use super::*;
    use crate::System;

    // Seed the allocator so at least one free chunk lands in a tree bin,
    // i.e. `treemap` ends up non-zero. The second allocation is deliberately
    // kept live so the freed chunk cannot coalesce into the top chunk.
    unsafe fn setup_treemap<A: Allocator>(a: &mut Dlmalloc<A>) {
        let request = NSMALLBINS * (1 << SMALLBIN_SHIFT);
        assert!(!a.is_small(request));
        let first = a.malloc(request);
        assert!(!first.is_null());
        let second = a.malloc(request);
        assert!(!second.is_null());
        a.free(first);
        assert_ne!(a.treemap, 0);
    }

    #[test]
    // Regression test: with tree bins populated, request a size that used to
    // trigger an integer overflow when computing the tree index.
    fn treemap_alloc_overflow_minimal() {
        let mut a = Dlmalloc::new(System::new());
        unsafe {
            setup_treemap(&mut a);
            let size = (0xc000 << TREEBIN_SHIFT) - a.chunk_overhead() + 1;
            assert!(!a.malloc(size).is_null());
        }
    }

    #[test]
    #[cfg(not(miri))]
    // Allocating the maximum request size with a non-empty treemap must fail
    // cleanly by returning null rather than misbehaving.
    fn treemap_alloc_max() {
        let mut a = Dlmalloc::new(System::new());
        unsafe {
            setup_treemap(&mut a);
            let size = a.max_request() - 1;
            assert!(a.malloc(size).is_null());
        }
    }
}