mirror of https://github.com/upx/upx synced 2025-09-28 19:06:07 +08:00

Find and use AT_PAGESZ

modified:   stub/src/powerpc-linux.elf-entry.S
modified:   stub/src/powerpc-linux.elf-fold.S
John Reiser 2017-02-05 13:31:17 -08:00
parent 71f4cd7c85
commit 5a47e8d2c6
2 changed files with 50 additions and 88 deletions
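The substance of the change is in the first hunk of powerpc-linux.elf-entry.S: instead of hard-coding PAGE_SIZE, the entry stub walks past argv and envp on the startup stack (the two zfind calls) and scans the ELF auxiliary vector for AT_PAGESZ, keeping the compile-time constant only as a fallback when the tag is absent. A minimal C sketch of that scan, assuming a pointer to the Elf32_auxv_t array and using DEFAULT_PAGE_SIZE as a stand-in for the stub's PAGE_SIZE:

#include <elf.h>        /* Elf32_auxv_t, AT_PAGESZ, AT_NULL */

#define DEFAULT_PAGE_SIZE 4096UL   /* stand-in for the stub's compile-time PAGE_SIZE */

/* Scan the auxiliary vector; fall back to the default when AT_PAGESZ
   is missing (the "li a1,PAGE_SIZE  // not found; use default" path). */
static unsigned long find_pagesz(const Elf32_auxv_t *av)
{
    for (;; ++av) {
        if (av->a_type == AT_PAGESZ)
            return av->a_un.a_val;
        if (av->a_type == AT_NULL)
            return DEFAULT_PAGE_SIZE;
    }
}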

stub/src/powerpc-linux.elf-entry.S

@@ -146,80 +146,56 @@ die:
li a0,127
li 0,__NR_exit; sc
zfind:
lwz t0,0(a0); addi a0,a0,4
cmpi cr7,t0,0; bne+ cr7,zfind
ret
/* Decompress the rest of this loader, and jump to it. */
unfold:
mflr r30 // &{ b_info={sz_unc, sz_cpr, {4 char}}, folded_loader...}
la a0,32*4(sp)
call zfind // a0= envp
call zfind // a0= &Elf32_auxv
mr r28,a0 // save for folded code
// set a1= actual page size in Elf32_auxv_t
AT_NULL= 0 // <elf.h>
AT_PAGESZ= 6
a_type= 0
a_val= 4
sz_auxv= 2*4
1:
lwz t0,a_type(a0)
lwz a1,a_val(a0); addi a0,a0,sz_auxv
cmpi cr0,t0,AT_PAGESZ; beq- 2f
cmpi cr0,t0,AT_NULL; bne+ 1b
li a1,PAGE_SIZE // not found; use default
2:
mr r27,a1 // save for folded code
li a5,0 // off_t
li a4,-1 // fd; cater to *BSD for MAP_ANON
li a3,MAP_PRIVATE | MAP_ANONYMOUS
li a2,PROT_READ | PROT_WRITE
li a1,PAGE_SIZE
add a1,a1,a1 // allocate twice 4K
lwz a0,sz_cpr(r30)
add a0,a0,r30
addi a0,a0,sz_b_info+PAGE_SIZE-1
rlwinm a0,a0,0,0,31-PAGE_SHIFT // next page boundary after fold
mr 23,a1 // save PAGE_SIZE value
mr 14,a0 // save address being allocated
li 0,__NR_mmap
lwz a0,sz_cpr(r30) // sizeof(folded_loader)
addi a0,a0,sz_b_info
addi t0,a1,-1 // ~page_mask
add a0,a0,r30 // beyond folded_loader
add a0,a0,t0 // + page_size -1
and t0,t0,a0 // fragment above page boundary
sub a0,a0,t0 // next page boundary after folded code
li r0,__NR_mmap
mr t4,a0 // save address being allocated
sc
cmp 0,0,a0,14
beq alloc4
b alloc64
/* try to allocate a 4k page - if failure then allocate 64k page */
alloc4:
/* deallocate stub allocation */
mr a1,23
li 0,__NR_munmap
sc
/* allocate 4k page */
li a5,0 // off_t
li a4,-1 // fd; cater to *BSD for MAP_ANON
li a3,MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS
li a2,PROT_READ | PROT_WRITE | PROT_EXEC
li a1,PAGE_SIZE
lwz a0,sz_cpr(r30)
add a0,a0,r30
addi a0,a0,sz_b_info+PAGE_SIZE-1
rlwinm a0,a0,0,0,31-PAGE_SHIFT // next page boundary after fold
li 0,__NR_mmap
mr 23,a1 // save PAGE_SIZE value
mr 14,a0 // save address being allocated
sc
cmpi 0,0,a0,14 //
bne decomp
b msg_SELinux // Branch if SummaryOverflow (failure)
/* 64k page */
alloc64:
/* deallocate stub allocation */
mr a1,23
li 0,__NR_munmap
sc
/* allocate 64k page */
li a5,0 // off_t
li a4,-1 // fd; cater to *BSD for MAP_ANON
li a3,MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS
li a2,PROT_READ | PROT_WRITE | PROT_EXEC
lis a1,1 // 64K
lwz a0,sz_cpr(r30)
add a0,a0,r30
addi a0,a0,sz_b_info-1
add a0,a0,a1
rlwinm a0,a0,0,0,31-PAGE_SHIFT64 // next page boundary after fold
li 0,__NR_mmap
mr 23,a1 // save PAGE_SIZE value
sc
cmpi 0,0,a0,22 // return code
beq msg_SELinux
decomp:
0:
cmp cr0,t4,a0; bne msg_SELinux
mtctr r31
lwz r0,sz_unc(r30)
lbz meth,b_method(r30)
la ldst,31*4(sp) // &slot on stack
stw r0,31*4(sp) // lzma uses for EOF
stw 23,23*4(sp) // save page shift value
mr dst,a0
mtlr a0 // &continuation
lwz lsrc,sz_cpr(r30)
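Two other details of this hunk are worth spelling out. The folded loader sits behind a small header that the comment at unfold describes as b_info = {sz_unc, sz_cpr, {4 char}}, and the scratch area for the unfolded code goes at the next page boundary past that compressed data, a boundary the new code computes from the discovered page size (the addi/add/and/sub sequence) instead of with a fixed rlwinm by PAGE_SHIFT. A rough C sketch of the layout and the address arithmetic; only b_method appears in this diff, so the remaining byte names and the helper names are illustrative assumptions:

#include <stdint.h>
#include <sys/mman.h>

/* Header in front of the compressed folded loader, per the comment at
   unfold: two 32-bit sizes followed by four bytes. */
struct b_info {
    uint32_t sz_unc;                     /* uncompressed size */
    uint32_t sz_cpr;                     /* compressed size */
    unsigned char b_method, b_b1, b_b2, b_b3;   /* only b_method is used here */
};

/* Next page boundary at or after addr, for a power-of-two page size. */
static uintptr_t next_page(uintptr_t addr, unsigned long pagesz)
{
    return (addr + pagesz - 1) & ~(uintptr_t)(pagesz - 1);
}

/* Reserve anonymous read/write scratch just past the folded loader,
   roughly what the __NR_mmap call sets up in a0..a5; fd = -1 and
   offset 0 cater to *BSD for MAP_ANON. */
static void *reserve_scratch(const struct b_info *bi, size_t len,
                             unsigned long pagesz)
{
    uintptr_t end = (uintptr_t)bi + sizeof *bi + bi->sz_cpr;
    return mmap((void *)next_page(end, pagesz), len,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

The stub then compares the address the kernel returned with the one it asked for and branches to msg_SELinux when they differ.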

stub/src/powerpc-linux.elf-fold.S

@@ -46,32 +46,17 @@ OVERHEAD= 2048
LINKAREA= 6*4 // (sp,pc,cr, xx,yy,zz) save area per calling convention
/* In:
r31= &decompress; also 8+ (char *)&(#bytes which precede &-8(r31)
r28= &Elf32_auxv_t
r27= actual page size
*/
fold_begin:
//// teq r0,r0 // debugging
call L90
#include "arch/powerpc/32/bxx.S"
/* The SysV convention for argument registers after execve is nice:
a0= argc
a1= argv
a2= envp
a3= auxvp
a4= fini
sp= ~0xf & (-2*4 + (void *)&argc) // 0(sp): old_sp, pc
Instead, Linux gives only
sp= &{argc,argv...,0,env...,0,auxv...,strings} // 16-byte aligned?
We must figure out the rest, particularly auxvp.
*/
zfind:
lwz t0,0(a6); addi a6,a6,4
cmpi cr7,t0,0; bne+ cr7,zfind
ret
L90:
la sp,6*4(sp) // trim save area used by decompressor
mflr a5 // &ppcbxx: f_unfilter
lwz a6,0(sp) // sp at execve
call zfind // a6= &env
call zfind // a6= &Elf32_auxv
mr a6,r28 // a6= &Elf32_auxv
lwz a1,-8(r31) // #bytes which precede -8(r31)
rlwinm r30,a5,0,0,31-12 // r30= &this_page
mr a4,r31 // &decompress: f_expand
@@ -80,7 +65,7 @@ L90:
addi r29,r29,-8 // &our_Elf32_Ehdr
addi a1,a1,-(szElf32_Ehdr + 2*szElf32_Phdr)
addi a0,r29,(szElf32_Ehdr + 2*szElf32_Phdr) // &{l_info; p_info; b_info}
lwz a7,23*4(sp) // pagesize
mr a7,r27 // pagesize
addi sp,sp,-(LINKAREA+OVERHEAD)
lwz a3,sz_unc+sz_p_info+sz_l_info(a0) // sz_elf_headers
call upx_main // Out: a0= entry
@@ -111,8 +96,9 @@ SYS_munmap= 91
SYS_mprotect= 125
mmap: .globl mmap
li 0,SYS_mmap
li r0,SYS_mmap
sysgo:
teq r0,r0 // debugging
sc
bns+ no_fail // 'bns': branch if No Summary[Overflow]
li a0,-1 // failure; IGNORE errno
@@ -120,18 +106,18 @@ no_fail:
ret
exit: .globl exit
li 0,SYS_exit; b sysgo
li r0,SYS_exit; b 5f
read: .globl read
li 0,SYS_read; b sysgo
li r0,SYS_read; 5: b 5f
open: .globl open
li 0,SYS_open; b sysgo
li r0,SYS_open; 5: b 5f
close: .globl close
li 0,SYS_close; b sysgo
li r0,SYS_close; 5: b 5f
mprotect: .globl mprotect
li 0,SYS_mprotect; b sysgo
li r0,SYS_mprotect; 5: b 5f
munmap: .globl munmap
li 0,SYS_munmap; b sysgo
li r0,SYS_munmap; 5: b 5f
brk: .globl brk
li 0,SYS_brk; b sysgo
li r0,SYS_brk; 5: b sysgo
/* vim:set ts=8 sw=8 et: */
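The block comment above fold_begin explains why the stubs have to hunt for the auxiliary vector at all: after execve, Linux provides only the initial stack pointer, with argc, argv, envp, and auxv packed above it. A small C sketch of that walk, assuming a 32-bit target; after this commit the fold no longer repeats it, because the entry stub already did it and hands over &Elf32_auxv in r28 and the page size in r27:

#include <elf.h>

/* Linux startup stack, per the comment above fold_begin:
     sp -> { argc, argv[0..argc-1], NULL, envp..., NULL, auxv..., strings }
   Locate the Elf32_auxv_t array by skipping the two NULL-terminated
   pointer arrays, as the zfind loop does four bytes at a time. */
static Elf32_auxv_t *auxv_from_sp(long *sp)
{
    char **p = (char **)(sp + 1);     /* &argv[0], just above argc */
    while (*p) ++p;                   /* skip argv[]  (first zfind)  */
    ++p;                              /* past argv's NULL: &envp[0]  */
    while (*p) ++p;                   /* skip envp[]  (second zfind) */
    return (Elf32_auxv_t *)(p + 1);   /* past envp's NULL            */
}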