
Revamp do_xmap() to work under exec-shield and exec-shield-randomize,
and with pre-linked PT_INTERP such as ld-linux.so.2.
	l_lx_elf.c l_lx_sh.c

committer: jreiser <jreiser> 1084923593 +0000
John Reiser 2004-05-18 23:39:53 +00:00
parent 0e45021a5a
commit aee150474c
2 changed files with 158 additions and 95 deletions

l_lx_elf.c

@@ -236,44 +236,87 @@ auxv_up(Elf32_auxv_t *av, int const type, unsigned const value)
}
}
// The PF_* and PROT_* bits are {1,2,4}; the conversion table fits in 32 bits.
#define REP8(x) \
((x)|((x)<<4)|((x)<<8)|((x)<<12)|((x)<<16)|((x)<<20)|((x)<<24)|((x)<<28))
#define EXP8(y) \
((1&(y)) ? 0xf0f0f0f0 : (2&(y)) ? 0xff00ff00 : (4&(y)) ? 0xffff0000 : 0)
static inline unsigned
__attribute__((regparm(1), stdcall))
pf_to_prot(unsigned const pf)
{
return (PROT_READ|PROT_WRITE|PROT_EXEC) & (
( (REP8(PROT_EXEC ) & EXP8(PF_X))
|(REP8(PROT_READ ) & EXP8(PF_R))
|(REP8(PROT_WRITE) & EXP8(PF_W))
) >> ((pf & (PF_R|PF_W|PF_X))<<2) );
}
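
The REP8/EXP8 trick above packs the PROT_* values for all eight PF_{R,W,X}
combinations into one 32-bit word of 4-bit entries (the three terms collapse at
compile time into the single constant 0x73516240), so pf_to_prot() is just a
shift and a mask instead of three conditional ORs. A minimal standalone check
(hypothetical, not part of the stub: the regparm/stdcall attributes are dropped
for portability, and the old if-chain is kept only for comparison):

#include <stdio.h>
#include <elf.h>       /* PF_R, PF_W, PF_X */
#include <sys/mman.h>  /* PROT_READ, PROT_WRITE, PROT_EXEC */

#define REP8(x) \
    ((x)|((x)<<4)|((x)<<8)|((x)<<12)|((x)<<16)|((x)<<20)|((x)<<24)|((x)<<28))
#define EXP8(y) \
    ((1&(y)) ? 0xf0f0f0f0 : (2&(y)) ? 0xff00ff00 : (4&(y)) ? 0xffff0000 : 0)

static unsigned pf_to_prot(unsigned const pf)        /* table version, as above */
{
    return (PROT_READ|PROT_WRITE|PROT_EXEC) & (
        ( (REP8(PROT_EXEC ) & EXP8(PF_X))
         |(REP8(PROT_READ ) & EXP8(PF_R))
         |(REP8(PROT_WRITE) & EXP8(PF_W))
        ) >> ((pf & (PF_R|PF_W|PF_X))<<2) );
}

static unsigned pf_to_prot_naive(unsigned const pf)  /* the old if-chain */
{
    unsigned prot = 0;
    if (pf & PF_X) { prot |= PROT_EXEC; }
    if (pf & PF_W) { prot |= PROT_WRITE; }
    if (pf & PF_R) { prot |= PROT_READ; }
    return prot;
}

int main(void)
{
    unsigned pf;
    for (pf = 0; pf < 8; ++pf) {   /* all PF_R|PF_W|PF_X combinations */
        printf("pf=%u  table=%u  naive=%u\n",
               pf, pf_to_prot(pf), pf_to_prot_naive(pf));
    }
    return 0;
}
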
// Find convex hull of PT_LOAD (the minimal interval which covers all PT_LOAD),
// and mmap that much, to be sure that a kernel using exec-shield-randomize
// won't place the first piece in a way that leaves no room for the rest.
static unsigned long // returns relocation constant
__attribute__((regparm(3), stdcall))
xfind_pages(unsigned mflags, Elf32_Phdr const *phdr, int phnum,
char **const p_brk
)
{
size_t lo= ~0, hi= 0, szlo= 0;
char *addr;
mflags += MAP_PRIVATE | MAP_ANONYMOUS; // '+' can optimize better than '|'
for (; --phnum>=0; ++phdr) if (PT_LOAD==phdr->p_type) {
if (phdr->p_vaddr < lo) {
lo = phdr->p_vaddr;
szlo = phdr->p_filesz;
}
if (hi < (phdr->p_memsz + phdr->p_vaddr)) {
hi = phdr->p_memsz + phdr->p_vaddr;
}
}
szlo += ~PAGE_MASK & lo; // page fragment on lo edge
lo -= ~PAGE_MASK & lo; // round down to page boundary
hi = PAGE_MASK & (hi - lo - PAGE_MASK -1); // page length
szlo = PAGE_MASK & (szlo - PAGE_MASK -1); // page length
addr = do_mmap((void *)lo, hi, PROT_READ|PROT_WRITE|PROT_EXEC, mflags, 0, 0);
*p_brk = hi + addr; // the logical value of brk(0)
munmap(szlo + addr, hi - szlo); // desirable if PT_LOAD non-contiguous
return (unsigned long)addr - lo;
}
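
The page rounding here leans on the kernel convention PAGE_MASK == ~(PAGE_SIZE-1),
so that -PAGE_MASK - 1 equals PAGE_SIZE - 1 and "PAGE_MASK & (x - PAGE_MASK -1)"
rounds x up to a whole number of pages, while "~PAGE_MASK & x" is the fragment
below the page boundary. A minimal sketch assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_MASK (~(PAGE_SIZE - 1))   /* 0xfffff000 for 32-bit unsigned */

int main(void)
{
    unsigned const x[] = { 1, 4095, 4096, 4097, 123456 };
    unsigned i;
    for (i = 0; i < sizeof x / sizeof x[0]; ++i) {
        unsigned up   = PAGE_MASK & (x[i] - PAGE_MASK - 1); /* round up, as in xfind_pages() */
        unsigned frag = ~PAGE_MASK & x[i];                  /* fragment, as in "szlo += ~PAGE_MASK & lo" */
        printf("x=%6u  rounded_up=%6u  frag=%4u\n", x[i], up, frag);
    }
    return 0;
}
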
static Elf32_Addr // entry address
do_xmap(int const fdi, Elf32_Ehdr const *const ehdr, struct Extent *const xi,
Elf32_auxv_t *const av)
{
Elf32_Phdr const *phdr = (Elf32_Phdr const *) (ehdr->e_phoff +
(char const *)ehdr);
unsigned long base = (ET_DYN==ehdr->e_type) ? 0x40000000 : 0;
char *v_brk;
unsigned long const reloc = xfind_pages(
((ET_DYN!=ehdr->e_type) ? MAP_FIXED : 0), phdr, ehdr->e_phnum, &v_brk);
int j;
for (j=0; j < ehdr->e_phnum; ++phdr, ++j)
if (PT_PHDR==phdr->p_type) {
auxv_up(av, AT_PHDR, phdr->p_vaddr);
auxv_up(av, AT_PHDR, phdr->p_vaddr + reloc);
}
else if (PT_LOAD==phdr->p_type) {
unsigned const prot = pf_to_prot(phdr->p_flags);
struct Extent xo;
size_t mlen = xo.size = phdr->p_filesz;
char *addr = xo.buf = (char *)phdr->p_vaddr;
char *haddr = phdr->p_memsz + (char *)phdr->p_vaddr;
char *haddr = phdr->p_memsz + addr;
size_t frag = (int)addr &~ PAGE_MASK;
mlen += frag;
addr -= frag;
if (ET_DYN==ehdr->e_type) {
addr += base;
haddr += base;
}
else { // There is only one brk, the one for the ET_EXEC
// Not needed if compressed a.elf is invoked directly.
// Needed only if compressed shell script invokes compressed shell.
do_brk(haddr+OVERHEAD); // Also takes care of whole pages of .bss
}
addr += reloc;
haddr += reloc;
// Decompressor can overrun the destination by 3 bytes.
if (addr != do_mmap(addr, mlen + (xi ? 3 : 0), PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | (xi ? MAP_ANONYMOUS : 0),
fdi, phdr->p_offset - frag) ) {
err_exit(8);
}
if (0==base) {
base = (unsigned long)addr;
}
if (xi) {
unpackExtent(xi, &xo, (f_expand *)fdi,
((phdr->p_flags & PF_X) ? (f_unfilter *)(2+ fdi) : 0));
@@ -288,52 +331,36 @@ do_xmap(int const fdi, Elf32_Ehdr const *const ehdr, struct Extent *const xi,
auxv_up((Elf32_auxv_t *)(~1 & (int)av), AT_NULL, (unsigned)hatch);
}
}
if (phdr->p_memsz != phdr->p_filesz) { // .bss
if (ET_DYN==ehdr->e_type) { // PT_INTERP whole pages of .bss?
addr += frag + mlen;
mlen = haddr - addr;
if (0 < (int)mlen) { // need more pages, too
if (addr != do_mmap(addr, mlen, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0 ) ) {
err_exit(9);
if (0!=mprotect(addr, mlen, prot)) {
err_exit(10);
ERR_LAB
}
}
}
addr += mlen + frag; /* page boundary on hi end */
if (addr < haddr) { // need pages for .bss
if (addr != do_mmap(addr, haddr - addr, prot,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0 ) ) {
err_exit(9);
}
}
else { // no .bss
int prot = 0;
if (phdr->p_flags & PF_X) { prot |= PROT_EXEC; }
if (phdr->p_flags & PF_W) { prot |= PROT_WRITE; }
if (phdr->p_flags & PF_R) { prot |= PROT_READ; }
if (0!=mprotect(addr, mlen, prot)) {
err_exit(10);
else if (xi) { // cleanup if decompressor overrun crosses page boundary
mlen = ~PAGE_MASK & (3+ mlen);
if (mlen<=3) { // page fragment was overrun buffer only
munmap(addr, mlen);
}
if (xi) { // cleanup if decompressor overrun crosses page boundary
mlen += 3;
addr += mlen;
mlen &= ~PAGE_MASK;
if (mlen<=3) { // page fragment was overrun buffer only
munmap(addr - mlen, mlen);
}
}
}
if (ET_DYN!=ehdr->e_type) {
// Needed only if compressed shell script invokes compressed shell.
do_brk(haddr);
}
}
if (!xi) {
if (!xi) { // 2nd call (PT_INTERP); close()+check is smaller here
if (0!=close(fdi)) {
err_exit(11);
}
}
if (ET_DYN==ehdr->e_type) {
return ehdr->e_entry + base;
}
else {
return ehdr->e_entry;
else { // 1st call (main); also have (0!=av) here
if (ET_DYN!=ehdr->e_type) {
// Needed only if compressed shell script invokes compressed shell.
do_brk(v_brk);
}
}
return ehdr->e_entry + reloc;
}
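
For each PT_LOAD, do_xmap() above maps the file-backed bytes (rounded out to page
boundaries) and then satisfies whatever of p_memsz lies beyond the last of those
pages with anonymous, zero-filled pages. The sketch below is a hypothetical
helper (map_bss is not UPX code; it assumes 4 KiB pages and uses plain mmap()
and memset() in place of do_mmap() and the anonymous/decompressor zero fill),
showing just that .bss split:

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

/* addr..addr+filesz already mapped (whole pages); extend to memsz as .bss */
static int map_bss(char *addr, size_t filesz, size_t memsz, int prot)
{
    size_t const page = 4096;                 /* assumed page size */
    char *file_end = addr + filesz;           /* end of file-backed bytes */
    char *page_end = (char *)(((uintptr_t)file_end + page - 1)
                              & ~(uintptr_t)(page - 1));
    char *mem_end  = addr + memsz;            /* end of segment incl. .bss */

    if (memsz > filesz)                       /* zero the tail of the last mapped page */
        memset(file_end, 0, (size_t)(page_end - file_end));
    if (mem_end > page_end) {                 /* whole pages of .bss still needed */
        void *p = mmap(page_end, (size_t)(mem_end - page_end), prot,
                       MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p != (void *)page_end)
            return -1;
    }
    return 0;
}

int main(void)
{
    size_t const page = 4096;
    /* anonymous region standing in for the file-backed part of a segment */
    char *seg = mmap(0, 3 * page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (seg == MAP_FAILED)
        return 1;
    return map_bss(seg, page + 100, 3 * page, PROT_READ | PROT_WRITE);
}
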

l_lx_sh.c

@@ -168,11 +168,72 @@ bzero(char *p, size_t len)
} while (--len);
}
#define REP8(x) \
((x)|((x)<<4)|((x)<<8)|((x)<<12)|((x)<<16)|((x)<<20)|((x)<<24)|((x)<<28))
#define EXP8(y) \
((1&(y)) ? 0xf0f0f0f0 : (2&(y)) ? 0xff00ff00 : (4&(y)) ? 0xffff0000 : 0)
static inline unsigned
__attribute__((regparm(1), stdcall))
pf_to_prot(unsigned const pf)
{
return 7 & (( (REP8(PROT_EXEC ) & EXP8(PF_X))
|(REP8(PROT_READ ) & EXP8(PF_R))
|(REP8(PROT_WRITE) & EXP8(PF_W)) )
>> ((pf & (PF_R|PF_W|PF_X))<<2) );
}
// Find convex hull of PT_LOAD (the minimal interval which covers all PT_LOAD),
// and mmap that much, to be sure that a kernel using exec-shield-randomize
// won't place the first piece in a way that leaves no room for the rest.
static unsigned long // returns relocation constant
__attribute__((regparm(3), stdcall))
xfind_pages(unsigned mflags, Elf32_Phdr const *phdr, int phnum)
{
size_t lo= ~0, hi= 0, szlo= 0;
char *addr;
mflags += MAP_PRIVATE | MAP_ANONYMOUS; // '+' can optimize better than '|'
for (; --phnum>=0; ++phdr) if (PT_LOAD==phdr->p_type) {
if (phdr->p_vaddr < lo) {
lo = phdr->p_vaddr;
szlo = phdr->p_filesz;
}
if (hi < (phdr->p_memsz + phdr->p_vaddr)) {
hi = phdr->p_memsz + phdr->p_vaddr;
}
}
if (MAP_FIXED & mflags) { // the "shell", and not the PT_INTERP
// This is a dirty hack to set the proper value for brk(0) as seen by
// the "shell" which we will mmap() soon, upon return to do_xmap().
// It depends on our own brk() starting out at 0x08048000, which is the
// default base address used by /bin/ld for an ET_EXEC. We must brk()
// now. If we wait until after mmap() of shell pages, then the kernel
// "vma" containing our original brk() of 0x08048000 will not be contiguous
// with hi [the mmap'ed pages from the shell will be in between],
// and various linux kernels will not move the brk() in this case;
// the typical symptom is SIGSEGV early in ld-linux.so.2 (the PT_INTERP).
do_brk((void *)hi);
}
szlo += ~PAGE_MASK & lo; // page fragment on lo edge
lo -= ~PAGE_MASK & lo; // round down to page boundary
hi = PAGE_MASK & (hi - lo - PAGE_MASK -1); // page length
szlo = PAGE_MASK & (szlo - PAGE_MASK -1); // page length
addr = do_mmap((void *)lo, hi, PROT_READ|PROT_WRITE|PROT_EXEC, mflags, 0, 0);
// Doing this may destroy the brk() that we set so carefully above.
// The munmap() is "needed" only for discontiguous PT_LOAD,
// and neither shells nor ld-linux.so.2 have that.
// munmap(szlo + addr, hi - szlo);
return (unsigned long)addr - lo;
}
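
xfind_pages() reserves the whole PT_LOAD hull with a single mmap() and returns
the relocation constant addr - lo; the addresses taken from the program header
are then biased by that constant (both versions of do_xmap() end with
"return ehdr->e_entry + reloc"). A minimal sketch with made-up numbers, using
glibc mmap() with a PROT_NONE reservation in place of the stub's do_mmap():

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    unsigned long lo  = 0x0;       /* page-rounded lowest PT_LOAD p_vaddr (made up) */
    unsigned long len = 0x21000;   /* page-rounded length of the hull (made up)     */
    char *base = mmap((void *)lo, len, PROT_NONE,        /* kernel picks the base   */
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED)
        return 1;
    unsigned long reloc   = (unsigned long)base - lo;    /* what xfind_pages() returns */
    unsigned long p_vaddr = 0x1040;                      /* example Elf32_Phdr field   */
    printf("reloc=%#lx  segment lands at %#lx\n", reloc, p_vaddr + reloc);
    munmap(base, len);
    return 0;
}

With MAP_FIXED (the ET_EXEC case) the kernel honors lo and reloc comes out 0;
without it, an exec-shield-randomize kernel may slide the whole hull, and reloc
carries that slide into every later address.
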
// This do_xmap() has no Extent *xi input because it doesn't decompress anything;
// it only maps the shell and its PT_INTERP. So, it was specialized by hand
// to reduce compiled instruction size. gdb 2.91.66 does not notice that
// to reduce compiled instruction size. gcc 2.91.66 does not notice that
// there is only one call to this static function (from getexec(), which
// would specify 0 for xi), so gdb does not propagate the constant parameter.
// would specify 0 for xi), so gcc does not propagate the constant parameter.
// Notice there is no make_hatch(), either.
static Elf32_Addr // entry address
@@ -180,77 +241,52 @@ do_xmap(int const fdi, Elf32_Ehdr const *const ehdr, Elf32_auxv_t *const av)
{
Elf32_Phdr const *phdr = (Elf32_Phdr const *) (ehdr->e_phoff +
(char const *)ehdr);
unsigned long base = (ET_DYN==ehdr->e_type) ? 0x40000000 : 0;
unsigned long const reloc = xfind_pages(
((ET_DYN!=ehdr->e_type) ? MAP_FIXED : 0), phdr, ehdr->e_phnum);
int j;
for (j=0; j < ehdr->e_phnum; ++phdr, ++j)
if (PT_PHDR==phdr->p_type) {
av[AT_PHDR -1].a_un.a_val = phdr->p_vaddr;
}
else if (PT_LOAD==phdr->p_type) {
unsigned const prot = pf_to_prot(phdr->p_flags);
struct Extent xo;
size_t mlen = xo.size = phdr->p_filesz;
char *addr = xo.buf = (char *)phdr->p_vaddr;
char *haddr = phdr->p_memsz + (char *)phdr->p_vaddr;
char *haddr = phdr->p_memsz + addr;
size_t frag = (int)addr &~ PAGE_MASK;
mlen += frag;
addr -= frag;
if (ET_DYN==ehdr->e_type) {
addr += base;
haddr += base;
}
else { // There is only one brk, the one for the ET_EXEC
do_brk(haddr+OVERHEAD); // Also takes care of whole pages of .bss
}
addr += reloc;
haddr += reloc;
// Decompressor can overrun the destination by 3 bytes.
if (addr != do_mmap(addr, mlen, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE,
fdi, phdr->p_offset - frag) ) {
err_exit(8);
}
if (0==base) {
base = (unsigned long)addr;
}
bzero(addr, frag); // fragment at lo end
frag = (-mlen) &~ PAGE_MASK; // distance to next page boundary
bzero(mlen+addr, frag); // fragment at hi end
if (phdr->p_memsz != phdr->p_filesz) { // .bss
if (ET_DYN==ehdr->e_type) { // PT_INTERP whole pages of .bss?
addr += frag + mlen;
mlen = haddr - addr;
if (0 < (int)mlen) { // need more pages, too
if (addr != do_mmap(addr, mlen, PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0 ) ) {
err_exit(9);
if (0!=mprotect(addr, mlen, prot)) {
err_exit(10);
ERR_LAB
}
}
}
addr += mlen + frag; /* page boundary on hi end */
if (addr < haddr) { // need pages for .bss
if (addr != do_mmap(addr, haddr - addr, prot,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, 0, 0 ) ) {
err_exit(9);
}
}
else { // no .bss
int prot = 0;
if (phdr->p_flags & PF_X) { prot |= PROT_EXEC; }
if (phdr->p_flags & PF_W) { prot |= PROT_WRITE; }
if (phdr->p_flags & PF_R) { prot |= PROT_READ; }
if (0!=mprotect(addr, mlen, prot)) {
err_exit(10);
}
}
if (ET_DYN!=ehdr->e_type) {
do_brk(haddr);
}
}
if (0!=close(fdi)) {
err_exit(11);
}
if (ET_DYN==ehdr->e_type) {
return ehdr->e_entry + base;
}
else {
return ehdr->e_entry;
if (0!=close(fdi)) {
err_exit(11);
}
return ehdr->e_entry + reloc;
}
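
The bzero() of the lo and hi fragments in this do_xmap() relies on two small
identities: "(int)addr &~ PAGE_MASK" is the offset of p_vaddr within its page
(the lo fragment), and "(-mlen) &~ PAGE_MASK" is the distance from the end of
the file-backed bytes to the next page boundary (the hi fragment). A worked
example with assumed values (4 KiB pages, arbitrary p_vaddr and p_filesz):

#include <stdio.h>

#define PAGE_MASK (~0xfffu)   /* 4 KiB pages assumed */

int main(void)
{
    unsigned p_vaddr  = 0x08049f14;   /* example values, not from a real binary */
    unsigned p_filesz = 0x1d0;

    unsigned frag   = p_vaddr & ~PAGE_MASK;   /* 0xf14: lo fragment                  */
    unsigned mlen   = p_filesz + frag;        /* file-backed bytes from page start   */
    unsigned hifrag = (-mlen) & ~PAGE_MASK;   /* bytes from end of data to page end  */

    printf("map at %#x, length %#x, zero %#x lo bytes and %#x hi bytes\n",
           p_vaddr - frag, mlen, frag, hifrag);
    return 0;
}
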
static Elf32_Addr // entry address
getexec(char const *const fname, Elf32_Ehdr *const ehdr, Elf32_auxv_t *const av)
{