/* packer.cpp --
This file is part of the UPX executable compressor.
Copyright (C) 1996-2006 Markus Franz Xaver Johannes Oberhumer
Copyright (C) 1996-2006 Laszlo Molnar
All Rights Reserved.
UPX and the UCL library are free software; you can redistribute them
and/or modify them under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING.
If not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
Markus F.X.J. Oberhumer               Laszlo Molnar
markus@oberhumer.com                  ml1050@users.sourceforge.net
*/
//#define WANT_STL
#include "conf.h"
#include "file.h"
#include "packer.h"
#include "filter.h"
#include "linker.h"
#include "ui.h"
/*************************************************************************
//
**************************************************************************/
Packer::Packer(InputFile *f) :
bele(NULL),
fi(f), file_size(-1), ph_format(-1), ph_version(-1),
uip(NULL), linker(NULL),
last_patch(NULL), last_patch_len(0), last_patch_off(0)
{
if (fi != NULL)
file_size = fi->st.st_size;
uip = new UiPacker(this);
memset(&ph, 0, sizeof(ph));
}
Packer::~Packer()
{
delete uip; uip = NULL;
delete linker; linker = NULL;
}
// for PackMaster
void Packer::assertPacker() const
{
assert(getFormat() > 0);
assert(getFormat() <= 255);
assert(getVersion() >= 11);
assert(getVersion() <= 14);
assert(strlen(getName()) <= 13);
assert(strlen(getFullName(opt)) <= 26); // "i386-linux.kernel.bvmlinuz"
assert(strlen(getFullName(NULL)) <= 26); // "i386-linux.kernel.bvmlinuz"
if (bele == NULL) fprintf(stderr, "%s\n", getName());
assert(bele != NULL);
#if 1
Linker *l = newLinker();
if (bele != l->bele) fprintf(stderr, "%s\n", getName());
assert(bele == l->bele);
delete l;
#endif
}
/*************************************************************************
// public entries called from class PackMaster
**************************************************************************/
void Packer::doPack(OutputFile *fo)
{
uip->uiPackStart(fo);
pack(fo);
uip->uiPackEnd(fo);
}
void Packer::doUnpack(OutputFile *fo)
{
uip->uiUnpackStart(fo);
unpack(fo);
uip->uiUnpackEnd(fo);
}
void Packer::doTest()
{
uip->uiTestStart();
test();
uip->uiTestEnd();
}
void Packer::doList()
{
uip->uiListStart();
list();
uip->uiListEnd();
}
void Packer::doFileInfo()
{
uip->uiFileInfoStart();
fileInfo();
uip->uiFileInfoEnd();
}
/*************************************************************************
// default actions
**************************************************************************/
void Packer::test()
{
unpack(NULL);
}
void Packer::list()
{
uip->uiList();
}
void Packer::fileInfo()
{
// FIXME: subclasses should list their sections here
// We should also try to get a nice layout...
}
bool Packer::testUnpackVersion(int version) const
{
if (version != ph_version && ph_version != -1)
throwCantUnpack("program has been modified; run a virus checker!");
if (!canUnpackVersion(version))
throwCantUnpack("I am not compatible with older versions of UPX");
return true;
}
bool Packer::testUnpackFormat(int format) const
{
if (format != ph_format && ph_format != -1)
throwCantUnpack("program has been modified; run a virus checker!");
return canUnpackFormat(format);
}
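// The post-compression verify step below is skipped only for the cheap
// UCL/NRV methods at level 1; deflate and LZMA results are always verified.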
bool ph_skipVerify(const PackHeader &ph)
{
if (M_IS_DEFLATE(ph.method))
return false;
if (M_IS_LZMA(ph.method))
return false;
if (ph.level > 1)
return false;
return true;
}
/*************************************************************************
// compress - wrap call to low-level upx_compress()
**************************************************************************/
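// Caller contract (as implemented below): ph.u_len, ph.method and ph.level
// must already be set up; on return ph.c_len, the adler32 checksums and
// ph.compress_result are updated. Returns false when the data does not
// compress well enough -- see checkCompressionRatio().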
bool Packer::compress(upx_bytep in, upx_bytep out,
const upx_compress_config_t *cconf_parm)
{
ph.c_len = 0;
assert(ph.level >= 1); assert(ph.level <= 10);
// Avoid too many progress bar updates. 64 is s->bar_len in ui.cpp.
unsigned step = (ph.u_len < 64*1024) ? 0 : ph.u_len / 64;
// save current checksums
ph.saved_u_adler = ph.u_adler;
ph.saved_c_adler = ph.c_adler;
// update checksum of uncompressed data
ph.u_adler = upx_adler32(in, ph.u_len, ph.u_adler);
// set compression parameters
upx_compress_config_t cconf; cconf.reset();
if (cconf_parm)
cconf = *cconf_parm;
// cconf options
if (M_IS_NRV2B(ph.method) || M_IS_NRV2D(ph.method) || M_IS_NRV2E(ph.method))
{
if (opt->crp.crp_ucl.c_flags != -1)
cconf.conf_ucl.c_flags = opt->crp.crp_ucl.c_flags;
if (opt->crp.crp_ucl.p_level != -1)
cconf.conf_ucl.p_level = opt->crp.crp_ucl.p_level;
if (opt->crp.crp_ucl.h_level != -1)
cconf.conf_ucl.h_level = opt->crp.crp_ucl.h_level;
if (opt->crp.crp_ucl.max_offset != UINT_MAX && opt->crp.crp_ucl.max_offset < cconf.conf_ucl.max_offset)
cconf.conf_ucl.max_offset = opt->crp.crp_ucl.max_offset;
if (opt->crp.crp_ucl.max_match != UINT_MAX && opt->crp.crp_ucl.max_match < cconf.conf_ucl.max_match)
cconf.conf_ucl.max_match = opt->crp.crp_ucl.max_match;
#if defined(WITH_NRV)
if (ph.level >= 7 || (ph.level >= 4 && ph.u_len >= 512*1024))
step = 0;
#endif
}
if (M_IS_LZMA(ph.method))
{
oassign(cconf.conf_lzma.pos_bits, opt->crp.crp_lzma.pos_bits);
oassign(cconf.conf_lzma.lit_pos_bits, opt->crp.crp_lzma.lit_pos_bits);
oassign(cconf.conf_lzma.lit_context_bits, opt->crp.crp_lzma.lit_context_bits);
oassign(cconf.conf_lzma.dict_size, opt->crp.crp_lzma.dict_size);
oassign(cconf.conf_lzma.num_fast_bytes, opt->crp.crp_lzma.num_fast_bytes);
}
if (M_IS_DEFLATE(ph.method))
{
oassign(cconf.conf_zlib.mem_level, opt->crp.crp_zlib.mem_level);
oassign(cconf.conf_zlib.window_bits, opt->crp.crp_zlib.window_bits);
oassign(cconf.conf_zlib.strategy, opt->crp.crp_zlib.strategy);
}
if (uip->ui_pass >= 0)
uip->ui_pass++;
uip->startCallback(ph.u_len, step, uip->ui_pass, uip->ui_total_passes);
uip->firstCallback();
//OutputFile::dump("data.raw", in, ph.u_len);
// compress
int r = upx_compress(in, ph.u_len, out, &ph.c_len,
uip->getCallback(),
ph.method, ph.level, &cconf, &ph.compress_result);
//uip->finalCallback(ph.u_len, ph.c_len);
uip->endCallback();
if (r == UPX_E_OUT_OF_MEMORY)
throwCantPack("out of memory");
if (r != UPX_E_OK)
throwInternalError("compression failed");
if (M_IS_NRV2B(ph.method) || M_IS_NRV2D(ph.method) || M_IS_NRV2E(ph.method))
{
const ucl_uint *res = ph.compress_result.result_ucl.result;
//ph.min_offset_found = res[0];
ph.max_offset_found = res[1];
//ph.min_match_found = res[2];
ph.max_match_found = res[3];
//ph.min_run_found = res[4];
ph.max_run_found = res[5];
ph.first_offset_found = res[6];
//ph.same_match_offsets_found = res[7];
if (cconf_parm)
{
assert(cconf.conf_ucl.max_offset == 0 || cconf.conf_ucl.max_offset >= ph.max_offset_found);
assert(cconf.conf_ucl.max_match == 0 || cconf.conf_ucl.max_match >= ph.max_match_found);
}
}
//printf("\nPacker::compress: %d/%d: %7d -> %7d\n", ph.method, ph.level, ph.u_len, ph.c_len);
if (!checkCompressionRatio(ph.u_len, ph.c_len))
return false;
// return in any case if not compressible
if (ph.c_len >= ph.u_len)
return false;
// update checksum of compressed data
ph.c_adler = upx_adler32(out, ph.c_len, ph.c_adler);
// Decompress and verify. Skip this when using the fastest level.
if (!ph_skipVerify(ph))
{
// decompress
unsigned new_len = ph.u_len;
r = upx_decompress(out, ph.c_len, in, &new_len, ph.method, &ph.compress_result);
//printf("%d %d: %d %d %d\n", ph.method, r, ph.c_len, ph.u_len, new_len);
if (r != UPX_E_OK)
throwInternalError("decompression failed");
if (new_len != ph.u_len)
throwInternalError("decompression failed (size error)");
// verify decompression
if (ph.u_adler != upx_adler32(in, ph.u_len, ph.saved_u_adler))
throwInternalError("decompression failed (checksum error)");
}
return true;
}
#if 0
bool Packer::compress(upx_bytep in, upx_bytep out,
const upx_compress_config_t *cconf)
{
return ph_compress(ph, in, out, cconf);
}
#endif
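// Default heuristic: require an absolute gain of at least 512 bytes, and
// accept once the gain reaches 4096 bytes or 1/16 (6.25%) of u_len.
// Example: a 32 KiB input passes with a 2 KiB gain (32768/16 == 2048),
// while a 4 KiB input still needs the full 512-byte minimum.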
bool Packer::checkDefaultCompressionRatio(unsigned u_len, unsigned c_len) const
{
assert((int)u_len > 0);
assert((int)c_len > 0);
if (c_len >= u_len)
return false;
unsigned gain = u_len - c_len;
if (gain < 512) // need at least 512 bytes gain
return false;
#if 1
if (gain >= 4096) // ok if we have 4096 bytes gain
return true;
if (gain >= u_len / 16) // ok if we have 6.25% gain
return true;
return false;
#else
return true;
#endif
}
bool Packer::checkCompressionRatio(unsigned u_len, unsigned c_len) const
{
return checkDefaultCompressionRatio(u_len, c_len);
}
bool Packer::checkFinalCompressionRatio(const OutputFile *fo) const
{
const unsigned u_len = file_size;
const unsigned c_len = fo->getBytesWritten();
return checkDefaultCompressionRatio(u_len, c_len);
}
/*************************************************************************
// decompress
**************************************************************************/
void ph_decompress(PackHeader &ph, const upx_bytep in, upx_bytep out,
bool verify_checksum, Filter *ft)
{
unsigned adler;
// verify checksum of compressed data
if (verify_checksum)
{
adler = upx_adler32(in, ph.c_len, ph.saved_c_adler);
if (adler != ph.c_adler)
throwChecksumError();
}
// decompress
unsigned new_len = ph.u_len;
int r = upx_decompress(in, ph.c_len, out, &new_len, ph.method, &ph.compress_result);
if (r != UPX_E_OK || new_len != ph.u_len)
throwCompressedDataViolation();
// verify checksum of decompressed data
if (verify_checksum)
{
if (ft) {
ft->unfilter(out, ph.u_len);
}
adler = upx_adler32(out, ph.u_len, ph.saved_u_adler);
if (adler != ph.u_adler)
throwChecksumError();
}
}
void Packer::decompress(const upx_bytep in, upx_bytep out,
bool verify_checksum, Filter *ft)
{
ph_decompress(ph, in, out, verify_checksum, ft);
}
/*************************************************************************
// overlapping decompression
**************************************************************************/
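// In-place ("overlapping") decompression layout: the compressed block sits
// at the tail of the destination buffer, so the write pointer never catches
// up with the not-yet-consumed input:
//
//   |<------------- u_len + overlap_overhead ------------->|
//   |      uncompressed output grows here      |<- c_len ->|
//                                              ^ src_off = u_len + overlap_overhead - c_len
//
// upx_test_overlap() checks whether a given overlap_overhead is safe.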
bool ph_testOverlappingDecompression(const PackHeader &ph, const upx_bytep buf,
unsigned overlap_overhead)
{
if (ph.c_len >= ph.u_len)
return false;
assert((int)overlap_overhead >= 0);
// Because upx_test_overlap() does not use the asm_fast decompressor
// we must account for extra 3 bytes that asm_fast does use,
// or else we may fail at runtime decompression.
unsigned extra = 0;
if (M_IS_NRV2B(ph.method) || M_IS_NRV2D(ph.method) || M_IS_NRV2E(ph.method))
extra = 3;
if (overlap_overhead <= 4 + extra) // don't waste time here
return false;
overlap_overhead -= extra;
unsigned src_off = ph.u_len + overlap_overhead - ph.c_len;
unsigned new_len = ph.u_len;
int r = upx_test_overlap(buf - src_off, src_off,
ph.c_len, &new_len, ph.method, &ph.compress_result);
return (r == UPX_E_OK && new_len == ph.u_len);
}
bool Packer::testOverlappingDecompression(const upx_bytep buf,
unsigned overlap_overhead) const
{
return ph_testOverlappingDecompression(ph, buf, overlap_overhead);
}
void Packer::verifyOverlappingDecompression(Filter *ft)
{
assert(ph.c_len < ph.u_len);
assert((int)ph.overlap_overhead > 0);
// Idea:
// obuf[] was allocated with MemBuffer::allocForCompression(), and
// its contents are no longer needed, i.e. the compressed data
// must have been already written.
// We now can perform a real overlapping decompression and
// verify the checksum.
//
// Note:
// This verify is just because of complete paranoia that there
// could be a hidden bug in the upx_test_overlap implementation,
// and it should not be necessary at all.
//
// See also:
// Filter::verifyUnfilter()
if (ph_skipVerify(ph))
return;
unsigned offset = (ph.u_len + ph.overlap_overhead) - ph.c_len;
if (offset + ph.c_len > obuf.getSize())
return;
memmove(obuf + offset, obuf, ph.c_len);
decompress(obuf + offset, obuf, true, ft);
obuf.checkState();
}
/*************************************************************************
// Find overhead for in-place decompression in a heuristic way
// (using a binary search). Return 0 on error.
//
// To speed up things:
// - you can pass the range of an acceptable interval (so that
// we can succeed early)
// - you can enforce an upper_limit (so that we can fail early)
**************************************************************************/
unsigned Packer::findOverlapOverhead(const upx_bytep buf,
unsigned range,
unsigned upper_limit) const
{
assert((int) range >= 0);
// prepare to deal with very pessimistic values
unsigned low = 1;
unsigned high = UPX_MIN(ph.u_len / 4 + 512, upper_limit);
// but be optimistic for first try (speedup)
unsigned m = UPX_MIN(16, high);
//
unsigned overhead = 0;
unsigned nr = 0; // statistics
while (high >= low)
{
assert(m >= low); assert(m <= high);
assert(m < overhead || overhead == 0);
nr++;
bool success = testOverlappingDecompression(buf, m);
//printf("testOverlapOverhead: %d %d -> %d\n", nr, m, (int)success);
if (success)
{
overhead = m;
// Succeed early if m lies in [low .. low+range-1], i.e. if
// the range of the current interval is <= range.
//if (m <= low + range - 1)
if (m + 1 <= low + range) // avoid underflow
break;
high = m - 1;
}
else
low = m + 1;
m = (low + high) / 2;
}
//printf("findOverlapOverhead: %d (%d tries)\n", overhead, nr);
if (overhead == 0)
throwInternalError("this is an oo bug");
UNUSED(nr);
return overhead;
}
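// Typical call sequence (a hypothetical sketch, not from any specific packer):
//   compress(ibuf, obuf);                                  // sets ph.c_len
//   ph.overlap_overhead = findOverlapOverhead(obuf, 512);  // binary search
//   // ... write loader and obuf to the output file ...
//   verifyOverlappingDecompression(&ft);                   // paranoia check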
/*************************************************************************
// file i/o utils
**************************************************************************/
void Packer::handleStub(InputFile *fif, OutputFile *fo, long size)
{
if (fo)
{
if (size > 0)
{
// copy stub from exe
info("Copying original stub: %ld bytes", size);
ByteArray(stub, size);
fif->seek(0,SEEK_SET);
fif->readx(stub,size);
fo->write(stub,size);
}
else
{
// no stub
}
}
}
void Packer::checkOverlay(unsigned overlay)
{
assert((int)overlay >= 0);
assert((off_t)overlay < file_size);
if (overlay == 0)
return;
info("Found overlay: %d bytes", overlay);
if (opt->overlay == opt->SKIP_OVERLAY)
throw OverlayException("file has overlay -- skipped; try `--overlay=copy'");
}
void Packer::copyOverlay(OutputFile *fo, unsigned overlay,
MemBuffer *buf,
bool do_seek)
{
assert((int)overlay >= 0);
assert((off_t)overlay < file_size);
buf->checkState();
if (!fo || overlay == 0)
return;
if (opt->overlay != opt->COPY_OVERLAY)
{
assert(opt->overlay == opt->STRIP_OVERLAY);
infoWarning("stripping overlay: %d bytes", overlay);
return;
}
info("Copying overlay: %d bytes", overlay);
if (do_seek)
fi->seek(-(off_t)overlay, SEEK_END);
// get buffer size, align to improve i/o speed
unsigned buf_size = buf->getSize();
if (buf_size > 65536)
buf_size = ALIGN_DOWN(buf_size, 4096);
assert((int)buf_size > 0);
do {
unsigned len = overlay < buf_size ? overlay : buf_size;
fi->readx(buf, len);
fo->write(buf, len);
overlay -= len;
} while (overlay > 0);
buf->checkState();
}
// Create a pseudo-unique program id.
unsigned Packer::getRandomId() const
{
if (opt->debug.disable_random_id)
return 0x01020304;
unsigned id = 0;
#if 0 && defined(__unix__)
// Don't consume precious bytes from /dev/urandom.
int fd = open("/dev/urandom", O_RDONLY);
if (fd < 0)
fd = open("/dev/random", O_RDONLY);
if (fd >= 0)
{
if (read(fd, &id, 4) != 4)
id = 0;
close(fd);
}
#endif
while (id == 0)
{
#if !defined(HAVE_GETTIMEOFDAY) || defined(__DJGPP__)
id ^= (unsigned) time(NULL);
id ^= ((unsigned) clock()) << 12;
#else
struct timeval tv;
gettimeofday(&tv, 0);
id ^= (unsigned) tv.tv_sec;
id ^= ((unsigned) tv.tv_usec) << 12; // shift into high-bits
#endif
#if defined(HAVE_GETPID)
id ^= (unsigned) getpid();
#endif
id ^= (unsigned) fi->st.st_ino;
id ^= (unsigned) fi->st.st_atime;
id ^= (unsigned) rand();
}
return id;
}
/*************************************************************************
// packheader util
**************************************************************************/
// this is called directly after the constructor from class PackMaster
void Packer::initPackHeader()
{
memset(&ph, 0, sizeof(ph));
ph.version = getVersion();
ph.format = getFormat();
ph.method = -1;
ph.level = -1;
ph.u_adler = ph.c_adler = ph.saved_u_adler = ph.saved_c_adler = upx_adler32(NULL,0);
ph.buf_offset = -1;
ph.u_file_size = file_size;
}
// this is called directly after canPack() from class PackMaster
void Packer::updatePackHeader()
{
assert(opt->cmd == CMD_COMPRESS);
//
const int *m = getCompressionMethods(opt->method, opt->level);
ph.method = m[0];
ph.level = opt->level;
if (ph.level < 0)
ph.level = file_size < 512*1024 ? 8 : 7;
//
assert(isValidCompressionMethod(ph.method));
assert(1 <= ph.level && ph.level <= 10);
}
// FIXME: remove patchPackHeader() and fold into relocateLoader();
// then make linker->relocate() private (friend Packer)
int Packer::patchPackHeader(void *b, int blen)
{
assert(isValidFilter(ph.filter));
const int size = ph.getPackHeaderSize();
if (linker->findSection("UPX1HEAD", false))
assert(size == linker->getSectionSize("UPX1HEAD"));
int boff = find_le32(b, blen, UPX_MAGIC_LE32);
checkPatch(b, blen, boff, size);
unsigned char *p = (unsigned char *)b + boff;
ph.putPackHeader(p);
return boff;
}
bool Packer::getPackHeader(void *b, int blen)
{
if (!ph.fillPackHeader((unsigned char *)b, blen))
return false;
if (ph.version > getVersion())
throwCantUnpack("need a newer version of UPX");
// Some formats might be able to unpack old versions because
// their implementation hasn't changed. Ask them.
if (opt->cmd != CMD_FILEINFO)
if (!testUnpackVersion(ph.version))
return false;
if (ph.c_len >= ph.u_len || (off_t)ph.c_len >= file_size
|| ph.version <= 0 || ph.version >= 0xff)
throwCantUnpack("header corrupted");
else if ((off_t)ph.u_len > ph.u_file_size)
{
#if 0
// FIXME: does this check make sense w.r.t. overlays ???
if (ph.format == UPX_F_W32_PE || ph.format == UPX_F_DOS_EXE)
// may get longer
((void)0);
else
throwCantUnpack("header size corrupted");
#endif
}
if (!isValidCompressionMethod(ph.method))
throwCantUnpack("unknown compression method (try a newer version of UPX)");
// Some formats might be able to unpack "subformats". Ask them.
if (!testUnpackFormat(ph.format))
return false;
return true;
}
bool Packer::readPackHeader(int len)
{
assert((int)len > 0);
MemBuffer buf(len);
len = fi->read(buf, len);
if (len <= 0)
return false;
return getPackHeader(buf, len);
}
void Packer::checkAlreadyPacked(const void *b, int blen)
{
int boff = find_le32(b, blen, UPX_MAGIC_LE32);
if (boff < 0)
return;
// FIXME: could add some more checks to verify that this
// is a real PackHeader, e.g.
//
//PackHeader tmp;
//if (!tmp.fillPackHeader((unsigned char *)b + boff, blen - boff))
// return;
//
// This also would require that the buffer in `b' holds
// the full PackHeader, and not only the 4 magic bytes.
throwAlreadyPacked();
}
/*************************************************************************
// patch util for loader
**************************************************************************/
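// checkPatch() enforces the patching protocol: within one buffer the
// patches must be applied back-to-front, i.e. each new patch has to end
// at or before the start of the previously applied one, and the buffer
// length must not grow. checkPatch(NULL, 0, 0, 0) resets this bookkeeping.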
void Packer::checkPatch(void *b, int blen, int boff, int size)
{
if (b == NULL && blen == 0 && boff == 0 && size == 0)
{
// reset
last_patch = NULL;
last_patch_len = 0;
last_patch_off = 0;
return;
}
if (b == NULL || blen <= 0 || boff < 0 || size <= 0)
throwBadLoader();
if (boff + size <= 0 || boff + size > blen)
throwBadLoader();
//printf("checkPatch: %p %5d %5d %2d\n", b, blen, boff, size);
if (b == last_patch)
{
if (boff + size > last_patch_off)
throwInternalError("invalid patch order");
// The next check is not strictly necessary, but the buffer
// length should not increase...
if (blen > last_patch_len)
throwInternalError("invalid patch order (length)");
}
else
last_patch = b;
last_patch_len = blen;
last_patch_off = boff;
}
int Packer::patch_be16(void *b, int blen, unsigned old, unsigned new_)
{
int boff = find_be16(b, blen, old);
checkPatch(b, blen, boff, 2);
unsigned char *p = (unsigned char *)b + boff;
set_be16(p, new_);
return boff;
}
int Packer::patch_be16(void *b, int blen, const void *old, unsigned new_)
{
int boff = find(b, blen, old, 2);
checkPatch(b, blen, boff, 2);
unsigned char *p = (unsigned char *)b + boff;
set_be16(p, new_);
return boff;
}
int Packer::patch_be32(void *b, int blen, unsigned old, unsigned new_)
{
int boff = find_be32(b, blen, old);
checkPatch(b, blen, boff, 4);
unsigned char *p = (unsigned char *)b + boff;
set_be32(p, new_);
return boff;
}
int Packer::patch_be32(void *b, int blen, const void *old, unsigned new_)
{
int boff = find(b, blen, old, 4);
checkPatch(b, blen, boff, 4);
unsigned char *p = (unsigned char *)b + boff;
set_be32(p, new_);
return boff;
}
int Packer::patch_le16(void *b, int blen, unsigned old, unsigned new_)
{
int boff = find_le16(b, blen, old);
checkPatch(b, blen, boff, 2);
unsigned char *p = (unsigned char *)b + boff;
set_le16(p, new_);
return boff;
}
int Packer::patch_le16(void *b, int blen, const void *old, unsigned new_)
{
int boff = find(b, blen, old, 2);
checkPatch(b, blen, boff, 2);
unsigned char *p = (unsigned char *)b + boff;
set_le16(p, new_);
return boff;
}
int Packer::patch_le32(void *b, int blen, unsigned old, unsigned new_)
{
int boff = find_le32(b, blen, old);
checkPatch(b, blen, boff, 4);
unsigned char *p = (unsigned char *)b + boff;
set_le32(p, new_);
return boff;
}
int Packer::patch_le32(void *b, int blen, const void *old, unsigned new_)
{
int boff = find(b, blen, old, 4);
checkPatch(b, blen, boff, 4);
unsigned char *p = (unsigned char *)b + boff;
set_le32(p, new_);
return boff;
}
/*************************************************************************
// relocation util
**************************************************************************/
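// optimizeReloc32() turns a sorted table of 32-bit relocation addresses
// into a compact stream of deltas (each relative to the previous fixup,
// starting at -4), terminated by a 0 byte:
//   0x04..0xEF                     1-byte delta
//   0xF0|hi, lo, mid               3-byte delta (delta < 0x100000)
//   0xF0, 0x00, 0x00, 32-bit LE    escape for larger deltas (sets *big)
// unoptimizeReloc32() is the inverse transformation.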
upx_byte *Packer::optimizeReloc32(upx_byte *in, unsigned relocnum,
upx_byte *out, upx_byte *image,
int bswap, int *big)
{
if (opt->exact)
throwCantPackExact();
*big = 0;
if (relocnum == 0)
return out;
qsort(in,relocnum,4,le32_compare);
unsigned jc,pc,oc;
upx_byte *fix = out;
pc = (unsigned) -4;
for (jc = 0; jc<relocnum; jc++)
{
oc = get_le32(in+jc*4) - pc;
if (oc == 0)
continue;
else if ((int)oc < 4)
throwCantPack("overlapping fixups");
else if (oc < 0xF0)
*fix++ = (unsigned char) oc;
else if (oc < 0x100000)
{
*fix++ = (unsigned char) (0xF0+(oc>>16));
*fix++ = (unsigned char) oc;
*fix++ = (unsigned char) (oc>>8);
}
else
{
*big = 1;
*fix++ = 0xf0;
*fix++ = 0;
*fix++ = 0;
set_le32(fix,oc);
fix += 4;
}
pc += oc;
if (bswap)
acc_ua_swab32s(image + pc);
}
*fix++ = 0;
return fix;
}
unsigned Packer::unoptimizeReloc32(upx_byte **in, upx_byte *image,
MemBuffer *out, int bswap)
{
upx_byte *p;
unsigned relocn = 0;
for (p = *in; *p; p++, relocn++)
if (*p >= 0xF0)
{
if (*p == 0xF0 && get_le16(p+1) == 0)
p += 4;
p += 2;
}
//fprintf(stderr,"relocnum=%x\n",relocn);
out->alloc(4*relocn+4); // one extra slot
LE32 *outp = (LE32*) (unsigned char *) *out;
LE32 *relocs = outp;
unsigned jc = (unsigned) -4;
for (p = *in; *p; p++)
{
if (*p < 0xF0)
jc += *p;
else
{
unsigned dif = (*p & 0x0F)*0x10000 + get_le16(p+1);
p += 2;
if (dif == 0)
{
dif = get_le32(p+1);
p += 4;
}
jc += dif;
}
*relocs++ = jc;
if (bswap && image)
acc_ua_swab32s(image + jc);
}
//fprintf(stderr,"relocnum=%x\n",relocn);
*in = p+1;
return (unsigned) (relocs - outp);
}
/*************************************************************************
// loader util (interface to linker)
**************************************************************************/
static const char *getIdentstr(unsigned *size, int small)
{
static char identbig[] =
"\n\0"
"$Info: "
"This file is packed with the UPX executable packer http://upx.sf.net $"
"\n\0"
"$Id: UPX "
UPX_VERSION_STRING4
" Copyright (C) 1996-" UPX_VERSION_YEAR " the UPX Team. All Rights Reserved. $"
"\n";
static char identsmall[] =
"\n"
"$Id: UPX "
"(C) 1996-" UPX_VERSION_YEAR " the UPX Team. All Rights Reserved. http://upx.sf.net $"
"\n";
static char identtiny[] = UPX_VERSION_STRING4;
static int done;
if (!done && (opt->debug.fake_stub_version[0] || opt->debug.fake_stub_year[0]))
{
struct strinfo_t { char *s; int size; };
static const strinfo_t strlist[] = {
{ identbig, (int)sizeof(identbig) },
{ identsmall, (int)sizeof(identsmall) },
{ identtiny, (int)sizeof(identtiny) },
{ NULL, 0 } };
const strinfo_t* iter;
for (iter = strlist; iter->s; ++iter)
{
if (opt->debug.fake_stub_version[0])
mem_replace(iter->s, iter->size, UPX_VERSION_STRING4, 4, opt->debug.fake_stub_version);
if (opt->debug.fake_stub_year[0])
mem_replace(iter->s, iter->size, UPX_VERSION_YEAR, 4, opt->debug.fake_stub_year);
}
done = 1;
}
if (small < 0)
small = opt->small;
if (small >= 2)
{
*size = sizeof(identtiny);
return identtiny;
}
else if (small >= 1)
{
*size = sizeof(identsmall);
return identsmall;
}
else
{
*size = sizeof(identbig);
return identbig;
}
}
void Packer::initLoader(const void *pdata, int plen, int small)
{
delete linker;
linker = newLinker();
assert(bele == linker->bele);
linker->init(pdata, plen);
unsigned size;
char const * const ident = getIdentstr(&size, small);
linker->addSection("IDENTSTR", ident, size, 0);
}
void Packer::addLoader(const char *s)
{
if (s)
linker->addLoader(s);
}
// provide specialization for [T = char]
template <>
void __acc_cdecl_va Packer::addLoader<char>(const char *s, ...)
{
va_list ap;
va_start(ap, s);
while (s != NULL)
{
linker->addLoader(s);
s = va_arg(ap, const char *);
}
va_end(ap);
}
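// Note: the variadic form above expects a NULL sentinel, e.g. (with
// hypothetical section names) addLoader("IDENTSTR", "UPX1HEAD", NULL);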
upx_byte *Packer::getLoader() const
{
int size = -1;
upx_byte *oloader = linker->getLoader(&size);
if (oloader == NULL || size <= 0)
throwBadLoader();
return oloader;
}
int Packer::getLoaderSize() const
{
int size = -1;
upx_byte *oloader = linker->getLoader(&size);
if (oloader == NULL || size <= 0)
throwBadLoader();
return size;
}
int Packer::getLoaderSection(const char *name, int *slen) const
{
int size = -1;
int ostart = linker->getSection(name, &size);
if (ostart < 0 || size <= 0)
throwBadLoader();
if (slen)
*slen = size;
return ostart;
}
// same, but the size of the section may be == 0
int Packer::getLoaderSectionStart(const char *name, int *slen) const
{
int size = -1;
int ostart = linker->getSection(name, &size);
if (ostart < 0 || size < 0)
throwBadLoader();
if (slen)
*slen = size;
return ostart;
}
void Packer::relocateLoader()
{
linker->relocate();
#if 0
// "relocate" packheader
if (linker->findSection("UPX1HEAD", false))
{
int lsize = -1;
int loff = getLoaderSectionStart("UPX1HEAD", &lsize);
assert(lsize == ph.getPackHeaderSize());
unsigned char *p = getLoader() + loff;
assert(get_le32(p) == UPX_MAGIC_LE32);
//patchPackHeader(p, lsize);
ph.putPackHeader(p);
}
#endif
}
/*************************************************************************
// Try compression with several methods and filters, choose the best
// or first working one. Needs buildLoader().
//
// Required inputs:
// this->ph
// ulen
// parm_ft
// clevel
// addvalue
// buf_len (optional)
//
// - updates this->ph
// - updates *ft
// - ibuf[] is restored to the original unfiltered version
// - obuf[] contains the best compressed version
//
// filter_strategy:
// n: try the first N filters, use best one
// -1: try all filters, use first working one
// -2: try only the opt->filter filter
// -3: use no filter at all
//
// This has been prepared for generalization into class Packer so that
// opt->all_methods and/or opt->all_filters are available for all
// executable formats.
//
// It will replace the tryFilters() / compress() call sequence.
//
// 2006-02-15: hdr_buf and hdr_u_len are default empty input "header" array
// to fix a 2-pass problem with Elf headers. As of today there can be
// only one decompression method per executable output file, and that method
// is the one that gives best compression for .text and loader. However,
// the Elf headers precede .text in the output file, and are written first.
// "--brute" compression often compressed the Elf headers using nrv2b
// but the .text (and loader) with nrv2e. This often resulted in SIGSEGV
// during decompression.
// The workaround is for hdr_buf and hdr_u_len to describe the Elf headers
// (typically less than 512 bytes) when .text is passed in, and include
// them in the calculation of shortest output. Then the result
this->ph.method will say which [single] method to use for everything.
// The Elf headers are never filtered. They are short enough (< 512 bytes)
// that compressing them more than once per method (once here when choosing,
// once again just before writing [because compressWithFilters discards])
// is OK because of the simplicity of not having two output arrays.
**************************************************************************/
static int prepareMethods(int *methods, int ph_method, const int *all_methods)
{
int nmethods = 0;
if (!opt->all_methods || all_methods == NULL)
{
methods[nmethods++] = ph_method;
return nmethods;
}
for (int mm = 0; all_methods[mm] != M_END; ++mm)
{
int method = all_methods[mm];
if (method == M_ULTRA_BRUTE && !opt->ultra_brute)
break;
if (method == M_SKIP || method == M_ULTRA_BRUTE)
continue;
if (opt->all_methods && !opt->all_methods_use_lzma && M_IS_LZMA(method))
continue;
// use this method
assert(Packer::isValidCompressionMethod(method));
methods[nmethods++] = method;
}
return nmethods;
}
static int prepareFilters(int *filters, int &filter_strategy,
const int *all_filters)
{
int nfilters = 0;
// set up the filter strategy
if (filter_strategy == 0)
{
if (opt->all_filters)
// choose best from all available filters
filter_strategy = INT_MAX;
else if (opt->filter >= 0 && Filter::isValidFilter(opt->filter, all_filters))
// try opt->filter
filter_strategy = -2;
else
// try the first working filter
filter_strategy = -1;
}
assert(filter_strategy != 0);
if (filter_strategy == -3)
goto done;
if (filter_strategy == -2)
{
if (opt->filter >= 0 && Filter::isValidFilter(opt->filter, all_filters))
{
filters[nfilters++] = opt->filter;
goto done;
}
filter_strategy = -1;
}
assert(filter_strategy >= -1);
const int *filter_list;
filter_list = all_filters;
while (filter_list && *filter_list != FT_END)
{
int filter_id = *filter_list++;
if (filter_id == FT_ULTRA_BRUTE && !opt->ultra_brute)
break;
if (filter_id == FT_SKIP || filter_id == FT_ULTRA_BRUTE)
continue;
if (filter_id == 0)
continue;
// use this filter
assert(Filter::isValidFilter(filter_id));
filters[nfilters++] = filter_id;
if (filter_strategy >= 0 && nfilters >= filter_strategy)
break;
}
done:
// filter_strategy now only means "stop after first successful filter"
filter_strategy = (filter_strategy < 0) ? -1 : 0;
// make sure that we have a "no filter" fallback
for (int i = 0; i < nfilters; i++)
if (filters[i] == 0)
return nfilters;
filters[nfilters++] = 0;
return nfilters;
}
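// compressWithFilters() tries every prepared method/filter combination and
// keeps the variant that minimizes ph.c_len + loader size + compressed
// header size; ties are broken by a smaller loader, then by a smaller
// overlap_overhead. On success obuf[] holds the winning compressed data.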
void Packer::compressWithFilters(Filter *parm_ft,
const unsigned overlap_range,
const upx_compress_config_t *cconf,
int filter_strategy,
unsigned filter_off, unsigned compress_buf_off,
const upx_bytep hdr_buf, unsigned hdr_u_len)
{
// struct copies
const PackHeader orig_ph = this->ph;
PackHeader best_ph = this->ph;
const Filter orig_ft = *parm_ft;
Filter best_ft = *parm_ft;
//
const unsigned compress_buf_len = orig_ph.u_len;
const unsigned filter_len = orig_ft.buf_len ? orig_ft.buf_len : compress_buf_len;
//
best_ph.c_len = orig_ph.u_len;
best_ph.overlap_overhead = 0;
unsigned best_ph_lsize = 0;
unsigned best_hdr_c_len = 0;
// preconditions
assert(orig_ph.filter == 0);
assert(orig_ft.id == 0);
assert(filter_off + filter_len <= compress_buf_off + compress_buf_len);
// prepare methods and filters
int methods[256];
int nmethods = prepareMethods(methods, ph.method, getCompressionMethods(-1, ph.level));
assert(nmethods > 0); assert(nmethods < 256);
int filters[256];
int nfilters = prepareFilters(filters, filter_strategy, getFilters());
assert(nfilters > 0); assert(nfilters < 256);
#if 1
printf("compressWithFilters: m(%d):", nmethods);
for (int i = 0; i < nmethods; i++) printf(" %d", methods[i]);
printf(" f(%d):", nfilters);
for (int i = 0; i < nfilters; i++) printf(" %d", filters[i]);
printf("\n");
#endif
// update total_passes; previous (ui_total_passes > 0) means incremental
if (uip->ui_total_passes > 0)
uip->ui_total_passes -= 1;
if (filter_strategy < 0)
uip->ui_total_passes += nmethods;
else
uip->ui_total_passes += nfilters * nmethods;
// Working buffer for compressed data. Don't waste memory.
MemBuffer *otemp = &obuf;
MemBuffer otemp_buf;
// compress using all methods/filters
int nfilters_success_total = 0;
for (int mm = 0; mm < nmethods; mm++) // for all methods
{
assert(isValidCompressionMethod(methods[mm]));
unsigned hdr_c_len = 0;
if (hdr_buf && hdr_u_len)
{
if (nfilters_success_total != 0 && otemp == &obuf)
{
// do not overwrite obuf
otemp_buf.allocForCompression(compress_buf_len);
otemp = &otemp_buf;
}
int r = upx_compress(hdr_buf, hdr_u_len, *otemp, &hdr_c_len,
NULL, methods[mm], 10, NULL, NULL);
if (r != UPX_E_OK)
throwInternalError("header compression failed");
if (hdr_c_len >= hdr_u_len)
throwInternalError("header compression size increase");
}
int nfilters_success_mm = 0;
for (int ff = 0; ff < nfilters; ff++) // for all filters
{
assert(isValidFilter(filters[ff]));
ibuf.checkState();
obuf.checkState();
// get fresh packheader
ph = orig_ph;
ph.method = methods[mm];
ph.filter = filters[ff];
ph.overlap_overhead = 0;
// get fresh filter
Filter ft = orig_ft;
ft.init(ph.filter, orig_ft.addvalue);
// filter
optimizeFilter(&ft, ibuf + filter_off, filter_len);
bool success = ft.filter(ibuf + filter_off, filter_len);
if (ft.id != 0 && ft.calls == 0)
{
// filter did not do anything - no need to call ft.unfilter()
success = false;
}
if (!success)
{
// filter failed or was useless
if (filter_strategy >= 0)
{
// adjust ui passes
if (uip->ui_pass >= 0)
uip->ui_pass++;
}
continue;
}
// filter success
#if 0
printf("filter: id 0x%02x size %6d, calls %5d/%5d/%3d/%5d/%5d, cto 0x%02x\n",
ft.id, ft.buf_len, ft.calls, ft.noncalls, ft.wrongcalls, ft.firstcall, ft.lastcall, ft.cto);
#endif
if (nfilters_success_total != 0 && otemp == &obuf)
{
otemp_buf.allocForCompression(compress_buf_len);
otemp = &otemp_buf;
}
nfilters_success_total++;
nfilters_success_mm++;
ph.filter_cto = ft.cto;
ph.n_mru = ft.n_mru;
// compress
if (compress(ibuf + compress_buf_off, *otemp, cconf))
{
unsigned lsize = 0;
if (ph.c_len + lsize + hdr_c_len <= best_ph.c_len + best_ph_lsize + best_hdr_c_len)
{
// get results
ph.overlap_overhead = findOverlapOverhead(*otemp, overlap_range);
buildLoader(&ft);
lsize = getLoaderSize();
assert(lsize > 0);
}
#if 0
printf("\n%2d %02x: %d +%4d +%3d = %d (best: %d +%4d +%3d = %d)\n", ph.method, ph.filter,
ph.c_len, lsize, hdr_c_len, ph.c_len + lsize + hdr_c_len,
best_ph.c_len, best_ph_lsize, best_hdr_c_len, best_ph.c_len + best_ph_lsize + best_hdr_c_len);
#endif
bool update = false;
if (ph.c_len + lsize + hdr_c_len < best_ph.c_len + best_ph_lsize + best_hdr_c_len)
update = true;
else if (ph.c_len + lsize + hdr_c_len == best_ph.c_len + best_ph_lsize + best_hdr_c_len)
{
// prefer smaller loaders
if (lsize + hdr_c_len < best_ph_lsize + best_hdr_c_len)
update = true;
else if (lsize + hdr_c_len == best_ph_lsize + best_hdr_c_len)
{
// prefer less overlap_overhead
if (ph.overlap_overhead < best_ph.overlap_overhead)
update = true;
}
}
if (update)
{
assert((int)ph.overlap_overhead > 0);
// update obuf[] with best version
if (otemp != &obuf)
memcpy(obuf, *otemp, ph.c_len);
// save compression results
best_ph = ph;
best_ph_lsize = lsize;
best_hdr_c_len = hdr_c_len;
best_ft = ft;
}
}
// restore ibuf[] - unfilter with verify
ft.unfilter(ibuf + filter_off, filter_len, true);
//
ibuf.checkState();
obuf.checkState();
otemp->checkState();
//
if (filter_strategy < 0)
break;
}
assert(nfilters_success_mm > 0);
}
// postconditions 1)
assert(nfilters_success_total > 0);
assert(best_ph.u_len == orig_ph.u_len);
assert(best_ph.filter == best_ft.id);
assert(best_ph.filter_cto == best_ft.cto);
// FIXME assert(best_ph.n_mru == best_ft.n_mru);
// copy back results
this->ph = best_ph;
*parm_ft = best_ft;
// finally check compression ratio
if (best_ph.c_len + best_ph_lsize >= best_ph.u_len)
throwNotCompressible();
if (!checkCompressionRatio(best_ph.u_len, best_ph.c_len))
throwNotCompressible();
// postconditions 2)
assert(best_ph.overlap_overhead > 0);
// convenience
buildLoader(&best_ft);
}
/*
vi:ts=4:et:nowrap
*/