My apologies for leaving this thread for so long.
So indeed we use 10.8.1. We did not modify the original algorithm.
To decompress we use the following parameters (only m_decompress_flags is a flags field):
Code: Select all
// Explicit initialization of every lzham_decompress_params field we use.
decompressParams.m_struct_size = sizeof(lzham_decompress_params);       // struct size as expected by the lzham API
decompressParams.m_dict_size_log2 = dictionarySize;                     // log2 of the dictionary size, computed per input (see calculation below)
decompressParams.m_table_update_rate = LZHAM_FASTEST_TABLE_UPDATE_RATE; // favor speed for table updates, per the constant's name
decompressParams.m_decompress_flags = 0;                                // no optional flags set
decompressParams.m_num_seed_bytes = 0;                                  // no seed dictionary
decompressParams.m_pSeed_bytes = nullptr;
decompressParams.m_table_max_update_interval = 0;                       // 0 presumably selects the library default — confirm in lzham.h
decompressParams.m_table_update_interval_slow_rate = 0;                 // 0 presumably selects the library default — confirm in lzham.h
We use variable-sized compression dictionaries. Everywhere compression is used, dictionarySize is calculated from the size of the input as:
Code: Select all
// Clamp floor(log2(uncompressedSize)) to [LZHAM_MIN_DICT_SIZE_LOG2, 25];
// the upper bound of 25 caps the dictionary at 2^25 bytes = 32 MB.
std::min<uint32_t>(std::max<uint32_t>(floor_to_pow_2(uncompressedSize), LZHAM_MIN_DICT_SIZE_LOG2), 25);
floor_to_pow_2 is defined as:
Code: Select all
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
// http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
// Despite the name, this returns floor(log2(v)) — the exponent of the largest
// power of two <= v — not the power of two itself. That is exactly what the
// caller feeds into m_dict_size_log2.
// Precondition: v != 0. For v == 0 the table entry is -1, which wraps to
// 0xFFFFFFFF in the uint32_t return value (harmless here only because the
// caller clamps the result with std::min/std::max).
uint32_t floor_to_pow_2(uint32_t v)
{
    // LogTable256[i] == floor(log2(i)) for i in [1, 255]; LogTable256[0] == -1.
    // NOTE: must be 'signed char', not plain 'char' — plain 'char' is unsigned
    // on some ABIs (e.g. ARM, PowerPC Linux), which would silently turn the
    // -1 entry into 255.
    static const signed char LogTable256[256] =
    {
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
        -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
        LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
        LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
#undef LT
    };

    unsigned int t, tt; // temporaries
    // Locate the highest non-zero byte of v, then add the byte's bit offset
    // (24/16/8/0) to the table lookup for that byte.
    if ((tt = (v >> 16)) != 0)
        return (t = (tt >> 8)) ? 24 + LogTable256[t] : 16 + LogTable256[tt];
    return (t = v >> 8) ? 8 + LogTable256[t] : LogTable256[v];
}
I hope this helps you guys!