zdict.c

  1. /*
  2. * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. /*-**************************************
  11. * Tuning parameters
  12. ****************************************/
13. #define MINRATIO 4 /* minimum nb of appearances for a segment to be selected into dictionary */
  14. #define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
  15. #define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
  16. /*-**************************************
  17. * Compiler Options
  18. ****************************************/
  19. /* Unix Large Files support (>4GB) */
  20. #define _FILE_OFFSET_BITS 64
  21. #if (defined(__sun__) && (!defined(__LP64__))) /* Sun Solaris 32-bits requires specific definitions */
  22. # define _LARGEFILE_SOURCE
  23. #elif ! defined(__LP64__) /* No point defining Large file for 64 bit */
  24. # define _LARGEFILE64_SOURCE
  25. #endif
  26. /*-*************************************
  27. * Dependencies
  28. ***************************************/
  29. #include <stdlib.h> /* malloc, free */
  30. #include <string.h> /* memset */
  31. #include <stdio.h> /* fprintf, fopen, ftello64 */
  32. #include <time.h> /* clock */
  33. #include "../common/mem.h" /* read */
  34. #include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */
  35. #define HUF_STATIC_LINKING_ONLY
  36. #include "../common/huf.h" /* HUF_buildCTable, HUF_writeCTable */
  37. #include "../common/zstd_internal.h" /* includes zstd.h */
  38. #include "../common/xxhash.h" /* XXH64 */
  39. #include "divsufsort.h"
  40. #ifndef ZDICT_STATIC_LINKING_ONLY
  41. # define ZDICT_STATIC_LINKING_ONLY
  42. #endif
  43. #include "zdict.h"
  44. #include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */
  45. /*-*************************************
  46. * Constants
  47. ***************************************/
  48. #define KB *(1 <<10)
  49. #define MB *(1 <<20)
  50. #define GB *(1U<<30)
  51. #define DICTLISTSIZE_DEFAULT 10000
  52. #define NOISELENGTH 32
  53. static const int g_compressionLevel_default = 3;
  54. static const U32 g_selectivity_default = 9;
  55. /*-*************************************
  56. * Console display
  57. ***************************************/
  58. #define DISPLAY(...) { fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
  59. #define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */
  60. static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
  61. static void ZDICT_printHex(const void* ptr, size_t length)
  62. {
  63. const BYTE* const b = (const BYTE*)ptr;
  64. size_t u;
  65. for (u=0; u<length; u++) {
  66. BYTE c = b[u];
  67. if (c<32 || c>126) c = '.'; /* non-printable char */
  68. DISPLAY("%c", c);
  69. }
  70. }
  71. /*-********************************************************
  72. * Helper functions
  73. **********************************************************/
  74. unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }
  75. const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
  76. unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
  77. {
  78. if (dictSize < 8) return 0;
  79. if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
  80. return MEM_readLE32((const char*)dictBuffer + 4);
  81. }
  82. size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)
  83. {
  84. size_t headerSize;
  85. if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted);
  86. { unsigned offcodeMaxValue = MaxOff;
  87. ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));
  88. U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE);
  89. short* offcodeNCount = (short*)malloc((MaxOff+1)*sizeof(short));
  90. if (!bs || !wksp || !offcodeNCount) {
  91. headerSize = ERROR(memory_allocation);
  92. } else {
  93. ZSTD_reset_compressedBlockState(bs);
  94. headerSize = ZSTD_loadCEntropy(bs, wksp, offcodeNCount, &offcodeMaxValue, dictBuffer, dictSize);
  95. }
  96. free(bs);
  97. free(wksp);
  98. free(offcodeNCount);
  99. }
  100. return headerSize;
  101. }
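/* Illustrative usage sketch (not part of the original source) :
 * probe an arbitrary buffer for a zstd dictionary header.
 * `dictBuf` and `dictLen` are hypothetical caller-provided values. */
#if 0
static int looksLikeZstdDict(const void* dictBuf, size_t dictLen)
{
    return ZDICT_getDictID(dictBuf, dictLen) != 0;   /* 0 => no valid header (or dictLen < 8) */
}
#endif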
  102. /*-********************************************************
  103. * Dictionary training functions
  104. **********************************************************/
  105. static unsigned ZDICT_NbCommonBytes (size_t val)
  106. {
  107. if (MEM_isLittleEndian()) {
  108. if (MEM_64bits()) {
  109. # if defined(_MSC_VER) && defined(_WIN64)
  110. unsigned long r = 0;
  111. _BitScanForward64( &r, (U64)val );
  112. return (unsigned)(r>>3);
  113. # elif defined(__GNUC__) && (__GNUC__ >= 3)
  114. return (__builtin_ctzll((U64)val) >> 3);
  115. # else
  116. static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
  117. return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
  118. # endif
  119. } else { /* 32 bits */
  120. # if defined(_MSC_VER)
  121. unsigned long r=0;
  122. _BitScanForward( &r, (U32)val );
  123. return (unsigned)(r>>3);
  124. # elif defined(__GNUC__) && (__GNUC__ >= 3)
  125. return (__builtin_ctz((U32)val) >> 3);
  126. # else
  127. static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
  128. return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
  129. # endif
  130. }
  131. } else { /* Big Endian CPU */
  132. if (MEM_64bits()) {
  133. # if defined(_MSC_VER) && defined(_WIN64)
  134. unsigned long r = 0;
  135. _BitScanReverse64( &r, val );
  136. return (unsigned)(r>>3);
  137. # elif defined(__GNUC__) && (__GNUC__ >= 3)
  138. return (__builtin_clzll(val) >> 3);
  139. # else
  140. unsigned r;
  141. const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
  142. if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
  143. if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
  144. r += (!val);
  145. return r;
  146. # endif
  147. } else { /* 32 bits */
  148. # if defined(_MSC_VER)
  149. unsigned long r = 0;
  150. _BitScanReverse( &r, (unsigned long)val );
  151. return (unsigned)(r>>3);
  152. # elif defined(__GNUC__) && (__GNUC__ >= 3)
  153. return (__builtin_clz((U32)val) >> 3);
  154. # else
  155. unsigned r;
  156. if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
  157. r += (!val);
  158. return r;
  159. # endif
  160. } }
  161. }
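/* Example (illustrative) : on a little-endian 64-bit target,
 * ZDICT_NbCommonBytes(0x0000000012000000ULL) == 3,
 * i.e. the number of low-order zero bytes of the XOR between two matching runs. */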
  162. /*! ZDICT_count() :
  163. Count the nb of common bytes between 2 pointers.
  164. Note : this function presumes end of buffer followed by noisy guard band.
  165. */
  166. static size_t ZDICT_count(const void* pIn, const void* pMatch)
  167. {
  168. const char* const pStart = (const char*)pIn;
  169. for (;;) {
  170. size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
  171. if (!diff) {
  172. pIn = (const char*)pIn+sizeof(size_t);
  173. pMatch = (const char*)pMatch+sizeof(size_t);
  174. continue;
  175. }
  176. pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff);
  177. return (size_t)((const char*)pIn - pStart);
  178. }
  179. }
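/* Example (illustrative) : with pIn -> "lengthPrefix..." and pMatch -> "lengthSuffix...",
 * ZDICT_count() returns 6 (the common prefix "length"),
 * provided both buffers are followed by the noisy guard band mentioned above. */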
  180. typedef struct {
  181. U32 pos;
  182. U32 length;
  183. U32 savings;
  184. } dictItem;
  185. static void ZDICT_initDictItem(dictItem* d)
  186. {
  187. d->pos = 1;
  188. d->length = 0;
  189. d->savings = (U32)(-1);
  190. }
  191. #define LLIMIT 64 /* heuristic determined experimentally */
  192. #define MINMATCHLENGTH 7 /* heuristic determined experimentally */
  193. static dictItem ZDICT_analyzePos(
  194. BYTE* doneMarks,
  195. const int* suffix, U32 start,
  196. const void* buffer, U32 minRatio, U32 notificationLevel)
  197. {
  198. U32 lengthList[LLIMIT] = {0};
  199. U32 cumulLength[LLIMIT] = {0};
  200. U32 savings[LLIMIT] = {0};
  201. const BYTE* b = (const BYTE*)buffer;
  202. size_t maxLength = LLIMIT;
  203. size_t pos = suffix[start];
  204. U32 end = start;
  205. dictItem solution;
  206. /* init */
  207. memset(&solution, 0, sizeof(solution));
  208. doneMarks[pos] = 1;
  209. /* trivial repetition cases */
  210. if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))
  211. ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))
  212. ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {
  213. /* skip and mark segment */
  214. U16 const pattern16 = MEM_read16(b+pos+4);
  215. U32 u, patternEnd = 6;
  216. while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;
  217. if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;
  218. for (u=1; u<patternEnd; u++)
  219. doneMarks[pos+u] = 1;
  220. return solution;
  221. }
  222. /* look forward */
  223. { size_t length;
  224. do {
  225. end++;
  226. length = ZDICT_count(b + pos, b + suffix[end]);
  227. } while (length >= MINMATCHLENGTH);
  228. }
  229. /* look backward */
  230. { size_t length;
  231. do {
  232. length = ZDICT_count(b + pos, b + *(suffix+start-1));
  233. if (length >=MINMATCHLENGTH) start--;
  234. } while(length >= MINMATCHLENGTH);
  235. }
236. /* exit if a minimum nb of repetitions was not found */
  237. if (end-start < minRatio) {
  238. U32 idx;
  239. for(idx=start; idx<end; idx++)
  240. doneMarks[suffix[idx]] = 1;
  241. return solution;
  242. }
  243. { int i;
  244. U32 mml;
  245. U32 refinedStart = start;
  246. U32 refinedEnd = end;
  247. DISPLAYLEVEL(4, "\n");
  248. DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);
  249. DISPLAYLEVEL(4, "\n");
  250. for (mml = MINMATCHLENGTH ; ; mml++) {
  251. BYTE currentChar = 0;
  252. U32 currentCount = 0;
  253. U32 currentID = refinedStart;
  254. U32 id;
  255. U32 selectedCount = 0;
  256. U32 selectedID = currentID;
  257. for (id =refinedStart; id < refinedEnd; id++) {
  258. if (b[suffix[id] + mml] != currentChar) {
  259. if (currentCount > selectedCount) {
  260. selectedCount = currentCount;
  261. selectedID = currentID;
  262. }
  263. currentID = id;
  264. currentChar = b[ suffix[id] + mml];
  265. currentCount = 0;
  266. }
  267. currentCount ++;
  268. }
  269. if (currentCount > selectedCount) { /* for last */
  270. selectedCount = currentCount;
  271. selectedID = currentID;
  272. }
  273. if (selectedCount < minRatio)
  274. break;
  275. refinedStart = selectedID;
  276. refinedEnd = refinedStart + selectedCount;
  277. }
  278. /* evaluate gain based on new dict */
  279. start = refinedStart;
  280. pos = suffix[refinedStart];
  281. end = start;
  282. memset(lengthList, 0, sizeof(lengthList));
  283. /* look forward */
  284. { size_t length;
  285. do {
  286. end++;
  287. length = ZDICT_count(b + pos, b + suffix[end]);
  288. if (length >= LLIMIT) length = LLIMIT-1;
  289. lengthList[length]++;
  290. } while (length >=MINMATCHLENGTH);
  291. }
  292. /* look backward */
  293. { size_t length = MINMATCHLENGTH;
  294. while ((length >= MINMATCHLENGTH) & (start > 0)) {
  295. length = ZDICT_count(b + pos, b + suffix[start - 1]);
  296. if (length >= LLIMIT) length = LLIMIT - 1;
  297. lengthList[length]++;
  298. if (length >= MINMATCHLENGTH) start--;
  299. }
  300. }
  301. /* largest useful length */
  302. memset(cumulLength, 0, sizeof(cumulLength));
  303. cumulLength[maxLength-1] = lengthList[maxLength-1];
  304. for (i=(int)(maxLength-2); i>=0; i--)
  305. cumulLength[i] = cumulLength[i+1] + lengthList[i];
  306. for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;
  307. maxLength = i;
308. /* reduce maxLength if the segment ends inside repetitive data */
  309. { U32 l = (U32)maxLength;
  310. BYTE const c = b[pos + maxLength-1];
  311. while (b[pos+l-2]==c) l--;
  312. maxLength = l;
  313. }
  314. if (maxLength < MINMATCHLENGTH) return solution; /* skip : no long-enough solution */
  315. /* calculate savings */
  316. savings[5] = 0;
  317. for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)
  318. savings[i] = savings[i-1] + (lengthList[i] * (i-3));
  319. DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n",
  320. (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength);
  321. solution.pos = (U32)pos;
  322. solution.length = (U32)maxLength;
  323. solution.savings = savings[maxLength];
  324. /* mark positions done */
  325. { U32 id;
  326. for (id=start; id<end; id++) {
  327. U32 p, pEnd, length;
  328. U32 const testedPos = suffix[id];
  329. if (testedPos == pos)
  330. length = solution.length;
  331. else {
  332. length = (U32)ZDICT_count(b+pos, b+testedPos);
  333. if (length > solution.length) length = solution.length;
  334. }
  335. pEnd = (U32)(testedPos + length);
  336. for (p=testedPos; p<pEnd; p++)
  337. doneMarks[p] = 1;
  338. } } }
  339. return solution;
  340. }
  341. static int isIncluded(const void* in, const void* container, size_t length)
  342. {
  343. const char* const ip = (const char*) in;
  344. const char* const into = (const char*) container;
  345. size_t u;
  346. for (u=0; u<length; u++) { /* works because end of buffer is a noisy guard band */
  347. if (ip[u] != into[u]) break;
  348. }
  349. return u==length;
  350. }
  351. /*! ZDICT_tryMerge() :
  352. check if dictItem can be merged, do it if possible
  353. @return : id of destination elt, 0 if not merged
  354. */
  355. static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)
  356. {
  357. const U32 tableSize = table->pos;
  358. const U32 eltEnd = elt.pos + elt.length;
  359. const char* const buf = (const char*) buffer;
  360. /* tail overlap */
  361. U32 u; for (u=1; u<tableSize; u++) {
  362. if (u==eltNbToSkip) continue;
  363. if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */
  364. /* append */
  365. U32 const addedLength = table[u].pos - elt.pos;
  366. table[u].length += addedLength;
  367. table[u].pos = elt.pos;
  368. table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */
  369. table[u].savings += elt.length / 8; /* rough approx bonus */
  370. elt = table[u];
  371. /* sort : improve rank */
  372. while ((u>1) && (table[u-1].savings < elt.savings))
  373. table[u] = table[u-1], u--;
  374. table[u] = elt;
  375. return u;
  376. } }
  377. /* front overlap */
  378. for (u=1; u<tableSize; u++) {
  379. if (u==eltNbToSkip) continue;
  380. if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */
  381. /* append */
  382. int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
  383. table[u].savings += elt.length / 8; /* rough approx bonus */
  384. if (addedLength > 0) { /* otherwise, elt fully included into existing */
  385. table[u].length += addedLength;
  386. table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */
  387. }
  388. /* sort : improve rank */
  389. elt = table[u];
  390. while ((u>1) && (table[u-1].savings < elt.savings))
  391. table[u] = table[u-1], u--;
  392. table[u] = elt;
  393. return u;
  394. }
  395. if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
  396. if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
  397. size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
  398. table[u].pos = elt.pos;
  399. table[u].savings += (U32)(elt.savings * addedLength / elt.length);
  400. table[u].length = MIN(elt.length, table[u].length + 1);
  401. return u;
  402. }
  403. }
  404. }
  405. return 0;
  406. }
  407. static void ZDICT_removeDictItem(dictItem* table, U32 id)
  408. {
  409. /* convention : table[0].pos stores nb of elts */
  410. U32 const max = table[0].pos;
  411. U32 u;
  412. if (!id) return; /* protection, should never happen */
  413. for (u=id; u<max-1; u++)
  414. table[u] = table[u+1];
  415. table->pos--;
  416. }
  417. static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
  418. {
  419. /* merge if possible */
  420. U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
  421. if (mergeId) {
  422. U32 newMerge = 1;
  423. while (newMerge) {
  424. newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
  425. if (newMerge) ZDICT_removeDictItem(table, mergeId);
  426. mergeId = newMerge;
  427. }
  428. return;
  429. }
  430. /* insert */
  431. { U32 current;
  432. U32 nextElt = table->pos;
  433. if (nextElt >= maxSize) nextElt = maxSize-1;
  434. current = nextElt-1;
  435. while (table[current].savings < elt.savings) {
  436. table[current+1] = table[current];
  437. current--;
  438. }
  439. table[current+1] = elt;
  440. table->pos = nextElt+1;
  441. }
  442. }
  443. static U32 ZDICT_dictSize(const dictItem* dictList)
  444. {
  445. U32 u, dictSize = 0;
  446. for (u=1; u<dictList[0].pos; u++)
  447. dictSize += dictList[u].length;
  448. return dictSize;
  449. }
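/* Sketch (illustrative, relying on the convention stated in ZDICT_removeDictItem) :
 * dictList[0] is a sentinel whose `pos` field stores the number of entries in use
 * (sentinel included), so a freshly initialized list reports a content size of 0. */
#if 0
dictItem list[16];
ZDICT_initDictItem(list);            /* list[0].pos == 1 : no segment stored yet */
/* ZDICT_dictSize(list) == 0 here ; it sums the lengths of slots 1..pos-1 */
#endif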
  450. static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,
  451. const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */
  452. const size_t* fileSizes, unsigned nbFiles,
  453. unsigned minRatio, U32 notificationLevel)
  454. {
  455. int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));
  456. int* const suffix = suffix0+1;
  457. U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));
  458. BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks)); /* +16 for overflow security */
  459. U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));
  460. size_t result = 0;
  461. clock_t displayClock = 0;
  462. clock_t const refreshRate = CLOCKS_PER_SEC * 3 / 10;
  463. # define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
  464. if (ZDICT_clockSpan(displayClock) > refreshRate) \
  465. { displayClock = clock(); DISPLAY(__VA_ARGS__); \
  466. if (notificationLevel>=4) fflush(stderr); } }
  467. /* init */
  468. DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
  469. if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {
  470. result = ERROR(memory_allocation);
  471. goto _cleanup;
  472. }
  473. if (minRatio < MINRATIO) minRatio = MINRATIO;
  474. memset(doneMarks, 0, bufferSize+16);
  475. /* limit sample set size (divsufsort limitation)*/
  476. if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));
  477. while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];
  478. /* sort */
  479. DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));
480. { int const divSufSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);
481. if (divSufSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }
  482. }
  483. suffix[bufferSize] = (int)bufferSize; /* leads into noise */
  484. suffix0[0] = (int)bufferSize; /* leads into noise */
  485. /* build reverse suffix sort */
  486. { size_t pos;
  487. for (pos=0; pos < bufferSize; pos++)
  488. reverseSuffix[suffix[pos]] = (U32)pos;
  489. /* note filePos tracks borders between samples.
  490. It's not used at this stage, but planned to become useful in a later update */
  491. filePos[0] = 0;
  492. for (pos=1; pos<nbFiles; pos++)
  493. filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);
  494. }
  495. DISPLAYLEVEL(2, "finding patterns ... \n");
  496. DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);
  497. { U32 cursor; for (cursor=0; cursor < bufferSize; ) {
  498. dictItem solution;
  499. if (doneMarks[cursor]) { cursor++; continue; }
  500. solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
  501. if (solution.length==0) { cursor++; continue; }
  502. ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
  503. cursor += solution.length;
  504. DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
  505. } }
  506. _cleanup:
  507. free(suffix0);
  508. free(reverseSuffix);
  509. free(doneMarks);
  510. free(filePos);
  511. return result;
  512. }
  513. static void ZDICT_fillNoise(void* buffer, size_t length)
  514. {
  515. unsigned const prime1 = 2654435761U;
  516. unsigned const prime2 = 2246822519U;
  517. unsigned acc = prime1;
  518. size_t p=0;
  519. for (p=0; p<length; p++) {
  520. acc *= prime2;
  521. ((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);
  522. }
  523. }
  524. typedef struct
  525. {
  526. ZSTD_CDict* dict; /* dictionary */
  527. ZSTD_CCtx* zc; /* working context */
528. void* workPlace; /* must be allocated with at least ZSTD_BLOCKSIZE_MAX bytes */
  529. } EStats_ress_t;
  530. #define MAXREPOFFSET 1024
  531. static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,
  532. unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,
  533. const void* src, size_t srcSize,
  534. U32 notificationLevel)
  535. {
  536. size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog);
  537. size_t cSize;
  538. if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */
  539. { size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict);
  540. if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }
  541. }
  542. cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);
  543. if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; }
544. if (cSize) { /* if cSize==0, block is not compressible */
  545. const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);
  546. /* literals stats */
  547. { const BYTE* bytePtr;
  548. for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)
  549. countLit[*bytePtr]++;
  550. }
  551. /* seqStats */
  552. { U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
  553. ZSTD_seqToCodes(seqStorePtr);
  554. { const BYTE* codePtr = seqStorePtr->ofCode;
  555. U32 u;
  556. for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;
  557. }
  558. { const BYTE* codePtr = seqStorePtr->mlCode;
  559. U32 u;
  560. for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;
  561. }
  562. { const BYTE* codePtr = seqStorePtr->llCode;
  563. U32 u;
  564. for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;
  565. }
  566. if (nbSeq >= 2) { /* rep offsets */
  567. const seqDef* const seq = seqStorePtr->sequencesStart;
  568. U32 offset1 = seq[0].offset - 3;
  569. U32 offset2 = seq[1].offset - 3;
  570. if (offset1 >= MAXREPOFFSET) offset1 = 0;
  571. if (offset2 >= MAXREPOFFSET) offset2 = 0;
  572. repOffsets[offset1] += 3;
  573. repOffsets[offset2] += 1;
  574. } } }
  575. }
  576. static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)
  577. {
  578. size_t total=0;
  579. unsigned u;
  580. for (u=0; u<nbFiles; u++) total += fileSizes[u];
  581. return total;
  582. }
  583. typedef struct { U32 offset; U32 count; } offsetCount_t;
  584. static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)
  585. {
  586. U32 u;
  587. table[ZSTD_REP_NUM].offset = val;
  588. table[ZSTD_REP_NUM].count = count;
  589. for (u=ZSTD_REP_NUM; u>0; u--) {
  590. offsetCount_t tmp;
  591. if (table[u-1].count >= table[u].count) break;
  592. tmp = table[u-1];
  593. table[u-1] = table[u];
  594. table[u] = tmp;
  595. }
  596. }
  597. /* ZDICT_flatLit() :
  598. * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.
  599. * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode.
  600. */
  601. static void ZDICT_flatLit(unsigned* countLit)
  602. {
  603. int u;
  604. for (u=1; u<256; u++) countLit[u] = 2;
  605. countLit[0] = 4;
  606. countLit[253] = 1;
  607. countLit[254] = 1;
  608. }
  609. #define OFFCODE_MAX 30 /* only applicable to first block */
  610. static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,
  611. unsigned compressionLevel,
  612. const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles,
  613. const void* dictBuffer, size_t dictBufferSize,
  614. unsigned notificationLevel)
  615. {
  616. unsigned countLit[256];
  617. HUF_CREATE_STATIC_CTABLE(hufTable, 255);
  618. unsigned offcodeCount[OFFCODE_MAX+1];
  619. short offcodeNCount[OFFCODE_MAX+1];
  620. U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));
  621. unsigned matchLengthCount[MaxML+1];
  622. short matchLengthNCount[MaxML+1];
  623. unsigned litLengthCount[MaxLL+1];
  624. short litLengthNCount[MaxLL+1];
  625. U32 repOffset[MAXREPOFFSET];
  626. offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];
  627. EStats_ress_t esr = { NULL, NULL, NULL };
  628. ZSTD_parameters params;
  629. U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;
  630. size_t pos = 0, errorCode;
  631. size_t eSize = 0;
  632. size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
  633. size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);
  634. BYTE* dstPtr = (BYTE*)dstBuffer;
  635. /* init */
  636. DEBUGLOG(4, "ZDICT_analyzeEntropy");
  637. if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */
  638. for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */
  639. for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
  640. for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
  641. for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
  642. memset(repOffset, 0, sizeof(repOffset));
  643. repOffset[1] = repOffset[4] = repOffset[8] = 1;
  644. memset(bestRepOffset, 0, sizeof(bestRepOffset));
  645. if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
  646. params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
  647. esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);
  648. esr.zc = ZSTD_createCCtx();
  649. esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);
  650. if (!esr.dict || !esr.zc || !esr.workPlace) {
  651. eSize = ERROR(memory_allocation);
  652. DISPLAYLEVEL(1, "Not enough memory \n");
  653. goto _cleanup;
  654. }
  655. /* collect stats on all samples */
  656. for (u=0; u<nbFiles; u++) {
  657. ZDICT_countEStats(esr, &params,
  658. countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,
  659. (const char*)srcBuffer + pos, fileSizes[u],
  660. notificationLevel);
  661. pos += fileSizes[u];
  662. }
  663. /* analyze, build stats, starting with literals */
  664. { size_t maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
  665. if (HUF_isError(maxNbBits)) {
  666. eSize = maxNbBits;
  667. DISPLAYLEVEL(1, " HUF_buildCTable error \n");
  668. goto _cleanup;
  669. }
  670. if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */
  671. DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");
  672. ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */
  673. maxNbBits = HUF_buildCTable (hufTable, countLit, 255, huffLog);
  674. assert(maxNbBits==9);
  675. }
  676. huffLog = (U32)maxNbBits;
  677. }
  678. /* looking for most common first offsets */
  679. { U32 offset;
  680. for (offset=1; offset<MAXREPOFFSET; offset++)
  681. ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
  682. }
  683. /* note : the result of this phase should be used to better appreciate the impact on statistics */
  684. total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];
  685. errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax);
  686. if (FSE_isError(errorCode)) {
  687. eSize = errorCode;
  688. DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");
  689. goto _cleanup;
  690. }
  691. Offlog = (U32)errorCode;
  692. total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];
  693. errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML);
  694. if (FSE_isError(errorCode)) {
  695. eSize = errorCode;
  696. DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");
  697. goto _cleanup;
  698. }
  699. mlLog = (U32)errorCode;
  700. total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];
  701. errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL);
  702. if (FSE_isError(errorCode)) {
  703. eSize = errorCode;
  704. DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");
  705. goto _cleanup;
  706. }
  707. llLog = (U32)errorCode;
  708. /* write result to buffer */
  709. { size_t const hhSize = HUF_writeCTable(dstPtr, maxDstSize, hufTable, 255, huffLog);
  710. if (HUF_isError(hhSize)) {
  711. eSize = hhSize;
  712. DISPLAYLEVEL(1, "HUF_writeCTable error \n");
  713. goto _cleanup;
  714. }
  715. dstPtr += hhSize;
  716. maxDstSize -= hhSize;
  717. eSize += hhSize;
  718. }
  719. { size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);
  720. if (FSE_isError(ohSize)) {
  721. eSize = ohSize;
  722. DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");
  723. goto _cleanup;
  724. }
  725. dstPtr += ohSize;
  726. maxDstSize -= ohSize;
  727. eSize += ohSize;
  728. }
  729. { size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);
  730. if (FSE_isError(mhSize)) {
  731. eSize = mhSize;
  732. DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");
  733. goto _cleanup;
  734. }
  735. dstPtr += mhSize;
  736. maxDstSize -= mhSize;
  737. eSize += mhSize;
  738. }
  739. { size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);
  740. if (FSE_isError(lhSize)) {
  741. eSize = lhSize;
  742. DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");
  743. goto _cleanup;
  744. }
  745. dstPtr += lhSize;
  746. maxDstSize -= lhSize;
  747. eSize += lhSize;
  748. }
  749. if (maxDstSize<12) {
  750. eSize = ERROR(dstSize_tooSmall);
  751. DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");
  752. goto _cleanup;
  753. }
  754. # if 0
  755. MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);
  756. MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);
  757. MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);
  758. #else
  759. /* at this stage, we don't use the result of "most common first offset",
  760. as the impact of statistics is not properly evaluated */
  761. MEM_writeLE32(dstPtr+0, repStartValue[0]);
  762. MEM_writeLE32(dstPtr+4, repStartValue[1]);
  763. MEM_writeLE32(dstPtr+8, repStartValue[2]);
  764. #endif
  765. eSize += 12;
  766. _cleanup:
  767. ZSTD_freeCDict(esr.dict);
  768. ZSTD_freeCCtx(esr.zc);
  769. free(esr.workPlace);
  770. return eSize;
  771. }
  772. size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
  773. const void* customDictContent, size_t dictContentSize,
  774. const void* samplesBuffer, const size_t* samplesSizes,
  775. unsigned nbSamples, ZDICT_params_t params)
  776. {
  777. size_t hSize;
  778. #define HBUFFSIZE 256 /* should prove large enough for all entropy headers */
  779. BYTE header[HBUFFSIZE];
  780. int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
  781. U32 const notificationLevel = params.notificationLevel;
  782. /* check conditions */
  783. DEBUGLOG(4, "ZDICT_finalizeDictionary");
  784. if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);
  785. if (dictContentSize < ZDICT_CONTENTSIZE_MIN) return ERROR(srcSize_wrong);
  786. if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);
  787. /* dictionary header */
  788. MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);
  789. { U64 const randomID = XXH64(customDictContent, dictContentSize, 0);
  790. U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
  791. U32 const dictID = params.dictID ? params.dictID : compliantID;
  792. MEM_writeLE32(header+4, dictID);
  793. }
  794. hSize = 8;
  795. /* entropy tables */
  796. DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
  797. DISPLAYLEVEL(2, "statistics ... \n");
  798. { size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,
  799. compressionLevel,
  800. samplesBuffer, samplesSizes, nbSamples,
  801. customDictContent, dictContentSize,
  802. notificationLevel);
  803. if (ZDICT_isError(eSize)) return eSize;
  804. hSize += eSize;
  805. }
  806. /* copy elements in final buffer ; note : src and dst buffer can overlap */
  807. if (hSize + dictContentSize > dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize;
  808. { size_t const dictSize = hSize + dictContentSize;
  809. char* dictEnd = (char*)dictBuffer + dictSize;
  810. memmove(dictEnd - dictContentSize, customDictContent, dictContentSize);
  811. memcpy(dictBuffer, header, hSize);
  812. return dictSize;
  813. }
  814. }
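/* Illustrative usage sketch (not part of the original source) :
 * wrap a caller-provided raw content block into a complete zstd dictionary.
 * `dictBuffer`, `dictBufferCapacity`, `rawContent`, `rawContentSize`,
 * `samples`, `sampleSizes` and `nbSamples` are hypothetical caller data. */
#if 0
{   ZDICT_params_t zp;
    memset(&zp, 0, sizeof(zp));     /* default compression level, auto dictID */
    {   size_t const dictSize = ZDICT_finalizeDictionary(
                        dictBuffer, dictBufferCapacity,
                        rawContent, rawContentSize,
                        samples, sampleSizes, nbSamples, zp);
        if (ZDICT_isError(dictSize)) { /* handle error */ }
}   }
#endif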
  815. static size_t ZDICT_addEntropyTablesFromBuffer_advanced(
  816. void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
  817. const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
  818. ZDICT_params_t params)
  819. {
  820. int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel;
  821. U32 const notificationLevel = params.notificationLevel;
  822. size_t hSize = 8;
  823. /* calculate entropy tables */
  824. DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */
  825. DISPLAYLEVEL(2, "statistics ... \n");
  826. { size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
  827. compressionLevel,
  828. samplesBuffer, samplesSizes, nbSamples,
  829. (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,
  830. notificationLevel);
  831. if (ZDICT_isError(eSize)) return eSize;
  832. hSize += eSize;
  833. }
  834. /* add dictionary header (after entropy tables) */
  835. MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);
  836. { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
  837. U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
  838. U32 const dictID = params.dictID ? params.dictID : compliantID;
  839. MEM_writeLE32((char*)dictBuffer+4, dictID);
  840. }
  841. if (hSize + dictContentSize < dictBufferCapacity)
  842. memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
  843. return MIN(dictBufferCapacity, hSize+dictContentSize);
  844. }
  845. /* Hidden declaration for dbio.c */
  846. size_t ZDICT_trainFromBuffer_unsafe_legacy(
  847. void* dictBuffer, size_t maxDictSize,
  848. const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
  849. ZDICT_legacy_params_t params);
  850. /*! ZDICT_trainFromBuffer_unsafe_legacy() :
  851. * Warning : `samplesBuffer` must be followed by noisy guard band.
  852. * @return : size of dictionary, or an error code which can be tested with ZDICT_isError()
  853. */
  854. size_t ZDICT_trainFromBuffer_unsafe_legacy(
  855. void* dictBuffer, size_t maxDictSize,
  856. const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
  857. ZDICT_legacy_params_t params)
  858. {
  859. U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));
  860. dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));
  861. unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;
  862. unsigned const minRep = (selectivity > 30) ? MINRATIO : nbSamples >> selectivity;
  863. size_t const targetDictSize = maxDictSize;
  864. size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
  865. size_t dictSize = 0;
  866. U32 const notificationLevel = params.zParams.notificationLevel;
  867. /* checks */
  868. if (!dictList) return ERROR(memory_allocation);
  869. if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */
  870. if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */
  871. /* init */
  872. ZDICT_initDictItem(dictList);
  873. /* build dictionary */
  874. ZDICT_trainBuffer_legacy(dictList, dictListSize,
  875. samplesBuffer, samplesBuffSize,
  876. samplesSizes, nbSamples,
  877. minRep, notificationLevel);
  878. /* display best matches */
  879. if (params.zParams.notificationLevel>= 3) {
  880. unsigned const nb = MIN(25, dictList[0].pos);
  881. unsigned const dictContentSize = ZDICT_dictSize(dictList);
  882. unsigned u;
  883. DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);
  884. DISPLAYLEVEL(3, "list %u best segments \n", nb-1);
  885. for (u=1; u<nb; u++) {
  886. unsigned const pos = dictList[u].pos;
  887. unsigned const length = dictList[u].length;
  888. U32 const printedLength = MIN(40, length);
  889. if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {
  890. free(dictList);
  891. return ERROR(GENERIC); /* should never happen */
  892. }
  893. DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",
  894. u, length, pos, (unsigned)dictList[u].savings);
  895. ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);
  896. DISPLAYLEVEL(3, "| \n");
  897. } }
  898. /* create dictionary */
  899. { unsigned dictContentSize = ZDICT_dictSize(dictList);
  900. if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */
  901. if (dictContentSize < targetDictSize/4) {
  902. DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);
  903. if (samplesBuffSize < 10 * targetDictSize)
  904. DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));
  905. if (minRep > MINRATIO) {
  906. DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
  907. DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
  908. }
  909. }
  910. if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
  911. unsigned proposedSelectivity = selectivity-1;
  912. while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
  913. DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);
  914. DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
  915. DISPLAYLEVEL(2, "! always test dictionary efficiency on real samples \n");
  916. }
  917. /* limit dictionary size */
  918. { U32 const max = dictList->pos; /* convention : nb of useful elts within dictList */
  919. U32 currentSize = 0;
  920. U32 n; for (n=1; n<max; n++) {
  921. currentSize += dictList[n].length;
  922. if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }
  923. }
  924. dictList->pos = n;
  925. dictContentSize = currentSize;
  926. }
  927. /* build dict content */
  928. { U32 u;
  929. BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;
  930. for (u=1; u<dictList->pos; u++) {
  931. U32 l = dictList[u].length;
  932. ptr -= l;
  933. if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); } /* should not happen */
  934. memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);
  935. } }
  936. dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,
  937. samplesBuffer, samplesSizes, nbSamples,
  938. params.zParams);
  939. }
  940. /* clean up */
  941. free(dictList);
  942. return dictSize;
  943. }
  944. /* ZDICT_trainFromBuffer_legacy() :
945. * issue : samplesBuffer needs to be followed by a noisy guard band.
  946. * work around : duplicate the buffer, and add the noise */
  947. size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,
  948. const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
  949. ZDICT_legacy_params_t params)
  950. {
  951. size_t result;
  952. void* newBuff;
  953. size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);
  954. if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0; /* not enough content => no dictionary */
  955. newBuff = malloc(sBuffSize + NOISELENGTH);
  956. if (!newBuff) return ERROR(memory_allocation);
  957. memcpy(newBuff, samplesBuffer, sBuffSize);
  958. ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */
  959. result =
  960. ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,
  961. samplesSizes, nbSamples, params);
  962. free(newBuff);
  963. return result;
  964. }
  965. size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,
  966. const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
  967. {
  968. ZDICT_fastCover_params_t params;
  969. DEBUGLOG(3, "ZDICT_trainFromBuffer");
  970. memset(&params, 0, sizeof(params));
  971. params.d = 8;
  972. params.steps = 4;
973. /* Default to level 3 since no compression level information is available */
  974. params.zParams.compressionLevel = 3;
  975. #if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)
  976. params.zParams.notificationLevel = DEBUGLEVEL;
  977. #endif
  978. return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,
  979. samplesBuffer, samplesSizes, nbSamples,
  980. &params);
  981. }
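/* Illustrative usage sketch (not part of the original source) :
 * train a dictionary from samples concatenated back-to-back in one buffer.
 * `dictBuffer`, `samples`, `sampleSizes` and `nbSamples` are hypothetical caller data. */
#if 0
{   size_t const dictSize = ZDICT_trainFromBuffer(dictBuffer, 110 KB,
                                                  samples, sampleSizes, nbSamples);
    if (ZDICT_isError(dictSize)) { /* not enough samples, or dictBuffer too small */ }
}
#endif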
  982. size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
  983. const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)
  984. {
  985. ZDICT_params_t params;
  986. memset(&params, 0, sizeof(params));
  987. return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,
  988. samplesBuffer, samplesSizes, nbSamples,
  989. params);
  990. }