zstd_lazy.c 94 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185
  1. /*
  2. * Copyright (c) Yann Collet, Facebook, Inc.
  3. * All rights reserved.
  4. *
  5. * This source code is licensed under both the BSD-style license (found in the
  6. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  7. * in the COPYING file in the root directory of this source tree).
  8. * You may select, at your option, one of the above-listed licenses.
  9. */
  10. #include "zstd_compress_internal.h"
  11. #include "zstd_lazy.h"
  12. /*-*************************************
  13. * Binary Tree search
  14. ***************************************/
/** ZSTD_updateDUBT() :
 *  Insert every position in [ms->nextToUpdate, ip) into the hash table,
 *  and link each one at the head of its bucket's chain inside the chain
 *  table, tagged ZSTD_DUBT_UNSORTED_MARK.  Positions are NOT sorted into
 *  the binary tree here; sorting is deferred until a search visits the
 *  candidates (see ZSTD_insertDUBT1 / ZSTD_DUBT_findBestMatch). */
static void
ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hashLog = cParams->hashLog;

    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;   /* each tree node occupies 2 cells */
    U32  const btMask = (1 << btLog) - 1;

    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    if (idx != target)
        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
                    idx, target, ms->window.dictLimit);
    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
    (void)iend;

    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
    for ( ; idx < target ; idx++) {
        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
        U32    const matchIndex = hashTable[h];

        U32*  const nextCandidatePtr = bt + 2*(idx&btMask);
        U32*  const sortMarkPtr = nextCandidatePtr + 1;

        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
        hashTable[h] = idx;              /* Update Hash Table */
        *nextCandidatePtr = matchIndex;  /* update BT like a chain */
        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;  /* flags the node as "not yet sorted into the tree" */
    }
    ms->nextToUpdate = target;
}
/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : curr >= btlow == (curr - btmask)
 *  doesn't fail */
static void
ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
                 U32 curr, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow,
                 const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    /* curr may sit in the extDict segment : select the matching base */
    const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
    const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowValid = ms->window.lowLimit;
    U32 const maxDistance = 1U << cParams->windowLog;
    /* clamp the valid window to at most maxDistance behind curr */
    U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;

    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                curr, dictLimit, windowLow);
    assert(curr >= btLow);
    assert(ip < iend);   /* condition for ZSTD_count */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < curr);
        /* note : all candidates are now supposed sorted,
         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */

        if ( (dictMode != ZSTD_extDict)
          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
          || (curr < dictLimit) /* both in extDict */) {
            /* candidate and curr are comparable within a single segment */
            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
                                     || (matchIndex+matchLength >= dictLimit)) ?
                                        base : dictBase;
            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                 || (curr < dictLimit) );
            match = mBase + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            /* candidate starts in extDict : count across the segment boundary */
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
        }

        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
                    curr, matchIndex, (U32)matchLength);

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
                        matchIndex, btLow, nextPtr[1]);
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
                        matchIndex, btLow, nextPtr[0]);
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    /* terminate both dangling branches of the re-threaded subtree */
    *smallerPtr = *largerPtr = 0;
}
/** ZSTD_DUBT_findBetterDictMatch() :
 *  Descend the (read-only) binary tree of the attached dictMatchState,
 *  looking for a match longer than bestLength.  A longer match is only
 *  retained when its extra length outweighs its larger offset cost
 *  (the 4*deltaLength vs highbit(offset) heuristic below).
 *  Updates *offsetPtr when an improvement is kept.
 * @return : the (possibly improved) best match length. */
static size_t
ZSTD_DUBT_findBetterDictMatch (
        ZSTD_matchState_t* ms,
        const BYTE* const ip, const BYTE* const iend,
        size_t* offsetPtr,
        size_t bestLength,
        U32 nbCompares,
        U32 const mls,
        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_matchState_t * const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
    const U32 * const dictHashTable = dms->hashTable;
    U32         const hashLog = dmsCParams->hashLog;
    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32               dictMatchIndex = dictHashTable[h];

    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    U32         const curr = (U32)(ip-base);
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictEnd = dms->window.nextSrc;
    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
    U32         const dictLowLimit = dms->window.lowLimit;
    /* converts a dict index into the current window's index space */
    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;

    U32* const dictBt = dms->chainTable;
    U32  const btLog  = dmsCParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;
    U32  const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;

    size_t commonLengthSmaller=0, commonLengthLarger=0;

    (void)dictMode;
    assert(dictMode == ZSTD_dictMatchState);

    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match = dictBase + dictMatchIndex;
        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
        if (dictMatchIndex+matchLength >= dictHighLimit)
            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */

        if (matchLength > bestLength) {
            U32 matchIndex = dictMatchIndex + dictIndexDelta;
            /* keep only if the length gain beats the extra offset cost */
            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
                    curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
            }
            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthSmaller = matchLength;   /* all smaller will now have at least this guaranteed common length */
            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthLarger = matchLength;
            dictMatchIndex = nextPtr[0];
        }
    }

    if (bestLength >= MINMATCH) {
        U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                    curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
    }
    return bestLength;
}
/** ZSTD_DUBT_findBestMatch() :
 *  Lazy binary-tree search, in three phases :
 *  1) walk back the chain of still-unsorted candidates, reversing the
 *     links into the sortMark cells,
 *  2) batch-sort those candidates into the tree via ZSTD_insertDUBT1(),
 *  3) descend the tree to find the longest match, repairing tree links
 *     along the way; optionally extend the search into the attached
 *     dictMatchState (ZSTD_DUBT_findBetterDictMatch).
 *  Writes the offset code into *offsetPtr.
 * @return : length of the best match found (0 if none). */
static size_t
ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offsetPtr,
                        U32 const mls,
                        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32          matchIndex  = hashTable[h];

    const BYTE* const base = ms->window.base;
    U32    const curr = (U32)(ip-base);
    U32    const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);

    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32    const btLow = (btMask >= curr) ? 0 : curr - btMask;
    U32    const unsortLimit = MAX(btLow, windowLow);

    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
    U32          nbCompares = 1U << cParams->searchLog;
    U32          nbCandidates = nbCompares;
    U32          previousCandidate = 0;

    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
    assert(ip <= iend-8);   /* required for h calculation */
    assert(dictMode != ZSTD_dedicatedDictSearch);

    /* reach end of unsorted candidates list */
    while ( (matchIndex > unsortLimit)
         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
         && (nbCandidates > 1) ) {
        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                    matchIndex);
        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */
        previousCandidate = matchIndex;
        matchIndex = *nextCandidate;
        nextCandidate = bt + 2*(matchIndex&btMask);
        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
        nbCandidates --;
    }

    /* nullify last candidate if it's still unsorted
     * simplification, detrimental to compression ratio, beneficial for speed */
    if ( (matchIndex > unsortLimit)
      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                    matchIndex);
        *nextCandidate = *unsortedMark = 0;
    }

    /* batch sort stacked candidates */
    matchIndex = previousCandidate;
    while (matchIndex) {  /* will end on matchIndex == 0 */
        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
        ZSTD_insertDUBT1(ms, matchIndex, iend,
                         nbCandidates, unsortLimit, dictMode);
        matchIndex = nextCandidateIdx;
        nbCandidates++;
    }

    /* find longest match */
    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
        const BYTE* const dictBase = ms->window.dictBase;
        const U32 dictLimit = ms->window.dictLimit;
        const BYTE* const dictEnd = dictBase + dictLimit;
        const BYTE* const prefixStart = base + dictLimit;
        U32* smallerPtr = bt + 2*(curr&btMask);
        U32* largerPtr  = bt + 2*(curr&btMask) + 1;
        U32 matchEndIdx = curr + 8 + 1;   /* farthest referenced position, used to advance nextToUpdate */
        U32 dummy32;   /* to be nullified at the end */
        size_t bestLength = 0;

        matchIndex  = hashTable[h];
        hashTable[h] = curr;   /* Update Hash Table */

        while (nbCompares-- && (matchIndex > windowLow)) {
            U32* const nextPtr = bt + 2*(matchIndex & btMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match;

            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
                match = base + matchIndex;
                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
            } else {
                match = dictBase + matchIndex;
                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
                if (matchIndex+matchLength >= dictLimit)
                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
            }

            if (matchLength > bestLength) {
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                /* keep only if the length gain beats the extra offset cost */
                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                    if (dictMode == ZSTD_dictMatchState) {
                        nbCompares = 0; /* in addition to avoiding checking any
                                         * further in this loop, make sure we
                                         * skip checking in the dictionary. */
                    }
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (match[matchLength] < ip[matchLength]) {
                /* match is smaller than current */
                *smallerPtr = matchIndex;             /* update smaller idx */
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                *largerPtr = matchIndex;
                commonLengthLarger = matchLength;
                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                largerPtr = nextPtr;
                matchIndex = nextPtr[0];
        }   }

        /* terminate both dangling branches of the re-threaded subtree */
        *smallerPtr = *largerPtr = 0;

        if (dictMode == ZSTD_dictMatchState && nbCompares) {
            bestLength = ZSTD_DUBT_findBetterDictMatch(
                    ms, ip, iend,
                    offsetPtr, bestLength, nbCompares,
                    mls, dictMode);
        }

        assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
        if (bestLength >= MINMATCH) {
            U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                        curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
        }
        return bestLength;
    }
}
  328. /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
  329. FORCE_INLINE_TEMPLATE size_t
  330. ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
  331. const BYTE* const ip, const BYTE* const iLimit,
  332. size_t* offsetPtr,
  333. const U32 mls /* template */,
  334. const ZSTD_dictMode_e dictMode)
  335. {
  336. DEBUGLOG(7, "ZSTD_BtFindBestMatch");
  337. if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
  338. ZSTD_updateDUBT(ms, ip, iLimit, mls);
  339. return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
  340. }
  341. static size_t
  342. ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms,
  343. const BYTE* ip, const BYTE* const iLimit,
  344. size_t* offsetPtr)
  345. {
  346. switch(ms->cParams.minMatch)
  347. {
  348. default : /* includes case 3 */
  349. case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
  350. case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
  351. case 7 :
  352. case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
  353. }
  354. }
  355. static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
  356. ZSTD_matchState_t* ms,
  357. const BYTE* ip, const BYTE* const iLimit,
  358. size_t* offsetPtr)
  359. {
  360. switch(ms->cParams.minMatch)
  361. {
  362. default : /* includes case 3 */
  363. case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
  364. case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
  365. case 7 :
  366. case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
  367. }
  368. }
  369. static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
  370. ZSTD_matchState_t* ms,
  371. const BYTE* ip, const BYTE* const iLimit,
  372. size_t* offsetPtr)
  373. {
  374. switch(ms->cParams.minMatch)
  375. {
  376. default : /* includes case 3 */
  377. case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
  378. case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
  379. case 7 :
  380. case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
  381. }
  382. }
  383. /***********************************
  384. * Dedicated dict search
  385. ***********************************/
/** ZSTD_dedicatedDictSearch_lazy_loadDictionary() :
 *  Index dictionary content [base + ms->nextToUpdate, ip) into the dedicated
 *  dict search (DDSS) structure :
 *  - each hash bucket holds `bucketSize-1` most-recent positions (the cache),
 *  - the bucket's last cell stores a packed pointer
 *    ((position in chainTable) << 8 | chain length) into a re-packed
 *    chain table holding the older positions.
 *  The hash table is temporarily reused (bucketSize pretended == 1) as
 *  scratch space to build a conventional chain table first. */
void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32* const hashTable = ms->hashTable;
    U32* const chainTable = ms->chainTable;
    U32 const chainSize = 1 << ms->cParams.chainLog;
    U32 idx = ms->nextToUpdate;
    U32 const minChain = chainSize < target ? target - chainSize : idx;
    U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
    U32 const cacheSize = bucketSize - 1;
    U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
    U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;   /* chain length is stored in 8 bits */

    /* We know the hashtable is oversized by a factor of `bucketSize`.
     * We are going to temporarily pretend `bucketSize == 1`, keeping only a
     * single entry. We will use the rest of the space to construct a temporary
     * chaintable.
     */
    U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
    U32* const tmpHashTable = hashTable;
    U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
    U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
    U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;

    U32 hashIdx;

    assert(ms->cParams.chainLog <= 24);
    assert(ms->cParams.hashLog > ms->cParams.chainLog);
    assert(idx != 0);
    assert(tmpMinChain <= minChain);

    /* fill conventional hash table and conventional chain table */
    for ( ; idx < target; idx++) {
        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
        if (idx >= tmpMinChain) {
            tmpChainTable[idx - tmpMinChain] = hashTable[h];
        }
        tmpHashTable[h] = idx;
    }

    /* sort chains into ddss chain table */
    {
        U32 chainPos = 0;
        for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
            U32 count;
            U32 countBeyondMinChain = 0;
            U32 i = tmpHashTable[hashIdx];
            for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
                /* skip through the chain to the first position that won't be
                 * in the hash cache bucket */
                if (i < minChain) {
                    countBeyondMinChain++;
                }
                i = tmpChainTable[i - tmpMinChain];
            }
            if (count == cacheSize) {
                /* chain is longer than the cache : spill the remainder into
                 * the packed DDSS chain table, up to chainLimit entries */
                for (count = 0; count < chainLimit;) {
                    if (i < minChain) {
                        if (!i || ++countBeyondMinChain > cacheSize) {
                            /* only allow pulling `cacheSize` number of entries
                             * into the cache or chainTable beyond `minChain`,
                             * to replace the entries pulled out of the
                             * chainTable into the cache. This lets us reach
                             * back further without increasing the total number
                             * of entries in the chainTable, guaranteeing the
                             * DDSS chain table will fit into the space
                             * allocated for the regular one. */
                            break;
                        }
                    }
                    chainTable[chainPos++] = i;
                    count++;
                    if (i < tmpMinChain) {
                        break;
                    }
                    i = tmpChainTable[i - tmpMinChain];
                }
            } else {
                count = 0;
            }
            if (count) {
                /* pack (start position << 8 | length) of this bucket's chain */
                tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
            } else {
                tmpHashTable[hashIdx] = 0;
            }
        }
        assert(chainPos <= chainSize); /* I believe this is guaranteed... */
    }

    /* move chain pointers into the last entry of each hash bucket */
    for (hashIdx = (1 << hashLog); hashIdx; ) {
        U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
        U32 const chainPackedPointer = tmpHashTable[hashIdx];
        U32 i;
        for (i = 0; i < cacheSize; i++) {
            hashTable[bucketIdx + i] = 0;
        }
        hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
    }

    /* fill the buckets of the hash table */
    for (idx = ms->nextToUpdate; idx < target; idx++) {
        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
                   << ZSTD_LAZY_DDSS_BUCKET_LOG;
        U32 i;
        /* Shift hash cache down 1. */
        for (i = cacheSize - 1; i; i--)
            hashTable[h + i] = hashTable[h + i - 1];
        hashTable[h] = idx;
    }

    ms->nextToUpdate = target;
}
/* Returns the longest match length found in the dedicated dict search structure.
 * If none are longer than the argument ml, then ml will be returned.
 */
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
                                            const ZSTD_matchState_t* const dms,
                                            const BYTE* const ip, const BYTE* const iLimit,
                                            const BYTE* const prefixStart, const U32 curr,
                                            const U32 dictLimit, const size_t ddsIdx) {
    const U32 ddsLowestIndex  = dms->window.dictLimit;
    const BYTE* const ddsBase = dms->window.base;
    const BYTE* const ddsEnd  = dms->window.nextSrc;
    const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
    /* Delta that maps a DDS-local index into the current compression's index space. */
    const U32 ddsIndexDelta   = dictLimit - ddsSize;
    const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
    /* The last slot of each bucket holds a packed chain pointer, so only
     * bucketSize-1 slots contain directly searchable candidates. */
    const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
    U32 ddsAttempt;
    U32 matchIndex;

    /* Prefetch every candidate stored directly in the hash bucket. */
    for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
        PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
    }

    /* Prefetch the head of the overflow chain.
     * The last bucket entry is packed as (chainIndex << 8) | chainLength. */
    {   U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
        U32 const chainIndex = chainPackedPointer >> 8;

        PREFETCH_L1(&dms->chainTable[chainIndex]);
    }

    /* Pass 1: scan the direct bucket entries. */
    for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
        size_t currentMl=0;
        const BYTE* match;
        matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
        match = ddsBase + matchIndex;

        /* An index of 0 marks an empty slot; no further entries follow. */
        if (!matchIndex) {
            return ml;
        }

        /* guaranteed by table construction */
        (void)ddsLowestIndex;
        assert(matchIndex >= ddsLowestIndex);
        assert(match+4 <= ddsEnd);
        if (MEM_read32(match) == MEM_read32(ip)) {
            /* assumption : matchIndex <= dictLimit-4 (by table construction) */
            currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
            if (ip+currentMl == iLimit) {
                /* best possible, avoids read overflow on next attempt */
                return ml;
            }
        }
    }

    /* Pass 2: walk the overflow chain with whatever attempts remain. */
    {   U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
        U32 chainIndex = chainPackedPointer >> 8;
        U32 const chainLength = chainPackedPointer & 0xFF;
        U32 const chainAttempts = nbAttempts - ddsAttempt;
        U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
        U32 chainAttempt;

        /* Prefetch all chain candidates before comparing any of them. */
        for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
            PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
        }

        for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
            size_t currentMl=0;
            const BYTE* match;
            matchIndex = dms->chainTable[chainIndex];
            match = ddsBase + matchIndex;

            /* guaranteed by table construction */
            assert(matchIndex >= ddsLowestIndex);
            assert(match+4 <= ddsEnd);
            if (MEM_read32(match) == MEM_read32(ip)) {
                /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
            }

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }
        }
    }
    return ml;
}
/* *********************************
*  Hash Chain
***********************************/
/* Maps an absolute position to its slot in the circular chain table. */
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
  580. /* Update chains up to ip (excluded)
  581. Assumption : always within prefix (i.e. not within extDict) */
  582. FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
  583. ZSTD_matchState_t* ms,
  584. const ZSTD_compressionParameters* const cParams,
  585. const BYTE* ip, U32 const mls)
  586. {
  587. U32* const hashTable = ms->hashTable;
  588. const U32 hashLog = cParams->hashLog;
  589. U32* const chainTable = ms->chainTable;
  590. const U32 chainMask = (1 << cParams->chainLog) - 1;
  591. const BYTE* const base = ms->window.base;
  592. const U32 target = (U32)(ip - base);
  593. U32 idx = ms->nextToUpdate;
  594. while(idx < target) { /* catch up */
  595. size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
  596. NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
  597. hashTable[h] = idx;
  598. idx++;
  599. }
  600. ms->nextToUpdate = target;
  601. return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
  602. }
  603. U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
  604. const ZSTD_compressionParameters* const cParams = &ms->cParams;
  605. return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
  606. }
/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const chainTable = ms->chainTable;
    const U32 chainSize = (1 << cParams->chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 curr = (U32)(ip-base);
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 lowestValid = ms->window.lowLimit;
    /* Oldest index still reachable within the windowLog-sized window. */
    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    const U32 isDictionary = (ms->loadedDictEnd != 0);
    /* With a loaded dictionary the whole valid range stays searchable. */
    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
    /* Entries at or below minChain have been overwritten in the circular chainTable. */
    const U32 minChain = curr > chainSize ? curr - chainSize : 0;
    U32 nbAttempts = 1U << cParams->searchLog;
    size_t ml=4-1; /* best length so far; starts just below 4 so any 4-byte match wins */

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
                         ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
    const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
                        ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;

    U32 matchIndex;

    if (dictMode == ZSTD_dedicatedDictSearch) {
        const U32* entry = &dms->hashTable[ddsIdx];
        PREFETCH_L1(entry); /* warm the DDS bucket while the prefix search runs */
    }

    /* HC4 match finder */
    matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);

    for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        size_t currentMl=0;
        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
            /* Cheap filter: only pay for a full compare if this candidate could
             * extend the current best match by at least one byte. */
            if (match[ml] == ip[ml])   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            const BYTE* const match = dictBase + matchIndex;
            assert(match+4 <= dictEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break; /* chain link no longer valid past this point */
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    if (dictMode == ZSTD_dedicatedDictSearch) {
        /* Continue the search inside the dedicated dictionary structure. */
        ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
                                                  ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
    } else if (dictMode == ZSTD_dictMatchState) {
        /* Continue the search inside the attached dictionary's own chain table. */
        const U32* const dmsChainTable = dms->chainTable;
        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
        const U32 dmsChainMask         = dmsChainSize - 1;
        const U32 dmsLowestIndex       = dms->window.dictLimit;
        const BYTE* const dmsBase      = dms->window.base;
        const BYTE* const dmsEnd       = dms->window.nextSrc;
        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
        /* Delta mapping a dictionary-local index into the current index space. */
        const U32 dmsIndexDelta        = dictLimit - dmsSize;
        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;

        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];

        for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
            size_t currentMl=0;
            const BYTE* const match = dmsBase + matchIndex;
            assert(match+4 <= dmsEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }

            if (matchIndex <= dmsMinChain) break;
            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
        }
    }

    return ml;
}
  699. FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
  700. ZSTD_matchState_t* ms,
  701. const BYTE* ip, const BYTE* const iLimit,
  702. size_t* offsetPtr)
  703. {
  704. switch(ms->cParams.minMatch)
  705. {
  706. default : /* includes case 3 */
  707. case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
  708. case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
  709. case 7 :
  710. case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
  711. }
  712. }
  713. static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
  714. ZSTD_matchState_t* ms,
  715. const BYTE* ip, const BYTE* const iLimit,
  716. size_t* offsetPtr)
  717. {
  718. switch(ms->cParams.minMatch)
  719. {
  720. default : /* includes case 3 */
  721. case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
  722. case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
  723. case 7 :
  724. case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
  725. }
  726. }
  727. static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
  728. ZSTD_matchState_t* ms,
  729. const BYTE* ip, const BYTE* const iLimit,
  730. size_t* offsetPtr)
  731. {
  732. switch(ms->cParams.minMatch)
  733. {
  734. default : /* includes case 3 */
  735. case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
  736. case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
  737. case 7 :
  738. case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
  739. }
  740. }
  741. FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
  742. ZSTD_matchState_t* ms,
  743. const BYTE* ip, const BYTE* const iLimit,
  744. size_t* offsetPtr)
  745. {
  746. switch(ms->cParams.minMatch)
  747. {
  748. default : /* includes case 3 */
  749. case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
  750. case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
  751. case 7 :
  752. case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
  753. }
  754. }
/* *********************************
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
#define ZSTD_ROW_HASH_TAG_OFFSET 1     /* byte offset of hashes in the match state's tagTable from the beginning of a row */
#define ZSTD_ROW_HASH_TAG_BITS 8       /* nb bits to use for the tag */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
/* Assumes ZSTD_ROW_HASH_CACHE_SIZE is a power of 2, so this mask selects a cache slot. */
#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)

typedef U32 ZSTD_VecMask;   /* Clarifies when we are interacting with a U32 representing a mask of matches */
  764. #if !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) /* SIMD SSE version */
  765. #include <emmintrin.h>
  766. typedef __m128i ZSTD_Vec128;
  767. /* Returns a 128-bit container with 128-bits from src */
  768. static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) {
  769. return _mm_loadu_si128((ZSTD_Vec128 const*)src);
  770. }
  771. /* Returns a ZSTD_Vec128 with the byte "val" packed 16 times */
  772. static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) {
  773. return _mm_set1_epi8((char)val);
  774. }
  775. /* Do byte-by-byte comparison result of x and y. Then collapse 128-bit resultant mask
  776. * into a 32-bit mask that is the MSB of each byte.
  777. * */
  778. static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) {
  779. return (ZSTD_VecMask)_mm_movemask_epi8(_mm_cmpeq_epi8(x, y));
  780. }
/* A 256-bit vector emulated as two 128-bit halves. */
typedef struct {
    __m128i fst;   /* low 128 bits */
    __m128i snd;   /* high 128 bits */
} ZSTD_Vec256;
  785. static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) {
  786. ZSTD_Vec256 v;
  787. v.fst = ZSTD_Vec128_read(ptr);
  788. v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1);
  789. return v;
  790. }
  791. static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) {
  792. ZSTD_Vec256 v;
  793. v.fst = ZSTD_Vec128_set8(val);
  794. v.snd = ZSTD_Vec128_set8(val);
  795. return v;
  796. }
  797. static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) {
  798. ZSTD_VecMask fstMask;
  799. ZSTD_VecMask sndMask;
  800. fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst);
  801. sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd);
  802. return fstMask | (sndMask << 16);
  803. }
  804. #elif !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) /* SIMD ARM NEON Version */
  805. #include <arm_neon.h>
  806. typedef uint8x16_t ZSTD_Vec128;
  807. static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) {
  808. return vld1q_u8((const BYTE* const)src);
  809. }
  810. static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) {
  811. return vdupq_n_u8(val);
  812. }
/* Mimics '_mm_movemask_epi8()' from SSE: produces a 16-bit mask where bit i is
 * the MSB of byte lane i. Works by progressively folding lane MSBs together. */
static U32 ZSTD_vmovmaskq_u8(ZSTD_Vec128 val) {
    /* Shift out everything but the MSB bits in each byte */
    uint16x8_t highBits = vreinterpretq_u16_u8(vshrq_n_u8(val, 7));
    /* Merge the even lanes together with vsra (right shift and add) */
    uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(highBits, highBits, 7));
    uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
    uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
    /* Extract the low 8 bits from each lane, merge */
    return vgetq_lane_u8(paired64, 0) | ((U32)vgetq_lane_u8(paired64, 8) << 8);
}
  824. static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) {
  825. return (ZSTD_VecMask)ZSTD_vmovmaskq_u8(vceqq_u8(x, y));
  826. }
/* A 256-bit vector emulated as two 128-bit halves. */
typedef struct {
    uint8x16_t fst;   /* low 128 bits */
    uint8x16_t snd;   /* high 128 bits */
} ZSTD_Vec256;
  831. static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) {
  832. ZSTD_Vec256 v;
  833. v.fst = ZSTD_Vec128_read(ptr);
  834. v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1);
  835. return v;
  836. }
  837. static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) {
  838. ZSTD_Vec256 v;
  839. v.fst = ZSTD_Vec128_set8(val);
  840. v.snd = ZSTD_Vec128_set8(val);
  841. return v;
  842. }
  843. static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) {
  844. ZSTD_VecMask fstMask;
  845. ZSTD_VecMask sndMask;
  846. fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst);
  847. sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd);
  848. return fstMask | (sndMask << 16);
  849. }
  850. #else /* Scalar fallback version */
/* Number of size_t words needed to hold a 128-bit block. */
#define VEC128_NB_SIZE_T (16 / sizeof(size_t))
/* Scalar stand-in for a 128-bit vector: 16 bytes stored as size_t words. */
typedef struct {
    size_t vec[VEC128_NB_SIZE_T];
} ZSTD_Vec128;
  855. static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) {
  856. ZSTD_Vec128 ret;
  857. ZSTD_memcpy(ret.vec, src, VEC128_NB_SIZE_T*sizeof(size_t));
  858. return ret;
  859. }
  860. static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) {
  861. ZSTD_Vec128 ret = { {0} };
  862. int startBit = sizeof(size_t) * 8 - 8;
  863. for (;startBit >= 0; startBit -= 8) {
  864. unsigned j = 0;
  865. for (;j < VEC128_NB_SIZE_T; ++j) {
  866. ret.vec[j] |= ((size_t)val << startBit);
  867. }
  868. }
  869. return ret;
  870. }
  871. /* Compare x to y, byte by byte, generating a "matches" bitfield */
  872. static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) {
  873. ZSTD_VecMask res = 0;
  874. unsigned i = 0;
  875. unsigned l = 0;
  876. for (; i < VEC128_NB_SIZE_T; ++i) {
  877. const size_t cmp1 = x.vec[i];
  878. const size_t cmp2 = y.vec[i];
  879. unsigned j = 0;
  880. for (; j < sizeof(size_t); ++j, ++l) {
  881. if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) {
  882. res |= ((U32)1 << (j+i*sizeof(size_t)));
  883. }
  884. }
  885. }
  886. return res;
  887. }
  888. #define VEC256_NB_SIZE_T 2*VEC128_NB_SIZE_T
  889. typedef struct {
  890. size_t vec[VEC256_NB_SIZE_T];
  891. } ZSTD_Vec256;
  892. static ZSTD_Vec256 ZSTD_Vec256_read(const void* const src) {
  893. ZSTD_Vec256 ret;
  894. ZSTD_memcpy(ret.vec, src, VEC256_NB_SIZE_T*sizeof(size_t));
  895. return ret;
  896. }
  897. static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) {
  898. ZSTD_Vec256 ret = { {0} };
  899. int startBit = sizeof(size_t) * 8 - 8;
  900. for (;startBit >= 0; startBit -= 8) {
  901. unsigned j = 0;
  902. for (;j < VEC256_NB_SIZE_T; ++j) {
  903. ret.vec[j] |= ((size_t)val << startBit);
  904. }
  905. }
  906. return ret;
  907. }
  908. /* Compare x to y, byte by byte, generating a "matches" bitfield */
  909. static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) {
  910. ZSTD_VecMask res = 0;
  911. unsigned i = 0;
  912. unsigned l = 0;
  913. for (; i < VEC256_NB_SIZE_T; ++i) {
  914. const size_t cmp1 = x.vec[i];
  915. const size_t cmp2 = y.vec[i];
  916. unsigned j = 0;
  917. for (; j < sizeof(size_t); ++j, ++l) {
  918. if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) {
  919. res |= ((U32)1 << (j+i*sizeof(size_t)));
  920. }
  921. }
  922. }
  923. return res;
  924. }
  925. #endif /* !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) */
/* ZSTD_VecMask_next():
 * Starting from the LSB, returns the idx of the next non-zero bit.
 * Basically counting the nb of trailing zeroes.
 * NOTE(review): callers are expected to pass val != 0 — __builtin_ctz(0) is
 * undefined, and the MSVC path returns 0 when no bit is set.
 */
static U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
#   if defined(_MSC_VER)   /* Visual */
    unsigned long r=0;
    return _BitScanForward(&r, val) ? (U32)r : 0;
#   elif defined(__GNUC__) && (__GNUC__ >= 3)
    return (U32)__builtin_ctz(val);
#   else
    /* Software ctz version: http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup */
    static const U32 multiplyDeBruijnBitPosition[32] =
        {
            0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
            31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
        };
    /* (val & -val) isolates the lowest set bit; the De Bruijn multiply maps it to an index. */
    return multiplyDeBruijnBitPosition[((U32)((val & -(int)val) * 0x077CB531U)) >> 27];
#   endif
}
/* ZSTD_VecMask_rotateRight():
 * Rotates a bitfield to the right by "rotation" bits.
 * If the rotation is greater than totalBits, the returned mask is 0.
 * NOTE(review): callers pass rotation < totalBits (head is masked by rowMask),
 * so the shift amounts below stay in range.
 */
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_VecMask_rotateRight(ZSTD_VecMask mask, U32 const rotation, U32 const totalBits) {
    if (rotation == 0)
        return mask;
    switch (totalBits) {
        default:
            assert(0);   /* only 16- and 32-bit widths are supported;
                            falls through to the 16-bit case when asserts are disabled */
        case 16:
            /* The U16 cast truncates the wrapped-around part to the low 16 bits. */
            return (mask >> rotation) | (U16)(mask << (16 - rotation));
        case 32:
            return (mask >> rotation) | (U32)(mask << (32 - rotation));
    }
}
  963. /* ZSTD_row_nextIndex():
  964. * Returns the next index to insert at within a tagTable row, and updates the "head"
  965. * value to reflect the update. Essentially cycles backwards from [0, {entries per row})
  966. */
  967. FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
  968. U32 const next = (*tagRow - 1) & rowMask;
  969. *tagRow = (BYTE)next;
  970. return next;
  971. }
  972. /* ZSTD_isAligned():
  973. * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
  974. */
  975. MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
  976. assert((align & (align - 1)) == 0);
  977. return (((size_t)ptr) & (align - 1)) == 0;
  978. }
/* ZSTD_row_prefetch():
 * Performs prefetching for the hashTable and tagTable at a given row.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
    PREFETCH_L1(hashTable + relRow);
    if (rowLog == 5) {
        /* A 32-entry row of U32 spans two 64-byte cache lines; warm the second one too. */
        PREFETCH_L1(hashTable + relRow + 16);
    }
    PREFETCH_L1(tagTable + relRow);
    assert(rowLog == 4 || rowLog == 5);
    assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
    assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on a multiple of 32 or 64 bytes */
}
/* ZSTD_row_fillHashCache():
 * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
 * but not beyond iLimit.
 */
static void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
                                   U32 const rowLog, U32 const mls,
                                   U32 idx, const BYTE* const iLimit)
{
    U32 const* const hashTable = ms->hashTable;
    U16 const* const tagTable = ms->tagTable;
    U32 const hashLog = ms->rowHashLog;
    /* Number of positions that can still be hashed without reading past iLimit. */
    U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
    U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);

    for (; idx < lim; ++idx) {
        U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
        U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        /* Warm the rows this hash will touch, and remember the hash for later reuse. */
        ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
        ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
    }

    DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
                                                     ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
                                                     ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
}
  1015. /* ZSTD_row_nextCachedHash():
  1016. * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
  1017. * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
  1018. */
  1019. FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
  1020. U16 const* tagTable, BYTE const* base,
  1021. U32 idx, U32 const hashLog,
  1022. U32 const rowLog, U32 const mls)
  1023. {
  1024. U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
  1025. U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
  1026. ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
  1027. { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
  1028. cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
  1029. return hash;
  1030. }
  1031. }
/* ZSTD_row_update_internal():
 * Inserts the byte at ip into the appropriate position in the hash table.
 * Determines the relative row, and the position within the {16, 32} entry row to insert at.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
                                                    U32 const mls, U32 const rowLog,
                                                    U32 const rowMask, U32 const useCache)
{
    U32* const hashTable = ms->hashTable;
    U16* const tagTable = ms->tagTable;
    U32 const hashLog = ms->rowHashLog;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    DEBUGLOG(6, "ZSTD_row_update_internal(): nextToUpdate=%u, current=%u", idx, target);
    /* Insert every not-yet-indexed position up to (but excluding) ip. */
    for (; idx < target; ++idx) {
        /* useCache pulls the precomputed hash from the rolling cache (search path);
         * otherwise the hash is computed directly (dictionary-fill path). */
        U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, idx, hashLog, rowLog, mls)
                                  : (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
        U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        U32* const row = hashTable + relRow;
        BYTE* tagRow = (BYTE*)(tagTable + relRow);  /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
                                                       Explicit cast allows us to get exact desired position within each row */
        U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);

        assert(hash == ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
        ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
        row[pos] = idx;
    }
    ms->nextToUpdate = target;
}
  1061. /* ZSTD_row_update():
  1062. * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
  1063. * processing.
  1064. */
  1065. void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
  1066. const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;
  1067. const U32 rowMask = (1u << rowLog) - 1;
  1068. const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
  1069. DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
  1070. ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */);
  1071. }
  1072. /* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches
  1073. * the hash at the nth position in a row of the tagTable.
  1074. */
  1075. FORCE_INLINE_TEMPLATE
  1076. ZSTD_VecMask ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries) {
  1077. ZSTD_VecMask matches = 0;
  1078. if (rowEntries == 16) {
  1079. ZSTD_Vec128 hashes = ZSTD_Vec128_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET);
  1080. ZSTD_Vec128 expandedTags = ZSTD_Vec128_set8(tag);
  1081. matches = ZSTD_Vec128_cmpMask8(hashes, expandedTags);
  1082. } else if (rowEntries == 32) {
  1083. ZSTD_Vec256 hashes = ZSTD_Vec256_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET);
  1084. ZSTD_Vec256 expandedTags = ZSTD_Vec256_set8(tag);
  1085. matches = ZSTD_Vec256_cmpMask8(hashes, expandedTags);
  1086. } else {
  1087. assert(0);
  1088. }
  1089. /* Each row is a circular buffer beginning at the value of "head". So we must rotate the "matches" bitfield
  1090. to match up with the actual layout of the entries within the hashTable */
  1091. return ZSTD_VecMask_rotateRight(matches, head, rowEntries);
  1092. }
  1093. /* The high-level approach of the SIMD row based match finder is as follows:
  1094. * - Figure out where to insert the new entry:
  1095. * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
  1096. * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
  1097. * which row to insert into.
  1098. * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
  1099. * be considered as a circular buffer with a "head" index that resides in the tagTable.
  1100. * - Also insert the "tag" into the equivalent row and position in the tagTable.
  1101. * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
  1102. * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
  1103. * for alignment/performance reasons, leaving some bytes unused.
  1104. * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
  1105. * generate a bitfield that we can cycle through to check the collisions in the hash table.
  1106. * - Pick the longest match.
  1107. */
  1108. FORCE_INLINE_TEMPLATE
  1109. size_t ZSTD_RowFindBestMatch_generic (
  1110. ZSTD_matchState_t* ms,
  1111. const BYTE* const ip, const BYTE* const iLimit,
  1112. size_t* offsetPtr,
  1113. const U32 mls, const ZSTD_dictMode_e dictMode,
  1114. const U32 rowLog)
  1115. {
  1116. U32* const hashTable = ms->hashTable;
  1117. U16* const tagTable = ms->tagTable;
  1118. U32* const hashCache = ms->hashCache;
  1119. const U32 hashLog = ms->rowHashLog;
  1120. const ZSTD_compressionParameters* const cParams = &ms->cParams;
  1121. const BYTE* const base = ms->window.base;
  1122. const BYTE* const dictBase = ms->window.dictBase;
  1123. const U32 dictLimit = ms->window.dictLimit;
  1124. const BYTE* const prefixStart = base + dictLimit;
  1125. const BYTE* const dictEnd = dictBase + dictLimit;
  1126. const U32 curr = (U32)(ip-base);
  1127. const U32 maxDistance = 1U << cParams->windowLog;
  1128. const U32 lowestValid = ms->window.lowLimit;
  1129. const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
  1130. const U32 isDictionary = (ms->loadedDictEnd != 0);
  1131. const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
  1132. const U32 rowEntries = (1U << rowLog);
  1133. const U32 rowMask = rowEntries - 1;
  1134. const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
  1135. U32 nbAttempts = 1U << cappedSearchLog;
  1136. size_t ml=4-1;
    /* DMS/DDS variables that may be referenced later */
  1138. const ZSTD_matchState_t* const dms = ms->dictMatchState;
  1139. size_t ddsIdx;
  1140. U32 ddsExtraAttempts; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
  1141. U32 dmsTag;
  1142. U32* dmsRow;
  1143. BYTE* dmsTagRow;
  1144. if (dictMode == ZSTD_dedicatedDictSearch) {
  1145. const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
  1146. { /* Prefetch DDS hashtable entry */
  1147. ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
  1148. PREFETCH_L1(&dms->hashTable[ddsIdx]);
  1149. }
  1150. ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
  1151. }
  1152. if (dictMode == ZSTD_dictMatchState) {
  1153. /* Prefetch DMS rows */
  1154. U32* const dmsHashTable = dms->hashTable;
  1155. U16* const dmsTagTable = dms->tagTable;
  1156. U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
  1157. U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
  1158. dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
  1159. dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
  1160. dmsRow = dmsHashTable + dmsRelRow;
  1161. ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
  1162. }
  1163. /* Update the hashTable and tagTable up to (but not including) ip */
  1164. ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
  1165. { /* Get the hash for ip, compute the appropriate row */
  1166. U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
  1167. U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
  1168. U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
  1169. U32* const row = hashTable + relRow;
  1170. BYTE* tagRow = (BYTE*)(tagTable + relRow);
  1171. U32 const head = *tagRow & rowMask;
  1172. U32 matchBuffer[32 /* maximum nb entries per row */];
  1173. size_t numMatches = 0;
  1174. size_t currMatch = 0;
  1175. ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries);
  1176. /* Cycle through the matches and prefetch */
  1177. for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
  1178. U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
  1179. U32 const matchIndex = row[matchPos];
  1180. assert(numMatches < rowEntries);
  1181. if (matchIndex < lowLimit)
  1182. break;
  1183. if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
  1184. PREFETCH_L1(base + matchIndex);
  1185. } else {
  1186. PREFETCH_L1(dictBase + matchIndex);
  1187. }
  1188. matchBuffer[numMatches++] = matchIndex;
  1189. }
  1190. /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
  1191. in ZSTD_row_update_internal() at the next search. */
  1192. {
  1193. U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
  1194. tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
  1195. row[pos] = ms->nextToUpdate++;
  1196. }
  1197. /* Return the longest match */
  1198. for (; currMatch < numMatches; ++currMatch) {
  1199. U32 const matchIndex = matchBuffer[currMatch];
  1200. size_t currentMl=0;
  1201. assert(matchIndex < curr);
  1202. assert(matchIndex >= lowLimit);
  1203. if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
  1204. const BYTE* const match = base + matchIndex;
  1205. assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
  1206. if (match[ml] == ip[ml]) /* potentially better */
  1207. currentMl = ZSTD_count(ip, match, iLimit);
  1208. } else {
  1209. const BYTE* const match = dictBase + matchIndex;
  1210. assert(match+4 <= dictEnd);
  1211. if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
  1212. currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
  1213. }
  1214. /* Save best solution */
  1215. if (currentMl > ml) {
  1216. ml = currentMl;
  1217. *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
  1218. if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
  1219. }
  1220. }
  1221. }
  1222. if (dictMode == ZSTD_dedicatedDictSearch) {
  1223. ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
  1224. ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
  1225. } else if (dictMode == ZSTD_dictMatchState) {
  1226. /* TODO: Measure and potentially add prefetching to DMS */
  1227. const U32 dmsLowestIndex = dms->window.dictLimit;
  1228. const BYTE* const dmsBase = dms->window.base;
  1229. const BYTE* const dmsEnd = dms->window.nextSrc;
  1230. const U32 dmsSize = (U32)(dmsEnd - dmsBase);
  1231. const U32 dmsIndexDelta = dictLimit - dmsSize;
  1232. { U32 const head = *dmsTagRow & rowMask;
  1233. U32 matchBuffer[32 /* maximum nb row entries */];
  1234. size_t numMatches = 0;
  1235. size_t currMatch = 0;
  1236. ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries);
  1237. for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
  1238. U32 const matchPos = (head + ZSTD_VecMask_next(matches)) & rowMask;
  1239. U32 const matchIndex = dmsRow[matchPos];
  1240. if (matchIndex < dmsLowestIndex)
  1241. break;
  1242. PREFETCH_L1(dmsBase + matchIndex);
  1243. matchBuffer[numMatches++] = matchIndex;
  1244. }
  1245. /* Return the longest match */
  1246. for (; currMatch < numMatches; ++currMatch) {
  1247. U32 const matchIndex = matchBuffer[currMatch];
  1248. size_t currentMl=0;
  1249. assert(matchIndex >= dmsLowestIndex);
  1250. assert(matchIndex < curr);
  1251. { const BYTE* const match = dmsBase + matchIndex;
  1252. assert(match+4 <= dmsEnd);
  1253. if (MEM_read32(match) == MEM_read32(ip))
  1254. currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
  1255. }
  1256. if (currentMl > ml) {
  1257. ml = currentMl;
  1258. *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
  1259. if (ip+currentMl == iLimit) break;
  1260. }
  1261. }
  1262. }
  1263. }
  1264. return ml;
  1265. }
  1266. /* Inlining is important to hardwire a hot branch (template emulation) */
  1267. FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectMLS (
  1268. ZSTD_matchState_t* ms,
  1269. const BYTE* ip, const BYTE* const iLimit,
  1270. const ZSTD_dictMode_e dictMode, size_t* offsetPtr, const U32 rowLog)
  1271. {
  1272. switch(ms->cParams.minMatch)
  1273. {
  1274. default : /* includes case 3 */
  1275. case 4 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, dictMode, rowLog);
  1276. case 5 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, dictMode, rowLog);
  1277. case 7 :
  1278. case 6 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, dictMode, rowLog);
  1279. }
  1280. }
  1281. FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectRowLog (
  1282. ZSTD_matchState_t* ms,
  1283. const BYTE* ip, const BYTE* const iLimit,
  1284. size_t* offsetPtr)
  1285. {
  1286. const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
  1287. switch(cappedSearchLog)
  1288. {
  1289. default :
  1290. case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 4);
  1291. case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 5);
  1292. }
  1293. }
  1294. FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dictMatchState_selectRowLog(
  1295. ZSTD_matchState_t* ms,
  1296. const BYTE* ip, const BYTE* const iLimit,
  1297. size_t* offsetPtr)
  1298. {
  1299. const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
  1300. switch(cappedSearchLog)
  1301. {
  1302. default :
  1303. case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 4);
  1304. case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 5);
  1305. }
  1306. }
  1307. FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog(
  1308. ZSTD_matchState_t* ms,
  1309. const BYTE* ip, const BYTE* const iLimit,
  1310. size_t* offsetPtr)
  1311. {
  1312. const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
  1313. switch(cappedSearchLog)
  1314. {
  1315. default :
  1316. case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 4);
  1317. case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 5);
  1318. }
  1319. }
  1320. FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_extDict_selectRowLog (
  1321. ZSTD_matchState_t* ms,
  1322. const BYTE* ip, const BYTE* const iLimit,
  1323. size_t* offsetPtr)
  1324. {
  1325. const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5);
  1326. switch(cappedSearchLog)
  1327. {
  1328. default :
  1329. case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 4);
  1330. case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 5);
  1331. }
  1332. }
/* *******************************
*  Common parser - lazy strategy
*********************************/

/* Selects which match-finder family a lazy parser dispatches to. */
typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
/** ZSTD_compressBlock_lazy_generic() :
 *  Generic lazy / greedy block parser.
 *  Compile-time specialized (template emulation via FORCE_INLINE_TEMPLATE) on :
 *  - searchMethod : hash-chain, binary-tree, or row-hash match finder
 *  - depth : nb of positions re-searched for a better match before committing
 *            a sequence (0 = greedy, 1 = lazy, 2 = lazy2 / btlazy2)
 *  - dictMode : ZSTD_noDict, ZSTD_dictMatchState or ZSTD_dedicatedDictSearch
 *    (extDict is handled by the separate implementation below)
 *  Writes sequences into seqStore, updates rep[] for the next block,
 *  and returns the size of the last literals (bytes after the final sequence). */
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_lazy_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const searchMethod_e searchMethod, const U32 depth,
                        ZSTD_dictMode_e const dictMode)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    /* the row-hash cache reads ahead of ip, so it needs a wider end-of-block margin */
    const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 prefixLowestIndex = ms->window.dictLimit;
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);

    /**
     * This table is indexed first by the four ZSTD_dictMode_e values, and then
     * by the three searchMethod_e values. NULLs are placed for configurations
     * that should never occur (extDict modes go to the other implementation
     * below and there is no DDSS for binary tree search yet).
     */
    const searchMax_f searchFuncs[4][3] = {
        {
            ZSTD_HcFindBestMatch_selectMLS,
            ZSTD_BtFindBestMatch_selectMLS,
            ZSTD_RowFindBestMatch_selectRowLog
        },
        {
            NULL,
            NULL,
            NULL
        },
        {
            ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
            ZSTD_BtFindBestMatch_dictMatchState_selectMLS,
            ZSTD_RowFindBestMatch_dictMatchState_selectRowLog
        },
        {
            ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
            NULL,
            ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog
        }
    };

    searchMax_f const searchMax = searchFuncs[dictMode][(int)searchMethod];
    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;

    const int isDMS = dictMode == ZSTD_dictMatchState;
    const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
    const int isDxS = isDMS || isDDS;
    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
    const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
    const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
    const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
    /* translates a dictionary index into the prefix index space */
    const U32 dictIndexDelta = isDxS ?
                               prefixLowestIndex - (U32)(dictEnd - dictBase) :
                               0;
    const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));

    assert(searchMax != NULL);

    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);

    /* init : skip position 0 when nothing precedes it (no match possible there) */
    ip += (dictAndPrefixLength == 0);
    if (dictMode == ZSTD_noDict) {
        U32 const curr = (U32)(ip - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
        U32 const maxRep = curr - windowLow;
        /* repcodes pointing outside the valid window are disabled (set to 0)
         * and restored from savedOffset at the end of the block */
        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
    }
    if (isDxS) {
        /* dictMatchState repCode checks don't currently handle repCode == 0
         * disabling. */
        assert(offset_1 <= dictAndPrefixLength);
        assert(offset_2 <= dictAndPrefixLength);
    }

    if (searchMethod == search_rowHash) {
        ZSTD_row_fillHashCache(ms, base, rowLog,
                            MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
                            ms->nextToUpdate, ilimit);
    }

    /* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
    /* I've measured a random ~5% speed loss on levels 5 & 6 (greedy) when the
     * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
     */
    __asm__(".p2align 5");
#endif
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;        /* 0 = repcode, otherwise match offset + ZSTD_REP_MOVE */
        const BYTE* start=ip+1;

        /* check repCode */
        if (isDxS) {
            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
            const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
                                && repIndex < prefixLowestIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                if (depth==0) goto _storeSequence;   /* greedy : take the repcode immediately */
            }
        }
        if ( dictMode == ZSTD_noDict
          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            if (depth==0) goto _storeSequence;
        }

        /* first search (depth 0) */
        {   size_t offsetFound = 999999999;
            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            /* candidate 1 : repcode at the new position (cheap to encode, gain factor 3) */
            if ( (dictMode == ZSTD_noDict)
              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                int const gain2 = (int)(mlRep * 3);
                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                if ((mlRep >= 4) && (gain2 > gain1))
                    matchLength = mlRep, offset = 0, start = ip;
            }
            if (isDxS) {
                const U32 repIndex = (U32)(ip - base) - offset_1;
                const BYTE* repMatch = repIndex < prefixLowestIndex ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                    int const gain2 = (int)(mlRep * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offset = 0, start = ip;
                }
            }
            /* candidate 2 : full search at the new position */
            {   size_t offset2=999999999;
                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                if ( (dictMode == ZSTD_noDict)
                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                    int const gain2 = (int)(mlRep * 4);
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offset = 0, start = ip;
                }
                if (isDxS) {
                    const U32 repIndex = (U32)(ip - base) - offset_1;
                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                        int const gain2 = (int)(mlRep * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((mlRep >= 4) && (gain2 > gain1))
                            matchLength = mlRep, offset = 0, start = ip;
                    }
                }
                {   size_t offset2=999999999;
                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* NOTE:
         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
         * overflows the pointer, which is undefined behavior.
         */
        /* catch up : extend the match backwards while preceding bytes also match */
        if (offset) {
            if (dictMode == ZSTD_noDict) {
                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
                    { start--; matchLength++; }
            }
            if (isDxS) {
                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            }
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }
        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        if (isDxS) {
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex = current2 - offset_2;
                const BYTE* repMatch = repIndex < prefixLowestIndex ?
                        dictBase - dictIndexDelta + repIndex :
                        base + repIndex;
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
                    ip += matchLength;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }

        if (dictMode == ZSTD_noDict) {
            while ( ((ip <= ilimit) & (offset_2>0))
                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
                /* store sequence */
                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* Save reps for next block */
    rep[0] = offset_1 ? offset_1 : savedOffset;
    rep[1] = offset_2 ? offset_2 : savedOffset;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}
/* Binary-tree matcher, search depth 2, no dictionary. */
size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}
/* Hash-chain matcher, search depth 2, no dictionary. */
size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}
/* Hash-chain matcher, search depth 1, no dictionary. */
size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}
/* Hash-chain matcher, greedy (depth 0), no dictionary. */
size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
}
/* Binary-tree matcher, search depth 2, dictMatchState mode. */
size_t ZSTD_compressBlock_btlazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}
/* Hash-chain matcher, search depth 2, dictMatchState mode. */
size_t ZSTD_compressBlock_lazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}
/* Hash-chain matcher, search depth 1, dictMatchState mode. */
size_t ZSTD_compressBlock_lazy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
}
/* Hash-chain matcher, greedy (depth 0), dictMatchState mode. */
size_t ZSTD_compressBlock_greedy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}
/* Hash-chain matcher, search depth 2, dedicatedDictSearch mode. */
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
}
/* Hash-chain matcher, search depth 1, dedicatedDictSearch mode. */
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
}
/* Hash-chain matcher, greedy (depth 0), dedicatedDictSearch mode. */
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
}
/* Row-based matchfinder */

/* Row-hash matcher, search depth 2, no dictionary. */
size_t ZSTD_compressBlock_lazy2_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
}
/* Row-hash matcher, search depth 1, no dictionary. */
size_t ZSTD_compressBlock_lazy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
}
/* Row-hash matcher, greedy (depth 0), no dictionary. */
size_t ZSTD_compressBlock_greedy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
}
/* Row-hash matcher, search depth 2, dictMatchState mode. */
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
}
/* Row-hash matcher, search depth 1, dictMatchState mode. */
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
}
/* Row-hash matcher, greedy (depth 0), dictMatchState mode. */
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
}
/* Row-hash matcher, search depth 2, dedicatedDictSearch mode. */
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
}
/* Row-hash matcher, search depth 1, dedicatedDictSearch mode. */
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
}
/* Row-hash matcher, greedy (depth 0), dedicatedDictSearch mode. */
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
}
  1716. FORCE_INLINE_TEMPLATE
  1717. size_t ZSTD_compressBlock_lazy_extDict_generic(
  1718. ZSTD_matchState_t* ms, seqStore_t* seqStore,
  1719. U32 rep[ZSTD_REP_NUM],
  1720. const void* src, size_t srcSize,
  1721. const searchMethod_e searchMethod, const U32 depth)
  1722. {
  1723. const BYTE* const istart = (const BYTE*)src;
  1724. const BYTE* ip = istart;
  1725. const BYTE* anchor = istart;
  1726. const BYTE* const iend = istart + srcSize;
  1727. const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
  1728. const BYTE* const base = ms->window.base;
  1729. const U32 dictLimit = ms->window.dictLimit;
  1730. const BYTE* const prefixStart = base + dictLimit;
  1731. const BYTE* const dictBase = ms->window.dictBase;
  1732. const BYTE* const dictEnd = dictBase + dictLimit;
  1733. const BYTE* const dictStart = dictBase + ms->window.lowLimit;
  1734. const U32 windowLog = ms->cParams.windowLog;
  1735. const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5;
  1736. typedef size_t (*searchMax_f)(
  1737. ZSTD_matchState_t* ms,
  1738. const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
  1739. const searchMax_f searchFuncs[3] = {
  1740. ZSTD_HcFindBestMatch_extDict_selectMLS,
  1741. ZSTD_BtFindBestMatch_extDict_selectMLS,
  1742. ZSTD_RowFindBestMatch_extDict_selectRowLog
  1743. };
  1744. searchMax_f searchMax = searchFuncs[(int)searchMethod];
  1745. U32 offset_1 = rep[0], offset_2 = rep[1];
  1746. DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
  1747. /* init */
  1748. ip += (ip == prefixStart);
  1749. if (searchMethod == search_rowHash) {
  1750. ZSTD_row_fillHashCache(ms, base, rowLog,
  1751. MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */),
  1752. ms->nextToUpdate, ilimit);
  1753. }
  1754. /* Match Loop */
  1755. #if defined(__GNUC__) && defined(__x86_64__)
  1756. /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
  1757. * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
  1758. */
  1759. __asm__(".p2align 5");
  1760. #endif
  1761. while (ip < ilimit) {
  1762. size_t matchLength=0;
  1763. size_t offset=0;
  1764. const BYTE* start=ip+1;
  1765. U32 curr = (U32)(ip-base);
  1766. /* check repCode */
  1767. { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
  1768. const U32 repIndex = (U32)(curr+1 - offset_1);
  1769. const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
  1770. const BYTE* const repMatch = repBase + repIndex;
  1771. if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
  1772. & (offset_1 < curr+1 - windowLow) ) /* note: we are searching at curr+1 */
  1773. if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
  1774. /* repcode detected we should take it */
  1775. const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
  1776. matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
  1777. if (depth==0) goto _storeSequence;
  1778. } }
  1779. /* first search (depth 0) */
  1780. { size_t offsetFound = 999999999;
  1781. size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
  1782. if (ml2 > matchLength)
  1783. matchLength = ml2, start = ip, offset=offsetFound;
  1784. }
  1785. if (matchLength < 4) {
  1786. ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
  1787. continue;
  1788. }
  1789. /* let's try to find a better solution */
  1790. if (depth>=1)
  1791. while (ip<ilimit) {
  1792. ip ++;
  1793. curr++;
  1794. /* check repCode */
  1795. if (offset) {
  1796. const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
  1797. const U32 repIndex = (U32)(curr - offset_1);
  1798. const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
  1799. const BYTE* const repMatch = repBase + repIndex;
  1800. if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
  1801. & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
  1802. if (MEM_read32(ip) == MEM_read32(repMatch)) {
  1803. /* repcode detected */
  1804. const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
  1805. size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
  1806. int const gain2 = (int)(repLength * 3);
  1807. int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
  1808. if ((repLength >= 4) && (gain2 > gain1))
  1809. matchLength = repLength, offset = 0, start = ip;
  1810. } }
  1811. /* search match, depth 1 */
  1812. { size_t offset2=999999999;
  1813. size_t const ml2 = searchMax(ms, ip, iend, &offset2);
  1814. int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
  1815. int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
  1816. if ((ml2 >= 4) && (gain2 > gain1)) {
  1817. matchLength = ml2, offset = offset2, start = ip;
  1818. continue; /* search a better one */
  1819. } }
  1820. /* let's find an even better one */
  1821. if ((depth==2) && (ip<ilimit)) {
  1822. ip ++;
  1823. curr++;
  1824. /* check repCode */
  1825. if (offset) {
  1826. const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
  1827. const U32 repIndex = (U32)(curr - offset_1);
  1828. const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
  1829. const BYTE* const repMatch = repBase + repIndex;
  1830. if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
  1831. & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
  1832. if (MEM_read32(ip) == MEM_read32(repMatch)) {
  1833. /* repcode detected */
  1834. const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
  1835. size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
  1836. int const gain2 = (int)(repLength * 4);
  1837. int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
  1838. if ((repLength >= 4) && (gain2 > gain1))
  1839. matchLength = repLength, offset = 0, start = ip;
  1840. } }
  1841. /* search match, depth 2 */
  1842. { size_t offset2=999999999;
  1843. size_t const ml2 = searchMax(ms, ip, iend, &offset2);
  1844. int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
  1845. int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
  1846. if ((ml2 >= 4) && (gain2 > gain1)) {
  1847. matchLength = ml2, offset = offset2, start = ip;
  1848. continue;
  1849. } } }
  1850. break; /* nothing found : store previous solution */
  1851. }
  1852. /* catch up */
  1853. if (offset) {
  1854. U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
  1855. const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
  1856. const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
  1857. while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
  1858. offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
  1859. }
  1860. /* store sequence */
  1861. _storeSequence:
  1862. { size_t const litLength = start - anchor;
  1863. ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
  1864. anchor = ip = start + matchLength;
  1865. }
  1866. /* check immediate repcode */
  1867. while (ip <= ilimit) {
  1868. const U32 repCurrent = (U32)(ip-base);
  1869. const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
  1870. const U32 repIndex = repCurrent - offset_2;
  1871. const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
  1872. const BYTE* const repMatch = repBase + repIndex;
  1873. if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
  1874. & (offset_2 < repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
  1875. if (MEM_read32(ip) == MEM_read32(repMatch)) {
  1876. /* repcode detected we should take it */
  1877. const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
  1878. matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
  1879. offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
  1880. ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
  1881. ip += matchLength;
  1882. anchor = ip;
  1883. continue; /* faster when present ... (?) */
  1884. }
  1885. break;
  1886. } }
  1887. /* Save reps for next block */
  1888. rep[0] = offset_1;
  1889. rep[1] = offset_2;
  1890. /* Return the last literals size */
  1891. return (size_t)(iend - anchor);
  1892. }
  1893. size_t ZSTD_compressBlock_greedy_extDict(
  1894. ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  1895. void const* src, size_t srcSize)
  1896. {
  1897. return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);
  1898. }
  1899. size_t ZSTD_compressBlock_lazy_extDict(
  1900. ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  1901. void const* src, size_t srcSize)
  1902. {
  1903. return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);
  1904. }
  1905. size_t ZSTD_compressBlock_lazy2_extDict(
  1906. ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  1907. void const* src, size_t srcSize)
  1908. {
  1909. return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);
  1910. }
  1911. size_t ZSTD_compressBlock_btlazy2_extDict(
  1912. ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  1913. void const* src, size_t srcSize)
  1914. {
  1915. return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
  1916. }
  1917. size_t ZSTD_compressBlock_greedy_extDict_row(
  1918. ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  1919. void const* src, size_t srcSize)
  1920. {
  1921. return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
  1922. }
  1923. size_t ZSTD_compressBlock_lazy_extDict_row(
  1924. ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  1925. void const* src, size_t srcSize)
  1926. {
  1927. return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
  1928. }
  1929. size_t ZSTD_compressBlock_lazy2_extDict_row(
  1930. ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  1931. void const* src, size_t srcSize)
  1932. {
  1933. return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
  1934. }