Mercurial > jhg
comparison src/org/tmatesoft/hg/repo/HgBlameFacility.java @ 596:43cfa08ff3fd
HgBlameFacility refactoring: extract code to build file history that spans renames
| author | Artem Tikhomirov <tikhomirov.artem@gmail.com> |
|---|---|
| date | Thu, 02 May 2013 19:23:53 +0200 |
| parents | e49f9d9513fa |
| children |
comparison
equal
deleted
inserted
replaced
| 595:92c3ad9c2a51 | 596:43cfa08ff3fd |
|---|---|
| 14 * the terms of a license other than GNU General Public License | 14 * the terms of a license other than GNU General Public License |
| 15 * contact TMate Software at support@hg4j.com | 15 * contact TMate Software at support@hg4j.com |
| 16 */ | 16 */ |
| 17 package org.tmatesoft.hg.repo; | 17 package org.tmatesoft.hg.repo; |
| 18 | 18 |
| 19 import static org.tmatesoft.hg.core.HgIterateDirection.NewToOld; | |
| 20 import static org.tmatesoft.hg.core.HgIterateDirection.OldToNew; | 19 import static org.tmatesoft.hg.core.HgIterateDirection.OldToNew; |
| 21 import static org.tmatesoft.hg.repo.HgInternals.wrongRevisionIndex; | 20 import static org.tmatesoft.hg.repo.HgInternals.wrongRevisionIndex; |
| 22 import static org.tmatesoft.hg.repo.HgRepository.*; | 21 import static org.tmatesoft.hg.repo.HgRepository.NO_REVISION; |
| 23 | 22 import static org.tmatesoft.hg.repo.HgRepository.TIP; |
| 24 import java.util.Arrays; | |
| 25 import java.util.BitSet; | |
| 26 import java.util.Collections; | |
| 27 import java.util.LinkedList; | |
| 28 | 23 |
| 29 import org.tmatesoft.hg.core.HgCallbackTargetException; | 24 import org.tmatesoft.hg.core.HgCallbackTargetException; |
| 30 import org.tmatesoft.hg.core.HgIterateDirection; | 25 import org.tmatesoft.hg.core.HgIterateDirection; |
| 31 import org.tmatesoft.hg.core.Nodeid; | 26 import org.tmatesoft.hg.core.Nodeid; |
| 32 import org.tmatesoft.hg.internal.BlameHelper; | 27 import org.tmatesoft.hg.internal.BlameHelper; |
| 33 import org.tmatesoft.hg.internal.Callback; | 28 import org.tmatesoft.hg.internal.Callback; |
| 34 import org.tmatesoft.hg.internal.Experimental; | 29 import org.tmatesoft.hg.internal.Experimental; |
| 35 import org.tmatesoft.hg.internal.IntVector; | 30 import org.tmatesoft.hg.internal.FileHistory; |
| 31 import org.tmatesoft.hg.internal.FileRevisionHistoryChunk; | |
| 36 import org.tmatesoft.hg.util.Adaptable; | 32 import org.tmatesoft.hg.util.Adaptable; |
| 37 | 33 |
| 38 /** | 34 /** |
| 39 * Facility with diff/annotate functionality. | 35 * Facility with diff/annotate functionality. |
| 40 * | 36 * |
| 87 } | 83 } |
| 88 HgInternals.checkRevlogRange(changelogRevIndexStart, changelogRevIndexEnd, lastRevision); | 84 HgInternals.checkRevlogRange(changelogRevIndexStart, changelogRevIndexEnd, lastRevision); |
| 89 if (!df.exists()) { | 85 if (!df.exists()) { |
| 90 return; | 86 return; |
| 91 } | 87 } |
| 88 FileHistory fileHistory = new FileHistory(df, changelogRevIndexStart, changelogRevIndexEnd); | |
| 89 fileHistory.build(); | |
| 92 BlameHelper bh = new BlameHelper(insp, 10); | 90 BlameHelper bh = new BlameHelper(insp, 10); |
| 93 HgDataFile currentFile = df; | 91 for (FileRevisionHistoryChunk fhc : fileHistory.iterate(OldToNew)) { |
| 94 int fileLastClogRevIndex = changelogRevIndexEnd; | 92 // iteration order is not important here |
| 95 FileRevisionHistoryChunk nextChunk = null; | 93 bh.useFileUpTo(fhc.getFile(), fhc.getEndChangeset()); |
| 96 LinkedList<FileRevisionHistoryChunk> fileCompleteHistory = new LinkedList<FileRevisionHistoryChunk>(); | 94 } |
| 97 do { | |
| 98 FileRevisionHistoryChunk fileHistory = new FileRevisionHistoryChunk(currentFile); | |
| 99 fileHistory.init(fileLastClogRevIndex); | |
| 100 fileHistory.linkTo(nextChunk); | |
| 101 fileCompleteHistory.addFirst(fileHistory); // to get the list in old-to-new order | |
| 102 nextChunk = fileHistory; | |
| 103 bh.useFileUpTo(currentFile, fileLastClogRevIndex); | |
| 104 if (fileHistory.changeset(0) > changelogRevIndexStart && currentFile.isCopy()) { | |
| 105 // fileHistory.changeset(0) is the earliest revision we know about so far, | |
| 106 // once we get to revisions earlier than the requested start, stop digging. | |
| 107 // The reason there's NO == (i.e. not >=) is that: | |
| 108 // (easy): once it's equal, we've reached our intended start | |
| 109 // (hard): if changelogRevIndexStart happens to be exact start of one of renames in the | |
| 110 // chain of renames (test-annotate2 repository, file1->file1a->file1b, i.e. points | |
| 111 // to the very start of file1a or file1 history), presence of == would get us to the next | |
| 112 // chunk and hence changed parents of present chunk's first element. Our annotate alg | |
| 113 // relies on parents only (i.e. knows nothing about 'last iteration element') to find out | |
| 114 // what to compare, and hence won't report all lines of 'last iteration element' (which is the | |
| 115 // first revision of the renamed file) as "added in this revision", leaving gaps in annotate | |
| 116 HgRepository repo = currentFile.getRepo(); | |
| 117 Nodeid originLastRev = currentFile.getCopySourceRevision(); | |
| 118 currentFile = repo.getFileNode(currentFile.getCopySourceName()); | |
| 119 fileLastClogRevIndex = currentFile.getChangesetRevisionIndex(currentFile.getRevisionIndex(originLastRev)); | |
| 120 // XXX perhaps, shall fail with meaningful exception if new file doesn't exist (.i/.d not found for whatever reason) | |
| 121 // or source revision is missing? | |
| 122 } else { | |
| 123 fileHistory.chopAtChangeset(changelogRevIndexStart); | |
| 124 currentFile = null; // stop iterating | |
| 125 } | |
| 126 } while (currentFile != null && fileLastClogRevIndex > changelogRevIndexStart); | |
| 127 // fileCompleteHistory is in (origin, intermediate target, ultimate target) order | |
| 128 | |
| 129 int[] fileClogParentRevs = new int[2]; | 95 int[] fileClogParentRevs = new int[2]; |
| 130 int[] fileParentRevs = new int[2]; | 96 int[] fileParentRevs = new int[2]; |
| 131 if (iterateOrder == NewToOld) { | 97 for (FileRevisionHistoryChunk fhc : fileHistory.iterate(iterateOrder)) { |
| 132 Collections.reverse(fileCompleteHistory); | 98 for (int fri : fhc.fileRevisions(iterateOrder)) { |
| 133 } | 99 int clogRevIndex = fhc.changeset(fri); |
| 134 boolean shallFilterStart = changelogRevIndexStart != 0; // no reason if complete history is walked | 100 // the way we built fileHistory ensures we won't walk past [changelogRevIndexStart..changelogRevIndexEnd] |
| 135 for (FileRevisionHistoryChunk fileHistory : fileCompleteHistory) { | 101 assert clogRevIndex >= changelogRevIndexStart; |
| 136 for (int fri : fileHistory.fileRevisions(iterateOrder)) { | 102 assert clogRevIndex <= changelogRevIndexEnd; |
| 137 int clogRevIndex = fileHistory.changeset(fri); | 103 fhc.fillFileParents(fri, fileParentRevs); |
| 138 if (shallFilterStart) { | 104 fhc.fillCsetParents(fri, fileClogParentRevs); |
| 139 if (iterateOrder == NewToOld) { | |
| 140 // clogRevIndex decreases | |
| 141 if (clogRevIndex < changelogRevIndexStart) { | |
| 142 break; | |
| 143 } | |
| 144 // fall-through, clogRevIndex is in the [start..end] range | |
| 145 } else { // old to new | |
| 146 // the way we built fileHistory ensures we won't walk past changelogRevIndexEnd | |
| 147 // here we ensure we start from the right one, the one indicated with changelogRevIndexStart | |
| 148 if (clogRevIndex < changelogRevIndexStart) { | |
| 149 continue; | |
| 150 } else { | |
| 151 shallFilterStart = false; // once boundary is crossed, no need to check | |
| 152 // fall-through | |
| 153 } | |
| 154 } | |
| 155 } | |
| 156 fileHistory.fillFileParents(fri, fileParentRevs); | |
| 157 fileHistory.fillCsetParents(fri, fileClogParentRevs); | |
| 158 bh.annotateChange(fri, clogRevIndex, fileParentRevs, fileClogParentRevs); | 105 bh.annotateChange(fri, clogRevIndex, fileParentRevs, fileClogParentRevs); |
| 159 } | 106 } |
| 160 } | 107 } |
| 161 } | 108 } |
| 162 | 109 |
| 193 public interface Inspector { | 140 public interface Inspector { |
| 194 void same(EqualBlock block) throws HgCallbackTargetException; | 141 void same(EqualBlock block) throws HgCallbackTargetException; |
| 195 void added(AddBlock block) throws HgCallbackTargetException; | 142 void added(AddBlock block) throws HgCallbackTargetException; |
| 196 void changed(ChangeBlock block) throws HgCallbackTargetException; | 143 void changed(ChangeBlock block) throws HgCallbackTargetException; |
| 197 void deleted(DeleteBlock block) throws HgCallbackTargetException; | 144 void deleted(DeleteBlock block) throws HgCallbackTargetException; |
| 198 } | |
| 199 | |
| 200 /** | |
| 201 * No need to keep "Block" prefix as long as there's only one {@link Inspector} | |
| 202 */ | |
| 203 @Deprecated | |
| 204 public interface BlockInspector extends Inspector { | |
| 205 } | 145 } |
| 206 | 146 |
| 207 /** | 147 /** |
| 208 * Represents content of a block, either as a sequence of bytes or a | 148 * Represents content of a block, either as a sequence of bytes or a |
| 209 * sequence of smaller blocks (lines), if appropriate (according to usage context). | 149 * sequence of smaller blocks (lines), if appropriate (according to usage context). |
| 351 | 291 |
| 352 private static int fileRevIndex(HgDataFile df, int csetRevIndex) { | 292 private static int fileRevIndex(HgDataFile df, int csetRevIndex) { |
| 353 Nodeid fileRev = df.getRepo().getManifest().getFileRevision(csetRevIndex, df.getPath()); | 293 Nodeid fileRev = df.getRepo().getManifest().getFileRevision(csetRevIndex, df.getPath()); |
| 354 return df.getRevisionIndex(fileRev); | 294 return df.getRevisionIndex(fileRev); |
| 355 } | 295 } |
| 356 | |
| 357 private static class FileRevisionHistoryChunk { | |
| 358 private final HgDataFile df; | |
| 359 // change ancestry, sequence of file revisions | |
| 360 private IntVector fileRevsToVisit; | |
| 361 // parent pairs of complete file history | |
| 362 private IntVector fileParentRevs; | |
| 363 // map file revision to changelog revision (sparse array, only file revisions to visit are set) | |
| 364 private int[] file2changelog; | |
| 365 private int originChangelogRev = BAD_REVISION, originFileRev = BAD_REVISION; | |
| 366 | |
| 367 public FileRevisionHistoryChunk(HgDataFile file) { | |
| 368 df = file; | |
| 369 } | |
| 370 | |
| 371 public void init(int changelogRevisionIndex) { | |
| 372 // XXX df.indexWalk(0, fileRevIndex, ) might be more effective | |
| 373 int fileRevIndex = fileRevIndex(df, changelogRevisionIndex); | |
| 374 int[] fileRevParents = new int[2]; | |
| 375 fileParentRevs = new IntVector((fileRevIndex+1) * 2, 0); | |
| 376 fileParentRevs.add(NO_REVISION, NO_REVISION); // parents of fileRevIndex == 0 | |
| 377 for (int i = 1; i <= fileRevIndex; i++) { | |
| 378 df.parents(i, fileRevParents, null, null); | |
| 379 fileParentRevs.add(fileRevParents[0], fileRevParents[1]); | |
| 380 } | |
| 381 // fileRevsToVisit keeps file change ancestry from new to old | |
| 382 fileRevsToVisit = new IntVector(fileRevIndex + 1, 0); | |
| 383 // keep map of file revision to changelog revision | |
| 384 file2changelog = new int[fileRevIndex+1]; | |
| 385 // only elements worth visit would get mapped, so there would be unfilled areas in the file2changelog, | |
| 386 // prevent from error (make it explicit) by bad value | |
| 387 Arrays.fill(file2changelog, BAD_REVISION); | |
| 388 LinkedList<Integer> queue = new LinkedList<Integer>(); | |
| 389 BitSet seen = new BitSet(fileRevIndex + 1); | |
| 390 queue.add(fileRevIndex); | |
| 391 do { | |
| 392 int x = queue.removeFirst(); | |
| 393 if (seen.get(x)) { | |
| 394 continue; | |
| 395 } | |
| 396 seen.set(x); | |
| 397 fileRevsToVisit.add(x); | |
| 398 file2changelog[x] = df.getChangesetRevisionIndex(x); | |
| 399 int p1 = fileParentRevs.get(2*x); | |
| 400 int p2 = fileParentRevs.get(2*x + 1); | |
| 401 if (p1 != NO_REVISION) { | |
| 402 queue.addLast(p1); | |
| 403 } | |
| 404 if (p2 != NO_REVISION) { | |
| 405 queue.addLast(p2); | |
| 406 } | |
| 407 } while (!queue.isEmpty()); | |
| 408 // make sure no child is processed before we handled all (grand-)parents of the element | |
| 409 fileRevsToVisit.sort(false); | |
| 410 } | |
| 411 | |
| 412 public void linkTo(FileRevisionHistoryChunk target) { | |
| 413 // assume that target.init() has been called already | |
| 414 if (target == null) { | |
| 415 return; | |
| 416 } | |
| 417 target.originFileRev = fileRevsToVisit.get(0); // files to visit are new to old | |
| 418 target.originChangelogRev = changeset(target.originFileRev); | |
| 419 } | |
| 420 | |
| 421 /** | |
| 422 * Mark revision closest(ceil) to specified as the very first one (no parents) | |
| 423 */ | |
| 424 public void chopAtChangeset(int firstChangelogRevOfInterest) { | |
| 425 if (firstChangelogRevOfInterest == 0) { | |
| 426 return; // nothing to do | |
| 427 } | |
| 428 int i = 0, x = fileRevsToVisit.size(), fileRev = BAD_REVISION; | |
| 429 // fileRevsToVisit is new to old, greater numbers to smaller | |
| 430 while (i < x && changeset(fileRev = fileRevsToVisit.get(i)) >= firstChangelogRevOfInterest) { | |
| 431 i++; | |
| 432 } | |
| 433 assert fileRev != BAD_REVISION; // there's at least 1 revision in fileRevsToVisit | |
| 434 if (i == x && changeset(fileRev) != firstChangelogRevOfInterest) { | |
| 435 assert false : "Requested changeset shall belong to the chunk"; | |
| 436 return; | |
| 437 } | |
| 438 fileRevsToVisit.trimTo(i); // no need to iterate more | |
| 439 // pretend fileRev got no parents | |
| 440 fileParentRevs.set(fileRev * 2, NO_REVISION); | |
| 441 fileParentRevs.set(fileRev, NO_REVISION); | |
| 442 } | |
| 443 | |
| 444 public int[] fileRevisions(HgIterateDirection iterateOrder) { | |
| 445 // fileRevsToVisit is { r10, r7, r6, r5, r0 }, new to old | |
| 446 int[] rv = fileRevsToVisit.toArray(); | |
| 447 if (iterateOrder == OldToNew) { | |
| 448 // reverse return value | |
| 449 for (int a = 0, b = rv.length-1; a < b; a++, b--) { | |
| 450 int t = rv[b]; | |
| 451 rv[b] = rv[a]; | |
| 452 rv[a] = t; | |
| 453 } | |
| 454 } | |
| 455 return rv; | |
| 456 } | |
| 457 | |
| 458 public int changeset(int fileRevIndex) { | |
| 459 return file2changelog[fileRevIndex]; | |
| 460 } | |
| 461 | |
| 462 public void fillFileParents(int fileRevIndex, int[] fileParents) { | |
| 463 if (fileRevIndex == 0 && originFileRev != BAD_REVISION) { | |
| 464 // this chunk continues another file | |
| 465 assert originFileRev != NO_REVISION; | |
| 466 fileParents[0] = originFileRev; | |
| 467 fileParents[1] = NO_REVISION; | |
| 468 return; | |
| 469 } | |
| 470 fileParents[0] = fileParentRevs.get(fileRevIndex * 2); | |
| 471 fileParents[1] = fileParentRevs.get(fileRevIndex * 2 + 1); | |
| 472 } | |
| 473 | |
| 474 public void fillCsetParents(int fileRevIndex, int[] csetParents) { | |
| 475 if (fileRevIndex == 0 && originFileRev != BAD_REVISION) { | |
| 476 assert originFileRev != NO_REVISION; | |
| 477 csetParents[0] = originChangelogRev; | |
| 478 csetParents[1] = NO_REVISION; // I wonder if possible to start a copy with two parents? | |
| 479 return; | |
| 480 } | |
| 481 int fp1 = fileParentRevs.get(fileRevIndex * 2); | |
| 482 int fp2 = fileParentRevs.get(fileRevIndex * 2 + 1); | |
| 483 csetParents[0] = fp1 == NO_REVISION ? NO_REVISION : changeset(fp1); | |
| 484 csetParents[1] = fp2 == NO_REVISION ? NO_REVISION : changeset(fp2); | |
| 485 } | |
| 486 } | |
| 487 } | 296 } |
