/*
 * Copyright (C) 2011, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.dfs;

import static java.util.stream.Collectors.joining;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.ObjectDatabase;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.ObjectReader;

/**
 * Manages objects stored in
 * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackFile} on a storage
 * system.
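 * <p>
 * Storage systems plug in by subclassing this database and supplying the
 * primitive operations. A minimal outline (illustrative only; the class name
 * is hypothetical and the abstract methods noted in the comment must also be
 * implemented):
 *
 * <pre>{@code
 * class MyObjDatabase extends DfsObjDatabase {
 *     MyObjDatabase(DfsRepository repo, DfsReaderOptions opts) {
 *         super(repo, opts);
 *     }
 *
 *     // Also override the abstract methods listPacks, newPack,
 *     // commitPackImpl, rollbackPack, openFile and writeFile
 *     // against the backing storage system.
 * }
 * }</pre>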
 */
public abstract class DfsObjDatabase extends ObjectDatabase {
    private static final PackList NO_PACKS = new PackList(
            new DfsPackFile[0],
            new DfsReftable[0]) {
        @Override
        boolean dirty() {
            return true;
        }

        @Override
        void clearDirty() {
            // Always dirty.
        }

        @Override
        public void markDirty() {
            // Always dirty.
        }
    };

    /**
     * Sources for a pack file.
     * <p>
     * <strong>Note:</strong> When sorting packs by source, do not use the
     * default comparator based on {@link Enum#compareTo}. Prefer
     * {@link #DEFAULT_COMPARATOR} or your own {@link ComparatorBuilder}.
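     * <p>
     * For example, a sketch of sorting pack descriptions by source, where
     * {@code descs} stands for any mutable {@code List<DfsPackDescription>}
     * obtained from this database:
     *
     * <pre>{@code
     * descs.sort(Comparator.comparing(DfsPackDescription::getPackSource,
     *         PackSource.DEFAULT_COMPARATOR));
     * }</pre>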
     */
    public enum PackSource {
        /** The pack is created by ObjectInserter due to local activity. */
        INSERT,

        /**
         * The pack is created by PackParser due to a network event.
         * <p>
         * A received pack can come from either a push into the repository or
         * a fetch into the repository; the direction doesn't matter. A received
         * pack was built by the remote Git implementation and may not match
         * the storage layout preferred by this version. Received packs are
         * likely to be either compacted or garbage collected in the future.
         */
        RECEIVE,

        /**
         * The pack was created by compacting multiple packs together.
         * <p>
         * Packs created by compacting multiple packs together aren't nearly
         * as efficient as a fully garbage collected repository, but may save
         * disk space by reducing redundant copies of base objects.
         *
         * @see DfsPackCompactor
         */
        COMPACT,

        /**
         * Pack was created by Git garbage collection by this implementation.
         * <p>
         * This source is only used by the {@link DfsGarbageCollector} when it
         * builds a pack file by traversing the object graph and copying all
         * reachable objects into a new pack stream.
         *
         * @see DfsGarbageCollector
         */
        GC,

        /** Created from non-heads by {@link DfsGarbageCollector}. */
        GC_REST,

        /**
         * RefTreeGraph pack was created by Git garbage collection.
         *
         * @see DfsGarbageCollector
         */
        GC_TXN,

        /**
         * Pack was created by Git garbage collection.
         * <p>
         * This pack contains only unreachable garbage that was found during
         * the last GC pass. It is retained in a new pack until it is safe to
         * prune these objects from the repository.
         */
        UNREACHABLE_GARBAGE;

        /**
         * Default comparator for sources.
         * <p>
         * Sorts generally newer, smaller types such as {@code INSERT} and
         * {@code RECEIVE} earlier; older, larger types such as {@code GC}
         * later; and {@code UNREACHABLE_GARBAGE} at the end.
         */
        public static final Comparator<PackSource> DEFAULT_COMPARATOR =
                new ComparatorBuilder()
                        .add(INSERT, RECEIVE)
                        .add(COMPACT)
                        .add(GC)
                        .add(GC_REST)
                        .add(GC_TXN)
                        .add(UNREACHABLE_GARBAGE)
                        .build();

        /**
         * Builder for describing {@link PackSource} ordering where some
         * values are explicitly considered equal to others.
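         * <p>
         * For example, a hypothetical ordering that ranks all garbage
         * collection outputs as equal could be built as:
         *
         * <pre>{@code
         * Comparator<PackSource> cmp = new ComparatorBuilder()
         *         .add(INSERT, RECEIVE)
         *         .add(COMPACT)
         *         .add(GC, GC_REST, GC_TXN)
         *         .add(UNREACHABLE_GARBAGE)
         *         .build();
         * }</pre>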
         */
        public static class ComparatorBuilder {
            private final Map<PackSource, Integer> ranks = new HashMap<>();

            private int counter;

            /**
             * Add a collection of sources that should sort as equal.
             * <p>
             * Sources in the input will sort after sources listed in previous
             * calls to this method.
             *
             * @param sources
             *            sources in this equivalence class.
             * @return this.
             */
            public ComparatorBuilder add(PackSource... sources) {
                for (PackSource s : sources) {
                    ranks.put(s, Integer.valueOf(counter));
                }
                counter++;
                return this;
            }

            /**
             * Build the comparator.
             *
             * @return new comparator instance.
             * @throws IllegalArgumentException
             *             not all {@link PackSource} instances were
             *             explicitly assigned an equivalence class.
             */
            public Comparator<PackSource> build() {
                return new PackSourceComparator(ranks);
            }
        }

        private static class PackSourceComparator
                implements Comparator<PackSource> {
            private final Map<PackSource, Integer> ranks;

            private PackSourceComparator(Map<PackSource, Integer> ranks) {
                if (!ranks.keySet().equals(
                        new HashSet<>(Arrays.asList(PackSource.values())))) {
                    throw new IllegalArgumentException();
                }
                this.ranks = new HashMap<>(ranks);
            }

            @Override
            public int compare(PackSource a, PackSource b) {
                return ranks.get(a).compareTo(ranks.get(b));
            }

            @Override
            public String toString() {
                return Arrays.stream(PackSource.values())
                        .map(s -> s + "=" + ranks.get(s)) //$NON-NLS-1$
                        .collect(joining(", ", getClass().getSimpleName() + "{", "}")); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
            }
        }
    }

    private final AtomicReference<PackList> packList;

    private final DfsRepository repository;

    private DfsReaderOptions readerOptions;

    private Comparator<DfsPackDescription> packComparator;

    /**
     * Initialize an object database for our repository.
     *
     * @param repository
     *            repository owning this object database.
     * @param options
     *            how readers should access the object database.
     */
    protected DfsObjDatabase(DfsRepository repository,
            DfsReaderOptions options) {
        this.repository = repository;
        this.packList = new AtomicReference<>(NO_PACKS);
        this.readerOptions = options;
        this.packComparator = DfsPackDescription.objectLookupComparator();
    }

    /**
     * Get configured reader options, such as read-ahead.
     *
     * @return configured reader options, such as read-ahead.
     */
    public DfsReaderOptions getReaderOptions() {
        return readerOptions;
    }

    /**
     * Set the comparator used when searching for objects across packs.
     * <p>
     * An optimal comparator will find more objects without having to load
     * large idx files from storage only to find that they don't contain the
     * object. See {@link DfsPackDescription#objectLookupComparator()} for the
     * default heuristics.
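     * <p>
     * For example, a sketch of a custom ordering; this assumes the object
     * count recorded in {@link DfsPackDescription} is a useful proxy for idx
     * size in your storage:
     *
     * <pre>{@code
     * db.setPackComparator(
     *         Comparator.comparingLong(DfsPackDescription::getObjectCount));
     * }</pre>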
     *
     * @param packComparator
     *            comparator.
     */
    public void setPackComparator(Comparator<DfsPackDescription> packComparator) {
        this.packComparator = packComparator;
    }

    /** {@inheritDoc} */
    @Override
    public DfsReader newReader() {
        return new DfsReader(this);
    }

    /** {@inheritDoc} */
    @Override
    public ObjectInserter newInserter() {
        return new DfsInserter(this);
    }

    /**
     * Scan and list all available pack files in the repository.
     *
     * @return list of available packs. The returned array is shared with the
     *         implementation and must not be modified by the caller.
     * @throws java.io.IOException
     *             the pack list cannot be initialized.
     */
    public DfsPackFile[] getPacks() throws IOException {
        return getPackList().packs;
    }

    /**
     * Scan and list all available reftable files in the repository.
     *
     * @return list of available reftables. The returned array is shared with
     *         the implementation and must not be modified by the caller.
     * @throws java.io.IOException
     *             the pack list cannot be initialized.
     */
    public DfsReftable[] getReftables() throws IOException {
        return getPackList().reftables;
    }

    /**
     * Scan and list all available pack files in the repository.
     *
     * @return list of available packs, with some additional metadata. The
     *         returned array is shared with the implementation and must not
     *         be modified by the caller.
     * @throws java.io.IOException
     *             the pack list cannot be initialized.
     */
    public PackList getPackList() throws IOException {
        return scanPacks(NO_PACKS);
    }

    /**
     * Get repository owning this object database.
     *
     * @return repository owning this object database.
     */
    protected DfsRepository getRepository() {
        return repository;
    }

    /**
     * List currently known pack files in the repository, without scanning.
     *
     * @return list of available packs. The returned array is shared with the
     *         implementation and must not be modified by the caller.
     */
    public DfsPackFile[] getCurrentPacks() {
        return getCurrentPackList().packs;
    }

    /**
     * List currently known reftable files in the repository, without
     * scanning.
     *
     * @return list of available reftables. The returned array is shared with
     *         the implementation and must not be modified by the caller.
     */
    public DfsReftable[] getCurrentReftables() {
        return getCurrentPackList().reftables;
    }

    /**
     * List currently known pack files in the repository, without scanning.
     *
     * @return list of available packs, with some additional metadata. The
     *         returned array is shared with the implementation and must not
     *         be modified by the caller.
     */
    public PackList getCurrentPackList() {
        return packList.get();
    }

    /**
     * Does the requested object exist in this database?
     * <p>
     * This differs from ObjectDatabase's implementation in that we can
     * selectively ignore unreachable (garbage) objects.
     *
     * @param objectId
     *            identity of the object to test for existence of.
     * @param avoidUnreachableObjects
     *            if true, ignore objects that are unreachable.
     * @return true if the specified object is stored in this database.
     * @throws java.io.IOException
     *             the object store cannot be accessed.
     */
    public boolean has(AnyObjectId objectId, boolean avoidUnreachableObjects)
            throws IOException {
        try (ObjectReader or = newReader()) {
            or.setAvoidUnreachableObjects(avoidUnreachableObjects);
            return or.has(objectId);
        }
    }

    /**
     * Generate a new unique name for a pack file.
     *
     * @param source
     *            where the pack stream is created.
     * @return a unique name for the pack file. Must not collide with any
     *         other pack file name in the same DFS.
     * @throws java.io.IOException
     *             a new unique pack description cannot be generated.
     */
    protected abstract DfsPackDescription newPack(PackSource source)
            throws IOException;

    /**
     * Generate a new unique name for a pack file.
     *
     * <p>
     * The default implementation of this method is equivalent to
     * {@code newPack(source).setEstimatedPackSize(estimatedPackSize)}, but
     * clients can override this method to use the given
     * {@code estimatedPackSize} value more efficiently while creating the new
     * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackDescription} object.
     *
     * @param source
     *            where the pack stream is created.
     * @param estimatedPackSize
     *            the estimated size of the pack.
     * @return a unique name for the pack file. Must not collide with any
     *         other pack file name in the same DFS.
     * @throws java.io.IOException
     *             a new unique pack description cannot be generated.
     */
    protected DfsPackDescription newPack(PackSource source,
            long estimatedPackSize) throws IOException {
        DfsPackDescription pack = newPack(source);
        pack.setEstimatedPackSize(estimatedPackSize);
        return pack;
    }

    /**
     * Commit a pack and index pair that was written to the DFS.
     * <p>
     * Committing the pack/index pair makes them visible to readers. The JGit
     * DFS code always writes the pack, then the index. This allows a simple
     * commit process to do nothing if readers always look for both files to
     * exist and the DFS performs atomic creation of the file (e.g. stream to
     * a temporary file and rename to target on close).
     * <p>
     * During pack compaction or GC the new pack file may be replacing other
     * older files. Implementations should remove those older files (if any)
     * as part of the commit of the new file.
     * <p>
     * This method is a trivial wrapper around
     * {@link #commitPackImpl(Collection, Collection)} that calls the
     * implementation and fires events.
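     * <p>
     * A sketch of the overall write-then-commit sequence, as seen by a
     * writer; the {@code pack} variable and the stream handling are
     * illustrative:
     *
     * <pre>{@code
     * DfsPackDescription pack = newPack(PackSource.INSERT);
     * try (DfsOutputStream out = writeFile(pack, PackExt.PACK)) {
     *     // stream the pack data
     * }
     * try (DfsOutputStream out = writeFile(pack, PackExt.INDEX)) {
     *     // stream the pack index
     * }
     * commitPack(Collections.singleton(pack), null);
     * }</pre>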
     *
     * @param desc
     *            description of the new packs.
     * @param replaces
     *            if not null, list of packs to remove.
     * @throws java.io.IOException
     *             the packs cannot be committed. On failure a rollback must
     *             also be attempted by the caller.
     */
    protected void commitPack(Collection<DfsPackDescription> desc,
            Collection<DfsPackDescription> replaces) throws IOException {
        commitPackImpl(desc, replaces);
        getRepository().fireEvent(new DfsPacksChangedEvent());
    }

    /**
     * Implementation of pack commit.
     *
     * @see #commitPack(Collection, Collection)
     * @param desc
     *            description of the new packs.
     * @param replaces
     *            if not null, list of packs to remove.
     * @throws java.io.IOException
     *             the packs cannot be committed.
     */
    protected abstract void commitPackImpl(Collection<DfsPackDescription> desc,
            Collection<DfsPackDescription> replaces) throws IOException;

    /**
     * Try to rollback a pack creation.
     * <p>
     * JGit DFS always writes the pack first, then the index. If the pack
     * does not yet exist, then neither does the index. A safe DFS
     * implementation would try to remove both files to ensure they are
     * really gone.
     * <p>
     * A rollback does not support failures, as it only occurs when there is
     * already a failure in progress. A DFS implementor may wish to log
     * warnings/error messages when a rollback fails, but should not send new
     * exceptions up the Java callstack.
     *
     * @param desc
     *            pack to delete.
     */
    protected abstract void rollbackPack(Collection<DfsPackDescription> desc);

    /**
     * List the available pack files.
     * <p>
     * The returned list must support random access and must be mutable by
     * the caller. It is sorted in place using the natural sorting of the
     * returned DfsPackDescription objects.
     *
     * @return available packs. May be empty if there are no packs.
     * @throws java.io.IOException
     *             the packs cannot be listed and the object database is not
     *             functional to the caller.
     */
    protected abstract List<DfsPackDescription> listPacks() throws IOException;

    /**
     * Open a pack, pack index, or other related file for reading.
     *
     * @param desc
     *            description of pack related to the data that will be read.
     *            This is an instance previously obtained from
     *            {@link #listPacks()}, but not necessarily from the same
     *            DfsObjDatabase instance.
     * @param ext
     *            file extension that will be read, e.g. "pack" or "idx".
     * @return channel to read the file.
     * @throws java.io.FileNotFoundException
     *             the file does not exist.
     * @throws java.io.IOException
     *             the file cannot be opened.
     */
    protected abstract ReadableChannel openFile(
            DfsPackDescription desc, PackExt ext)
            throws FileNotFoundException, IOException;

    /**
     * Open a pack, pack index, or other related file for writing.
     *
     * @param desc
     *            description of pack related to the data that will be
     *            written. This is an instance previously obtained from
     *            {@link #newPack(PackSource)}.
     * @param ext
     *            file extension that will be written, e.g. "pack" or "idx".
     * @return channel to write the file.
     * @throws java.io.IOException
     *             the file cannot be opened.
     */
    protected abstract DfsOutputStream writeFile(
            DfsPackDescription desc, PackExt ext) throws IOException;

    void addPack(DfsPackFile newPack) throws IOException {
        PackList o, n;
        do {
            o = packList.get();
            if (o == NO_PACKS) {
                // The repository may not have needed any existing objects to
                // complete the current task of creating a pack (e.g. push of
                // a pack with no external deltas). Because we don't scan for
                // newly added packs on missed object lookups, scan now to
                // make sure all older packs are available in the packList.
                o = scanPacks(o);

                // It's possible the scan identified the pack we were asked to
                // add, as the pack was already committed via commitPack().
                // If this is the case return without changing the list.
                for (DfsPackFile p : o.packs) {
                    if (p.key.equals(newPack.key)) {
                        return;
                    }
                }
            }

            DfsPackFile[] packs = new DfsPackFile[1 + o.packs.length];
            packs[0] = newPack;
            System.arraycopy(o.packs, 0, packs, 1, o.packs.length);
            n = new PackListImpl(packs, o.reftables);
        } while (!packList.compareAndSet(o, n));
    }

    void addReftable(DfsPackDescription add, Set<DfsPackDescription> remove)
            throws IOException {
        PackList o, n;
        do {
            o = packList.get();
            if (o == NO_PACKS) {
                o = scanPacks(o);
                for (DfsReftable t : o.reftables) {
                    if (t.getPackDescription().equals(add)) {
                        return;
                    }
                }
            }

            List<DfsReftable> tables = new ArrayList<>(1 + o.reftables.length);
            for (DfsReftable t : o.reftables) {
                if (!remove.contains(t.getPackDescription())) {
                    tables.add(t);
                }
            }
            tables.add(new DfsReftable(add));
            n = new PackListImpl(o.packs, tables.toArray(new DfsReftable[0]));
        } while (!packList.compareAndSet(o, n));
    }

    PackList scanPacks(PackList original) throws IOException {
        PackList o, n;
        synchronized (packList) {
            do {
                o = packList.get();
                if (o != original) {
                    // Another thread did the scan for us, while we
                    // were blocked on the monitor above.
                    //
                    return o;
                }
                n = scanPacksImpl(o);
                if (n == o)
                    return n;
            } while (!packList.compareAndSet(o, n));
        }
        getRepository().fireEvent(new DfsPacksChangedEvent());
        return n;
    }

    private PackList scanPacksImpl(PackList old) throws IOException {
        DfsBlockCache cache = DfsBlockCache.getInstance();
        Map<DfsPackDescription, DfsPackFile> packs = packMap(old);
        Map<DfsPackDescription, DfsReftable> reftables = reftableMap(old);

        List<DfsPackDescription> scanned = listPacks();
        Collections.sort(scanned, packComparator);

        List<DfsPackFile> newPacks = new ArrayList<>(scanned.size());
        List<DfsReftable> newReftables = new ArrayList<>(scanned.size());
        boolean foundNew = false;
        for (DfsPackDescription dsc : scanned) {
            DfsPackFile oldPack = packs.remove(dsc);
            if (oldPack != null) {
                newPacks.add(oldPack);
            } else if (dsc.hasFileExt(PackExt.PACK)) {
                newPacks.add(new DfsPackFile(cache, dsc));
                foundNew = true;
            }

            DfsReftable oldReftable = reftables.remove(dsc);
            if (oldReftable != null) {
                newReftables.add(oldReftable);
            } else if (dsc.hasFileExt(PackExt.REFTABLE)) {
                newReftables.add(new DfsReftable(cache, dsc));
                foundNew = true;
            }
        }

        if (newPacks.isEmpty() && newReftables.isEmpty())
            return new PackListImpl(NO_PACKS.packs, NO_PACKS.reftables);
        if (!foundNew) {
            old.clearDirty();
            return old;
        }
        Collections.sort(newReftables, reftableComparator());
        return new PackListImpl(
                newPacks.toArray(new DfsPackFile[0]),
                newReftables.toArray(new DfsReftable[0]));
    }

    private static Map<DfsPackDescription, DfsPackFile> packMap(PackList old) {
        Map<DfsPackDescription, DfsPackFile> forReuse = new HashMap<>();
        for (DfsPackFile p : old.packs) {
            if (!p.invalid()) {
                forReuse.put(p.desc, p);
            }
        }
        return forReuse;
    }

    private static Map<DfsPackDescription, DfsReftable> reftableMap(PackList old) {
        Map<DfsPackDescription, DfsReftable> forReuse = new HashMap<>();
        for (DfsReftable p : old.reftables) {
            if (!p.invalid()) {
                forReuse.put(p.desc, p);
            }
        }
        return forReuse;
    }

    /**
     * Get comparator to sort {@link DfsReftable} by priority.
     *
     * @return comparator to sort {@link DfsReftable} by priority.
     */
    protected Comparator<DfsReftable> reftableComparator() {
        return Comparator.comparing(
                DfsReftable::getPackDescription,
                DfsPackDescription.reftableComparator());
    }

    /**
     * Clears the cached list of packs, forcing them to be scanned again.
     */
    protected void clearCache() {
        packList.set(NO_PACKS);
    }

    /** {@inheritDoc} */
    @Override
    public void close() {
        packList.set(NO_PACKS);
    }

    /** Snapshot of packs scanned in a single pass. */
    public static abstract class PackList {
        /** All known packs, sorted. */
        public final DfsPackFile[] packs;

        /** All known reftables, sorted. */
        public final DfsReftable[] reftables;

        private long lastModified = -1;

        PackList(DfsPackFile[] packs, DfsReftable[] reftables) {
            this.packs = packs;
            this.reftables = reftables;
        }

        /** @return last modified time of all packs, in milliseconds. */
        public long getLastModified() {
            if (lastModified < 0) {
                long max = 0;
                for (DfsPackFile pack : packs) {
                    max = Math.max(max,
                            pack.getPackDescription().getLastModified());
                }
                lastModified = max;
            }
            return lastModified;
        }

        abstract boolean dirty();

        abstract void clearDirty();

        /**
         * Mark pack list as dirty.
         * <p>
         * Used when the caller knows that new data might have been written to
         * the repository that could invalidate open readers depending on this
         * pack list, for example if refs are newly scanned.
         */
        public abstract void markDirty();
    }

    private static final class PackListImpl extends PackList {
        private volatile boolean dirty;

        PackListImpl(DfsPackFile[] packs, DfsReftable[] reftables) {
            super(packs, reftables);
        }

        @Override
        boolean dirty() {
            return dirty;
        }

        @Override
        void clearDirty() {
            dirty = false;
        }

        @Override
        public void markDirty() {
            dirty = true;
        }
    }
}