View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.pack;
46  
47  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
48  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
49  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
50  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
51  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
52  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
53  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
54  
55  import java.io.IOException;
56  import java.io.OutputStream;
57  import java.lang.ref.WeakReference;
58  import java.security.MessageDigest;
59  import java.text.MessageFormat;
60  import java.util.ArrayList;
61  import java.util.Arrays;
62  import java.util.Collection;
63  import java.util.Collections;
64  import java.util.Comparator;
65  import java.util.HashSet;
66  import java.util.Iterator;
67  import java.util.List;
68  import java.util.Map;
69  import java.util.NoSuchElementException;
70  import java.util.Set;
71  import java.util.concurrent.ConcurrentHashMap;
72  import java.util.concurrent.ExecutionException;
73  import java.util.concurrent.Executor;
74  import java.util.concurrent.ExecutorService;
75  import java.util.concurrent.Executors;
76  import java.util.concurrent.Future;
77  import java.util.concurrent.TimeUnit;
78  import java.util.zip.CRC32;
79  import java.util.zip.CheckedOutputStream;
80  import java.util.zip.Deflater;
81  import java.util.zip.DeflaterOutputStream;
82  
83  import org.eclipse.jgit.annotations.NonNull;
84  import org.eclipse.jgit.annotations.Nullable;
85  import org.eclipse.jgit.errors.CorruptObjectException;
86  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
87  import org.eclipse.jgit.errors.LargeObjectException;
88  import org.eclipse.jgit.errors.MissingObjectException;
89  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
90  import org.eclipse.jgit.internal.JGitText;
91  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
92  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
93  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
94  import org.eclipse.jgit.lib.AnyObjectId;
95  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
96  import org.eclipse.jgit.lib.BatchingProgressMonitor;
97  import org.eclipse.jgit.lib.BitmapIndex;
98  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
99  import org.eclipse.jgit.lib.BitmapObject;
100 import org.eclipse.jgit.lib.Constants;
101 import org.eclipse.jgit.lib.NullProgressMonitor;
102 import org.eclipse.jgit.lib.ObjectId;
103 import org.eclipse.jgit.lib.ObjectIdOwnerMap;
104 import org.eclipse.jgit.lib.ObjectIdSet;
105 import org.eclipse.jgit.lib.ObjectLoader;
106 import org.eclipse.jgit.lib.ObjectReader;
107 import org.eclipse.jgit.lib.ProgressMonitor;
108 import org.eclipse.jgit.lib.Repository;
109 import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
110 import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
111 import org.eclipse.jgit.revwalk.BitmapWalker;
112 import org.eclipse.jgit.revwalk.DepthWalk;
113 import org.eclipse.jgit.revwalk.ObjectWalk;
114 import org.eclipse.jgit.revwalk.RevCommit;
115 import org.eclipse.jgit.revwalk.RevFlag;
116 import org.eclipse.jgit.revwalk.RevObject;
117 import org.eclipse.jgit.revwalk.RevSort;
118 import org.eclipse.jgit.revwalk.RevTag;
119 import org.eclipse.jgit.revwalk.RevTree;
120 import org.eclipse.jgit.storage.pack.PackConfig;
121 import org.eclipse.jgit.storage.pack.PackStatistics;
122 import org.eclipse.jgit.transport.ObjectCountCallback;
123 import org.eclipse.jgit.transport.WriteAbortedException;
124 import org.eclipse.jgit.util.BlockList;
125 import org.eclipse.jgit.util.TemporaryBuffer;
126 
127 /**
128  * <p>
 * PackWriter class is responsible for generating pack files from specified set
 * of objects from repository. This implementation produces pack files in format
 * version 2.
132  * </p>
133  * <p>
134  * Source of objects may be specified in two ways:
135  * <ul>
136  * <li>(usually) by providing sets of interesting and uninteresting objects in
137  * repository - all interesting objects and their ancestors except uninteresting
138  * objects and their ancestors will be included in pack, or</li>
139  * <li>by providing iterator of {@link org.eclipse.jgit.revwalk.RevObject}
140  * specifying exact list and order of objects in pack</li>
141  * </ul>
142  * <p>
143  * Typical usage consists of creating an instance, configuring options,
144  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
145  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
146  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
147  * pack is being stored as a file the matching index can be written out after
148  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
149  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
150  * followed by {@link #writeBitmapIndex(OutputStream)}.
151  * </p>
152  * <p>
 * This class provides a set of configurable options and
154  * {@link org.eclipse.jgit.lib.ProgressMonitor} support, as operations may take
155  * a long time for big repositories. Deltas searching algorithm is <b>NOT
156  * IMPLEMENTED</b> yet - this implementation relies only on deltas and objects
157  * reuse.
158  * </p>
159  * <p>
160  * This class is not thread safe. It is intended to be used in one thread as a
161  * single pass to produce one pack. Invoking methods multiple times or out of
162  * order is not supported as internal data structures are destroyed during
163  * certain phases to save memory when packing large repositories.
164  * </p>
165  */
public class PackWriter implements AutoCloseable {
	/** Pack file format version produced by this writer (version 2). */
	private static final int PACK_VERSION_GENERATED = 2;

	/** Empty set of objects for {@code preparePack()}. */
	public static final Set<ObjectId> NONE = Collections.emptySet();

	// Registry of all live writers. Keys are weak references so abandoned
	// writers can still be garbage collected; the Boolean value is a
	// placeholder (always TRUE) because a concurrent Set is not used here.
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<>();
174 
175 	private static final Iterable<PackWriter> instancesIterable = new Iterable<PackWriter>() {
176 		@Override
177 		public Iterator<PackWriter> iterator() {
178 			return new Iterator<PackWriter>() {
179 				private final Iterator<WeakReference<PackWriter>> it =
180 						instances.keySet().iterator();
181 				private PackWriter next;
182 
183 				@Override
184 				public boolean hasNext() {
185 					if (next != null)
186 						return true;
187 					while (it.hasNext()) {
188 						WeakReference<PackWriter> ref = it.next();
189 						next = ref.get();
190 						if (next != null)
191 							return true;
192 						it.remove();
193 					}
194 					return false;
195 				}
196 
197 				@Override
198 				public PackWriter next() {
199 					if (hasNext()) {
200 						PackWriter result = next;
201 						next = null;
202 						return result;
203 					}
204 					throw new NoSuchElementException();
205 				}
206 
207 				@Override
208 				public void remove() {
209 					throw new UnsupportedOperationException();
210 				}
211 			};
212 		}
213 	};
214 
	/**
	 * Get all allocated, non-released PackWriters instances.
	 * <p>
	 * The returned {@link Iterable} is a live view backed by weak references;
	 * writers that have already been garbage collected are skipped.
	 *
	 * @return all allocated, non-released PackWriters instances.
	 */
	public static Iterable<PackWriter> getInstances() {
		return instancesIterable;
	}
223 
224 	@SuppressWarnings("unchecked")
225 	BlockList<ObjectToPack> objectsLists[] = new BlockList[OBJ_TAG + 1];
226 	{
227 		objectsLists[OBJ_COMMIT] = new BlockList<>();
228 		objectsLists[OBJ_TREE] = new BlockList<>();
229 		objectsLists[OBJ_BLOB] = new BlockList<>();
230 		objectsLists[OBJ_TAG] = new BlockList<>();
231 	}
232 
	// All objects selected for the pack, addressable by their ObjectId.
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	// Cached packs reused verbatim; when non-empty, index creation is
	// unavailable (see isIndexDisabled()).
	private List<CachedPack> cachedPacks = new ArrayList<>(2);

	// Objects annotated tags point at; see setTagTargets(Set).
	private Set<ObjectId> tagTargets = NONE;

	// Objects excluded from bitmap commit selection; see the 5-argument
	// preparePack overload.
	private Set<? extends ObjectId> excludeFromBitmapSelection = NONE;

	// Sets whose members must not appear in the output pack; null until
	// excludeObjects(ObjectIdSet) is first called.
	private ObjectIdSet[] excludeInPacks;

	// One of excludeInPacks, kept non-null whenever excludeInPacks is
	// non-null; presumably a lookup cache — confirm in the exclude logic,
	// which is outside this chunk.
	private ObjectIdSet excludeInPackLast;

	// Deflater owned by this writer; NOTE(review): presumably allocated
	// lazily and released in close() — not visible in this chunk.
	private Deflater myDeflater;

	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	final PackConfig config;

	// Statistics accumulated while building and writing the pack; either
	// caller-supplied or created by the constructor.
	private final PackStatistics.Accumulator stats;

	// Tracks the writer's current phase; created in the constructor.
	private final MutableState state;

	// Weak self-reference registered in the static instances map so that
	// getInstances() can enumerate live writers.
	private final WeakReference<PackWriter> selfRef;

	private PackStatistics.ObjectType.Accumulator typeStats;

	// Objects sorted by name; presumably built on demand for index
	// writing — confirm in writeIndex, outside this chunk.
	private List<ObjectToPack> sortedByName;

	// Checksum of the written pack (name suggests the trailing pack hash);
	// NOTE(review): assigned outside this chunk.
	private byte packcsum[];

	// True to write delta bases as offsets; seeded from PackConfig.
	private boolean deltaBaseAsOffset;

	// True to reuse existing deltas; seeded from PackConfig.
	private boolean reuseDeltas;

	private boolean reuseDeltaCommits;

	// Validate reused objects before copying; defaults to true (paranoid).
	private boolean reuseValidate;

	// True when producing a thin pack; see setThin(boolean).
	private boolean thin;

	private boolean useCachedPacks;

	private boolean useBitmaps;

	// Silently skip uninteresting objects missing locally; true by default.
	private boolean ignoreMissingUninteresting = true;

	private boolean pruneCurrentObjectList;

	// True once setShallowPack(int, Collection) has configured this writer.
	private boolean shallowPack;

	private boolean canBuildBitmaps;

	// True to suppress index file creation; see setIndexDisabled(boolean).
	private boolean indexDisabled;

	// Maximum history depth for a shallow pack; 1 means "wants" only.
	private int depth;

	// Objects that used to be shallow on the client; see setShallowPack.
	private Collection<? extends ObjectId> unshallowObjects;

	private PackBitmapIndexBuilder writeBitmaps;

	private CRC32 crc32;

	// Optional callback informed of the object count; see
	// setObjectCountCallback(ObjectCountCallback).
	private ObjectCountCallback callback;
305 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * A new {@link ObjectReader} is obtained from {@code repo}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 */
	public PackWriter(final Repository repo) {
		this(repo, repo.newObjectReader());
	}

	/**
	 * Create a writer to load objects from the specified reader.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * A default {@link PackConfig} is used.
	 *
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(final ObjectReader reader) {
		this(new PackConfig(), reader);
	}

	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * The pack configuration is read from {@code repo}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(final Repository repo, final ObjectReader reader) {
		this(new PackConfig(repo), reader);
	}

	/**
	 * Create writer with a specified configuration.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * Statistics are accumulated locally.
	 *
	 * @param config
	 *            configuration for the pack writer.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(final PackConfig config, final ObjectReader reader) {
		this(config, reader, null);
	}
361 
362 	/**
363 	 * Create writer with a specified configuration.
364 	 * <p>
365 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
366 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
367 	 *
368 	 * @param config
369 	 *            configuration for the pack writer.
370 	 * @param reader
371 	 *            reader to read from the repository with.
372 	 * @param statsAccumulator
373 	 *            accumulator for statics
374 	 */
375 	public PackWriter(PackConfig config, final ObjectReader reader,
376 			@Nullable PackStatistics.Accumulator statsAccumulator) {
377 		this.config = config;
378 		this.reader = reader;
379 		if (reader instanceof ObjectReuseAsIs)
380 			reuseSupport = ((ObjectReuseAsIs) reader);
381 		else
382 			reuseSupport = null;
383 
384 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
385 		reuseDeltas = config.isReuseDeltas();
386 		reuseValidate = true; // be paranoid by default
387 		stats = statsAccumulator != null ? statsAccumulator
388 				: new PackStatistics.Accumulator();
389 		state = new MutableState();
390 		selfRef = new WeakReference<>(this);
391 		instances.put(selfRef, Boolean.TRUE);
392 	}
393 
	/**
	 * Set the {@code ObjectCountCallback}.
	 * <p>
	 * It should be set before calling
	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
	 *
	 * @param callback
	 *            the callback to set
	 * @return this object for chaining.
	 */
	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
		// Fluent setter; the callback is stored for use during writePack.
		this.callback = callback;
		return this;
	}
408 
409 	/**
410 	 * Records the set of shallow commits in the client.
411 	 *
412 	 * @param clientShallowCommits
413 	 *            the shallow commits in the client
414 	 */
415 	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
416 		stats.clientShallowCommits = Collections
417 				.unmodifiableSet(new HashSet<>(clientShallowCommits));
418 	}
419 
	/**
	 * Check whether writer can store delta base as an offset (new style
	 * reducing pack size) or should store it as an object id (legacy style,
	 * compatible with old readers).
	 * <p>
	 * The initial value is taken from {@link PackConfig#isDeltaBaseAsOffset()}
	 * in the constructor.
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 *
	 * @return true if delta base is stored as an offset; false if it is stored
	 *         as an object id.
	 */
	public boolean isDeltaBaseAsOffset() {
		return deltaBaseAsOffset;
	}

	/**
	 * Set writer delta base format. Delta base can be written as an offset in a
	 * pack file (new approach reducing file size) or as an object id (legacy
	 * approach, compatible with old readers).
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 *
	 * @param deltaBaseAsOffset
	 *            boolean indicating whether delta base can be stored as an
	 *            offset.
	 */
	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
		this.deltaBaseAsOffset = deltaBaseAsOffset;
	}
448 
	/**
	 * Check if the writer will reuse commits that are already stored as deltas.
	 *
	 * @return true if the writer would reuse commits stored as deltas, assuming
	 *         delta reuse is already enabled.
	 */
	public boolean isReuseDeltaCommits() {
		return reuseDeltaCommits;
	}

	/**
	 * Set the writer to reuse existing delta versions of commits.
	 *
	 * @param reuse
	 *            if true, the writer will reuse any commits stored as deltas.
	 *            By default the writer does not reuse delta commits.
	 */
	public void setReuseDeltaCommits(boolean reuse) {
		reuseDeltaCommits = reuse;
	}

	/**
	 * Check if the writer validates objects before copying them.
	 * <p>
	 * Validation is enabled by default (the constructor is "paranoid by
	 * default").
	 *
	 * @return true if validation is enabled; false if the reader will handle
	 *         object validation as a side-effect of it consuming the output.
	 */
	public boolean isReuseValidatingObjects() {
		return reuseValidate;
	}

	/**
	 * Enable (or disable) object validation during packing.
	 *
	 * @param validate
	 *            if true the pack writer will validate an object before it is
	 *            put into the output. This additional validation work may be
	 *            necessary to avoid propagating corruption from one local pack
	 *            file to another local pack file.
	 */
	public void setReuseValidatingObjects(boolean validate) {
		reuseValidate = validate;
	}
492 
	/**
	 * Whether this writer is producing a thin pack.
	 *
	 * @return true if this writer is producing a thin pack.
	 */
	public boolean isThin() {
		return thin;
	}

	/**
	 * Whether writer may pack objects with delta base object not within set of
	 * objects to pack.
	 *
	 * @param packthin
	 *            a boolean indicating whether writer may pack objects with
	 *            delta base object not within set of objects to pack, but
	 *            belonging to party repository (uninteresting/boundary) as
	 *            determined by set; this kind of pack is used only for
	 *            transport; true - to produce thin pack, false - otherwise.
	 */
	public void setThin(final boolean packthin) {
		thin = packthin;
	}
516 
	/**
	 * Whether to reuse cached packs.
	 *
	 * @return {@code true} to reuse cached packs. If true index creation isn't
	 *         available.
	 */
	public boolean isUseCachedPacks() {
		return useCachedPacks;
	}

	/**
	 * Whether to use cached packs.
	 *
	 * @param useCached
	 *            if set to {@code true} and a cached pack is present, it will
	 *            be appended onto the end of a thin-pack, reducing the amount
	 *            of working set space and CPU used by PackWriter. Enabling this
	 *            feature prevents PackWriter from creating an index for the
	 *            newly created pack, so it's only suitable for writing to a
	 *            network client, where the client will make the index.
	 */
	public void setUseCachedPacks(boolean useCached) {
		useCachedPacks = useCached;
	}

	/**
	 * Whether to use bitmaps.
	 *
	 * @return {@code true} to use bitmaps for ObjectWalks, if available.
	 */
	public boolean isUseBitmaps() {
		return useBitmaps;
	}

	/**
	 * Whether to use bitmaps.
	 *
	 * @param useBitmaps
	 *            if set to true, bitmaps will be used when preparing a pack.
	 */
	public void setUseBitmaps(boolean useBitmaps) {
		this.useBitmaps = useBitmaps;
	}
560 
	/**
	 * Whether the index file cannot be created by this PackWriter.
	 * <p>
	 * Index creation is also unavailable when cached packs are appended to
	 * the output (see {@link #setUseCachedPacks(boolean)}).
	 *
	 * @return {@code true} if the index file cannot be created by this
	 *         PackWriter.
	 */
	public boolean isIndexDisabled() {
		return indexDisabled || !cachedPacks.isEmpty();
	}

	/**
	 * Whether to disable creation of the index file.
	 *
	 * @param noIndex
	 *            {@code true} to disable creation of the index file.
	 */
	public void setIndexDisabled(boolean noIndex) {
		this.indexDisabled = noIndex;
	}

	/**
	 * Whether to ignore missing uninteresting objects.
	 *
	 * @return {@code true} to ignore objects that are uninteresting and also
	 *         not found on local disk; false to throw a
	 *         {@link org.eclipse.jgit.errors.MissingObjectException} out of
	 *         {@link #preparePack(ProgressMonitor, Set, Set)} if an
	 *         uninteresting object is not in the source repository. By default,
	 *         true, permitting gracefully ignoring of uninteresting objects.
	 */
	public boolean isIgnoreMissingUninteresting() {
		return ignoreMissingUninteresting;
	}

	/**
	 * Whether writer should ignore non existing uninteresting objects.
	 *
	 * @param ignore
	 *            {@code true} if writer should ignore non existing
	 *            uninteresting objects during construction set of objects to
	 *            pack; false otherwise - non existing uninteresting objects may
	 *            cause {@link org.eclipse.jgit.errors.MissingObjectException}
	 */
	public void setIgnoreMissingUninteresting(final boolean ignore) {
		ignoreMissingUninteresting = ignore;
	}
607 
	/**
	 * Set the tag targets that should be hoisted earlier during packing.
	 * <p>
	 * Callers may put objects into this set before invoking any of the
	 * preparePack methods to influence where an annotated tag's target is
	 * stored within the resulting pack. Typically these will be clustered
	 * together, and hoisted earlier in the file even if they are ancient
	 * revisions, allowing readers to find tag targets with better locality.
	 *
	 * @param objects
	 *            objects that annotated tags point at.
	 */
	public void setTagTargets(Set<ObjectId> objects) {
		// The supplied set is stored as-is, not copied.
		tagTargets = objects;
	}

	/**
	 * Configure this pack for a shallow clone.
	 *
	 * @param depth
	 *            maximum depth of history to return. 1 means return only the
	 *            "wants".
	 * @param unshallow
	 *            objects which used to be shallow on the client, but are being
	 *            extended as part of this fetch
	 */
	public void setShallowPack(int depth,
			Collection<? extends ObjectId> unshallow) {
		// Once shallow, getObjectWalk() produces a depth-limited DepthWalk
		// instead of a plain ObjectWalk.
		this.shallowPack = true;
		this.depth = depth;
		this.unshallowObjects = unshallow;
	}
640 
641 	/**
642 	 * Returns objects number in a pack file that was created by this writer.
643 	 *
644 	 * @return number of objects in pack.
645 	 * @throws java.io.IOException
646 	 *             a cached pack cannot supply its object count.
647 	 */
648 	public long getObjectCount() throws IOException {
649 		if (stats.totalObjects == 0) {
650 			long objCnt = 0;
651 
652 			objCnt += objectsLists[OBJ_COMMIT].size();
653 			objCnt += objectsLists[OBJ_TREE].size();
654 			objCnt += objectsLists[OBJ_BLOB].size();
655 			objCnt += objectsLists[OBJ_TAG].size();
656 
657 			for (CachedPack pack : cachedPacks)
658 				objCnt += pack.getObjectCount();
659 			return objCnt;
660 		}
661 		return stats.totalObjects;
662 	}
663 
664 	/**
665 	 * Returns the object ids in the pack file that was created by this writer.
666 	 * <p>
667 	 * This method can only be invoked after
668 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
669 	 * been invoked and completed successfully.
670 	 *
671 	 * @return set of objects in pack.
672 	 * @throws java.io.IOException
673 	 *             a cached pack cannot supply its object ids.
674 	 */
675 	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
676 			throws IOException {
677 		if (!cachedPacks.isEmpty())
678 			throw new IOException(
679 					JGitText.get().cachedPacksPreventsListingObjects);
680 
681 		if (writeBitmaps != null) {
682 			return writeBitmaps.getObjectSet();
683 		}
684 
685 		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
686 		for (BlockList<ObjectToPack> objList : objectsLists) {
687 			if (objList != null) {
688 				for (ObjectToPack otp : objList)
689 					r.add(new ObjectIdOwnerMap.Entry(otp) {
690 						// A new entry that copies the ObjectId
691 					});
692 			}
693 		}
694 		return r;
695 	}
696 
697 	/**
698 	 * Add a pack index whose contents should be excluded from the result.
699 	 *
700 	 * @param idx
701 	 *            objects in this index will not be in the output pack.
702 	 */
703 	public void excludeObjects(ObjectIdSet idx) {
704 		if (excludeInPacks == null) {
705 			excludeInPacks = new ObjectIdSet[] { idx };
706 			excludeInPackLast = idx;
707 		} else {
708 			int cnt = excludeInPacks.length;
709 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
710 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
711 			newList[cnt] = idx;
712 			excludeInPacks = newList;
713 		}
714 	}
715 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Iterator <b>exactly</b> determines which objects are included in a pack
	 * and order they appear in pack (except that objects order by type is not
	 * needed at input). This order should conform general rules of ordering
	 * objects in git - by recency and path (type and delta-base first is
	 * internally secured) and responsibility for guaranteeing this order is on
	 * a caller side. Iterator must return each id of object to write exactly
	 * once.
	 * </p>
	 *
	 * @param objectsSource
	 *            iterator of object to store in a pack; order of objects within
	 *            each type is important, ordering by type is not needed;
	 *            allowed types for objects are
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_COMMIT},
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TREE},
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_BLOB} and
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TAG}; objects
	 *            returned by iterator may be later reused by caller as object
	 *            id and type are internally copied in each iteration.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public void preparePack(@NonNull Iterator<RevObject> objectsSource)
			throws IOException {
		// addObject copies the id and type, so callers may safely reuse the
		// instances returned by the iterator (see javadoc above).
		while (objectsSource.hasNext()) {
			addObject(objectsSource.next());
		}
	}
747 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Basing on these 2 sets, another set of objects to put in a pack file is
	 * created: this set consists of all objects reachable (ancestors) from
	 * interesting objects, except uninteresting objects and their ancestors.
	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
	 * extensively to find out that appropriate set of output objects and their
	 * optimal order in output pack. Order is consistent with general git
	 * in-pack rules: sort by object type, recency, path and delta-base first.
	 * </p>
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            collection of objects to be marked as interesting (start
	 *            points of graph traversal). Must not be {@code null}.
	 * @param have
	 *            collection of objects to be marked as uninteresting (end
	 *            points of graph traversal). Pass {@link #NONE} if all objects
	 *            reachable from {@code want} are desired, such as when serving
	 *            a clone.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have) throws IOException {
		// Equivalent to supplying NONE for both shallow and noBitmaps.
		preparePack(countingMonitor, want, have, NONE, NONE);
	}

	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
	 * the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because they
	 *            are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not be
	 *            {@code null}.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow) throws IOException {
		// Equivalent to supplying NONE for noBitmaps.
		preparePack(countingMonitor, want, have, shallow, NONE);
	}
814 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
	 * the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because they
	 *            are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not be
	 *            {@code null}.
	 * @param noBitmaps
	 *            collection of objects to be excluded from bitmap commit
	 *            selection.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		// The walk is depth-limited when setShallowPack was used; close it
		// when enumeration finishes so its resources are released promptly.
		try (ObjectWalk ow = getObjectWalk()) {
			ow.assumeShallow(shallow);
			preparePack(countingMonitor, ow, want, have, noBitmaps);
		}
	}
857 
858 	private ObjectWalk getObjectWalk() {
859 		return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
860 				: new ObjectWalk(reader);
861 	}
862 
863 	/**
864 	 * Prepare the list of objects to be written to the pack stream.
865 	 * <p>
866 	 * Basing on these 2 sets, another set of objects to put in a pack file is
867 	 * created: this set consists of all objects reachable (ancestors) from
868 	 * interesting objects, except uninteresting objects and their ancestors.
869 	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
870 	 * extensively to find out that appropriate set of output objects and their
871 	 * optimal order in output pack. Order is consistent with general git
872 	 * in-pack rules: sort by object type, recency, path and delta-base first.
873 	 * </p>
874 	 *
875 	 * @param countingMonitor
876 	 *            progress during object enumeration.
877 	 * @param walk
878 	 *            ObjectWalk to perform enumeration.
879 	 * @param interestingObjects
880 	 *            collection of objects to be marked as interesting (start
881 	 *            points of graph traversal). Must not be {@code null}.
882 	 * @param uninterestingObjects
883 	 *            collection of objects to be marked as uninteresting (end
884 	 *            points of graph traversal). Pass {@link #NONE} if all objects
885 	 *            reachable from {@code want} are desired, such as when serving
886 	 *            a clone.
887 	 * @param noBitmaps
888 	 *            collection of objects to be excluded from bitmap commit
889 	 *            selection.
890 	 * @throws java.io.IOException
891 	 *             when some I/O problem occur during reading objects.
892 	 */
893 	public void preparePack(ProgressMonitor countingMonitor,
894 			@NonNull ObjectWalk walk,
895 			@NonNull Set<? extends ObjectId> interestingObjects,
896 			@NonNull Set<? extends ObjectId> uninterestingObjects,
897 			@NonNull Set<? extends ObjectId> noBitmaps)
898 			throws IOException {
899 		if (countingMonitor == null)
900 			countingMonitor = NullProgressMonitor.INSTANCE;
901 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
902 			throw new IllegalArgumentException(
903 					JGitText.get().shallowPacksRequireDepthWalk);
904 		findObjectsToPack(countingMonitor, walk, interestingObjects,
905 				uninterestingObjects, noBitmaps);
906 	}
907 
908 	/**
909 	 * Determine if the pack file will contain the requested object.
910 	 *
911 	 * @param id
912 	 *            the object to test the existence of.
913 	 * @return true if the object will appear in the output pack file.
914 	 * @throws java.io.IOException
915 	 *             a cached pack cannot be examined.
916 	 */
917 	public boolean willInclude(final AnyObjectId id) throws IOException {
918 		ObjectToPack obj = objectsMap.get(id);
919 		return obj != null && !obj.isEdge();
920 	}
921 
922 	/**
923 	 * Lookup the ObjectToPack object for a given ObjectId.
924 	 *
925 	 * @param id
926 	 *            the object to find in the pack.
927 	 * @return the object we are packing, or null.
928 	 */
929 	public ObjectToPack get(AnyObjectId id) {
930 		ObjectToPack obj = objectsMap.get(id);
931 		return obj != null && !obj.isEdge() ? obj : null;
932 	}
933 
934 	/**
935 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
936 	 * pack, as used to name a pack file in repository.
937 	 *
938 	 * @return ObjectId representing SHA-1 name of a pack that was created.
939 	 */
940 	public ObjectId computeName() {
941 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
942 		final MessageDigest md = Constants.newMessageDigest();
943 		for (ObjectToPack otp : sortByName()) {
944 			otp.copyRawTo(buf, 0);
945 			md.update(buf, 0, OBJECT_ID_LENGTH);
946 		}
947 		return ObjectId.fromRaw(md.digest());
948 	}
949 
950 	/**
951 	 * Returns the index format version that will be written.
952 	 * <p>
953 	 * This method can only be invoked after
954 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
955 	 * been invoked and completed successfully.
956 	 *
957 	 * @return the index format version.
958 	 */
959 	public int getIndexVersion() {
960 		int indexVersion = config.getIndexVersion();
961 		if (indexVersion <= 0) {
962 			for (BlockList<ObjectToPack> objs : objectsLists)
963 				indexVersion = Math.max(indexVersion,
964 						PackIndexWriter.oldestPossibleFormat(objs));
965 		}
966 		return indexVersion;
967 	}
968 
969 	/**
970 	 * Create an index file to match the pack file just written.
971 	 * <p>
972 	 * Called after
973 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
974 	 * <p>
975 	 * Writing an index is only required for local pack storage. Packs sent on
976 	 * the network do not need to create an index.
977 	 *
978 	 * @param indexStream
979 	 *            output for the index data. Caller is responsible for closing
980 	 *            this stream.
981 	 * @throws java.io.IOException
982 	 *             the index data could not be written to the supplied stream.
983 	 */
984 	public void writeIndex(final OutputStream indexStream) throws IOException {
985 		if (isIndexDisabled())
986 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
987 
988 		long writeStart = System.currentTimeMillis();
989 		final PackIndexWriter iw = PackIndexWriter.createVersion(
990 				indexStream, getIndexVersion());
991 		iw.write(sortByName(), packcsum);
992 		stats.timeWriting += System.currentTimeMillis() - writeStart;
993 	}
994 
995 	/**
996 	 * Create a bitmap index file to match the pack file just written.
997 	 * <p>
998 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
999 	 *
1000 	 * @param bitmapIndexStream
1001 	 *            output for the bitmap index data. Caller is responsible for
1002 	 *            closing this stream.
1003 	 * @throws java.io.IOException
1004 	 *             the index data could not be written to the supplied stream.
1005 	 */
1006 	public void writeBitmapIndex(final OutputStream bitmapIndexStream)
1007 			throws IOException {
1008 		if (writeBitmaps == null)
1009 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
1010 
1011 		long writeStart = System.currentTimeMillis();
1012 		final PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
1013 		iw.write(writeBitmaps, packcsum);
1014 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1015 	}
1016 
1017 	private List<ObjectToPack> sortByName() {
1018 		if (sortedByName == null) {
1019 			int cnt = 0;
1020 			cnt += objectsLists[OBJ_COMMIT].size();
1021 			cnt += objectsLists[OBJ_TREE].size();
1022 			cnt += objectsLists[OBJ_BLOB].size();
1023 			cnt += objectsLists[OBJ_TAG].size();
1024 
1025 			sortedByName = new BlockList<>(cnt);
1026 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
1027 			sortedByName.addAll(objectsLists[OBJ_TREE]);
1028 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
1029 			sortedByName.addAll(objectsLists[OBJ_TAG]);
1030 			Collections.sort(sortedByName);
1031 		}
1032 		return sortedByName;
1033 	}
1034 
1035 	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
1036 			long cnt) {
1037 		state.phase = phase;
1038 		String task;
1039 		switch (phase) {
1040 		case COUNTING:
1041 			task = JGitText.get().countingObjects;
1042 			break;
1043 		case GETTING_SIZES:
1044 			task = JGitText.get().searchForSizes;
1045 			break;
1046 		case FINDING_SOURCES:
1047 			task = JGitText.get().searchForReuse;
1048 			break;
1049 		case COMPRESSING:
1050 			task = JGitText.get().compressingObjects;
1051 			break;
1052 		case WRITING:
1053 			task = JGitText.get().writingObjects;
1054 			break;
1055 		case BUILDING_BITMAPS:
1056 			task = JGitText.get().buildingBitmaps;
1057 			break;
1058 		default:
1059 			throw new IllegalArgumentException(
1060 					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
1061 		}
1062 		monitor.beginTask(task, (int) cnt);
1063 	}
1064 
1065 	private void endPhase(ProgressMonitor monitor) {
1066 		monitor.endTask();
1067 	}
1068 
1069 	/**
1070 	 * Write the prepared pack to the supplied stream.
1071 	 * <p>
1072 	 * Called after
1073 	 * {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set, Set)} or
1074 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
1075 	 * <p>
1076 	 * Performs delta search if enabled and writes the pack stream.
1077 	 * <p>
1078 	 * All reused objects data checksum (Adler32/CRC32) is computed and
1079 	 * validated against existing checksum.
1080 	 *
1081 	 * @param compressMonitor
1082 	 *            progress monitor to report object compression work.
1083 	 * @param writeMonitor
1084 	 *            progress monitor to report the number of objects written.
1085 	 * @param packStream
1086 	 *            output stream of pack data. The stream should be buffered by
1087 	 *            the caller. The caller is responsible for closing the stream.
1088 	 * @throws java.io.IOException
1089 	 *             an error occurred reading a local object's data to include in
1090 	 *             the pack, or writing compressed object data to the output
1091 	 *             stream.
1092 	 * @throws WriteAbortedException
1093 	 *             the write operation is aborted by
1094 	 *             {@link org.eclipse.jgit.transport.ObjectCountCallback} .
1095 	 */
1096 	public void writePack(ProgressMonitor compressMonitor,
1097 			ProgressMonitor writeMonitor, OutputStream packStream)
1098 			throws IOException {
1099 		if (compressMonitor == null)
1100 			compressMonitor = NullProgressMonitor.INSTANCE;
1101 		if (writeMonitor == null)
1102 			writeMonitor = NullProgressMonitor.INSTANCE;
1103 
1104 		excludeInPacks = null;
1105 		excludeInPackLast = null;
1106 
1107 		boolean needSearchForReuse = reuseSupport != null && (
1108 				   reuseDeltas
1109 				|| config.isReuseObjects()
1110 				|| !cachedPacks.isEmpty());
1111 
1112 		if (compressMonitor instanceof BatchingProgressMonitor) {
1113 			long delay = 1000;
1114 			if (needSearchForReuse && config.isDeltaCompress())
1115 				delay = 500;
1116 			((BatchingProgressMonitor) compressMonitor).setDelayStart(
1117 					delay,
1118 					TimeUnit.MILLISECONDS);
1119 		}
1120 
1121 		if (needSearchForReuse)
1122 			searchForReuse(compressMonitor);
1123 		if (config.isDeltaCompress())
1124 			searchForDeltas(compressMonitor);
1125 
1126 		crc32 = new CRC32();
1127 		final PackOutputStream out = new PackOutputStream(
1128 			writeMonitor,
1129 			isIndexDisabled()
1130 				? packStream
1131 				: new CheckedOutputStream(packStream, crc32),
1132 			this);
1133 
1134 		long objCnt = getObjectCount();
1135 		stats.totalObjects = objCnt;
1136 		if (callback != null)
1137 			callback.setObjectCount(objCnt);
1138 		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
1139 		long writeStart = System.currentTimeMillis();
1140 		try {
1141 			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
1142 			out.flush();
1143 
1144 			writeObjects(out);
1145 			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
1146 				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1147 					if (typeStat == null)
1148 						continue;
1149 					stats.thinPackBytes += typeStat.bytes;
1150 				}
1151 			}
1152 
1153 			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
1154 			for (CachedPack pack : cachedPacks) {
1155 				long deltaCnt = pack.getDeltaCount();
1156 				stats.reusedObjects += pack.getObjectCount();
1157 				stats.reusedDeltas += deltaCnt;
1158 				stats.totalDeltas += deltaCnt;
1159 				reuseSupport.copyPackAsIs(out, pack);
1160 			}
1161 			writeChecksum(out);
1162 			out.flush();
1163 		} finally {
1164 			stats.timeWriting = System.currentTimeMillis() - writeStart;
1165 			stats.depth = depth;
1166 
1167 			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1168 				if (typeStat == null)
1169 					continue;
1170 				typeStat.cntDeltas += typeStat.reusedDeltas;
1171 				stats.reusedObjects += typeStat.reusedObjects;
1172 				stats.reusedDeltas += typeStat.reusedDeltas;
1173 				stats.totalDeltas += typeStat.cntDeltas;
1174 			}
1175 		}
1176 
1177 		stats.totalBytes = out.length();
1178 		reader.close();
1179 		endPhase(writeMonitor);
1180 	}
1181 
1182 	/**
1183 	 * Get statistics of what this PackWriter did in order to create the final
1184 	 * pack stream.
1185 	 *
1186 	 * @return description of what this PackWriter did in order to create the
1187 	 *         final pack stream. This should only be invoked after the calls to
1188 	 *         create the pack/index/bitmap have completed.
1189 	 */
1190 	public PackStatistics getStatistics() {
1191 		return new PackStatistics(stats);
1192 	}
1193 
1194 	/**
1195 	 * Get snapshot of the current state of this PackWriter.
1196 	 *
1197 	 * @return snapshot of the current state of this PackWriter.
1198 	 */
1199 	public State getState() {
1200 		return state.snapshot();
1201 	}
1202 
1203 	/**
1204 	 * {@inheritDoc}
1205 	 * <p>
1206 	 * Release all resources used by this writer.
1207 	 */
1208 	@Override
1209 	public void close() {
1210 		reader.close();
1211 		if (myDeflater != null) {
1212 			myDeflater.end();
1213 			myDeflater = null;
1214 		}
1215 		instances.remove(selfRef);
1216 	}
1217 
	/**
	 * Select reusable on-disk representations for all queued objects.
	 * <p>
	 * Representation selection may mark objects as edges; when that happens
	 * the affected lists are pruned afterwards. When delta reuse and chain
	 * cutting are both configured, overly deep reused chains of trees and
	 * blobs are shortened at the end.
	 *
	 * @param monitor
	 *            progress for the FINDING_SOURCES phase.
	 * @throws IOException
	 *             a representation could not be examined by reuseSupport.
	 */
	private void searchForReuse(ProgressMonitor monitor) throws IOException {
		long cnt = 0;
		cnt += objectsLists[OBJ_COMMIT].size();
		cnt += objectsLists[OBJ_TREE].size();
		cnt += objectsLists[OBJ_BLOB].size();
		cnt += objectsLists[OBJ_TAG].size();

		long start = System.currentTimeMillis();
		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
		if (cnt <= 4096) {
			// For small object counts, do everything as one list.
			BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
			tmp.addAll(objectsLists[OBJ_TAG]);
			tmp.addAll(objectsLists[OBJ_COMMIT]);
			tmp.addAll(objectsLists[OBJ_TREE]);
			tmp.addAll(objectsLists[OBJ_BLOB]);
			searchForReuse(monitor, tmp);
			if (pruneCurrentObjectList) {
				// If the list was pruned, we need to re-prune the main lists.
				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
			}
		} else {
			// Larger counts are processed one type at a time; each call
			// prunes its own list when selection flags edges.
			searchForReuse(monitor, objectsLists[OBJ_TAG]);
			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
			searchForReuse(monitor, objectsLists[OBJ_TREE]);
			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
		}
		endPhase(monitor);
		stats.timeSearchingForReuse = System.currentTimeMillis() - start;

		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
			// Only trees and blobs participate in delta chains.
			cutDeltaChains(objectsLists[OBJ_TREE]);
			cutDeltaChains(objectsLists[OBJ_BLOB]);
		}
	}
1256 
1257 	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
1258 			throws IOException, MissingObjectException {
1259 		pruneCurrentObjectList = false;
1260 		reuseSupport.selectObjectRepresentation(this, monitor, list);
1261 		if (pruneCurrentObjectList)
1262 			pruneEdgesFromObjectList(list);
1263 	}
1264 
	/**
	 * Break reused delta chains that exceed the configured maximum depth.
	 * <p>
	 * Walks base chains from the newest entries backwards, accumulating the
	 * chain length on each base. When a base at or beyond the maximum depth
	 * is itself a delta, it is re-selected as a whole object, truncating the
	 * chain there. Chain lengths are cleared afterwards when delta
	 * compression will run, since the field is reused by that phase.
	 *
	 * @param list
	 *            tree or blob list whose reused chains are examined.
	 * @throws IOException
	 *             re-selecting a non-delta representation failed.
	 */
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				// Stop once we reach a base already known to sit at this
				// depth or deeper; its chain was measured earlier.
				if (d < b.getChainLength())
					break;
				b.setChainLength(++d);
				if (d >= max && b.isDeltaRepresentation()) {
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		if (config.isDeltaCompress()) {
			// The chain length field doubles as scratch space for the
			// delta search; reset it before that phase begins.
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1287 
	/**
	 * Find delta compression opportunities among the queued trees and blobs.
	 * <p>
	 * Gathers candidates, measures their sizes, sorts them so similar
	 * objects are adjacent, drops non-candidates, and runs the delta search
	 * window over the remainder. Edge objects are included as potential
	 * bases only; they are never written.
	 *
	 * @param monitor
	 *            progress for the sizing and compressing phases.
	 * @throws MissingObjectException
	 *             a non-edge object disappeared from storage.
	 * @throws IncorrectObjectTypeException
	 *             an object had an unexpected type when opened.
	 * @throws IOException
	 *             object data could not be read.
	 */
	private void searchForDeltas(ProgressMonitor monitor)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// Commits and annotated tags tend to have too many differences to
		// really benefit from delta compression. Consequently just don't
		// bother examining those types here.
		//
		ObjectToPack[] list = new ObjectToPack[
				  objectsLists[OBJ_TREE].size()
				+ objectsLists[OBJ_BLOB].size()
				+ edgeObjects.size()];
		int cnt = 0;
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
		if (cnt == 0)
			return;
		int nonEdgeCnt = cnt;

		// Queue up any edge objects that we might delta against.  We won't
		// be sending these as we assume the other side has them, but we need
		// them in the search phase below.
		//
		for (ObjectToPack eo : edgeObjects) {
			eo.setWeight(0);
			list[cnt++] = eo;
		}

		// Compute the sizes of the objects so we can do a proper sort.
		// We let the reader skip missing objects if it chooses. For
		// some readers this can be a huge win. We detect missing objects
		// by having set the weights above to 0 and allowing the delta
		// search code to discover the missing object and skip over it, or
		// abort with an exception if we actually had to have it.
		//
		final long sizingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
		try {
			final long limit = Math.min(
					config.getBigFileThreshold(),
					Integer.MAX_VALUE);
			for (;;) {
				try {
					if (!sizeQueue.next())
						break;
				} catch (MissingObjectException notFound) {
					monitor.update(1);
					if (ignoreMissingUninteresting) {
						// A missing edge object is tolerable: it just can
						// no longer serve as a delta base.
						ObjectToPack otp = sizeQueue.getCurrent();
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}

						otp = objectsMap.get(notFound.getObjectId());
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}
					}
					throw notFound;
				}

				ObjectToPack otp = sizeQueue.getCurrent();
				if (otp == null)
					otp = objectsMap.get(sizeQueue.getObjectId());

				long sz = sizeQueue.getSize();
				if (DeltaIndex.BLKSZ < sz && sz < limit)
					otp.setWeight((int) sz);
				else
					otp.setDoNotDelta(); // too small, or too big
				monitor.update(1);
			}
		} finally {
			sizeQueue.release();
		}
		endPhase(monitor);
		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;

		// Sort the objects by path hash so like files are near each other,
		// and then by size descending so that bigger files are first. This
		// applies "Linus' Law" which states that newer files tend to be the
		// bigger ones, because source files grow and hardly ever shrink.
		//
		Arrays.sort(list, 0, cnt, new Comparator<ObjectToPack>() {
			@Override
			public int compare(ObjectToPack a, ObjectToPack b) {
				// Non-delta-able objects sort to the end so they can be
				// trimmed off after the sort.
				int cmp = (a.isDoNotDelta() ? 1 : 0)
						- (b.isDoNotDelta() ? 1 : 0);
				if (cmp != 0)
					return cmp;

				cmp = a.getType() - b.getType();
				if (cmp != 0)
					return cmp;

				cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
				if (cmp != 0)
					return cmp;

				cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
				if (cmp != 0)
					return cmp;

				// Edges (bases only) come before the objects we must write.
				cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
				if (cmp != 0)
					return cmp;

				return b.getWeight() - a.getWeight();
			}
		});

		// Above we stored the objects we cannot delta onto the end.
		// Remove them from the list so we don't waste time on them.
		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
			if (!list[cnt - 1].isEdge())
				nonEdgeCnt--;
			cnt--;
		}
		if (cnt == 0)
			return;

		final long searchStart = System.currentTimeMillis();
		searchForDeltas(monitor, list, cnt);
		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
		stats.timeCompressing = System.currentTimeMillis() - searchStart;

		for (int i = 0; i < cnt; i++)
			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
				stats.deltasFound++;
	}
1421 
1422 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1423 		for (ObjectToPack otp : objectsLists[type]) {
1424 			if (otp.isDoNotDelta()) // delta is disabled for this path
1425 				continue;
1426 			if (otp.isDeltaRepresentation()) // already reusing a delta
1427 				continue;
1428 			otp.setWeight(0);
1429 			list[cnt++] = otp;
1430 		}
1431 		return cnt;
1432 	}
1433 
1434 	private void reselectNonDelta(ObjectToPack otp) throws IOException {
1435 		otp.clearDeltaBase();
1436 		otp.clearReuseAsIs();
1437 		boolean old = reuseDeltas;
1438 		reuseDeltas = false;
1439 		reuseSupport.selectObjectRepresentation(this,
1440 				NullProgressMonitor.INSTANCE,
1441 				Collections.singleton(otp));
1442 		reuseDeltas = old;
1443 	}
1444 
1445 	private void searchForDeltas(final ProgressMonitor monitor,
1446 			final ObjectToPack[] list, final int cnt)
1447 			throws MissingObjectException, IncorrectObjectTypeException,
1448 			LargeObjectException, IOException {
1449 		int threads = config.getThreads();
1450 		if (threads == 0)
1451 			threads = Runtime.getRuntime().availableProcessors();
1452 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1453 			singleThreadDeltaSearch(monitor, list, cnt);
1454 		else
1455 			parallelDeltaSearch(monitor, list, cnt, threads);
1456 	}
1457 
1458 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1459 			ObjectToPack[] list, int cnt) throws IOException {
1460 		long totalWeight = 0;
1461 		for (int i = 0; i < cnt; i++) {
1462 			ObjectToPack o = list[i];
1463 			totalWeight += DeltaTask.getAdjustedWeight(o);
1464 		}
1465 
1466 		long bytesPerUnit = 1;
1467 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1468 			bytesPerUnit <<= 10;
1469 		int cost = (int) (totalWeight / bytesPerUnit);
1470 		if (totalWeight % bytesPerUnit != 0)
1471 			cost++;
1472 
1473 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1474 		new DeltaWindow(config, new DeltaCache(config), reader,
1475 				monitor, bytesPerUnit,
1476 				list, 0, cnt).search();
1477 		endPhase(monitor);
1478 	}
1479 
	/**
	 * Run the delta search across multiple threads.
	 * <p>
	 * The candidate list is partitioned into task blocks and executed on
	 * either the caller-supplied executor, a temporary fixed pool, or — for
	 * executors that may not run work asynchronously — wrapped runnables.
	 * Failures from worker threads are collected and rethrown afterwards as
	 * if the search had been single-threaded.
	 *
	 * @param monitor
	 *            progress for the compression phase; wrapped to be
	 *            thread-safe.
	 * @param list
	 *            candidate objects; only the first {@code cnt} are used.
	 * @param cnt
	 *            number of valid entries in {@code list}.
	 * @param threads
	 *            number of worker threads to partition the work across.
	 * @throws IOException
	 *             a worker failed, or the search was interrupted.
	 */
	private void parallelDeltaSearch(ProgressMonitor monitor,
			ObjectToPack[] list, int cnt, int threads) throws IOException {
		DeltaCache dc = new ThreadSafeDeltaCache(config);
		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
				reader, dc, pm,
				list, 0, cnt);
		taskBlock.partitionTasks();
		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
		pm.startWorkers(taskBlock.tasks.size());

		Executor executor = config.getExecutor();
		final List<Throwable> errors =
				Collections.synchronizedList(new ArrayList<Throwable>(threads));
		if (executor instanceof ExecutorService) {
			// Caller supplied us a service, use it directly.
			runTasks((ExecutorService) executor, pm, taskBlock, errors);
		} else if (executor == null) {
			// Caller didn't give us a way to run the tasks, spawn up a
			// temporary thread pool and make sure it tears down cleanly.
			ExecutorService pool = Executors.newFixedThreadPool(threads);
			try {
				runTasks(pool, pm, taskBlock, errors);
			} finally {
				pool.shutdown();
				for (;;) {
					try {
						if (pool.awaitTermination(60, TimeUnit.SECONDS))
							break;
					} catch (InterruptedException e) {
						throw new IOException(
								JGitText.get().packingCancelledDuringObjectsWriting);
					}
				}
			}
		} else {
			// The caller gave us an executor, but it might not do
			// asynchronous execution.  Wrap everything and hope it
			// can schedule these for us.
			for (final DeltaTask task : taskBlock.tasks) {
				executor.execute(new Runnable() {
					@Override
					public void run() {
						try {
							task.call();
						} catch (Throwable failure) {
							errors.add(failure);
						}
					}
				});
			}
			try {
				pm.waitForCompletion();
			} catch (InterruptedException ie) {
				// We can't abort the other tasks as we have no handle.
				// Cross our fingers and just break out anyway.
				//
				throw new IOException(
						JGitText.get().packingCancelledDuringObjectsWriting);
			}
		}

		// If any task threw an error, try to report it back as
		// though we weren't using a threaded search algorithm.
		//
		if (!errors.isEmpty()) {
			Throwable err = errors.get(0);
			if (err instanceof Error)
				throw (Error) err;
			if (err instanceof RuntimeException)
				throw (RuntimeException) err;
			if (err instanceof IOException)
				throw (IOException) err;

			throw new IOException(err.getMessage(), err);
		}
		endPhase(monitor);
	}
1558 
1559 	private static void runTasks(ExecutorService pool,
1560 			ThreadSafeProgressMonitor pm,
1561 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1562 		List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
1563 		for (DeltaTask task : tb.tasks)
1564 			futures.add(pool.submit(task));
1565 
1566 		try {
1567 			pm.waitForCompletion();
1568 			for (Future<?> f : futures) {
1569 				try {
1570 					f.get();
1571 				} catch (ExecutionException failed) {
1572 					errors.add(failed.getCause());
1573 				}
1574 			}
1575 		} catch (InterruptedException ie) {
1576 			for (Future<?> f : futures)
1577 				f.cancel(true);
1578 			throw new IOException(
1579 					JGitText.get().packingCancelledDuringObjectsWriting);
1580 		}
1581 	}
1582 
1583 	private void writeObjects(PackOutputStream out) throws IOException {
1584 		writeObjects(out, objectsLists[OBJ_COMMIT]);
1585 		writeObjects(out, objectsLists[OBJ_TAG]);
1586 		writeObjects(out, objectsLists[OBJ_TREE]);
1587 		writeObjects(out, objectsLists[OBJ_BLOB]);
1588 	}
1589 
1590 	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
1591 			throws IOException {
1592 		if (list.isEmpty())
1593 			return;
1594 
1595 		typeStats = stats.objectTypes[list.get(0).getType()];
1596 		long beginOffset = out.length();
1597 
1598 		if (reuseSupport != null) {
1599 			reuseSupport.writeObjects(out, list);
1600 		} else {
1601 			for (ObjectToPack otp : list)
1602 				out.writeObject(otp);
1603 		}
1604 
1605 		typeStats.bytes += out.length() - beginOffset;
1606 		typeStats.cntObjects = list.size();
1607 	}
1608 
1609 	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
1610 		if (!otp.isWritten())
1611 			writeObjectImpl(out, otp);
1612 	}
1613 
	/**
	 * Write one object, reusing an on-disk representation when possible.
	 * <p>
	 * The reuse loop retries after a stored representation disappears
	 * mid-write (e.g. a concurrent repack), provided no bytes for this
	 * object reached the stream yet. When reuse is impossible the object is
	 * deflated fresh, as a delta or whole form.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            the object to write; marked as wanting write to detect
	 *            delta-base cycles.
	 * @throws IOException
	 *             the object could not be read or written, or a partially
	 *             written representation vanished and cannot be recovered.
	 */
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		while (otp.isReuseAsIs()) {
			// The delta base (if any) must precede this object in the pack.
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				if (otp.getOffset() == out.length()) {
					// Nothing was emitted yet: pick a new representation
					// and retry the reuse loop.
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				} else {
					// Object writing already started, we cannot recover.
					//
					CorruptObjectException coe;
					coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
					coe.initCause(gone);
					throw coe;
				}
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation())
			writeDeltaObjectDeflate(out, otp);
		else
			writeWholeObjectDeflate(out, otp);
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1672 
1673 	private void writeBase(PackOutputStream out, ObjectToPack base)
1674 			throws IOException {
1675 		if (base != null && !base.isWritten() && !base.isEdge())
1676 			writeObjectImpl(out, base);
1677 	}
1678 
1679 	private void writeWholeObjectDeflate(PackOutputStream out,
1680 			final ObjectToPack otp) throws IOException {
1681 		final Deflater deflater = deflater();
1682 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1683 
1684 		crc32.reset();
1685 		otp.setOffset(out.length());
1686 		out.writeHeader(otp, ldr.getSize());
1687 
1688 		deflater.reset();
1689 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1690 		ldr.copyTo(dst);
1691 		dst.finish();
1692 	}
1693 
	/**
	 * Write one object in delta form, preferring a cached encoded delta.
	 * <p>
	 * The delta base is written (or confirmed written) first. If the delta
	 * search left a still-live cached, already-deflated delta, it is copied
	 * directly; otherwise the delta is re-derived from the base and the
	 * object content, then deflated.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            the delta object to write; its offset is recorded here.
	 * @throws IOException
	 *             object or base data could not be read, or the stream
	 *             could not be written.
	 */
	private void writeDeltaObjectDeflate(PackOutputStream out,
			final ObjectToPack otp) throws IOException {
		writeBase(out, otp.getDeltaBase());

		crc32.reset();
		otp.setOffset(out.length());

		// Fast path: the delta search may have cached the compressed form.
		DeltaCache.Ref ref = otp.popCachedDelta();
		if (ref != null) {
			byte[] zbuf = ref.get();
			if (zbuf != null) {
				out.writeHeader(otp, otp.getCachedSize());
				out.write(zbuf);
				typeStats.cntDeltas++;
				typeStats.deltaBytes += out.length() - otp.getOffset();
				return;
			}
		}

		// Slow path: recompute the delta against the base and deflate it.
		try (TemporaryBuffer.Heap delta = delta(otp)) {
			out.writeHeader(otp, delta.length());

			Deflater deflater = deflater();
			deflater.reset();
			DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
			delta.writeTo(dst, null);
			dst.finish();
		}
		typeStats.cntDeltas++;
		typeStats.deltaBytes += out.length() - otp.getOffset();
	}
1725 
1726 	private TemporaryBuffer.Heap delta(final ObjectToPack otp)
1727 			throws IOException {
1728 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1729 		byte[] res = buffer(otp);
1730 
1731 		// We never would have proposed this pair if the delta would be
1732 		// larger than the unpacked version of the object. So using it
1733 		// as our buffer limit is valid: we will never reach it.
1734 		//
1735 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1736 		index.encode(delta, res);
1737 		return delta;
1738 	}
1739 
	// Convenience overload using this writer's own configuration and reader.
	private byte[] buffer(AnyObjectId objId) throws IOException {
		return buffer(config, reader, objId);
	}
1743 
	/**
	 * Load the entire inflated content of an object into a byte array.
	 *
	 * @param config
	 *            supplies the big file threshold used as the in-memory limit.
	 * @param or
	 *            reader used to access the object.
	 * @param objId
	 *            identity of the object to load.
	 * @return the complete content of the object.
	 * @throws IOException
	 *             the object is missing or cannot be loaded into memory.
	 */
	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
			throws IOException {
		// PackWriter should have already pruned objects that
		// are above the big file threshold, so our chances of
		// the object being below it are very good. We really
		// shouldn't be here, unless the implementation is odd.

		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
	}
1753 
1754 	private Deflater deflater() {
1755 		if (myDeflater == null)
1756 			myDeflater = new Deflater(config.getCompressionLevel());
1757 		return myDeflater;
1758 	}
1759 
	// Fetch the stream's digest over everything written so far, remember it
	// in packcsum, and append it as the pack trailer.
	private void writeChecksum(PackOutputStream out) throws IOException {
		packcsum = out.getDigest();
		out.write(packcsum);
	}
1764 
	// Enumerate every object that must be placed in the pack: everything
	// reachable from "want" but not from "have". Uses bitmap indexes when
	// available, otherwise performs a full object walk.
	private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
			@NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		final long countingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);

		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
		excludeFromBitmapSelection = noBitmaps;

		// Bitmaps can only be produced for full, unfiltered, non-shallow packs.
		canBuildBitmaps = config.isBuildBitmaps()
				&& !shallowPack
				&& have.isEmpty()
				&& (excludeInPacks == null || excludeInPacks.length == 0);
		// Shortcut: when a bitmap index exists, count via bitmaps and return.
		if (!shallowPack && useBitmaps) {
			BitmapIndex bitmapIndex = reader.getBitmapIndex();
			if (bitmapIndex != null) {
				BitmapWalker bitmapWalker = new BitmapWalker(
						walker, bitmapIndex, countingMonitor);
				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
				endPhase(countingMonitor);
				stats.timeCounting = System.currentTimeMillis() - countingStart;
				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
				return;
			}
		}

		List<ObjectId> all = new ArrayList<>(want.size() + have.size());
		all.addAll(want);
		all.addAll(have);

		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$

		walker.carry(include);

		int haveEst = have.size();
		if (have.isEmpty()) {
			walker.sort(RevSort.COMMIT_TIME_DESC);
		} else {
			// With a "have" set the walk must be topological; thin packs also
			// need the boundary commits to find edge objects.
			walker.sort(RevSort.TOPO);
			if (thin)
				walker.sort(RevSort.BOUNDARY, true);
		}

		List<RevObject> wantObjs = new ArrayList<>(want.size());
		List<RevObject> haveObjs = new ArrayList<>(haveEst);
		List<RevTag> wantTags = new ArrayList<>(want.size());

		// Retrieve the RevWalk's versions of "want" and "have" objects to
		// maintain any state previously set in the RevWalk.
		AsyncRevObjectQueue q = walker.parseAny(all, true);
		try {
			for (;;) {
				try {
					RevObject o = q.next();
					if (o == null)
						break;
					if (have.contains(o))
						haveObjs.add(o);
					if (want.contains(o)) {
						o.add(include);
						wantObjs.add(o);
						if (o instanceof RevTag)
							wantTags.add((RevTag) o);
					}
				} catch (MissingObjectException e) {
					// A missing "have" object is tolerable when configured so;
					// a missing "want" is always fatal.
					if (ignoreMissingUninteresting
							&& have.contains(e.getObjectId()))
						continue;
					throw e;
				}
			}
		} finally {
			q.release();
		}

		// Pre-parse the targets of wanted annotated tags.
		if (!wantTags.isEmpty()) {
			all = new ArrayList<>(wantTags.size());
			for (RevTag tag : wantTags)
				all.add(tag.getObject());
			q = walker.parseAny(all, true);
			try {
				while (q.next() != null) {
					// Just need to pop the queue item to parse the object.
				}
			} finally {
				q.release();
			}
		}

		if (walker instanceof DepthWalk.ObjectWalk) {
			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
			for (RevObject obj : wantObjs) {
				depthWalk.markRoot(obj);
			}
			// Mark the tree objects associated with "have" commits as
			// uninteresting to avoid writing redundant blobs. A normal RevWalk
			// lazily propagates the "uninteresting" state from a commit to its
			// tree during the walk, but DepthWalks can terminate early so
			// preemptively propagate that state here.
			for (RevObject obj : haveObjs) {
				if (obj instanceof RevCommit) {
					RevTree t = ((RevCommit) obj).getTree();
					depthWalk.markUninteresting(t);
				}
			}

			if (unshallowObjects != null) {
				for (ObjectId id : unshallowObjects) {
					depthWalk.markUnshallow(walker.parseAny(id));
				}
			}
		} else {
			for (RevObject obj : wantObjs)
				walker.markStart(obj);
		}
		for (RevObject obj : haveObjs)
			walker.markUninteresting(obj);

		// Walk commits first, remembering up to maxBases boundary trees for
		// later thin-pack base discovery.
		final int maxBases = config.getDeltaSearchWindowSize();
		Set<RevTree> baseTrees = new HashSet<>();
		BlockList<RevCommit> commits = new BlockList<>();
		Set<ObjectId> roots = new HashSet<>();
		RevCommit c;
		while ((c = walker.next()) != null) {
			if (exclude(c))
				continue;
			if (c.has(RevFlag.UNINTERESTING)) {
				if (baseTrees.size() <= maxBases)
					baseTrees.add(c.getTree());
				continue;
			}

			commits.add(c);
			if (c.getParentCount() == 0) {
				roots.add(c.copy());
			}
			countingMonitor.update(1);
		}
		stats.rootCommits = Collections.unmodifiableSet(roots);

		if (shallowPack) {
			for (RevCommit cmit : commits) {
				addObject(cmit, 0);
			}
		} else {
			int commitCnt = 0;
			boolean putTagTargets = false;
			for (RevCommit cmit : commits) {
				if (!cmit.has(added)) {
					cmit.add(added);
					addObject(cmit, 0);
					commitCnt++;
				}

				for (int i = 0; i < cmit.getParentCount(); i++) {
					RevCommit p = cmit.getParent(i);
					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
							&& !exclude(p)) {
						p.add(added);
						addObject(p, 0);
						commitCnt++;
					}
				}

				// Once enough commits were added, also pull in any tag target
				// commits that were included in the walk but not yet added.
				if (!putTagTargets && 4096 < commitCnt) {
					for (ObjectId id : tagTargets) {
						RevObject obj = walker.lookupOrNull(id);
						if (obj instanceof RevCommit
								&& obj.has(include)
								&& !obj.has(RevFlag.UNINTERESTING)
								&& !obj.has(added)) {
							obj.add(added);
							addObject(obj, 0);
						}
					}
					putTagTargets = true;
				}
			}
		}
		commits = null;

		// Walk the remaining (non-commit) objects. For thin packs, also feed
		// boundary trees to BaseSearch so deltas can target edge objects.
		if (thin && !baseTrees.isEmpty()) {
			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
					objectsMap, edgeObjects, reader);
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;

				int pathHash = walker.getPathHashCode();
				byte[] pathBuf = walker.getPathBuffer();
				int pathLen = walker.getPathLength();
				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
				addObject(o, pathHash);
				countingMonitor.update(1);
			}
		} else {
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;
				addObject(o, walker.getPathHashCode());
				countingMonitor.update(1);
			}
		}

		for (CachedPack pack : cachedPacks)
			countingMonitor.update((int) pack.getObjectCount());
		endPhase(countingMonitor);
		stats.timeCounting = System.currentTimeMillis() - countingStart;
		// -1 signals that no bitmap index was used for counting.
		stats.bitmapIndexMisses = -1;
	}
1984 
	// Count objects using bitmap indexes instead of walking the graph:
	// need = want AND NOT have, computed entirely on bitmaps.
	private void findObjectsToPackUsingBitmaps(
			BitmapWalker bitmapWalker, Set<? extends ObjectId> want,
			Set<? extends ObjectId> have)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
				false);
		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);

		// Cached packs may only be reused when objects are taken as-is:
		// no revalidation and no per-pack exclusions requested.
		if (useCachedPacks && reuseSupport != null && !reuseValidate
				&& (excludeInPacks == null || excludeInPacks.length == 0))
			cachedPacks.addAll(
					reuseSupport.getCachedPacksAndUpdate(needBitmap));

		for (BitmapObject obj : needBitmap) {
			ObjectId objectId = obj.getObjectId();
			if (exclude(objectId)) {
				needBitmap.remove(objectId);
				continue;
			}
			addObject(objectId, obj.getType(), 0);
		}

		// For thin packs, remember the client's objects so deltas may be
		// made against them later.
		if (thin)
			haveObjects = haveBitmap;
	}
2012 
2013 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
2014 		final int size = list.size();
2015 		int src = 0;
2016 		int dst = 0;
2017 
2018 		for (; src < size; src++) {
2019 			ObjectToPack obj = list.get(src);
2020 			if (obj.isEdge())
2021 				continue;
2022 			if (dst != src)
2023 				list.set(dst, obj);
2024 			dst++;
2025 		}
2026 
2027 		while (dst < list.size())
2028 			list.remove(list.size() - 1);
2029 	}
2030 
2031 	/**
2032 	 * Include one object to the output file.
2033 	 * <p>
2034 	 * Objects are written in the order they are added. If the same object is
2035 	 * added twice, it may be written twice, creating a larger than necessary
2036 	 * file.
2037 	 *
2038 	 * @param object
2039 	 *            the object to add.
2040 	 * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
2041 	 *             the object is an unsupported type.
2042 	 */
2043 	public void addObject(final RevObject object)
2044 			throws IncorrectObjectTypeException {
2045 		if (!exclude(object))
2046 			addObject(object, 0);
2047 	}
2048 
	// Forward to the three-argument form using the object's own type.
	private void addObject(final RevObject object, final int pathHashCode) {
		addObject(object, object.getType(), pathHashCode);
	}
2052 
2053 	private void addObject(
2054 			final AnyObjectId src, final int type, final int pathHashCode) {
2055 		final ObjectToPack otp;
2056 		if (reuseSupport != null)
2057 			otp = reuseSupport.newObjectToPack(src, type);
2058 		else
2059 			otp = new ObjectToPack(src, type);
2060 		otp.setPathHash(pathHashCode);
2061 		objectsLists[type].add(otp);
2062 		objectsMap.add(otp);
2063 	}
2064 
2065 	private boolean exclude(AnyObjectId objectId) {
2066 		if (excludeInPacks == null)
2067 			return false;
2068 		if (excludeInPackLast.contains(objectId))
2069 			return true;
2070 		for (ObjectIdSet idx : excludeInPacks) {
2071 			if (idx.contains(objectId)) {
2072 				excludeInPackLast = idx;
2073 				return true;
2074 			}
2075 		}
2076 		return false;
2077 	}
2078 
2079 	/**
2080 	 * Select an object representation for this writer.
2081 	 * <p>
2082 	 * An {@link org.eclipse.jgit.lib.ObjectReader} implementation should invoke
2083 	 * this method once for each representation available for an object, to
2084 	 * allow the writer to find the most suitable one for the output.
2085 	 *
2086 	 * @param otp
2087 	 *            the object being packed.
2088 	 * @param next
2089 	 *            the next available representation from the repository.
2090 	 */
2091 	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
2092 		int nFmt = next.getFormat();
2093 
2094 		if (!cachedPacks.isEmpty()) {
2095 			if (otp.isEdge())
2096 				return;
2097 			if ((nFmt == PACK_WHOLE) | (nFmt == PACK_DELTA)) {
2098 				for (CachedPack pack : cachedPacks) {
2099 					if (pack.hasObject(otp, next)) {
2100 						otp.setEdge();
2101 						otp.clearDeltaBase();
2102 						otp.clearReuseAsIs();
2103 						pruneCurrentObjectList = true;
2104 						return;
2105 					}
2106 				}
2107 			}
2108 		}
2109 
2110 		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
2111 			ObjectId baseId = next.getDeltaBase();
2112 			ObjectToPack ptr = objectsMap.get(baseId);
2113 			if (ptr != null && !ptr.isEdge()) {
2114 				otp.setDeltaBase(ptr);
2115 				otp.setReuseAsIs();
2116 			} else if (thin && have(ptr, baseId)) {
2117 				otp.setDeltaBase(baseId);
2118 				otp.setReuseAsIs();
2119 			} else {
2120 				otp.clearDeltaBase();
2121 				otp.clearReuseAsIs();
2122 			}
2123 		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
2124 			int nWeight = next.getWeight();
2125 			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
2126 				// We've chosen another PACK_WHOLE format for this object,
2127 				// choose the one that has the smaller compressed size.
2128 				//
2129 				if (otp.getWeight() <= nWeight)
2130 					return;
2131 			}
2132 			otp.clearDeltaBase();
2133 			otp.setReuseAsIs();
2134 			otp.setWeight(nWeight);
2135 		} else {
2136 			otp.clearDeltaBase();
2137 			otp.clearReuseAsIs();
2138 		}
2139 
2140 		otp.setDeltaAttempted(reuseDeltas & next.wasDeltaAttempted());
2141 		otp.select(next);
2142 	}
2143 
2144 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
2145 		return (ptr != null && ptr.isEdge())
2146 				|| (haveObjects != null && haveObjects.contains(objectId));
2147 	}
2148 
	/**
	 * Prepares the bitmaps to be written to the bitmap index file.
	 * <p>
	 * Bitmaps can be used to speed up fetches and clones by storing the entire
	 * object graph at selected commits. Writing a bitmap index is an optional
	 * feature that not all pack users may require.
	 * <p>
	 * Called after {@link #writeIndex(OutputStream)}.
	 * <p>
	 * To reduce memory internal state is cleared during this method, rendering
	 * the PackWriter instance useless for anything further than a call to write
	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
	 *
	 * @param pm
	 *            progress monitor to report bitmap building work.
	 * @return whether a bitmap index may be written.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
		// Refuse when bitmap building was ruled out during counting, the
		// object count cannot be indexed, or cached packs were reused.
		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
				|| !cachedPacks.isEmpty())
			return false;

		if (pm == null)
			pm = NullProgressMonitor.INSTANCE;

		int numCommits = objectsLists[OBJ_COMMIT].size();
		List<ObjectToPack> byName = sortByName();
		// Drop internal object state now to reduce memory; from here on the
		// writer can only emit the bitmap index (see javadoc above).
		sortedByName = null;
		objectsLists = null;
		objectsMap = null;
		writeBitmaps = new PackBitmapIndexBuilder(byName);
		byName = null;

		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
				reader, writeBitmaps, pm, stats.interestingObjects, config);

		Collection<PackWriterBitmapPreparer.BitmapCommit> selectedCommits = bitmapPreparer
				.selectCommits(numCommits, excludeFromBitmapSelection);

		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());

		BitmapWalker walker = bitmapPreparer.newBitmapWalker();
		AnyObjectId last = null;
		for (PackWriterBitmapPreparer.BitmapCommit cmit : selectedCommits) {
			// A fresh walker is required whenever the previous commit's
			// bitmap cannot be built upon.
			if (!cmit.isReuseWalker()) {
				walker = bitmapPreparer.newBitmapWalker();
			}
			BitmapBuilder bitmap = walker.findObjects(
					Collections.singleton(cmit), null, false);

			// Sanity check: a reused walker's bitmap must cover the commit
			// the previous iteration produced a bitmap for.
			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
				throw new IllegalStateException(MessageFormat.format(
						JGitText.get().bitmapMissingObject, cmit.name(),
						last.name()));
			last = cmit;
			writeBitmaps.addBitmap(cmit, bitmap.build(), cmit.getFlags());

			pm.update(1);
		}

		endPhase(pm);
		return true;
	}
2214 
2215 	private boolean reuseDeltaFor(ObjectToPack otp) {
2216 		int type = otp.getType();
2217 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2218 			return true;
2219 		if (type == OBJ_COMMIT)
2220 			return reuseDeltaCommits;
2221 		if (type == OBJ_TAG)
2222 			return false;
2223 		return true;
2224 	}
2225 
	/**
	 * Summary of how PackWriter created the pack.
	 * <p>
	 * Every method simply delegates to a wrapped {@link PackStatistics}
	 * instance; this class exists only for backwards API compatibility.
	 *
	 * @deprecated Use {@link PackStatistics} instead.
	 */
	@Deprecated
	public static class Statistics {
		/** Statistics about a single class of object. */
		public static class ObjectType {
			// All requests are forwarded to this object.
			private PackStatistics.ObjectType objectType;

			/**
			 * Wraps an
			 * {@link org.eclipse.jgit.storage.pack.PackStatistics.ObjectType}
			 * instance to maintain backwards compatibility with existing API.
			 *
			 * @param type
			 *            the wrapped instance
			 */
			public ObjectType(PackStatistics.ObjectType type) {
				objectType = type;
			}

			/**
			 * @return total number of objects output. This total includes the
			 *         value of {@link #getDeltas()}.
			 */
			public long getObjects() {
				return objectType.getObjects();
			}

			/**
			 * @return total number of deltas output. This may be lower than the
			 *         actual number of deltas if a cached pack was reused.
			 */
			public long getDeltas() {
				return objectType.getDeltas();
			}

			/**
			 * @return number of objects whose existing representation was
			 *         reused in the output. This count includes
			 *         {@link #getReusedDeltas()}.
			 */
			public long getReusedObjects() {
				return objectType.getReusedObjects();
			}

			/**
			 * @return number of deltas whose existing representation was reused
			 *         in the output, as their base object was also output or
			 *         was assumed present for a thin pack. This may be lower
			 *         than the actual number of reused deltas if a cached pack
			 *         was reused.
			 */
			public long getReusedDeltas() {
				return objectType.getReusedDeltas();
			}

			/**
			 * @return total number of bytes written. This size includes the
			 *         object headers as well as the compressed data. This size
			 *         also includes all of {@link #getDeltaBytes()}.
			 */
			public long getBytes() {
				return objectType.getBytes();
			}

			/**
			 * @return number of delta bytes written. This size includes the
			 *         object headers for the delta objects.
			 */
			public long getDeltaBytes() {
				return objectType.getDeltaBytes();
			}
		}

		// All requests are forwarded to this object.
		private PackStatistics statistics;

		/**
		 * Wraps a {@link PackStatistics} object to maintain backwards
		 * compatibility with existing API.
		 *
		 * @param stats
		 *            the wrapped PackStatitics object
		 */
		public Statistics(PackStatistics stats) {
			statistics = stats;
		}

		/**
		 * @return unmodifiable collection of objects to be included in the
		 *         pack. May be null if the pack was hand-crafted in a unit
		 *         test.
		 */
		public Set<ObjectId> getInterestingObjects() {
			return statistics.getInterestingObjects();
		}

		/**
		 * @return unmodifiable collection of objects that should be excluded
		 *         from the pack, as the peer that will receive the pack already
		 *         has these objects.
		 */
		public Set<ObjectId> getUninterestingObjects() {
			return statistics.getUninterestingObjects();
		}

		/**
		 * @return unmodifiable collection of the cached packs that were reused
		 *         in the output, if any were selected for reuse.
		 */
		public Collection<CachedPack> getReusedPacks() {
			return statistics.getReusedPacks();
		}

		/**
		 * @return number of objects in the output pack that went through the
		 *         delta search process in order to find a potential delta base.
		 */
		public int getDeltaSearchNonEdgeObjects() {
			return statistics.getDeltaSearchNonEdgeObjects();
		}

		/**
		 * @return number of objects in the output pack that went through delta
		 *         base search and found a suitable base. This is a subset of
		 *         {@link #getDeltaSearchNonEdgeObjects()}.
		 */
		public int getDeltasFound() {
			return statistics.getDeltasFound();
		}

		/**
		 * @return total number of objects output. This total includes the value
		 *         of {@link #getTotalDeltas()}.
		 */
		public long getTotalObjects() {
			return statistics.getTotalObjects();
		}

		/**
		 * @return the count of objects that needed to be discovered through an
		 *         object walk because they were not found in bitmap indices.
		 *         Returns -1 if no bitmap indices were found.
		 */
		public long getBitmapIndexMisses() {
			return statistics.getBitmapIndexMisses();
		}

		/**
		 * @return total number of deltas output. This may be lower than the
		 *         actual number of deltas if a cached pack was reused.
		 */
		public long getTotalDeltas() {
			return statistics.getTotalDeltas();
		}

		/**
		 * @return number of objects whose existing representation was reused in
		 *         the output. This count includes {@link #getReusedDeltas()}.
		 */
		public long getReusedObjects() {
			return statistics.getReusedObjects();
		}

		/**
		 * @return number of deltas whose existing representation was reused in
		 *         the output, as their base object was also output or was
		 *         assumed present for a thin pack. This may be lower than the
		 *         actual number of reused deltas if a cached pack was reused.
		 */
		public long getReusedDeltas() {
			return statistics.getReusedDeltas();
		}

		/**
		 * @return total number of bytes written. This size includes the pack
		 *         header, trailer, thin pack, and reused cached pack(s).
		 */
		public long getTotalBytes() {
			return statistics.getTotalBytes();
		}

		/**
		 * @return size of the thin pack in bytes, if a thin pack was generated.
		 *         A thin pack is created when the client already has objects
		 *         and some deltas are created against those objects, or if a
		 *         cached pack is being used and some deltas will reference
		 *         objects in the cached pack. This size does not include the
		 *         pack header or trailer.
		 */
		public long getThinPackBytes() {
			return statistics.getThinPackBytes();
		}

		/**
		 * @param typeCode
		 *            object type code, e.g. OBJ_COMMIT or OBJ_TREE.
		 * @return information about this type of object in the pack.
		 */
		public ObjectType byObjectType(int typeCode) {
			return new ObjectType(statistics.byObjectType(typeCode));
		}

		/** @return true if the resulting pack file was a shallow pack. */
		public boolean isShallow() {
			return statistics.isShallow();
		}

		/** @return depth (in commits) the pack includes if shallow. */
		public int getDepth() {
			return statistics.getDepth();
		}

		/**
		 * @return time in milliseconds spent enumerating the objects that need
		 *         to be included in the output. This time includes any restarts
		 *         that occur when a cached pack is selected for reuse.
		 */
		public long getTimeCounting() {
			return statistics.getTimeCounting();
		}

		/**
		 * @return time in milliseconds spent matching existing representations
		 *         against objects that will be transmitted, or that the client
		 *         can be assumed to already have.
		 */
		public long getTimeSearchingForReuse() {
			return statistics.getTimeSearchingForReuse();
		}

		/**
		 * @return time in milliseconds spent finding the sizes of all objects
		 *         that will enter the delta compression search window. The
		 *         sizes need to be known to better match similar objects
		 *         together and improve delta compression ratios.
		 */
		public long getTimeSearchingForSizes() {
			return statistics.getTimeSearchingForSizes();
		}

		/**
		 * @return time in milliseconds spent on delta compression. This is
		 *         observed wall-clock time and does not accurately track CPU
		 *         time used when multiple threads were used to perform the
		 *         delta compression.
		 */
		public long getTimeCompressing() {
			return statistics.getTimeCompressing();
		}

		/**
		 * @return time in milliseconds spent writing the pack output, from
		 *         start of header until end of trailer. The transfer speed can
		 *         be approximated by dividing {@link #getTotalBytes()} by this
		 *         value.
		 */
		public long getTimeWriting() {
			return statistics.getTimeWriting();
		}

		/** @return total time spent processing this pack. */
		public long getTimeTotal() {
			return statistics.getTimeTotal();
		}

		/**
		 * @return get the average output speed in terms of bytes-per-second.
		 *         {@code getTotalBytes() / (getTimeWriting() / 1000.0)}.
		 */
		public double getTransferRate() {
			return statistics.getTransferRate();
		}

		/** @return formatted message string for display to clients. */
		public String getMessage() {
			return statistics.getMessage();
		}
	}
2509 
2510 	private class MutableState {
2511 		/** Estimated size of a single ObjectToPack instance. */
2512 		// Assume 64-bit pointers, since this is just an estimate.
2513 		private static final long OBJECT_TO_PACK_SIZE =
2514 				(2 * 8)               // Object header
2515 				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
2516 				+ (8 + 8)             // PackedObjectInfo fields
2517 				+ 8                   // ObjectIdOwnerMap fields
2518 				+ 40                  // AnyObjectId fields
2519 				+ 8;                  // Reference in BlockList
2520 
2521 		private final long totalDeltaSearchBytes;
2522 
2523 		private volatile PackingPhase phase;
2524 
2525 		MutableState() {
2526 			phase = PackingPhase.COUNTING;
2527 			if (config.isDeltaCompress()) {
2528 				int threads = config.getThreads();
2529 				if (threads <= 0)
2530 					threads = Runtime.getRuntime().availableProcessors();
2531 				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
2532 						+ config.getBigFileThreshold();
2533 			} else
2534 				totalDeltaSearchBytes = 0;
2535 		}
2536 
2537 		State snapshot() {
2538 			long objCnt = 0;
2539 			BlockList<ObjectToPack>[] lists = objectsLists;
2540 			if (lists != null) {
2541 				objCnt += lists[OBJ_COMMIT].size();
2542 				objCnt += lists[OBJ_TREE].size();
2543 				objCnt += lists[OBJ_BLOB].size();
2544 				objCnt += lists[OBJ_TAG].size();
2545 				// Exclude CachedPacks.
2546 			}
2547 
2548 			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
2549 			PackingPhase curr = phase;
2550 			if (curr == PackingPhase.COMPRESSING)
2551 				bytesUsed += totalDeltaSearchBytes;
2552 			return new State(curr, bytesUsed);
2553 		}
2554 	}
2555 
	/** Possible states that a PackWriter can be in. */
	public static enum PackingPhase {
		/** Counting objects phase; entered by {@code findObjectsToPack}. */
		COUNTING,

		/** Getting sizes phase. */
		GETTING_SIZES,

		/** Finding sources phase. */
		FINDING_SOURCES,

		/** Compressing objects phase. */
		COMPRESSING,

		/** Writing objects phase. */
		WRITING,

		/** Building bitmaps phase; entered by {@code prepareBitmapIndex}. */
		BUILDING_BITMAPS;
	}
2576 
2577 	/** Summary of the current state of a PackWriter. */
2578 	public class State {
2579 		private final PackingPhase phase;
2580 
2581 		private final long bytesUsed;
2582 
2583 		State(PackingPhase phase, long bytesUsed) {
2584 			this.phase = phase;
2585 			this.bytesUsed = bytesUsed;
2586 		}
2587 
2588 		/** @return the PackConfig used to build the writer. */
2589 		public PackConfig getConfig() {
2590 			return config;
2591 		}
2592 
2593 		/** @return the current phase of the writer. */
2594 		public PackingPhase getPhase() {
2595 			return phase;
2596 		}
2597 
2598 		/** @return an estimate of the total memory used by the writer. */
2599 		public long estimateBytesUsed() {
2600 			return bytesUsed;
2601 		}
2602 
2603 		@SuppressWarnings("nls")
2604 		@Override
2605 		public String toString() {
2606 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2607 		}
2608 	}
2609 }