View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.pack;
46  
47  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
48  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
49  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
50  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
51  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
52  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
53  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
54  
55  import java.io.IOException;
56  import java.io.OutputStream;
57  import java.lang.ref.WeakReference;
58  import java.security.MessageDigest;
59  import java.text.MessageFormat;
60  import java.util.ArrayList;
61  import java.util.Arrays;
62  import java.util.Collection;
63  import java.util.Collections;
64  import java.util.Comparator;
65  import java.util.HashSet;
66  import java.util.Iterator;
67  import java.util.List;
68  import java.util.Map;
69  import java.util.NoSuchElementException;
70  import java.util.Set;
71  import java.util.concurrent.ConcurrentHashMap;
72  import java.util.concurrent.ExecutionException;
73  import java.util.concurrent.Executor;
74  import java.util.concurrent.ExecutorService;
75  import java.util.concurrent.Executors;
76  import java.util.concurrent.Future;
77  import java.util.concurrent.TimeUnit;
78  import java.util.zip.CRC32;
79  import java.util.zip.CheckedOutputStream;
80  import java.util.zip.Deflater;
81  import java.util.zip.DeflaterOutputStream;
82  
83  import org.eclipse.jgit.annotations.NonNull;
84  import org.eclipse.jgit.errors.CorruptObjectException;
85  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
86  import org.eclipse.jgit.errors.LargeObjectException;
87  import org.eclipse.jgit.errors.MissingObjectException;
88  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
89  import org.eclipse.jgit.internal.JGitText;
90  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
91  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
92  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
93  import org.eclipse.jgit.lib.AnyObjectId;
94  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
95  import org.eclipse.jgit.lib.BatchingProgressMonitor;
96  import org.eclipse.jgit.lib.BitmapIndex;
97  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
98  import org.eclipse.jgit.lib.BitmapObject;
99  import org.eclipse.jgit.lib.Constants;
100 import org.eclipse.jgit.lib.NullProgressMonitor;
101 import org.eclipse.jgit.lib.ObjectId;
102 import org.eclipse.jgit.lib.ObjectIdOwnerMap;
103 import org.eclipse.jgit.lib.ObjectIdSet;
104 import org.eclipse.jgit.lib.ObjectLoader;
105 import org.eclipse.jgit.lib.ObjectReader;
106 import org.eclipse.jgit.lib.ProgressMonitor;
107 import org.eclipse.jgit.lib.Repository;
108 import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
109 import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
110 import org.eclipse.jgit.revwalk.DepthWalk;
111 import org.eclipse.jgit.revwalk.ObjectWalk;
112 import org.eclipse.jgit.revwalk.RevCommit;
113 import org.eclipse.jgit.revwalk.RevFlag;
114 import org.eclipse.jgit.revwalk.RevObject;
115 import org.eclipse.jgit.revwalk.RevSort;
116 import org.eclipse.jgit.revwalk.RevTag;
117 import org.eclipse.jgit.revwalk.RevTree;
118 import org.eclipse.jgit.storage.pack.PackConfig;
119 import org.eclipse.jgit.storage.pack.PackStatistics;
120 import org.eclipse.jgit.transport.ObjectCountCallback;
121 import org.eclipse.jgit.transport.WriteAbortedException;
122 import org.eclipse.jgit.util.BlockList;
123 import org.eclipse.jgit.util.TemporaryBuffer;
124 
125 /**
126  * <p>
127  * PackWriter class is responsible for generating pack files from specified set
 * of objects from repository. This implementation produces pack files in format
129  * version 2.
130  * </p>
131  * <p>
132  * Source of objects may be specified in two ways:
133  * <ul>
134  * <li>(usually) by providing sets of interesting and uninteresting objects in
135  * repository - all interesting objects and their ancestors except uninteresting
136  * objects and their ancestors will be included in pack, or</li>
137  * <li>by providing iterator of {@link RevObject} specifying exact list and
138  * order of objects in pack</li>
139  * </ul>
140  * <p>
141  * Typical usage consists of creating an instance, configuring options,
142  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
143  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
144  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
145  * pack is being stored as a file the matching index can be written out after
146  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
147  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
148  * followed by {@link #writeBitmapIndex(OutputStream)}.
149  * </p>
150  * <p>
 * This class provides a set of configurable options and {@link ProgressMonitor}
152  * support, as operations may take a long time for big repositories. Deltas
153  * searching algorithm is <b>NOT IMPLEMENTED</b> yet - this implementation
154  * relies only on deltas and objects reuse.
155  * </p>
156  * <p>
157  * This class is not thread safe. It is intended to be used in one thread as a
158  * single pass to produce one pack. Invoking methods multiple times or out of
159  * order is not supported as internal data structures are destroyed during
160  * certain phases to save memory when packing large repositories.
161  * </p>
162  */
163 public class PackWriter implements AutoCloseable {
	// Pack stream format version emitted by this writer (format v2).
	private static final int PACK_VERSION_GENERATED = 2;

	/** Empty set of objects for {@code preparePack()}. */
	public static final Set<ObjectId> NONE = Collections.emptySet();

	// Registry of all live PackWriter instances. Keys are weak references so
	// that a writer which is garbage collected can be pruned lazily (see the
	// iterator below); used by getInstances() to enumerate active writers.
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<>();
171 
172 	private static final Iterable<PackWriter> instancesIterable = new Iterable<PackWriter>() {
173 		@Override
174 		public Iterator<PackWriter> iterator() {
175 			return new Iterator<PackWriter>() {
176 				private final Iterator<WeakReference<PackWriter>> it =
177 						instances.keySet().iterator();
178 				private PackWriter next;
179 
180 				@Override
181 				public boolean hasNext() {
182 					if (next != null)
183 						return true;
184 					while (it.hasNext()) {
185 						WeakReference<PackWriter> ref = it.next();
186 						next = ref.get();
187 						if (next != null)
188 							return true;
189 						it.remove();
190 					}
191 					return false;
192 				}
193 
194 				@Override
195 				public PackWriter next() {
196 					if (hasNext()) {
197 						PackWriter result = next;
198 						next = null;
199 						return result;
200 					}
201 					throw new NoSuchElementException();
202 				}
203 
204 				@Override
205 				public void remove() {
206 					throw new UnsupportedOperationException();
207 				}
208 			};
209 		}
210 	};
211 
212 	/** @return all allocated, non-released PackWriters instances. */
213 	public static Iterable<PackWriter> getInstances() {
214 		return instancesIterable;
215 	}
216 
217 	@SuppressWarnings("unchecked")
218 	BlockList<ObjectToPack> objectsLists[] = new BlockList[OBJ_TAG + 1];
219 	{
220 		objectsLists[OBJ_COMMIT] = new BlockList<>();
221 		objectsLists[OBJ_TREE] = new BlockList<>();
222 		objectsLists[OBJ_BLOB] = new BlockList<>();
223 		objectsLists[OBJ_TAG] = new BlockList<>();
224 	}
225 
	// Every object accepted into the pack, keyed by id, with per-object
	// packing state attached.
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	// Existing packs appended wholesale to the output; see setUseCachedPacks().
	private List<CachedPack> cachedPacks = new ArrayList<>(2);

	// Targets of annotated tags, hoisted earlier in the pack for locality;
	// see setTagTargets().
	private Set<ObjectId> tagTargets = NONE;

	// Commits excluded from bitmap commit selection; see the preparePack
	// overloads taking a noBitmaps set.
	private Set<? extends ObjectId> excludeFromBitmapSelection = NONE;

	// Indexes whose objects must not appear in the output; see excludeObjects().
	private ObjectIdSet[] excludeInPacks;

	// First exclusion set added — NOTE(review): presumably cached for a
	// fast-path membership test; the lookup site is outside this view.
	private ObjectIdSet excludeInPackLast;

	// Deflater owned by this writer — presumably created lazily and released
	// in close(); TODO confirm, the allocation site is outside this view.
	private Deflater myDeflater;

	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	final PackConfig config;

	// Statistics accumulated while packing (e.g. clientShallowCommits,
	// totalObjects are written/read in this view).
	private final PackStatistics.Accumulator stats;

	private final MutableState state;

	// Weak self-reference registered in the static instances map.
	private final WeakReference<PackWriter> selfRef;

	private PackStatistics.ObjectType.Accumulator typeStats;

	private List<ObjectToPack> sortedByName;

	// Pack checksum — presumably the trailer hash of the written pack;
	// TODO confirm, the assignment site is outside this view.
	private byte packcsum[];

	// See isDeltaBaseAsOffset()/setDeltaBaseAsOffset(); seeded from config.
	private boolean deltaBaseAsOffset;

	// Seeded from PackConfig.isReuseDeltas() in the constructor.
	private boolean reuseDeltas;

	// See isReuseDeltaCommits()/setReuseDeltaCommits().
	private boolean reuseDeltaCommits;

	// Validate reused objects before copying; defaults to true (paranoid).
	private boolean reuseValidate;

	// Produce a thin pack; see setThin().
	private boolean thin;

	// See setUseCachedPacks().
	private boolean useCachedPacks;

	// See setUseBitmaps().
	private boolean useBitmaps;

	// See setIgnoreMissingUninteresting().
	private boolean ignoreMissingUninteresting = true;

	private boolean pruneCurrentObjectList;

	// Set once setShallowPack() configured a depth-limited walk.
	private boolean shallowPack;

	private boolean canBuildBitmaps;

	// See setIndexDisabled(); isIndexDisabled() also reports true while
	// cached packs are in use.
	private boolean indexDisabled;

	// Maximum history depth for a shallow pack; see setShallowPack().
	private int depth;

	// Formerly-shallow client commits being extended by this fetch.
	private Collection<? extends ObjectId> unshallowObjects;

	// Bitmap index under construction — presumably populated by
	// prepareBitmapIndex(ProgressMonitor), which is outside this view.
	private PackBitmapIndexBuilder writeBitmaps;

	private CRC32 crc32;

	// Optional hook invoked with the object count; see setObjectCountCallback().
	private ObjectCountCallback callback;
298 
299 	/**
300 	 * Create writer for specified repository.
301 	 * <p>
302 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
303 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
304 	 *
305 	 * @param repo
306 	 *            repository where objects are stored.
307 	 */
308 	public PackWriter(final Repository repo) {
309 		this(repo, repo.newObjectReader());
310 	}
311 
312 	/**
313 	 * Create a writer to load objects from the specified reader.
314 	 * <p>
315 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
316 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
317 	 *
318 	 * @param reader
319 	 *            reader to read from the repository with.
320 	 */
321 	public PackWriter(final ObjectReader reader) {
322 		this(new PackConfig(), reader);
323 	}
324 
325 	/**
326 	 * Create writer for specified repository.
327 	 * <p>
328 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
329 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
330 	 *
331 	 * @param repo
332 	 *            repository where objects are stored.
333 	 * @param reader
334 	 *            reader to read from the repository with.
335 	 */
336 	public PackWriter(final Repository repo, final ObjectReader reader) {
337 		this(new PackConfig(repo), reader);
338 	}
339 
340 	/**
341 	 * Create writer with a specified configuration.
342 	 * <p>
343 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
344 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
345 	 *
346 	 * @param config
347 	 *            configuration for the pack writer.
348 	 * @param reader
349 	 *            reader to read from the repository with.
350 	 */
351 	public PackWriter(final PackConfig config, final ObjectReader reader) {
352 		this.config = config;
353 		this.reader = reader;
354 		if (reader instanceof ObjectReuseAsIs)
355 			reuseSupport = ((ObjectReuseAsIs) reader);
356 		else
357 			reuseSupport = null;
358 
359 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
360 		reuseDeltas = config.isReuseDeltas();
361 		reuseValidate = true; // be paranoid by default
362 		stats = new PackStatistics.Accumulator();
363 		state = new MutableState();
364 		selfRef = new WeakReference<>(this);
365 		instances.put(selfRef, Boolean.TRUE);
366 	}
367 
368 	/**
369 	 * Set the {@code ObjectCountCallback}.
370 	 * <p>
371 	 * It should be set before calling
372 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
373 	 *
374 	 * @param callback
375 	 *            the callback to set
376 	 *
377 	 * @return this object for chaining.
378 	 */
379 	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
380 		this.callback = callback;
381 		return this;
382 	}
383 
384 	/**
385 	 * Records the set of shallow commits in the client.
386 	 *
387 	 * @param clientShallowCommits
388 	 *            the shallow commits in the client
389 	 */
390 	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
391 		stats.clientShallowCommits = Collections
392 				.unmodifiableSet(new HashSet<>(clientShallowCommits));
393 	}
394 
395 	/**
396 	 * Check whether writer can store delta base as an offset (new style
397 	 * reducing pack size) or should store it as an object id (legacy style,
398 	 * compatible with old readers).
399 	 *
400 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
401 	 *
402 	 * @return true if delta base is stored as an offset; false if it is stored
403 	 *         as an object id.
404 	 */
405 	public boolean isDeltaBaseAsOffset() {
406 		return deltaBaseAsOffset;
407 	}
408 
409 	/**
410 	 * Set writer delta base format. Delta base can be written as an offset in a
411 	 * pack file (new approach reducing file size) or as an object id (legacy
412 	 * approach, compatible with old readers).
413 	 *
414 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
415 	 *
416 	 * @param deltaBaseAsOffset
417 	 *            boolean indicating whether delta base can be stored as an
418 	 *            offset.
419 	 */
420 	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
421 		this.deltaBaseAsOffset = deltaBaseAsOffset;
422 	}
423 
424 	/**
425 	 * Check if the writer will reuse commits that are already stored as deltas.
426 	 *
427 	 * @return true if the writer would reuse commits stored as deltas, assuming
428 	 *         delta reuse is already enabled.
429 	 */
430 	public boolean isReuseDeltaCommits() {
431 		return reuseDeltaCommits;
432 	}
433 
434 	/**
435 	 * Set the writer to reuse existing delta versions of commits.
436 	 *
437 	 * @param reuse
438 	 *            if true, the writer will reuse any commits stored as deltas.
439 	 *            By default the writer does not reuse delta commits.
440 	 */
441 	public void setReuseDeltaCommits(boolean reuse) {
442 		reuseDeltaCommits = reuse;
443 	}
444 
445 	/**
446 	 * Check if the writer validates objects before copying them.
447 	 *
448 	 * @return true if validation is enabled; false if the reader will handle
449 	 *         object validation as a side-effect of it consuming the output.
450 	 */
451 	public boolean isReuseValidatingObjects() {
452 		return reuseValidate;
453 	}
454 
455 	/**
456 	 * Enable (or disable) object validation during packing.
457 	 *
458 	 * @param validate
459 	 *            if true the pack writer will validate an object before it is
460 	 *            put into the output. This additional validation work may be
461 	 *            necessary to avoid propagating corruption from one local pack
462 	 *            file to another local pack file.
463 	 */
464 	public void setReuseValidatingObjects(boolean validate) {
465 		reuseValidate = validate;
466 	}
467 
468 	/** @return true if this writer is producing a thin pack. */
469 	public boolean isThin() {
470 		return thin;
471 	}
472 
473 	/**
474 	 * @param packthin
475 	 *            a boolean indicating whether writer may pack objects with
476 	 *            delta base object not within set of objects to pack, but
477 	 *            belonging to party repository (uninteresting/boundary) as
478 	 *            determined by set; this kind of pack is used only for
479 	 *            transport; true - to produce thin pack, false - otherwise.
480 	 */
481 	public void setThin(final boolean packthin) {
482 		thin = packthin;
483 	}
484 
485 	/** @return true to reuse cached packs. If true index creation isn't available. */
486 	public boolean isUseCachedPacks() {
487 		return useCachedPacks;
488 	}
489 
490 	/**
491 	 * @param useCached
492 	 *            if set to true and a cached pack is present, it will be
493 	 *            appended onto the end of a thin-pack, reducing the amount of
494 	 *            working set space and CPU used by PackWriter. Enabling this
495 	 *            feature prevents PackWriter from creating an index for the
496 	 *            newly created pack, so its only suitable for writing to a
497 	 *            network client, where the client will make the index.
498 	 */
499 	public void setUseCachedPacks(boolean useCached) {
500 		useCachedPacks = useCached;
501 	}
502 
503 	/** @return true to use bitmaps for ObjectWalks, if available. */
504 	public boolean isUseBitmaps() {
505 		return useBitmaps;
506 	}
507 
508 	/**
509 	 * @param useBitmaps
510 	 *            if set to true, bitmaps will be used when preparing a pack.
511 	 */
512 	public void setUseBitmaps(boolean useBitmaps) {
513 		this.useBitmaps = useBitmaps;
514 	}
515 
516 	/** @return true if the index file cannot be created by this PackWriter. */
517 	public boolean isIndexDisabled() {
518 		return indexDisabled || !cachedPacks.isEmpty();
519 	}
520 
521 	/**
522 	 * @param noIndex
523 	 *            true to disable creation of the index file.
524 	 */
525 	public void setIndexDisabled(boolean noIndex) {
526 		this.indexDisabled = noIndex;
527 	}
528 
529 	/**
530 	 * @return true to ignore objects that are uninteresting and also not found
531 	 *         on local disk; false to throw a {@link MissingObjectException}
532 	 *         out of {@link #preparePack(ProgressMonitor, Set, Set)} if an
533 	 *         uninteresting object is not in the source repository. By default,
534 	 *         true, permitting gracefully ignoring of uninteresting objects.
535 	 */
536 	public boolean isIgnoreMissingUninteresting() {
537 		return ignoreMissingUninteresting;
538 	}
539 
540 	/**
541 	 * @param ignore
542 	 *            true if writer should ignore non existing uninteresting
543 	 *            objects during construction set of objects to pack; false
544 	 *            otherwise - non existing uninteresting objects may cause
545 	 *            {@link MissingObjectException}
546 	 */
547 	public void setIgnoreMissingUninteresting(final boolean ignore) {
548 		ignoreMissingUninteresting = ignore;
549 	}
550 
551 	/**
552 	 * Set the tag targets that should be hoisted earlier during packing.
553 	 * <p>
554 	 * Callers may put objects into this set before invoking any of the
555 	 * preparePack methods to influence where an annotated tag's target is
556 	 * stored within the resulting pack. Typically these will be clustered
557 	 * together, and hoisted earlier in the file even if they are ancient
558 	 * revisions, allowing readers to find tag targets with better locality.
559 	 *
560 	 * @param objects
561 	 *            objects that annotated tags point at.
562 	 */
563 	public void setTagTargets(Set<ObjectId> objects) {
564 		tagTargets = objects;
565 	}
566 
567 	/**
568 	 * Configure this pack for a shallow clone.
569 	 *
570 	 * @param depth
571 	 *            maximum depth of history to return. 1 means return only the
572 	 *            "wants".
573 	 * @param unshallow
574 	 *            objects which used to be shallow on the client, but are being
575 	 *            extended as part of this fetch
576 	 */
577 	public void setShallowPack(int depth,
578 			Collection<? extends ObjectId> unshallow) {
579 		this.shallowPack = true;
580 		this.depth = depth;
581 		this.unshallowObjects = unshallow;
582 	}
583 
584 	/**
585 	 * Returns objects number in a pack file that was created by this writer.
586 	 *
587 	 * @return number of objects in pack.
588 	 * @throws IOException
589 	 *             a cached pack cannot supply its object count.
590 	 */
591 	public long getObjectCount() throws IOException {
592 		if (stats.totalObjects == 0) {
593 			long objCnt = 0;
594 
595 			objCnt += objectsLists[OBJ_COMMIT].size();
596 			objCnt += objectsLists[OBJ_TREE].size();
597 			objCnt += objectsLists[OBJ_BLOB].size();
598 			objCnt += objectsLists[OBJ_TAG].size();
599 
600 			for (CachedPack pack : cachedPacks)
601 				objCnt += pack.getObjectCount();
602 			return objCnt;
603 		}
604 		return stats.totalObjects;
605 	}
606 
607 	/**
608 	 * Returns the object ids in the pack file that was created by this writer.
609 	 * <p>
610 	 * This method can only be invoked after
611 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
612 	 * been invoked and completed successfully.
613 	 *
614 	 * @return set of objects in pack.
615 	 * @throws IOException
616 	 *             a cached pack cannot supply its object ids.
617 	 */
618 	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
619 			throws IOException {
620 		if (!cachedPacks.isEmpty())
621 			throw new IOException(
622 					JGitText.get().cachedPacksPreventsListingObjects);
623 
624 		if (writeBitmaps != null) {
625 			return writeBitmaps.getObjectSet();
626 		}
627 
628 		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
629 		for (BlockList<ObjectToPack> objList : objectsLists) {
630 			if (objList != null) {
631 				for (ObjectToPack otp : objList)
632 					r.add(new ObjectIdOwnerMap.Entry(otp) {
633 						// A new entry that copies the ObjectId
634 					});
635 			}
636 		}
637 		return r;
638 	}
639 
640 	/**
641 	 * Add a pack index whose contents should be excluded from the result.
642 	 *
643 	 * @param idx
644 	 *            objects in this index will not be in the output pack.
645 	 */
646 	public void excludeObjects(ObjectIdSet idx) {
647 		if (excludeInPacks == null) {
648 			excludeInPacks = new ObjectIdSet[] { idx };
649 			excludeInPackLast = idx;
650 		} else {
651 			int cnt = excludeInPacks.length;
652 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
653 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
654 			newList[cnt] = idx;
655 			excludeInPacks = newList;
656 		}
657 	}
658 
659 	/**
660 	 * Prepare the list of objects to be written to the pack stream.
661 	 * <p>
662 	 * Iterator <b>exactly</b> determines which objects are included in a pack
663 	 * and order they appear in pack (except that objects order by type is not
664 	 * needed at input). This order should conform general rules of ordering
665 	 * objects in git - by recency and path (type and delta-base first is
666 	 * internally secured) and responsibility for guaranteeing this order is on
667 	 * a caller side. Iterator must return each id of object to write exactly
668 	 * once.
669 	 * </p>
670 	 *
671 	 * @param objectsSource
672 	 *            iterator of object to store in a pack; order of objects within
673 	 *            each type is important, ordering by type is not needed;
674 	 *            allowed types for objects are {@link Constants#OBJ_COMMIT},
675 	 *            {@link Constants#OBJ_TREE}, {@link Constants#OBJ_BLOB} and
676 	 *            {@link Constants#OBJ_TAG}; objects returned by iterator may be
677 	 *            later reused by caller as object id and type are internally
678 	 *            copied in each iteration.
679 	 * @throws IOException
680 	 *             when some I/O problem occur during reading objects.
681 	 */
682 	public void preparePack(@NonNull Iterator<RevObject> objectsSource)
683 			throws IOException {
684 		while (objectsSource.hasNext()) {
685 			addObject(objectsSource.next());
686 		}
687 	}
688 
689 	/**
690 	 * Prepare the list of objects to be written to the pack stream.
691 	 * <p>
692 	 * Basing on these 2 sets, another set of objects to put in a pack file is
693 	 * created: this set consists of all objects reachable (ancestors) from
694 	 * interesting objects, except uninteresting objects and their ancestors.
695 	 * This method uses class {@link ObjectWalk} extensively to find out that
696 	 * appropriate set of output objects and their optimal order in output pack.
697 	 * Order is consistent with general git in-pack rules: sort by object type,
698 	 * recency, path and delta-base first.
699 	 * </p>
700 	 *
701 	 * @param countingMonitor
702 	 *            progress during object enumeration.
703 	 * @param want
704 	 *            collection of objects to be marked as interesting (start
705 	 *            points of graph traversal). Must not be {@code null}.
706 	 * @param have
707 	 *            collection of objects to be marked as uninteresting (end
708 	 *            points of graph traversal). Pass {@link #NONE} if all objects
709 	 *            reachable from {@code want} are desired, such as when serving
710 	 *            a clone.
711 	 * @throws IOException
712 	 *             when some I/O problem occur during reading objects.
713 	 */
714 	public void preparePack(ProgressMonitor countingMonitor,
715 			@NonNull Set<? extends ObjectId> want,
716 			@NonNull Set<? extends ObjectId> have) throws IOException {
717 		preparePack(countingMonitor, want, have, NONE, NONE);
718 	}
719 
720 	/**
721 	 * Prepare the list of objects to be written to the pack stream.
722 	 * <p>
723 	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
724 	 * specifying commits that should not be walked past ("shallow" commits).
725 	 * The caller is responsible for filtering out commits that should not be
726 	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
727 	 * the shallow set.
728 	 *
729 	 * @param countingMonitor
730 	 *            progress during object enumeration.
731 	 * @param want
732 	 *            objects of interest, ancestors of which will be included in
733 	 *            the pack. Must not be {@code null}.
734 	 * @param have
735 	 *            objects whose ancestors (up to and including {@code shallow}
736 	 *            commits) do not need to be included in the pack because they
737 	 *            are already available from elsewhere. Must not be
738 	 *            {@code null}.
739 	 * @param shallow
740 	 *            commits indicating the boundary of the history marked with
741 	 *            {@code have}. Shallow commits have parents but those parents
742 	 *            are considered not to be already available. Parents of
743 	 *            {@code shallow} commits and earlier generations will be
744 	 *            included in the pack if requested by {@code want}. Must not be
745 	 *            {@code null}.
746 	 * @throws IOException
747 	 *             an I/O problem occurred while reading objects.
748 	 */
749 	public void preparePack(ProgressMonitor countingMonitor,
750 			@NonNull Set<? extends ObjectId> want,
751 			@NonNull Set<? extends ObjectId> have,
752 			@NonNull Set<? extends ObjectId> shallow) throws IOException {
753 		preparePack(countingMonitor, want, have, shallow, NONE);
754 	}
755 
756 	/**
757 	 * Prepare the list of objects to be written to the pack stream.
758 	 * <p>
759 	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
760 	 * specifying commits that should not be walked past ("shallow" commits).
761 	 * The caller is responsible for filtering out commits that should not be
762 	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
763 	 * the shallow set.
764 	 *
765 	 * @param countingMonitor
766 	 *            progress during object enumeration.
767 	 * @param want
768 	 *            objects of interest, ancestors of which will be included in
769 	 *            the pack. Must not be {@code null}.
770 	 * @param have
771 	 *            objects whose ancestors (up to and including {@code shallow}
772 	 *            commits) do not need to be included in the pack because they
773 	 *            are already available from elsewhere. Must not be
774 	 *            {@code null}.
775 	 * @param shallow
776 	 *            commits indicating the boundary of the history marked with
777 	 *            {@code have}. Shallow commits have parents but those parents
778 	 *            are considered not to be already available. Parents of
779 	 *            {@code shallow} commits and earlier generations will be
780 	 *            included in the pack if requested by {@code want}. Must not be
781 	 *            {@code null}.
782 	 * @param noBitmaps
783 	 *            collection of objects to be excluded from bitmap commit
784 	 *            selection.
785 	 * @throws IOException
786 	 *             an I/O problem occurred while reading objects.
787 	 */
788 	public void preparePack(ProgressMonitor countingMonitor,
789 			@NonNull Set<? extends ObjectId> want,
790 			@NonNull Set<? extends ObjectId> have,
791 			@NonNull Set<? extends ObjectId> shallow,
792 			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
793 		try (ObjectWalk ow = getObjectWalk()) {
794 			ow.assumeShallow(shallow);
795 			preparePack(countingMonitor, ow, want, have, noBitmaps);
796 		}
797 	}
798 
799 	private ObjectWalk getObjectWalk() {
800 		return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
801 				: new ObjectWalk(reader);
802 	}
803 
804 	/**
805 	 * Prepare the list of objects to be written to the pack stream.
806 	 * <p>
807 	 * Basing on these 2 sets, another set of objects to put in a pack file is
808 	 * created: this set consists of all objects reachable (ancestors) from
809 	 * interesting objects, except uninteresting objects and their ancestors.
810 	 * This method uses class {@link ObjectWalk} extensively to find out that
811 	 * appropriate set of output objects and their optimal order in output pack.
812 	 * Order is consistent with general git in-pack rules: sort by object type,
813 	 * recency, path and delta-base first.
814 	 * </p>
815 	 *
816 	 * @param countingMonitor
817 	 *            progress during object enumeration.
818 	 * @param walk
819 	 *            ObjectWalk to perform enumeration.
820 	 * @param interestingObjects
821 	 *            collection of objects to be marked as interesting (start
822 	 *            points of graph traversal). Must not be {@code null}.
823 	 * @param uninterestingObjects
824 	 *            collection of objects to be marked as uninteresting (end
825 	 *            points of graph traversal). Pass {@link #NONE} if all objects
826 	 *            reachable from {@code want} are desired, such as when serving
827 	 *            a clone.
828 	 * @param noBitmaps
829 	 *            collection of objects to be excluded from bitmap commit
830 	 *            selection.
831 	 * @throws IOException
832 	 *             when some I/O problem occur during reading objects.
833 	 */
834 	public void preparePack(ProgressMonitor countingMonitor,
835 			@NonNull ObjectWalk walk,
836 			@NonNull Set<? extends ObjectId> interestingObjects,
837 			@NonNull Set<? extends ObjectId> uninterestingObjects,
838 			@NonNull Set<? extends ObjectId> noBitmaps)
839 			throws IOException {
840 		if (countingMonitor == null)
841 			countingMonitor = NullProgressMonitor.INSTANCE;
842 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
843 			throw new IllegalArgumentException(
844 					JGitText.get().shallowPacksRequireDepthWalk);
845 		findObjectsToPack(countingMonitor, walk, interestingObjects,
846 				uninterestingObjects, noBitmaps);
847 	}
848 
849 	/**
850 	 * Determine if the pack file will contain the requested object.
851 	 *
852 	 * @param id
853 	 *            the object to test the existence of.
854 	 * @return true if the object will appear in the output pack file.
855 	 * @throws IOException
856 	 *             a cached pack cannot be examined.
857 	 */
858 	public boolean willInclude(final AnyObjectId id) throws IOException {
859 		ObjectToPack obj = objectsMap.get(id);
860 		return obj != null && !obj.isEdge();
861 	}
862 
863 	/**
864 	 * Lookup the ObjectToPack object for a given ObjectId.
865 	 *
866 	 * @param id
867 	 *            the object to find in the pack.
868 	 * @return the object we are packing, or null.
869 	 */
870 	public ObjectToPack get(AnyObjectId id) {
871 		ObjectToPack obj = objectsMap.get(id);
872 		return obj != null && !obj.isEdge() ? obj : null;
873 	}
874 
875 	/**
876 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
877 	 * pack, as used to name a pack file in repository.
878 	 *
879 	 * @return ObjectId representing SHA-1 name of a pack that was created.
880 	 */
881 	public ObjectId computeName() {
882 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
883 		final MessageDigest md = Constants.newMessageDigest();
884 		for (ObjectToPack otp : sortByName()) {
885 			otp.copyRawTo(buf, 0);
886 			md.update(buf, 0, OBJECT_ID_LENGTH);
887 		}
888 		return ObjectId.fromRaw(md.digest());
889 	}
890 
891 	/**
892 	 * Returns the index format version that will be written.
893 	 * <p>
894 	 * This method can only be invoked after
895 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
896 	 * been invoked and completed successfully.
897 	 *
898 	 * @return the index format version.
899 	 */
900 	public int getIndexVersion() {
901 		int indexVersion = config.getIndexVersion();
902 		if (indexVersion <= 0) {
903 			for (BlockList<ObjectToPack> objs : objectsLists)
904 				indexVersion = Math.max(indexVersion,
905 						PackIndexWriter.oldestPossibleFormat(objs));
906 		}
907 		return indexVersion;
908 	}
909 
910 	/**
911 	 * Create an index file to match the pack file just written.
912 	 * <p>
913 	 * Called after
914 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
915 	 * <p>
916 	 * Writing an index is only required for local pack storage. Packs sent on
917 	 * the network do not need to create an index.
918 	 *
919 	 * @param indexStream
920 	 *            output for the index data. Caller is responsible for closing
921 	 *            this stream.
922 	 * @throws IOException
923 	 *             the index data could not be written to the supplied stream.
924 	 */
925 	public void writeIndex(final OutputStream indexStream) throws IOException {
926 		if (isIndexDisabled())
927 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
928 
929 		long writeStart = System.currentTimeMillis();
930 		final PackIndexWriter iw = PackIndexWriter.createVersion(
931 				indexStream, getIndexVersion());
932 		iw.write(sortByName(), packcsum);
933 		stats.timeWriting += System.currentTimeMillis() - writeStart;
934 	}
935 
936 	/**
937 	 * Create a bitmap index file to match the pack file just written.
938 	 * <p>
939 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
940 	 *
941 	 * @param bitmapIndexStream
942 	 *            output for the bitmap index data. Caller is responsible for
943 	 *            closing this stream.
944 	 * @throws IOException
945 	 *             the index data could not be written to the supplied stream.
946 	 */
947 	public void writeBitmapIndex(final OutputStream bitmapIndexStream)
948 			throws IOException {
949 		if (writeBitmaps == null)
950 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
951 
952 		long writeStart = System.currentTimeMillis();
953 		final PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
954 		iw.write(writeBitmaps, packcsum);
955 		stats.timeWriting += System.currentTimeMillis() - writeStart;
956 	}
957 
958 	private List<ObjectToPack> sortByName() {
959 		if (sortedByName == null) {
960 			int cnt = 0;
961 			cnt += objectsLists[OBJ_COMMIT].size();
962 			cnt += objectsLists[OBJ_TREE].size();
963 			cnt += objectsLists[OBJ_BLOB].size();
964 			cnt += objectsLists[OBJ_TAG].size();
965 
966 			sortedByName = new BlockList<>(cnt);
967 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
968 			sortedByName.addAll(objectsLists[OBJ_TREE]);
969 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
970 			sortedByName.addAll(objectsLists[OBJ_TAG]);
971 			Collections.sort(sortedByName);
972 		}
973 		return sortedByName;
974 	}
975 
976 	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
977 			long cnt) {
978 		state.phase = phase;
979 		String task;
980 		switch (phase) {
981 		case COUNTING:
982 			task = JGitText.get().countingObjects;
983 			break;
984 		case GETTING_SIZES:
985 			task = JGitText.get().searchForSizes;
986 			break;
987 		case FINDING_SOURCES:
988 			task = JGitText.get().searchForReuse;
989 			break;
990 		case COMPRESSING:
991 			task = JGitText.get().compressingObjects;
992 			break;
993 		case WRITING:
994 			task = JGitText.get().writingObjects;
995 			break;
996 		case BUILDING_BITMAPS:
997 			task = JGitText.get().buildingBitmaps;
998 			break;
999 		default:
1000 			throw new IllegalArgumentException(
1001 					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
1002 		}
1003 		monitor.beginTask(task, (int) cnt);
1004 	}
1005 
	/** Finish the progress task started by the matching beginPhase call. */
	private void endPhase(ProgressMonitor monitor) {
		monitor.endTask();
	}
1009 
1010 	/**
1011 	 * Write the prepared pack to the supplied stream.
1012 	 * <p>
1013 	 * Called after
1014 	 * {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set, Set)} or
1015 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
1016 	 * <p>
1017 	 * Performs delta search if enabled and writes the pack stream.
1018 	 * <p>
1019 	 * All reused objects data checksum (Adler32/CRC32) is computed and
1020 	 * validated against existing checksum.
1021 	 *
1022 	 * @param compressMonitor
1023 	 *            progress monitor to report object compression work.
1024 	 * @param writeMonitor
1025 	 *            progress monitor to report the number of objects written.
1026 	 * @param packStream
1027 	 *            output stream of pack data. The stream should be buffered by
1028 	 *            the caller. The caller is responsible for closing the stream.
1029 	 * @throws IOException
1030 	 *             an error occurred reading a local object's data to include in
1031 	 *             the pack, or writing compressed object data to the output
1032 	 *             stream.
1033 	 * @throws WriteAbortedException
1034 	 *             the write operation is aborted by {@link ObjectCountCallback}
1035 	 *             .
1036 	 */
1037 	public void writePack(ProgressMonitor compressMonitor,
1038 			ProgressMonitor writeMonitor, OutputStream packStream)
1039 			throws IOException {
1040 		if (compressMonitor == null)
1041 			compressMonitor = NullProgressMonitor.INSTANCE;
1042 		if (writeMonitor == null)
1043 			writeMonitor = NullProgressMonitor.INSTANCE;
1044 
1045 		excludeInPacks = null;
1046 		excludeInPackLast = null;
1047 
1048 		boolean needSearchForReuse = reuseSupport != null && (
1049 				   reuseDeltas
1050 				|| config.isReuseObjects()
1051 				|| !cachedPacks.isEmpty());
1052 
1053 		if (compressMonitor instanceof BatchingProgressMonitor) {
1054 			long delay = 1000;
1055 			if (needSearchForReuse && config.isDeltaCompress())
1056 				delay = 500;
1057 			((BatchingProgressMonitor) compressMonitor).setDelayStart(
1058 					delay,
1059 					TimeUnit.MILLISECONDS);
1060 		}
1061 
1062 		if (needSearchForReuse)
1063 			searchForReuse(compressMonitor);
1064 		if (config.isDeltaCompress())
1065 			searchForDeltas(compressMonitor);
1066 
1067 		crc32 = new CRC32();
1068 		final PackOutputStream out = new PackOutputStream(
1069 			writeMonitor,
1070 			isIndexDisabled()
1071 				? packStream
1072 				: new CheckedOutputStream(packStream, crc32),
1073 			this);
1074 
1075 		long objCnt = getObjectCount();
1076 		stats.totalObjects = objCnt;
1077 		if (callback != null)
1078 			callback.setObjectCount(objCnt);
1079 		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
1080 		long writeStart = System.currentTimeMillis();
1081 		try {
1082 			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
1083 			out.flush();
1084 
1085 			writeObjects(out);
1086 			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
1087 				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1088 					if (typeStat == null)
1089 						continue;
1090 					stats.thinPackBytes += typeStat.bytes;
1091 				}
1092 			}
1093 
1094 			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
1095 			for (CachedPack pack : cachedPacks) {
1096 				long deltaCnt = pack.getDeltaCount();
1097 				stats.reusedObjects += pack.getObjectCount();
1098 				stats.reusedDeltas += deltaCnt;
1099 				stats.totalDeltas += deltaCnt;
1100 				reuseSupport.copyPackAsIs(out, pack);
1101 			}
1102 			writeChecksum(out);
1103 			out.flush();
1104 		} finally {
1105 			stats.timeWriting = System.currentTimeMillis() - writeStart;
1106 			stats.depth = depth;
1107 
1108 			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1109 				if (typeStat == null)
1110 					continue;
1111 				typeStat.cntDeltas += typeStat.reusedDeltas;
1112 				stats.reusedObjects += typeStat.reusedObjects;
1113 				stats.reusedDeltas += typeStat.reusedDeltas;
1114 				stats.totalDeltas += typeStat.cntDeltas;
1115 			}
1116 		}
1117 
1118 		stats.totalBytes = out.length();
1119 		reader.close();
1120 		endPhase(writeMonitor);
1121 	}
1122 
1123 	/**
1124 	 * @return description of what this PackWriter did in order to create the
1125 	 *         final pack stream. This should only be invoked after the calls to
1126 	 *         create the pack/index/bitmap have completed.
1127 	 */
1128 	public PackStatistics getStatistics() {
1129 		return new PackStatistics(stats);
1130 	}
1131 
	/** @return snapshot of the current state of this PackWriter. */
	public State getState() {
		return state.snapshot();
	}
1136 
1137 	/**
1138 	 * Release all resources used by this writer.
1139 	 */
1140 	@Override
1141 	public void close() {
1142 		reader.close();
1143 		if (myDeflater != null) {
1144 			myDeflater.end();
1145 			myDeflater = null;
1146 		}
1147 		instances.remove(selfRef);
1148 	}
1149 
1150 	private void searchForReuse(ProgressMonitor monitor) throws IOException {
1151 		long cnt = 0;
1152 		cnt += objectsLists[OBJ_COMMIT].size();
1153 		cnt += objectsLists[OBJ_TREE].size();
1154 		cnt += objectsLists[OBJ_BLOB].size();
1155 		cnt += objectsLists[OBJ_TAG].size();
1156 
1157 		long start = System.currentTimeMillis();
1158 		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
1159 		if (cnt <= 4096) {
1160 			// For small object counts, do everything as one list.
1161 			BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
1162 			tmp.addAll(objectsLists[OBJ_TAG]);
1163 			tmp.addAll(objectsLists[OBJ_COMMIT]);
1164 			tmp.addAll(objectsLists[OBJ_TREE]);
1165 			tmp.addAll(objectsLists[OBJ_BLOB]);
1166 			searchForReuse(monitor, tmp);
1167 			if (pruneCurrentObjectList) {
1168 				// If the list was pruned, we need to re-prune the main lists.
1169 				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
1170 				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
1171 				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
1172 				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
1173 			}
1174 		} else {
1175 			searchForReuse(monitor, objectsLists[OBJ_TAG]);
1176 			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
1177 			searchForReuse(monitor, objectsLists[OBJ_TREE]);
1178 			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
1179 		}
1180 		endPhase(monitor);
1181 		stats.timeSearchingForReuse = System.currentTimeMillis() - start;
1182 
1183 		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
1184 			cutDeltaChains(objectsLists[OBJ_TREE]);
1185 			cutDeltaChains(objectsLists[OBJ_BLOB]);
1186 		}
1187 	}
1188 
1189 	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
1190 			throws IOException, MissingObjectException {
1191 		pruneCurrentObjectList = false;
1192 		reuseSupport.selectObjectRepresentation(this, monitor, list);
1193 		if (pruneCurrentObjectList)
1194 			pruneEdgesFromObjectList(list);
1195 	}
1196 
	/**
	 * Limit reused delta chains to the configured maximum depth.
	 * <p>
	 * Walks each object's base chain, propagating a chain-length count up
	 * the chain; any base at or beyond the maximum depth that is itself a
	 * delta is reselected as a non-delta representation, breaking the
	 * chain there.
	 *
	 * @param list
	 *            objects whose delta chains should be measured and cut.
	 * @throws IOException
	 *             a non-delta representation could not be reselected.
	 */
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				// A base already marked with a chain length >= d was
				// reached from a longer path; stop early.
				if (d < b.getChainLength())
					break;
				b.setChainLength(++d);
				if (d >= max && b.isDeltaRepresentation()) {
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		if (config.isDeltaCompress()) {
			// Clear the counts; presumably the field is reused by the
			// later delta search phase — TODO confirm.
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1219 
	/**
	 * Select delta candidates, size them, order them, and run the delta
	 * search over trees and blobs (commits and tags are skipped).
	 *
	 * @param monitor
	 *            receiver of progress for sizing and compressing phases.
	 * @throws MissingObjectException
	 *             a required object disappeared and could not be skipped.
	 * @throws IncorrectObjectTypeException
	 *             an object had an unexpected type while being read.
	 * @throws IOException
	 *             objects could not be read while sizing or searching.
	 */
	private void searchForDeltas(ProgressMonitor monitor)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// Commits and annotated tags tend to have too many differences to
		// really benefit from delta compression. Consequently just don't
		// bother examining those types here.
		//
		ObjectToPack[] list = new ObjectToPack[
				  objectsLists[OBJ_TREE].size()
				+ objectsLists[OBJ_BLOB].size()
				+ edgeObjects.size()];
		int cnt = 0;
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
		if (cnt == 0)
			return;
		int nonEdgeCnt = cnt;

		// Queue up any edge objects that we might delta against.  We won't
		// be sending these as we assume the other side has them, but we need
		// them in the search phase below.
		//
		for (ObjectToPack eo : edgeObjects) {
			eo.setWeight(0);
			list[cnt++] = eo;
		}

		// Compute the sizes of the objects so we can do a proper sort.
		// We let the reader skip missing objects if it chooses. For
		// some readers this can be a huge win. We detect missing objects
		// by having set the weights above to 0 and allowing the delta
		// search code to discover the missing object and skip over it, or
		// abort with an exception if we actually had to have it.
		//
		final long sizingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
		try {
			final long limit = Math.min(
					config.getBigFileThreshold(),
					Integer.MAX_VALUE);
			for (;;) {
				try {
					if (!sizeQueue.next())
						break;
				} catch (MissingObjectException notFound) {
					monitor.update(1);
					// Missing edge objects are tolerable when configured;
					// mark them do-not-delta instead of failing.
					if (ignoreMissingUninteresting) {
						ObjectToPack otp = sizeQueue.getCurrent();
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}

						otp = objectsMap.get(notFound.getObjectId());
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}
					}
					throw notFound;
				}

				ObjectToPack otp = sizeQueue.getCurrent();
				if (otp == null)
					otp = objectsMap.get(sizeQueue.getObjectId());

				long sz = sizeQueue.getSize();
				if (DeltaIndex.BLKSZ < sz && sz < limit)
					otp.setWeight((int) sz);
				else
					otp.setDoNotDelta(); // too small, or too big
				monitor.update(1);
			}
		} finally {
			sizeQueue.release();
		}
		endPhase(monitor);
		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;

		// Sort the objects by path hash so like files are near each other,
		// and then by size descending so that bigger files are first. This
		// applies "Linus' Law" which states that newer files tend to be the
		// bigger ones, because source files grow and hardly ever shrink.
		//
		Arrays.sort(list, 0, cnt, new Comparator<ObjectToPack>() {
			@Override
			public int compare(ObjectToPack a, ObjectToPack b) {
				// Do-not-delta objects sink to the end of the array so
				// they can be trimmed off below.
				int cmp = (a.isDoNotDelta() ? 1 : 0)
						- (b.isDoNotDelta() ? 1 : 0);
				if (cmp != 0)
					return cmp;

				cmp = a.getType() - b.getType();
				if (cmp != 0)
					return cmp;

				cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
				if (cmp != 0)
					return cmp;

				cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
				if (cmp != 0)
					return cmp;

				// Edge objects sort ahead of non-edge objects with the
				// same path so they can serve as bases.
				cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
				if (cmp != 0)
					return cmp;

				return b.getWeight() - a.getWeight();
			}
		});

		// Above we stored the objects we cannot delta onto the end.
		// Remove them from the list so we don't waste time on them.
		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
			if (!list[cnt - 1].isEdge())
				nonEdgeCnt--;
			cnt--;
		}
		if (cnt == 0)
			return;

		final long searchStart = System.currentTimeMillis();
		searchForDeltas(monitor, list, cnt);
		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
		stats.timeCompressing = System.currentTimeMillis() - searchStart;

		for (int i = 0; i < cnt; i++)
			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
				stats.deltasFound++;
	}
1353 
1354 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1355 		for (ObjectToPack otp : objectsLists[type]) {
1356 			if (otp.isDoNotDelta()) // delta is disabled for this path
1357 				continue;
1358 			if (otp.isDeltaRepresentation()) // already reusing a delta
1359 				continue;
1360 			otp.setWeight(0);
1361 			list[cnt++] = otp;
1362 		}
1363 		return cnt;
1364 	}
1365 
	/**
	 * Force selection of a non-delta (whole) representation for an object.
	 * Temporarily disables delta reuse so the reuse provider cannot hand
	 * back another delta, then restores the previous setting.
	 * <p>
	 * NOTE(review): if selectObjectRepresentation throws, {@code reuseDeltas}
	 * is left {@code false}; confirm whether that is intentional before
	 * converting to try/finally.
	 *
	 * @param otp
	 *            the object whose representation must be re-chosen.
	 * @throws IOException
	 *             the object's representations could not be examined.
	 */
	private void reselectNonDelta(ObjectToPack otp) throws IOException {
		otp.clearDeltaBase();
		otp.clearReuseAsIs();
		boolean old = reuseDeltas;
		reuseDeltas = false;
		reuseSupport.selectObjectRepresentation(this,
				NullProgressMonitor.INSTANCE,
				Collections.singleton(otp));
		reuseDeltas = old;
	}
1376 
1377 	private void searchForDeltas(final ProgressMonitor monitor,
1378 			final ObjectToPack[] list, final int cnt)
1379 			throws MissingObjectException, IncorrectObjectTypeException,
1380 			LargeObjectException, IOException {
1381 		int threads = config.getThreads();
1382 		if (threads == 0)
1383 			threads = Runtime.getRuntime().availableProcessors();
1384 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1385 			singleThreadDeltaSearch(monitor, list, cnt);
1386 		else
1387 			parallelDeltaSearch(monitor, list, cnt, threads);
1388 	}
1389 
1390 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1391 			ObjectToPack[] list, int cnt) throws IOException {
1392 		long totalWeight = 0;
1393 		for (int i = 0; i < cnt; i++) {
1394 			ObjectToPack o = list[i];
1395 			totalWeight += DeltaTask.getAdjustedWeight(o);
1396 		}
1397 
1398 		long bytesPerUnit = 1;
1399 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1400 			bytesPerUnit <<= 10;
1401 		int cost = (int) (totalWeight / bytesPerUnit);
1402 		if (totalWeight % bytesPerUnit != 0)
1403 			cost++;
1404 
1405 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1406 		new DeltaWindow(config, new DeltaCache(config), reader,
1407 				monitor, bytesPerUnit,
1408 				list, 0, cnt).search();
1409 		endPhase(monitor);
1410 	}
1411 
1412 	private void parallelDeltaSearch(ProgressMonitor monitor,
1413 			ObjectToPack[] list, int cnt, int threads) throws IOException {
1414 		DeltaCache dc = new ThreadSafeDeltaCache(config);
1415 		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
1416 		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
1417 				reader, dc, pm,
1418 				list, 0, cnt);
1419 		taskBlock.partitionTasks();
1420 		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
1421 		pm.startWorkers(taskBlock.tasks.size());
1422 
1423 		Executor executor = config.getExecutor();
1424 		final List<Throwable> errors =
1425 				Collections.synchronizedList(new ArrayList<Throwable>(threads));
1426 		if (executor instanceof ExecutorService) {
1427 			// Caller supplied us a service, use it directly.
1428 			runTasks((ExecutorService) executor, pm, taskBlock, errors);
1429 		} else if (executor == null) {
1430 			// Caller didn't give us a way to run the tasks, spawn up a
1431 			// temporary thread pool and make sure it tears down cleanly.
1432 			ExecutorService pool = Executors.newFixedThreadPool(threads);
1433 			try {
1434 				runTasks(pool, pm, taskBlock, errors);
1435 			} finally {
1436 				pool.shutdown();
1437 				for (;;) {
1438 					try {
1439 						if (pool.awaitTermination(60, TimeUnit.SECONDS))
1440 							break;
1441 					} catch (InterruptedException e) {
1442 						throw new IOException(
1443 								JGitText.get().packingCancelledDuringObjectsWriting);
1444 					}
1445 				}
1446 			}
1447 		} else {
1448 			// The caller gave us an executor, but it might not do
1449 			// asynchronous execution.  Wrap everything and hope it
1450 			// can schedule these for us.
1451 			for (final DeltaTask task : taskBlock.tasks) {
1452 				executor.execute(new Runnable() {
1453 					@Override
1454 					public void run() {
1455 						try {
1456 							task.call();
1457 						} catch (Throwable failure) {
1458 							errors.add(failure);
1459 						}
1460 					}
1461 				});
1462 			}
1463 			try {
1464 				pm.waitForCompletion();
1465 			} catch (InterruptedException ie) {
1466 				// We can't abort the other tasks as we have no handle.
1467 				// Cross our fingers and just break out anyway.
1468 				//
1469 				throw new IOException(
1470 						JGitText.get().packingCancelledDuringObjectsWriting);
1471 			}
1472 		}
1473 
1474 		// If any task threw an error, try to report it back as
1475 		// though we weren't using a threaded search algorithm.
1476 		//
1477 		if (!errors.isEmpty()) {
1478 			Throwable err = errors.get(0);
1479 			if (err instanceof Error)
1480 				throw (Error) err;
1481 			if (err instanceof RuntimeException)
1482 				throw (RuntimeException) err;
1483 			if (err instanceof IOException)
1484 				throw (IOException) err;
1485 
1486 			IOException fail = new IOException(err.getMessage());
1487 			fail.initCause(err);
1488 			throw fail;
1489 		}
1490 		endPhase(monitor);
1491 	}
1492 
1493 	private static void runTasks(ExecutorService pool,
1494 			ThreadSafeProgressMonitor pm,
1495 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1496 		List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
1497 		for (DeltaTask task : tb.tasks)
1498 			futures.add(pool.submit(task));
1499 
1500 		try {
1501 			pm.waitForCompletion();
1502 			for (Future<?> f : futures) {
1503 				try {
1504 					f.get();
1505 				} catch (ExecutionException failed) {
1506 					errors.add(failed.getCause());
1507 				}
1508 			}
1509 		} catch (InterruptedException ie) {
1510 			for (Future<?> f : futures)
1511 				f.cancel(true);
1512 			throw new IOException(
1513 					JGitText.get().packingCancelledDuringObjectsWriting);
1514 		}
1515 	}
1516 
1517 	private void writeObjects(PackOutputStream out) throws IOException {
1518 		writeObjects(out, objectsLists[OBJ_COMMIT]);
1519 		writeObjects(out, objectsLists[OBJ_TAG]);
1520 		writeObjects(out, objectsLists[OBJ_TREE]);
1521 		writeObjects(out, objectsLists[OBJ_BLOB]);
1522 	}
1523 
1524 	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
1525 			throws IOException {
1526 		if (list.isEmpty())
1527 			return;
1528 
1529 		typeStats = stats.objectTypes[list.get(0).getType()];
1530 		long beginOffset = out.length();
1531 
1532 		if (reuseSupport != null) {
1533 			reuseSupport.writeObjects(out, list);
1534 		} else {
1535 			for (ObjectToPack otp : list)
1536 				out.writeObject(otp);
1537 		}
1538 
1539 		typeStats.bytes += out.length() - beginOffset;
1540 		typeStats.cntObjects = list.size();
1541 	}
1542 
	/**
	 * Write a single object to the pack stream, unless already written.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            the object to write.
	 * @throws IOException
	 *             the object could not be read or written.
	 */
	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
		if (!otp.isWritten())
			writeObjectImpl(out, otp);
	}
1547 
	/**
	 * Write one object, recursing into its delta base first when needed,
	 * and falling back from as-is reuse to deflating fresh data if the
	 * selected representation disappears mid-write.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            the object to write; marked written on success.
	 * @throws IOException
	 *             the object could not be read or written, or its data
	 *             became corrupt after writing had started.
	 */
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		// Loop: each failed as-is copy may reselect a new representation,
		// which might again be reusable as-is.
		while (otp.isReuseAsIs()) {
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				if (otp.getOffset() == out.length()) {
					// Nothing was emitted yet; safe to pick another
					// representation and retry.
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				} else {
					// Object writing already started, we cannot recover.
					//
					CorruptObjectException coe;
					coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
					coe.initCause(gone);
					throw coe;
				}
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation())
			writeDeltaObjectDeflate(out, otp);
		else
			writeWholeObjectDeflate(out, otp);
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1606 
1607 	private void writeBase(PackOutputStream out, ObjectToPack base)
1608 			throws IOException {
1609 		if (base != null && !base.isWritten() && !base.isEdge())
1610 			writeObjectImpl(out, base);
1611 	}
1612 
1613 	private void writeWholeObjectDeflate(PackOutputStream out,
1614 			final ObjectToPack otp) throws IOException {
1615 		final Deflater deflater = deflater();
1616 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1617 
1618 		crc32.reset();
1619 		otp.setOffset(out.length());
1620 		out.writeHeader(otp, ldr.getSize());
1621 
1622 		deflater.reset();
1623 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1624 		ldr.copyTo(dst);
1625 		dst.finish();
1626 	}
1627 
	/**
	 * Write an object as a delta, ensuring its base is written first,
	 * preferring an already-deflated cached delta when one is available.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            the object to write in delta form.
	 * @throws IOException
	 *             the delta or its base could not be read or written.
	 */
	private void writeDeltaObjectDeflate(PackOutputStream out,
			final ObjectToPack otp) throws IOException {
		writeBase(out, otp.getDeltaBase());

		crc32.reset();
		otp.setOffset(out.length());

		// Fast path: a previously computed, already-compressed delta may
		// still be cached; emit it directly without re-deflating.
		DeltaCache.Ref ref = otp.popCachedDelta();
		if (ref != null) {
			byte[] zbuf = ref.get();
			if (zbuf != null) {
				out.writeHeader(otp, otp.getCachedSize());
				out.write(zbuf);
				typeStats.cntDeltas++;
				typeStats.deltaBytes += out.length() - otp.getOffset();
				return;
			}
		}

		// Slow path: recompute the delta and deflate it now.
		try (TemporaryBuffer.Heap delta = delta(otp)) {
			out.writeHeader(otp, delta.length());

			Deflater deflater = deflater();
			deflater.reset();
			DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
			delta.writeTo(dst, null);
			dst.finish();
		}
		typeStats.cntDeltas++;
		typeStats.deltaBytes += out.length() - otp.getOffset();
	}
1659 
1660 	private TemporaryBuffer.Heap delta(final ObjectToPack otp)
1661 			throws IOException {
1662 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1663 		byte[] res = buffer(otp);
1664 
1665 		// We never would have proposed this pair if the delta would be
1666 		// larger than the unpacked version of the object. So using it
1667 		// as our buffer limit is valid: we will never reach it.
1668 		//
1669 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1670 		index.encode(delta, res);
1671 		return delta;
1672 	}
1673 
	/**
	 * Read an object's full content using this writer's reader and config.
	 *
	 * @param objId
	 *            the object to load.
	 * @return the object's complete content as a byte array.
	 * @throws IOException
	 *             the object could not be read.
	 */
	private byte[] buffer(AnyObjectId objId) throws IOException {
		return buffer(config, reader, objId);
	}
1677 
	/**
	 * Read an object's full content into a byte array.
	 *
	 * @param config
	 *            configuration supplying the big file threshold, used as
	 *            the maximum number of bytes to load.
	 * @param or
	 *            reader used to access the object.
	 * @param objId
	 *            the object to load.
	 * @return the object's cached bytes.
	 * @throws IOException
	 *             the object could not be read.
	 */
	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
			throws IOException {
		// PackWriter should have already pruned objects that
		// are above the big file threshold, so our chances of
		// the object being below it are very good. We really
		// shouldn't be here, unless the implementation is odd.

		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
	}
1687 
1688 	private Deflater deflater() {
1689 		if (myDeflater == null)
1690 			myDeflater = new Deflater(config.getCompressionLevel());
1691 		return myDeflater;
1692 	}
1693 
	/**
	 * Append the pack trailer: the digest of all preceding pack data.
	 * The digest is also retained in {@code packcsum} for the index and
	 * bitmap writers.
	 *
	 * @param out
	 *            destination pack stream.
	 * @throws IOException
	 *             the trailer could not be written.
	 */
	private void writeChecksum(PackOutputStream out) throws IOException {
		packcsum = out.getDigest();
		out.write(packcsum);
	}
1698 
	/**
	 * Walks the object graph and schedules every object that must appear in
	 * the pack.
	 * <p>
	 * When a bitmap index is available and usable, the object set is computed
	 * from bitmaps and the full walk is skipped. Otherwise the walk visits
	 * commits first, then the remaining objects, excluding anything matched by
	 * the configured exclusion sets. Counting progress is reported to
	 * {@code countingMonitor}.
	 *
	 * @param countingMonitor
	 *            progress monitor for the counting phase.
	 * @param walker
	 *            the walk used to enumerate objects.
	 * @param want
	 *            objects the resulting pack must deliver.
	 * @param have
	 *            objects the receiving side is assumed to already have.
	 * @param noBitmaps
	 *            objects to exclude from bitmap commit selection.
	 * @throws IOException
	 *             an object could not be read from the repository.
	 */
	private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
			@NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		final long countingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);

		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
		excludeFromBitmapSelection = noBitmaps;

		// Bitmaps can only be built for a full, non-shallow pack with no
		// "have" set and no exclusions.
		canBuildBitmaps = config.isBuildBitmaps()
				&& !shallowPack
				&& have.isEmpty()
				&& (excludeInPacks == null || excludeInPacks.length == 0);
		// Fast path: enumerate the needed objects via the bitmap index.
		if (!shallowPack && useBitmaps) {
			BitmapIndex bitmapIndex = reader.getBitmapIndex();
			if (bitmapIndex != null) {
				PackWriterBitmapWalker bitmapWalker = new PackWriterBitmapWalker(
						walker, bitmapIndex, countingMonitor);
				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
				endPhase(countingMonitor);
				stats.timeCounting = System.currentTimeMillis() - countingStart;
				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
				return;
			}
		}

		List<ObjectId> all = new ArrayList<>(want.size() + have.size());
		all.addAll(want);
		all.addAll(have);

		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$

		walker.carry(include);

		int haveEst = have.size();
		if (have.isEmpty()) {
			walker.sort(RevSort.COMMIT_TIME_DESC);
		} else {
			walker.sort(RevSort.TOPO);
			if (thin)
				walker.sort(RevSort.BOUNDARY, true);
		}

		List<RevObject> wantObjs = new ArrayList<>(want.size());
		List<RevObject> haveObjs = new ArrayList<>(haveEst);
		List<RevTag> wantTags = new ArrayList<>(want.size());

		// Retrieve the RevWalk's versions of "want" and "have" objects to
		// maintain any state previously set in the RevWalk.
		AsyncRevObjectQueue q = walker.parseAny(all, true);
		try {
			for (;;) {
				try {
					RevObject o = q.next();
					if (o == null)
						break;
					if (have.contains(o))
						haveObjs.add(o);
					if (want.contains(o)) {
						o.add(include);
						wantObjs.add(o);
						if (o instanceof RevTag)
							wantTags.add((RevTag) o);
					}
				} catch (MissingObjectException e) {
					// Missing "have" objects are tolerable when configured;
					// a missing "want" is always an error.
					if (ignoreMissingUninteresting
							&& have.contains(e.getObjectId()))
						continue;
					throw e;
				}
			}
		} finally {
			q.release();
		}

		// Pre-parse the objects the wanted tags point at.
		if (!wantTags.isEmpty()) {
			all = new ArrayList<>(wantTags.size());
			for (RevTag tag : wantTags)
				all.add(tag.getObject());
			q = walker.parseAny(all, true);
			try {
				while (q.next() != null) {
					// Just need to pop the queue item to parse the object.
				}
			} finally {
				q.release();
			}
		}

		if (walker instanceof DepthWalk.ObjectWalk) {
			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
			for (RevObject obj : wantObjs) {
				depthWalk.markRoot(obj);
			}
			// Mark the tree objects associated with "have" commits as
			// uninteresting to avoid writing redundant blobs. A normal RevWalk
			// lazily propagates the "uninteresting" state from a commit to its
			// tree during the walk, but DepthWalks can terminate early so
			// preemptively propagate that state here.
			for (RevObject obj : haveObjs) {
				if (obj instanceof RevCommit) {
					RevTree t = ((RevCommit) obj).getTree();
					depthWalk.markUninteresting(t);
				}
			}

			if (unshallowObjects != null) {
				for (ObjectId id : unshallowObjects) {
					depthWalk.markUnshallow(walker.parseAny(id));
				}
			}
		} else {
			for (RevObject obj : wantObjs)
				walker.markStart(obj);
		}
		for (RevObject obj : haveObjs)
			walker.markUninteresting(obj);

		// First pass: walk commits, collecting boundary trees (capped at the
		// delta search window size) for later thin-pack base discovery.
		final int maxBases = config.getDeltaSearchWindowSize();
		Set<RevTree> baseTrees = new HashSet<>();
		BlockList<RevCommit> commits = new BlockList<>();
		Set<ObjectId> roots = new HashSet<>();
		RevCommit c;
		while ((c = walker.next()) != null) {
			if (exclude(c))
				continue;
			if (c.has(RevFlag.UNINTERESTING)) {
				if (baseTrees.size() <= maxBases)
					baseTrees.add(c.getTree());
				continue;
			}

			commits.add(c);
			if (c.getParentCount() == 0) {
				roots.add(c.copy());
			}
			countingMonitor.update(1);
		}
		stats.rootCommits = Collections.unmodifiableSet(roots);

		if (shallowPack) {
			for (RevCommit cmit : commits) {
				addObject(cmit, 0);
			}
		} else {
			int commitCnt = 0;
			boolean putTagTargets = false;
			for (RevCommit cmit : commits) {
				if (!cmit.has(added)) {
					cmit.add(added);
					addObject(cmit, 0);
					commitCnt++;
				}

				for (int i = 0; i < cmit.getParentCount(); i++) {
					RevCommit p = cmit.getParent(i);
					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
							&& !exclude(p)) {
						p.add(added);
						addObject(p, 0);
						commitCnt++;
					}
				}

				// Once enough commits have been seen, fold in any tag
				// targets that are included and not yet added.
				if (!putTagTargets && 4096 < commitCnt) {
					for (ObjectId id : tagTargets) {
						RevObject obj = walker.lookupOrNull(id);
						if (obj instanceof RevCommit
								&& obj.has(include)
								&& !obj.has(RevFlag.UNINTERESTING)
								&& !obj.has(added)) {
							obj.add(added);
							addObject(obj, 0);
						}
					}
					putTagTargets = true;
				}
			}
		}
		commits = null;

		// Second pass: walk the remaining (non-commit) objects. For thin
		// packs, also record potential delta bases from the boundary trees.
		if (thin && !baseTrees.isEmpty()) {
			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
					objectsMap, edgeObjects, reader);
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;

				int pathHash = walker.getPathHashCode();
				byte[] pathBuf = walker.getPathBuffer();
				int pathLen = walker.getPathLength();
				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
				addObject(o, pathHash);
				countingMonitor.update(1);
			}
		} else {
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;
				addObject(o, walker.getPathHashCode());
				countingMonitor.update(1);
			}
		}

		// Objects delivered via cached packs count toward progress too.
		for (CachedPack pack : cachedPacks)
			countingMonitor.update((int) pack.getObjectCount());
		endPhase(countingMonitor);
		stats.timeCounting = System.currentTimeMillis() - countingStart;
		stats.bitmapIndexMisses = -1;
	}
1918 
	/**
	 * Computes the set of objects to pack using the bitmap index.
	 * <p>
	 * The needed set is {@code want} minus {@code have}, both expanded to
	 * full object bitmaps. When permitted, cached packs are selected and
	 * subtracted from the needed set before individual objects are added.
	 *
	 * @param bitmapWalker
	 *            walker over the bitmap index.
	 * @param want
	 *            objects the resulting pack must deliver.
	 * @param have
	 *            objects the receiving side is assumed to already have.
	 * @throws MissingObjectException
	 *             a referenced object does not exist.
	 * @throws IncorrectObjectTypeException
	 *             a referenced object has an unexpected type.
	 * @throws IOException
	 *             the bitmap index or an object could not be read.
	 */
	private void findObjectsToPackUsingBitmaps(
			PackWriterBitmapWalker bitmapWalker, Set<? extends ObjectId> want,
			Set<? extends ObjectId> have)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
				false);
		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);

		if (useCachedPacks && reuseSupport != null && !reuseValidate
				&& (excludeInPacks == null || excludeInPacks.length == 0))
			cachedPacks.addAll(
					reuseSupport.getCachedPacksAndUpdate(needBitmap));

		for (BitmapObject obj : needBitmap) {
			ObjectId objectId = obj.getObjectId();
			if (exclude(objectId)) {
				// Drop excluded objects from the bitmap as well.
				needBitmap.remove(objectId);
				continue;
			}
			addObject(objectId, obj.getType(), 0);
		}

		// Remember the peer's objects: thin packs may delta against them.
		if (thin)
			haveObjects = haveBitmap;
	}
1946 
1947 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
1948 		final int size = list.size();
1949 		int src = 0;
1950 		int dst = 0;
1951 
1952 		for (; src < size; src++) {
1953 			ObjectToPack obj = list.get(src);
1954 			if (obj.isEdge())
1955 				continue;
1956 			if (dst != src)
1957 				list.set(dst, obj);
1958 			dst++;
1959 		}
1960 
1961 		while (dst < list.size())
1962 			list.remove(list.size() - 1);
1963 	}
1964 
1965 	/**
1966 	 * Include one object to the output file.
1967 	 * <p>
1968 	 * Objects are written in the order they are added. If the same object is
1969 	 * added twice, it may be written twice, creating a larger than necessary
1970 	 * file.
1971 	 *
1972 	 * @param object
1973 	 *            the object to add.
1974 	 * @throws IncorrectObjectTypeException
1975 	 *             the object is an unsupported type.
1976 	 */
1977 	public void addObject(final RevObject object)
1978 			throws IncorrectObjectTypeException {
1979 		if (!exclude(object))
1980 			addObject(object, 0);
1981 	}
1982 
	/**
	 * Adds an object using its own type code.
	 *
	 * @param object
	 *            the object to add.
	 * @param pathHashCode
	 *            hash of the path the object was found at, or 0 if unknown.
	 */
	private void addObject(final RevObject object, final int pathHashCode) {
		addObject(object, object.getType(), pathHashCode);
	}
1986 
1987 	private void addObject(
1988 			final AnyObjectId src, final int type, final int pathHashCode) {
1989 		final ObjectToPack otp;
1990 		if (reuseSupport != null)
1991 			otp = reuseSupport.newObjectToPack(src, type);
1992 		else
1993 			otp = new ObjectToPack(src, type);
1994 		otp.setPathHash(pathHashCode);
1995 		objectsLists[type].add(otp);
1996 		objectsMap.add(otp);
1997 	}
1998 
1999 	private boolean exclude(AnyObjectId objectId) {
2000 		if (excludeInPacks == null)
2001 			return false;
2002 		if (excludeInPackLast.contains(objectId))
2003 			return true;
2004 		for (ObjectIdSet idx : excludeInPacks) {
2005 			if (idx.contains(objectId)) {
2006 				excludeInPackLast = idx;
2007 				return true;
2008 			}
2009 		}
2010 		return false;
2011 	}
2012 
2013 	/**
2014 	 * Select an object representation for this writer.
2015 	 * <p>
2016 	 * An {@link ObjectReader} implementation should invoke this method once for
2017 	 * each representation available for an object, to allow the writer to find
2018 	 * the most suitable one for the output.
2019 	 *
2020 	 * @param otp
2021 	 *            the object being packed.
2022 	 * @param next
2023 	 *            the next available representation from the repository.
2024 	 */
2025 	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
2026 		int nFmt = next.getFormat();
2027 
2028 		if (!cachedPacks.isEmpty()) {
2029 			if (otp.isEdge())
2030 				return;
2031 			if ((nFmt == PACK_WHOLE) | (nFmt == PACK_DELTA)) {
2032 				for (CachedPack pack : cachedPacks) {
2033 					if (pack.hasObject(otp, next)) {
2034 						otp.setEdge();
2035 						otp.clearDeltaBase();
2036 						otp.clearReuseAsIs();
2037 						pruneCurrentObjectList = true;
2038 						return;
2039 					}
2040 				}
2041 			}
2042 		}
2043 
2044 		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
2045 			ObjectId baseId = next.getDeltaBase();
2046 			ObjectToPack ptr = objectsMap.get(baseId);
2047 			if (ptr != null && !ptr.isEdge()) {
2048 				otp.setDeltaBase(ptr);
2049 				otp.setReuseAsIs();
2050 			} else if (thin && have(ptr, baseId)) {
2051 				otp.setDeltaBase(baseId);
2052 				otp.setReuseAsIs();
2053 			} else {
2054 				otp.clearDeltaBase();
2055 				otp.clearReuseAsIs();
2056 			}
2057 		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
2058 			int nWeight = next.getWeight();
2059 			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
2060 				// We've chosen another PACK_WHOLE format for this object,
2061 				// choose the one that has the smaller compressed size.
2062 				//
2063 				if (otp.getWeight() <= nWeight)
2064 					return;
2065 			}
2066 			otp.clearDeltaBase();
2067 			otp.setReuseAsIs();
2068 			otp.setWeight(nWeight);
2069 		} else {
2070 			otp.clearDeltaBase();
2071 			otp.clearReuseAsIs();
2072 		}
2073 
2074 		otp.setDeltaAttempted(reuseDeltas & next.wasDeltaAttempted());
2075 		otp.select(next);
2076 	}
2077 
2078 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
2079 		return (ptr != null && ptr.isEdge())
2080 				|| (haveObjects != null && haveObjects.contains(objectId));
2081 	}
2082 
2083 	/**
2084 	 * Prepares the bitmaps to be written to the bitmap index file.
2085 	 * <p>
2086 	 * Bitmaps can be used to speed up fetches and clones by storing the entire
2087 	 * object graph at selected commits. Writing a bitmap index is an optional
2088 	 * feature that not all pack users may require.
2089 	 * <p>
2090 	 * Called after {@link #writeIndex(OutputStream)}.
2091 	 * <p>
2092 	 * To reduce memory internal state is cleared during this method, rendering
2093 	 * the PackWriter instance useless for anything further than a call to write
2094 	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
2095 	 *
2096 	 * @param pm
2097 	 *            progress monitor to report bitmap building work.
2098 	 * @return whether a bitmap index may be written.
2099 	 * @throws IOException
2100 	 *             when some I/O problem occur during reading objects.
2101 	 */
2102 	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
2103 		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
2104 				|| !cachedPacks.isEmpty())
2105 			return false;
2106 
2107 		if (pm == null)
2108 			pm = NullProgressMonitor.INSTANCE;
2109 
2110 		int numCommits = objectsLists[OBJ_COMMIT].size();
2111 		List<ObjectToPack> byName = sortByName();
2112 		sortedByName = null;
2113 		objectsLists = null;
2114 		objectsMap = null;
2115 		writeBitmaps = new PackBitmapIndexBuilder(byName);
2116 		byName = null;
2117 
2118 		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
2119 				reader, writeBitmaps, pm, stats.interestingObjects, config);
2120 
2121 		Collection<PackWriterBitmapPreparer.BitmapCommit> selectedCommits = bitmapPreparer
2122 				.selectCommits(numCommits, excludeFromBitmapSelection);
2123 
2124 		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());
2125 
2126 		PackWriterBitmapWalker walker = bitmapPreparer.newBitmapWalker();
2127 		AnyObjectId last = null;
2128 		for (PackWriterBitmapPreparer.BitmapCommit cmit : selectedCommits) {
2129 			if (!cmit.isReuseWalker()) {
2130 				walker = bitmapPreparer.newBitmapWalker();
2131 			}
2132 			BitmapBuilder bitmap = walker.findObjects(
2133 					Collections.singleton(cmit), null, false);
2134 
2135 			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
2136 				throw new IllegalStateException(MessageFormat.format(
2137 						JGitText.get().bitmapMissingObject, cmit.name(),
2138 						last.name()));
2139 			last = cmit;
2140 			writeBitmaps.addBitmap(cmit, bitmap.build(), cmit.getFlags());
2141 
2142 			pm.update(1);
2143 		}
2144 
2145 		endPhase(pm);
2146 		return true;
2147 	}
2148 
2149 	private boolean reuseDeltaFor(ObjectToPack otp) {
2150 		int type = otp.getType();
2151 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2152 			return true;
2153 		if (type == OBJ_COMMIT)
2154 			return reuseDeltaCommits;
2155 		if (type == OBJ_TAG)
2156 			return false;
2157 		return true;
2158 	}
2159 
2160 	/**
2161 	 * Summary of how PackWriter created the pack.
2162 	 *
2163 	 * @deprecated Use {@link PackStatistics} instead.
2164 	 */
2165 	@Deprecated
2166 	public static class Statistics {
2167 		/** Statistics about a single class of object. */
2168 		public static class ObjectType {
2169 			// All requests are forwarded to this object.
2170 			private PackStatistics.ObjectType objectType;
2171 
2172 			/**
2173 			 * Wraps an
2174 			 * {@link org.eclipse.jgit.storage.pack.PackStatistics.ObjectType}
2175 			 * instance to maintain backwards compatibility with existing API.
2176 			 *
2177 			 * @param type
2178 			 *            the wrapped instance
2179 			 */
2180 			public ObjectType(PackStatistics.ObjectType type) {
2181 				objectType = type;
2182 			}
2183 
2184 			/**
2185 			 * @return total number of objects output. This total includes the
2186 			 *         value of {@link #getDeltas()}.
2187 			 */
2188 			public long getObjects() {
2189 				return objectType.getObjects();
2190 			}
2191 
2192 			/**
2193 			 * @return total number of deltas output. This may be lower than the
2194 			 *         actual number of deltas if a cached pack was reused.
2195 			 */
2196 			public long getDeltas() {
2197 				return objectType.getDeltas();
2198 			}
2199 
2200 			/**
2201 			 * @return number of objects whose existing representation was
2202 			 *         reused in the output. This count includes
2203 			 *         {@link #getReusedDeltas()}.
2204 			 */
2205 			public long getReusedObjects() {
2206 				return objectType.getReusedObjects();
2207 			}
2208 
2209 			/**
2210 			 * @return number of deltas whose existing representation was reused
2211 			 *         in the output, as their base object was also output or
2212 			 *         was assumed present for a thin pack. This may be lower
2213 			 *         than the actual number of reused deltas if a cached pack
2214 			 *         was reused.
2215 			 */
2216 			public long getReusedDeltas() {
2217 				return objectType.getReusedDeltas();
2218 			}
2219 
2220 			/**
2221 			 * @return total number of bytes written. This size includes the
2222 			 *         object headers as well as the compressed data. This size
2223 			 *         also includes all of {@link #getDeltaBytes()}.
2224 			 */
2225 			public long getBytes() {
2226 				return objectType.getBytes();
2227 			}
2228 
2229 			/**
2230 			 * @return number of delta bytes written. This size includes the
2231 			 *         object headers for the delta objects.
2232 			 */
2233 			public long getDeltaBytes() {
2234 				return objectType.getDeltaBytes();
2235 			}
2236 		}
2237 
2238 		// All requests are forwarded to this object.
2239 		private PackStatistics statistics;
2240 
2241 		/**
2242 		 * Wraps a {@link PackStatistics} object to maintain backwards
2243 		 * compatibility with existing API.
2244 		 *
2245 		 * @param stats
2246 		 *            the wrapped PackStatitics object
2247 		 */
2248 		public Statistics(PackStatistics stats) {
2249 			statistics = stats;
2250 		}
2251 
2252 		/**
2253 		 * @return unmodifiable collection of objects to be included in the
2254 		 *         pack. May be null if the pack was hand-crafted in a unit
2255 		 *         test.
2256 		 */
2257 		public Set<ObjectId> getInterestingObjects() {
2258 			return statistics.getInterestingObjects();
2259 		}
2260 
2261 		/**
2262 		 * @return unmodifiable collection of objects that should be excluded
2263 		 *         from the pack, as the peer that will receive the pack already
2264 		 *         has these objects.
2265 		 */
2266 		public Set<ObjectId> getUninterestingObjects() {
2267 			return statistics.getUninterestingObjects();
2268 		}
2269 
2270 		/**
2271 		 * @return unmodifiable collection of the cached packs that were reused
2272 		 *         in the output, if any were selected for reuse.
2273 		 */
2274 		public Collection<CachedPack> getReusedPacks() {
2275 			return statistics.getReusedPacks();
2276 		}
2277 
2278 		/**
2279 		 * @return number of objects in the output pack that went through the
2280 		 *         delta search process in order to find a potential delta base.
2281 		 */
2282 		public int getDeltaSearchNonEdgeObjects() {
2283 			return statistics.getDeltaSearchNonEdgeObjects();
2284 		}
2285 
2286 		/**
2287 		 * @return number of objects in the output pack that went through delta
2288 		 *         base search and found a suitable base. This is a subset of
2289 		 *         {@link #getDeltaSearchNonEdgeObjects()}.
2290 		 */
2291 		public int getDeltasFound() {
2292 			return statistics.getDeltasFound();
2293 		}
2294 
2295 		/**
2296 		 * @return total number of objects output. This total includes the value
2297 		 *         of {@link #getTotalDeltas()}.
2298 		 */
2299 		public long getTotalObjects() {
2300 			return statistics.getTotalObjects();
2301 		}
2302 
2303 		/**
2304 		 * @return the count of objects that needed to be discovered through an
2305 		 *         object walk because they were not found in bitmap indices.
2306 		 *         Returns -1 if no bitmap indices were found.
2307 		 */
2308 		public long getBitmapIndexMisses() {
2309 			return statistics.getBitmapIndexMisses();
2310 		}
2311 
2312 		/**
2313 		 * @return total number of deltas output. This may be lower than the
2314 		 *         actual number of deltas if a cached pack was reused.
2315 		 */
2316 		public long getTotalDeltas() {
2317 			return statistics.getTotalDeltas();
2318 		}
2319 
2320 		/**
2321 		 * @return number of objects whose existing representation was reused in
2322 		 *         the output. This count includes {@link #getReusedDeltas()}.
2323 		 */
2324 		public long getReusedObjects() {
2325 			return statistics.getReusedObjects();
2326 		}
2327 
2328 		/**
2329 		 * @return number of deltas whose existing representation was reused in
2330 		 *         the output, as their base object was also output or was
2331 		 *         assumed present for a thin pack. This may be lower than the
2332 		 *         actual number of reused deltas if a cached pack was reused.
2333 		 */
2334 		public long getReusedDeltas() {
2335 			return statistics.getReusedDeltas();
2336 		}
2337 
2338 		/**
2339 		 * @return total number of bytes written. This size includes the pack
2340 		 *         header, trailer, thin pack, and reused cached pack(s).
2341 		 */
2342 		public long getTotalBytes() {
2343 			return statistics.getTotalBytes();
2344 		}
2345 
2346 		/**
2347 		 * @return size of the thin pack in bytes, if a thin pack was generated.
2348 		 *         A thin pack is created when the client already has objects
2349 		 *         and some deltas are created against those objects, or if a
2350 		 *         cached pack is being used and some deltas will reference
2351 		 *         objects in the cached pack. This size does not include the
2352 		 *         pack header or trailer.
2353 		 */
2354 		public long getThinPackBytes() {
2355 			return statistics.getThinPackBytes();
2356 		}
2357 
2358 		/**
2359 		 * @param typeCode
2360 		 *            object type code, e.g. OBJ_COMMIT or OBJ_TREE.
2361 		 * @return information about this type of object in the pack.
2362 		 */
2363 		public ObjectType byObjectType(int typeCode) {
2364 			return new ObjectType(statistics.byObjectType(typeCode));
2365 		}
2366 
2367 		/** @return true if the resulting pack file was a shallow pack. */
2368 		public boolean isShallow() {
2369 			return statistics.isShallow();
2370 		}
2371 
2372 		/** @return depth (in commits) the pack includes if shallow. */
2373 		public int getDepth() {
2374 			return statistics.getDepth();
2375 		}
2376 
2377 		/**
2378 		 * @return time in milliseconds spent enumerating the objects that need
2379 		 *         to be included in the output. This time includes any restarts
2380 		 *         that occur when a cached pack is selected for reuse.
2381 		 */
2382 		public long getTimeCounting() {
2383 			return statistics.getTimeCounting();
2384 		}
2385 
2386 		/**
2387 		 * @return time in milliseconds spent matching existing representations
2388 		 *         against objects that will be transmitted, or that the client
2389 		 *         can be assumed to already have.
2390 		 */
2391 		public long getTimeSearchingForReuse() {
2392 			return statistics.getTimeSearchingForReuse();
2393 		}
2394 
2395 		/**
2396 		 * @return time in milliseconds spent finding the sizes of all objects
2397 		 *         that will enter the delta compression search window. The
2398 		 *         sizes need to be known to better match similar objects
2399 		 *         together and improve delta compression ratios.
2400 		 */
2401 		public long getTimeSearchingForSizes() {
2402 			return statistics.getTimeSearchingForSizes();
2403 		}
2404 
2405 		/**
2406 		 * @return time in milliseconds spent on delta compression. This is
2407 		 *         observed wall-clock time and does not accurately track CPU
2408 		 *         time used when multiple threads were used to perform the
2409 		 *         delta compression.
2410 		 */
2411 		public long getTimeCompressing() {
2412 			return statistics.getTimeCompressing();
2413 		}
2414 
2415 		/**
2416 		 * @return time in milliseconds spent writing the pack output, from
2417 		 *         start of header until end of trailer. The transfer speed can
2418 		 *         be approximated by dividing {@link #getTotalBytes()} by this
2419 		 *         value.
2420 		 */
2421 		public long getTimeWriting() {
2422 			return statistics.getTimeWriting();
2423 		}
2424 
2425 		/** @return total time spent processing this pack. */
2426 		public long getTimeTotal() {
2427 			return statistics.getTimeTotal();
2428 		}
2429 
2430 		/**
2431 		 * @return get the average output speed in terms of bytes-per-second.
2432 		 *         {@code getTotalBytes() / (getTimeWriting() / 1000.0)}.
2433 		 */
2434 		public double getTransferRate() {
2435 			return statistics.getTransferRate();
2436 		}
2437 
2438 		/** @return formatted message string for display to clients. */
2439 		public String getMessage() {
2440 			return statistics.getMessage();
2441 		}
2442 	}
2443 
	/**
	 * Mutable accounting of the writer's progress, able to produce immutable
	 * {@link State} snapshots of the current phase and estimated memory use.
	 */
	private class MutableState {
		/** Estimated size of a single ObjectToPack instance. */
		// Assume 64-bit pointers, since this is just an estimate.
		private static final long OBJECT_TO_PACK_SIZE =
				(2 * 8)               // Object header
				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
				+ (8 + 8)             // PackedObjectInfo fields
				+ 8                   // ObjectIdOwnerMap fields
				+ 40                  // AnyObjectId fields
				+ 8;                  // Reference in BlockList

		// Bytes reserved for delta search; 0 when delta compression is off.
		private final long totalDeltaSearchBytes;

		// volatile: may be updated and read by different threads.
		private volatile PackingPhase phase;

		MutableState() {
			phase = PackingPhase.COUNTING;
			if (config.isDeltaCompress()) {
				// Each delta search thread gets its own memory window.
				int threads = config.getThreads();
				if (threads <= 0)
					threads = Runtime.getRuntime().availableProcessors();
				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
						+ config.getBigFileThreshold();
			} else
				totalDeltaSearchBytes = 0;
		}

		/**
		 * @return an immutable snapshot of the current phase and an estimate
		 *         of the memory held by queued objects.
		 */
		State snapshot() {
			long objCnt = 0;
			// Read objectsLists once; it may be nulled by prepareBitmapIndex.
			BlockList<ObjectToPack>[] lists = objectsLists;
			if (lists != null) {
				objCnt += lists[OBJ_COMMIT].size();
				objCnt += lists[OBJ_TREE].size();
				objCnt += lists[OBJ_BLOB].size();
				objCnt += lists[OBJ_TAG].size();
				// Exclude CachedPacks.
			}

			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
			PackingPhase curr = phase;
			if (curr == PackingPhase.COMPRESSING)
				bytesUsed += totalDeltaSearchBytes;
			return new State(curr, bytesUsed);
		}
	}
2489 
2490 	/** Possible states that a PackWriter can be in. */
2491 	public static enum PackingPhase {
2492 		/** Counting objects phase. */
2493 		COUNTING,
2494 
2495 		/** Getting sizes phase. */
2496 		GETTING_SIZES,
2497 
2498 		/** Finding sources phase. */
2499 		FINDING_SOURCES,
2500 
2501 		/** Compressing objects phase. */
2502 		COMPRESSING,
2503 
2504 		/** Writing objects phase. */
2505 		WRITING,
2506 
2507 		/** Building bitmaps phase. */
2508 		BUILDING_BITMAPS;
2509 	}
2510 
2511 	/** Summary of the current state of a PackWriter. */
2512 	public class State {
2513 		private final PackingPhase phase;
2514 
2515 		private final long bytesUsed;
2516 
2517 		State(PackingPhase phase, long bytesUsed) {
2518 			this.phase = phase;
2519 			this.bytesUsed = bytesUsed;
2520 		}
2521 
2522 		/** @return the PackConfig used to build the writer. */
2523 		public PackConfig getConfig() {
2524 			return config;
2525 		}
2526 
2527 		/** @return the current phase of the writer. */
2528 		public PackingPhase getPhase() {
2529 			return phase;
2530 		}
2531 
2532 		/** @return an estimate of the total memory used by the writer. */
2533 		public long estimateBytesUsed() {
2534 			return bytesUsed;
2535 		}
2536 
2537 		@SuppressWarnings("nls")
2538 		@Override
2539 		public String toString() {
2540 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2541 		}
2542 	}
2543 }