View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.pack;
46  
47  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
48  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
49  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
50  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
51  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
52  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
53  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
54  
55  import java.io.IOException;
56  import java.io.OutputStream;
57  import java.lang.ref.WeakReference;
58  import java.security.MessageDigest;
59  import java.text.MessageFormat;
60  import java.util.ArrayList;
61  import java.util.Arrays;
62  import java.util.Collection;
63  import java.util.Collections;
64  import java.util.Comparator;
65  import java.util.HashSet;
66  import java.util.Iterator;
67  import java.util.List;
68  import java.util.Map;
69  import java.util.NoSuchElementException;
70  import java.util.Set;
71  import java.util.concurrent.ConcurrentHashMap;
72  import java.util.concurrent.ExecutionException;
73  import java.util.concurrent.Executor;
74  import java.util.concurrent.ExecutorService;
75  import java.util.concurrent.Executors;
76  import java.util.concurrent.Future;
77  import java.util.concurrent.TimeUnit;
78  import java.util.zip.CRC32;
79  import java.util.zip.CheckedOutputStream;
80  import java.util.zip.Deflater;
81  import java.util.zip.DeflaterOutputStream;
82  
83  import org.eclipse.jgit.annotations.NonNull;
84  import org.eclipse.jgit.annotations.Nullable;
85  import org.eclipse.jgit.errors.CorruptObjectException;
86  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
87  import org.eclipse.jgit.errors.LargeObjectException;
88  import org.eclipse.jgit.errors.MissingObjectException;
89  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
90  import org.eclipse.jgit.internal.JGitText;
91  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
92  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
93  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
94  import org.eclipse.jgit.lib.AnyObjectId;
95  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
96  import org.eclipse.jgit.lib.BatchingProgressMonitor;
97  import org.eclipse.jgit.lib.BitmapIndex;
98  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
99  import org.eclipse.jgit.lib.BitmapObject;
100 import org.eclipse.jgit.lib.Constants;
101 import org.eclipse.jgit.lib.NullProgressMonitor;
102 import org.eclipse.jgit.lib.ObjectId;
103 import org.eclipse.jgit.lib.ObjectIdOwnerMap;
104 import org.eclipse.jgit.lib.ObjectIdSet;
105 import org.eclipse.jgit.lib.ObjectLoader;
106 import org.eclipse.jgit.lib.ObjectReader;
107 import org.eclipse.jgit.lib.ProgressMonitor;
108 import org.eclipse.jgit.lib.Repository;
109 import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
110 import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
111 import org.eclipse.jgit.revwalk.BitmapWalker;
112 import org.eclipse.jgit.revwalk.DepthWalk;
113 import org.eclipse.jgit.revwalk.ObjectWalk;
114 import org.eclipse.jgit.revwalk.RevCommit;
115 import org.eclipse.jgit.revwalk.RevFlag;
116 import org.eclipse.jgit.revwalk.RevObject;
117 import org.eclipse.jgit.revwalk.RevSort;
118 import org.eclipse.jgit.revwalk.RevTag;
119 import org.eclipse.jgit.revwalk.RevTree;
120 import org.eclipse.jgit.storage.pack.PackConfig;
121 import org.eclipse.jgit.storage.pack.PackStatistics;
122 import org.eclipse.jgit.transport.ObjectCountCallback;
123 import org.eclipse.jgit.transport.WriteAbortedException;
124 import org.eclipse.jgit.util.BlockList;
125 import org.eclipse.jgit.util.TemporaryBuffer;
126 
127 /**
128  * <p>
 * PackWriter class is responsible for generating pack files from a specified
 * set of objects from a repository. This implementation produces pack files in
 * format version 2.
132  * </p>
133  * <p>
134  * Source of objects may be specified in two ways:
135  * <ul>
136  * <li>(usually) by providing sets of interesting and uninteresting objects in
137  * repository - all interesting objects and their ancestors except uninteresting
138  * objects and their ancestors will be included in pack, or</li>
139  * <li>by providing iterator of {@link org.eclipse.jgit.revwalk.RevObject}
140  * specifying exact list and order of objects in pack</li>
141  * </ul>
142  * <p>
143  * Typical usage consists of creating an instance, configuring options,
144  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
145  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
146  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
147  * pack is being stored as a file the matching index can be written out after
148  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
149  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
150  * followed by {@link #writeBitmapIndex(OutputStream)}.
151  * </p>
152  * <p>
 * This class provides a set of configurable options and
 * {@link org.eclipse.jgit.lib.ProgressMonitor} support, as operations may take
 * a long time for big repositories. A delta search algorithm is <b>NOT
 * IMPLEMENTED</b> yet - this implementation relies only on delta and object
 * reuse.
158  * </p>
159  * <p>
160  * This class is not thread safe. It is intended to be used in one thread as a
161  * single pass to produce one pack. Invoking methods multiple times or out of
162  * order is not supported as internal data structures are destroyed during
163  * certain phases to save memory when packing large repositories.
164  * </p>
165  */
166 public class PackWriter implements AutoCloseable {
	/** This writer produces only pack format version 2. */
	private static final int PACK_VERSION_GENERATED = 2;

	/** Empty set of objects for {@code preparePack()}. */
	public static final Set<ObjectId> NONE = Collections.emptySet();

	// Registry of live writers, keyed by weak reference so entries can be
	// reclaimed after a writer becomes unreachable; the Boolean value is a
	// placeholder (always TRUE).
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<>();
174 
	// Iterable view over the live writers registered in "instances". The
	// iterator dereferences each weak key lazily and, as a side effect,
	// prunes entries whose writer has already been garbage collected.
	private static final Iterable<PackWriter> instancesIterable = new Iterable<PackWriter>() {
		@Override
		public Iterator<PackWriter> iterator() {
			return new Iterator<PackWriter>() {
				private final Iterator<WeakReference<PackWriter>> it =
						instances.keySet().iterator();
				// Next live writer found by hasNext(), consumed by next().
				private PackWriter next;

				@Override
				public boolean hasNext() {
					if (next != null)
						return true;
					// Advance past references cleared by the collector,
					// removing their now-useless map entries as we go.
					while (it.hasNext()) {
						WeakReference<PackWriter> ref = it.next();
						next = ref.get();
						if (next != null)
							return true;
						it.remove();
					}
					return false;
				}

				@Override
				public PackWriter next() {
					if (hasNext()) {
						PackWriter result = next;
						next = null;
						return result;
					}
					throw new NoSuchElementException();
				}

				@Override
				public void remove() {
					// Callers may not unregister writers through this view.
					throw new UnsupportedOperationException();
				}
			};
		}
	};
214 
	/**
	 * Get all allocated, non-released PackWriter instances.
	 * <p>
	 * Instances are tracked through weak references, so writers that have
	 * already been garbage collected are silently skipped during iteration.
	 *
	 * @return all allocated, non-released PackWriter instances.
	 */
	public static Iterable<PackWriter> getInstances() {
		return instancesIterable;
	}
223 
	// One list per object type, indexed by the Constants.OBJ_* value;
	// OBJ_TAG is the largest type constant used, hence size OBJ_TAG + 1.
	// Slots for unused type codes remain null.
	@SuppressWarnings("unchecked")
	BlockList<ObjectToPack> objectsLists[] = new BlockList[OBJ_TAG + 1];
	{
		objectsLists[OBJ_COMMIT] = new BlockList<>();
		objectsLists[OBJ_TREE] = new BlockList<>();
		objectsLists[OBJ_BLOB] = new BlockList<>();
		objectsLists[OBJ_TAG] = new BlockList<>();
	}
232 
	// Id-keyed view of every object queued for packing; mirrors the
	// contents of objectsLists.
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	// Pre-existing packs appended to the output instead of being
	// re-encoded; when non-empty, index creation is disabled
	// (see isIndexDisabled()).
	private List<CachedPack> cachedPacks = new ArrayList<>(2);

	// Targets of annotated tags, hoisted earlier in the pack for better
	// locality (see setTagTargets(Set)).
	private Set<ObjectId> tagTargets = NONE;

	// Commits to leave out of bitmap commit selection.
	private Set<? extends ObjectId> excludeFromBitmapSelection = NONE;

	// Sets of objects that must not appear in the output; null until
	// excludeObjects(ObjectIdSet) is first called.
	private ObjectIdSet[] excludeInPacks;

	// Most recently matched exclude set; presumably consulted first as a
	// cache - TODO confirm against the exclusion lookup (not in view).
	private ObjectIdSet excludeInPackLast;

	// Deflater owned by this writer for compressing object data.
	private Deflater myDeflater;

	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	final PackConfig config;

	// Collects statistics about the pack as it is prepared and written.
	private final PackStatistics.Accumulator stats;

	private final MutableState state;

	// Weak handle registered in the static "instances" map; enables
	// getInstances() to enumerate live writers.
	private final WeakReference<PackWriter> selfRef;

	// Statistics bucket for the object type currently being processed.
	private PackStatistics.ObjectType.Accumulator typeStats;

	// NOTE(review): likely objects ordered by name for index writing -
	// confirm where this is populated (outside this view).
	private List<ObjectToPack> sortedByName;

	// Checksum of the generated pack.
	private byte packcsum[];

	private boolean deltaBaseAsOffset;

	private boolean reuseDeltas;

	private boolean reuseDeltaCommits;

	// Validate reused objects before copying them (paranoid by default;
	// see the constructor).
	private boolean reuseValidate;

	// Producing a thin pack: delta bases may live outside the pack.
	private boolean thin;

	private boolean useCachedPacks;

	private boolean useBitmaps;

	// Quietly skip uninteresting objects that are absent locally.
	private boolean ignoreMissingUninteresting = true;

	private boolean pruneCurrentObjectList;

	// Shallow-clone support; see setShallowPack(int, Collection).
	private boolean shallowPack;

	private boolean canBuildBitmaps;

	private boolean indexDisabled;

	// Maximum history depth for a shallow pack; 1 means "wants" only.
	private int depth;

	// Objects that were shallow on the client but are being extended as
	// part of this fetch.
	private Collection<? extends ObjectId> unshallowObjects;

	// Bitmap index under construction; when present it also supplies the
	// object set (see getObjectSet()).
	private PackBitmapIndexBuilder writeBitmaps;

	private CRC32 crc32;

	// Callback receiving the object count; see setObjectCountCallback().
	private ObjectCountCallback callback;

	// Blob size threshold set by setFilterBlobLimit(long); default -1
	// presumably means "no filtering" - TODO confirm at the use site.
	private long filterBlobLimit = -1;
307 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * A {@link PackConfig} derived from the repository's settings is used,
	 * and a new reader is opened from {@code repo}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 */
	public PackWriter(Repository repo) {
		this(repo, repo.newObjectReader());
	}
320 
	/**
	 * Create a writer to load objects from the specified reader.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * A default {@link PackConfig} (no repository settings) is used.
	 *
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(ObjectReader reader) {
		this(new PackConfig(), reader);
	}
333 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * A {@link PackConfig} derived from the repository's settings is used.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(Repository repo, ObjectReader reader) {
		this(new PackConfig(repo), reader);
	}
348 
	/**
	 * Create writer with a specified configuration.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * No external statistics accumulator is attached; a fresh one is created
	 * internally.
	 *
	 * @param config
	 *            configuration for the pack writer.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(PackConfig config, ObjectReader reader) {
		this(config, reader, null);
	}
363 
364 	/**
365 	 * Create writer with a specified configuration.
366 	 * <p>
367 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
368 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
369 	 *
370 	 * @param config
371 	 *            configuration for the pack writer.
372 	 * @param reader
373 	 *            reader to read from the repository with.
374 	 * @param statsAccumulator
375 	 *            accumulator for statics
376 	 */
377 	public PackWriter(PackConfig config, final ObjectReader reader,
378 			@Nullable PackStatistics.Accumulator statsAccumulator) {
379 		this.config = config;
380 		this.reader = reader;
381 		if (reader instanceof ObjectReuseAsIs)
382 			reuseSupport = ((ObjectReuseAsIs) reader);
383 		else
384 			reuseSupport = null;
385 
386 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
387 		reuseDeltas = config.isReuseDeltas();
388 		reuseValidate = true; // be paranoid by default
389 		stats = statsAccumulator != null ? statsAccumulator
390 				: new PackStatistics.Accumulator();
391 		state = new MutableState();
392 		selfRef = new WeakReference<>(this);
393 		instances.put(selfRef, Boolean.TRUE);
394 	}
395 
	/**
	 * Set the {@code ObjectCountCallback}.
	 * <p>
	 * It should be set before calling
	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
	 * <p>
	 * Only one callback is retained; a later call replaces the earlier one.
	 *
	 * @param callback
	 *            the callback to set
	 * @return this object for chaining.
	 */
	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
		this.callback = callback;
		return this;
	}
410 
411 	/**
412 	 * Records the set of shallow commits in the client.
413 	 *
414 	 * @param clientShallowCommits
415 	 *            the shallow commits in the client
416 	 */
417 	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
418 		stats.clientShallowCommits = Collections
419 				.unmodifiableSet(new HashSet<>(clientShallowCommits));
420 	}
421 
422 	/**
423 	 * Check whether writer can store delta base as an offset (new style
424 	 * reducing pack size) or should store it as an object id (legacy style,
425 	 * compatible with old readers).
426 	 *
427 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
428 	 *
429 	 * @return true if delta base is stored as an offset; false if it is stored
430 	 *         as an object id.
431 	 */
432 	public boolean isDeltaBaseAsOffset() {
433 		return deltaBaseAsOffset;
434 	}
435 
436 	/**
437 	 * Set writer delta base format. Delta base can be written as an offset in a
438 	 * pack file (new approach reducing file size) or as an object id (legacy
439 	 * approach, compatible with old readers).
440 	 *
441 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
442 	 *
443 	 * @param deltaBaseAsOffset
444 	 *            boolean indicating whether delta base can be stored as an
445 	 *            offset.
446 	 */
447 	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
448 		this.deltaBaseAsOffset = deltaBaseAsOffset;
449 	}
450 
451 	/**
452 	 * Check if the writer will reuse commits that are already stored as deltas.
453 	 *
454 	 * @return true if the writer would reuse commits stored as deltas, assuming
455 	 *         delta reuse is already enabled.
456 	 */
457 	public boolean isReuseDeltaCommits() {
458 		return reuseDeltaCommits;
459 	}
460 
461 	/**
462 	 * Set the writer to reuse existing delta versions of commits.
463 	 *
464 	 * @param reuse
465 	 *            if true, the writer will reuse any commits stored as deltas.
466 	 *            By default the writer does not reuse delta commits.
467 	 */
468 	public void setReuseDeltaCommits(boolean reuse) {
469 		reuseDeltaCommits = reuse;
470 	}
471 
472 	/**
473 	 * Check if the writer validates objects before copying them.
474 	 *
475 	 * @return true if validation is enabled; false if the reader will handle
476 	 *         object validation as a side-effect of it consuming the output.
477 	 */
478 	public boolean isReuseValidatingObjects() {
479 		return reuseValidate;
480 	}
481 
482 	/**
483 	 * Enable (or disable) object validation during packing.
484 	 *
485 	 * @param validate
486 	 *            if true the pack writer will validate an object before it is
487 	 *            put into the output. This additional validation work may be
488 	 *            necessary to avoid propagating corruption from one local pack
489 	 *            file to another local pack file.
490 	 */
491 	public void setReuseValidatingObjects(boolean validate) {
492 		reuseValidate = validate;
493 	}
494 
495 	/**
496 	 * Whether this writer is producing a thin pack.
497 	 *
498 	 * @return true if this writer is producing a thin pack.
499 	 */
500 	public boolean isThin() {
501 		return thin;
502 	}
503 
504 	/**
505 	 * Whether writer may pack objects with delta base object not within set of
506 	 * objects to pack
507 	 *
508 	 * @param packthin
509 	 *            a boolean indicating whether writer may pack objects with
510 	 *            delta base object not within set of objects to pack, but
511 	 *            belonging to party repository (uninteresting/boundary) as
512 	 *            determined by set; this kind of pack is used only for
513 	 *            transport; true - to produce thin pack, false - otherwise.
514 	 */
515 	public void setThin(boolean packthin) {
516 		thin = packthin;
517 	}
518 
519 	/**
520 	 * Whether to reuse cached packs.
521 	 *
522 	 * @return {@code true} to reuse cached packs. If true index creation isn't
523 	 *         available.
524 	 */
525 	public boolean isUseCachedPacks() {
526 		return useCachedPacks;
527 	}
528 
529 	/**
530 	 * Whether to use cached packs
531 	 *
532 	 * @param useCached
533 	 *            if set to {@code true} and a cached pack is present, it will
534 	 *            be appended onto the end of a thin-pack, reducing the amount
535 	 *            of working set space and CPU used by PackWriter. Enabling this
536 	 *            feature prevents PackWriter from creating an index for the
537 	 *            newly created pack, so its only suitable for writing to a
538 	 *            network client, where the client will make the index.
539 	 */
540 	public void setUseCachedPacks(boolean useCached) {
541 		useCachedPacks = useCached;
542 	}
543 
544 	/**
545 	 * Whether to use bitmaps
546 	 *
547 	 * @return {@code true} to use bitmaps for ObjectWalks, if available.
548 	 */
549 	public boolean isUseBitmaps() {
550 		return useBitmaps;
551 	}
552 
553 	/**
554 	 * Whether to use bitmaps
555 	 *
556 	 * @param useBitmaps
557 	 *            if set to true, bitmaps will be used when preparing a pack.
558 	 */
559 	public void setUseBitmaps(boolean useBitmaps) {
560 		this.useBitmaps = useBitmaps;
561 	}
562 
563 	/**
564 	 * Whether the index file cannot be created by this PackWriter.
565 	 *
566 	 * @return {@code true} if the index file cannot be created by this
567 	 *         PackWriter.
568 	 */
569 	public boolean isIndexDisabled() {
570 		return indexDisabled || !cachedPacks.isEmpty();
571 	}
572 
573 	/**
574 	 * Whether to disable creation of the index file.
575 	 *
576 	 * @param noIndex
577 	 *            {@code true} to disable creation of the index file.
578 	 */
579 	public void setIndexDisabled(boolean noIndex) {
580 		this.indexDisabled = noIndex;
581 	}
582 
583 	/**
584 	 * Whether to ignore missing uninteresting objects
585 	 *
586 	 * @return {@code true} to ignore objects that are uninteresting and also
587 	 *         not found on local disk; false to throw a
588 	 *         {@link org.eclipse.jgit.errors.MissingObjectException} out of
589 	 *         {@link #preparePack(ProgressMonitor, Set, Set)} if an
590 	 *         uninteresting object is not in the source repository. By default,
591 	 *         true, permitting gracefully ignoring of uninteresting objects.
592 	 */
593 	public boolean isIgnoreMissingUninteresting() {
594 		return ignoreMissingUninteresting;
595 	}
596 
597 	/**
598 	 * Whether writer should ignore non existing uninteresting objects
599 	 *
600 	 * @param ignore
601 	 *            {@code true} if writer should ignore non existing
602 	 *            uninteresting objects during construction set of objects to
603 	 *            pack; false otherwise - non existing uninteresting objects may
604 	 *            cause {@link org.eclipse.jgit.errors.MissingObjectException}
605 	 */
606 	public void setIgnoreMissingUninteresting(boolean ignore) {
607 		ignoreMissingUninteresting = ignore;
608 	}
609 
610 	/**
611 	 * Set the tag targets that should be hoisted earlier during packing.
612 	 * <p>
613 	 * Callers may put objects into this set before invoking any of the
614 	 * preparePack methods to influence where an annotated tag's target is
615 	 * stored within the resulting pack. Typically these will be clustered
616 	 * together, and hoisted earlier in the file even if they are ancient
617 	 * revisions, allowing readers to find tag targets with better locality.
618 	 *
619 	 * @param objects
620 	 *            objects that annotated tags point at.
621 	 */
622 	public void setTagTargets(Set<ObjectId> objects) {
623 		tagTargets = objects;
624 	}
625 
626 	/**
627 	 * Configure this pack for a shallow clone.
628 	 *
629 	 * @param depth
630 	 *            maximum depth of history to return. 1 means return only the
631 	 *            "wants".
632 	 * @param unshallow
633 	 *            objects which used to be shallow on the client, but are being
634 	 *            extended as part of this fetch
635 	 */
636 	public void setShallowPack(int depth,
637 			Collection<? extends ObjectId> unshallow) {
638 		this.shallowPack = true;
639 		this.depth = depth;
640 		this.unshallowObjects = unshallow;
641 	}
642 
643 	/**
644 	 * @param bytes exclude blobs of size greater than this
645 	 */
646 	public void setFilterBlobLimit(long bytes) {
647 		filterBlobLimit = bytes;
648 	}
649 
650 	/**
651 	 * Returns objects number in a pack file that was created by this writer.
652 	 *
653 	 * @return number of objects in pack.
654 	 * @throws java.io.IOException
655 	 *             a cached pack cannot supply its object count.
656 	 */
657 	public long getObjectCount() throws IOException {
658 		if (stats.totalObjects == 0) {
659 			long objCnt = 0;
660 
661 			objCnt += objectsLists[OBJ_COMMIT].size();
662 			objCnt += objectsLists[OBJ_TREE].size();
663 			objCnt += objectsLists[OBJ_BLOB].size();
664 			objCnt += objectsLists[OBJ_TAG].size();
665 
666 			for (CachedPack pack : cachedPacks)
667 				objCnt += pack.getObjectCount();
668 			return objCnt;
669 		}
670 		return stats.totalObjects;
671 	}
672 
673 	/**
674 	 * Returns the object ids in the pack file that was created by this writer.
675 	 * <p>
676 	 * This method can only be invoked after
677 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
678 	 * been invoked and completed successfully.
679 	 *
680 	 * @return set of objects in pack.
681 	 * @throws java.io.IOException
682 	 *             a cached pack cannot supply its object ids.
683 	 */
684 	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
685 			throws IOException {
686 		if (!cachedPacks.isEmpty())
687 			throw new IOException(
688 					JGitText.get().cachedPacksPreventsListingObjects);
689 
690 		if (writeBitmaps != null) {
691 			return writeBitmaps.getObjectSet();
692 		}
693 
694 		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
695 		for (BlockList<ObjectToPack> objList : objectsLists) {
696 			if (objList != null) {
697 				for (ObjectToPack otp : objList)
698 					r.add(new ObjectIdOwnerMap.Entry(otp) {
699 						// A new entry that copies the ObjectId
700 					});
701 			}
702 		}
703 		return r;
704 	}
705 
706 	/**
707 	 * Add a pack index whose contents should be excluded from the result.
708 	 *
709 	 * @param idx
710 	 *            objects in this index will not be in the output pack.
711 	 */
712 	public void excludeObjects(ObjectIdSet idx) {
713 		if (excludeInPacks == null) {
714 			excludeInPacks = new ObjectIdSet[] { idx };
715 			excludeInPackLast = idx;
716 		} else {
717 			int cnt = excludeInPacks.length;
718 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
719 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
720 			newList[cnt] = idx;
721 			excludeInPacks = newList;
722 		}
723 	}
724 
725 	/**
726 	 * Prepare the list of objects to be written to the pack stream.
727 	 * <p>
728 	 * Iterator <b>exactly</b> determines which objects are included in a pack
729 	 * and order they appear in pack (except that objects order by type is not
730 	 * needed at input). This order should conform general rules of ordering
731 	 * objects in git - by recency and path (type and delta-base first is
732 	 * internally secured) and responsibility for guaranteeing this order is on
733 	 * a caller side. Iterator must return each id of object to write exactly
734 	 * once.
735 	 * </p>
736 	 *
737 	 * @param objectsSource
738 	 *            iterator of object to store in a pack; order of objects within
739 	 *            each type is important, ordering by type is not needed;
740 	 *            allowed types for objects are
741 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_COMMIT},
742 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TREE},
743 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_BLOB} and
744 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TAG}; objects
745 	 *            returned by iterator may be later reused by caller as object
746 	 *            id and type are internally copied in each iteration.
747 	 * @throws java.io.IOException
748 	 *             when some I/O problem occur during reading objects.
749 	 */
750 	public void preparePack(@NonNull Iterator<RevObject> objectsSource)
751 			throws IOException {
752 		while (objectsSource.hasNext()) {
753 			addObject(objectsSource.next());
754 		}
755 	}
756 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Basing on these 2 sets, another set of objects to put in a pack file is
	 * created: this set consists of all objects reachable (ancestors) from
	 * interesting objects, except uninteresting objects and their ancestors.
	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
	 * extensively to find out that appropriate set of output objects and their
	 * optimal order in output pack. Order is consistent with general git
	 * in-pack rules: sort by object type, recency, path and delta-base first.
	 * </p>
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            collection of objects to be marked as interesting (start
	 *            points of graph traversal). Must not be {@code null}.
	 * @param have
	 *            collection of objects to be marked as uninteresting (end
	 *            points of graph traversal). Pass {@link #NONE} if all objects
	 *            reachable from {@code want} are desired, such as when serving
	 *            a clone.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have) throws IOException {
		// Delegate: no shallow commits, no bitmap-selection exclusions.
		preparePack(countingMonitor, want, have, NONE, NONE);
	}
787 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
	 * the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because they
	 *            are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not be
	 *            {@code null}.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow) throws IOException {
		// Delegate with no bitmap-selection exclusions.
		preparePack(countingMonitor, want, have, shallow, NONE);
	}
823 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
	 * the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because they
	 *            are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not be
	 *            {@code null}.
	 * @param noBitmaps
	 *            collection of objects to be excluded from bitmap commit
	 *            selection.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		// The walk is closed when this block exits; the discovered objects
		// are retained in this writer's own lists.
		try (ObjectWalk ow = getObjectWalk()) {
			ow.assumeShallow(shallow);
			preparePack(countingMonitor, ow, want, have, noBitmaps);
		}
	}
866 
867 	private ObjectWalk getObjectWalk() {
868 		return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
869 				: new ObjectWalk(reader);
870 	}
871 
872 	/**
873 	 * Prepare the list of objects to be written to the pack stream.
874 	 * <p>
875 	 * Basing on these 2 sets, another set of objects to put in a pack file is
876 	 * created: this set consists of all objects reachable (ancestors) from
877 	 * interesting objects, except uninteresting objects and their ancestors.
878 	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
879 	 * extensively to find out that appropriate set of output objects and their
880 	 * optimal order in output pack. Order is consistent with general git
881 	 * in-pack rules: sort by object type, recency, path and delta-base first.
882 	 * </p>
883 	 *
884 	 * @param countingMonitor
885 	 *            progress during object enumeration.
886 	 * @param walk
887 	 *            ObjectWalk to perform enumeration.
888 	 * @param interestingObjects
889 	 *            collection of objects to be marked as interesting (start
890 	 *            points of graph traversal). Must not be {@code null}.
891 	 * @param uninterestingObjects
892 	 *            collection of objects to be marked as uninteresting (end
893 	 *            points of graph traversal). Pass {@link #NONE} if all objects
894 	 *            reachable from {@code want} are desired, such as when serving
895 	 *            a clone.
896 	 * @param noBitmaps
897 	 *            collection of objects to be excluded from bitmap commit
898 	 *            selection.
899 	 * @throws java.io.IOException
900 	 *             when some I/O problem occur during reading objects.
901 	 */
902 	public void preparePack(ProgressMonitor countingMonitor,
903 			@NonNull ObjectWalk walk,
904 			@NonNull Set<? extends ObjectId> interestingObjects,
905 			@NonNull Set<? extends ObjectId> uninterestingObjects,
906 			@NonNull Set<? extends ObjectId> noBitmaps)
907 			throws IOException {
908 		if (countingMonitor == null)
909 			countingMonitor = NullProgressMonitor.INSTANCE;
910 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
911 			throw new IllegalArgumentException(
912 					JGitText.get().shallowPacksRequireDepthWalk);
913 		findObjectsToPack(countingMonitor, walk, interestingObjects,
914 				uninterestingObjects, noBitmaps);
915 	}
916 
917 	/**
918 	 * Determine if the pack file will contain the requested object.
919 	 *
920 	 * @param id
921 	 *            the object to test the existence of.
922 	 * @return true if the object will appear in the output pack file.
923 	 * @throws java.io.IOException
924 	 *             a cached pack cannot be examined.
925 	 */
926 	public boolean willInclude(AnyObjectId id) throws IOException {
927 		ObjectToPack obj = objectsMap.get(id);
928 		return obj != null && !obj.isEdge();
929 	}
930 
931 	/**
932 	 * Lookup the ObjectToPack object for a given ObjectId.
933 	 *
934 	 * @param id
935 	 *            the object to find in the pack.
936 	 * @return the object we are packing, or null.
937 	 */
938 	public ObjectToPack get(AnyObjectId id) {
939 		ObjectToPack obj = objectsMap.get(id);
940 		return obj != null && !obj.isEdge() ? obj : null;
941 	}
942 
943 	/**
944 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
945 	 * pack, as used to name a pack file in repository.
946 	 *
947 	 * @return ObjectId representing SHA-1 name of a pack that was created.
948 	 */
949 	public ObjectId computeName() {
950 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
951 		final MessageDigest md = Constants.newMessageDigest();
952 		for (ObjectToPack otp : sortByName()) {
953 			otp.copyRawTo(buf, 0);
954 			md.update(buf, 0, OBJECT_ID_LENGTH);
955 		}
956 		return ObjectId.fromRaw(md.digest());
957 	}
958 
959 	/**
960 	 * Returns the index format version that will be written.
961 	 * <p>
962 	 * This method can only be invoked after
963 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
964 	 * been invoked and completed successfully.
965 	 *
966 	 * @return the index format version.
967 	 */
968 	public int getIndexVersion() {
969 		int indexVersion = config.getIndexVersion();
970 		if (indexVersion <= 0) {
971 			for (BlockList<ObjectToPack> objs : objectsLists)
972 				indexVersion = Math.max(indexVersion,
973 						PackIndexWriter.oldestPossibleFormat(objs));
974 		}
975 		return indexVersion;
976 	}
977 
978 	/**
979 	 * Create an index file to match the pack file just written.
980 	 * <p>
981 	 * Called after
982 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
983 	 * <p>
984 	 * Writing an index is only required for local pack storage. Packs sent on
985 	 * the network do not need to create an index.
986 	 *
987 	 * @param indexStream
988 	 *            output for the index data. Caller is responsible for closing
989 	 *            this stream.
990 	 * @throws java.io.IOException
991 	 *             the index data could not be written to the supplied stream.
992 	 */
993 	public void writeIndex(OutputStream indexStream) throws IOException {
994 		if (isIndexDisabled())
995 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
996 
997 		long writeStart = System.currentTimeMillis();
998 		final PackIndexWriter iw = PackIndexWriter.createVersion(
999 				indexStream, getIndexVersion());
1000 		iw.write(sortByName(), packcsum);
1001 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1002 	}
1003 
1004 	/**
1005 	 * Create a bitmap index file to match the pack file just written.
1006 	 * <p>
1007 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
1008 	 *
1009 	 * @param bitmapIndexStream
1010 	 *            output for the bitmap index data. Caller is responsible for
1011 	 *            closing this stream.
1012 	 * @throws java.io.IOException
1013 	 *             the index data could not be written to the supplied stream.
1014 	 */
1015 	public void writeBitmapIndex(OutputStream bitmapIndexStream)
1016 			throws IOException {
1017 		if (writeBitmaps == null)
1018 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
1019 
1020 		long writeStart = System.currentTimeMillis();
1021 		final PackBitmapIndexWriterV1ile/PackBitmapIndexWriterV1.html#PackBitmapIndexWriterV1">PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
1022 		iw.write(writeBitmaps, packcsum);
1023 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1024 	}
1025 
1026 	private List<ObjectToPack> sortByName() {
1027 		if (sortedByName == null) {
1028 			int cnt = 0;
1029 			cnt += objectsLists[OBJ_COMMIT].size();
1030 			cnt += objectsLists[OBJ_TREE].size();
1031 			cnt += objectsLists[OBJ_BLOB].size();
1032 			cnt += objectsLists[OBJ_TAG].size();
1033 
1034 			sortedByName = new BlockList<>(cnt);
1035 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
1036 			sortedByName.addAll(objectsLists[OBJ_TREE]);
1037 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
1038 			sortedByName.addAll(objectsLists[OBJ_TAG]);
1039 			Collections.sort(sortedByName);
1040 		}
1041 		return sortedByName;
1042 	}
1043 
1044 	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
1045 			long cnt) {
1046 		state.phase = phase;
1047 		String task;
1048 		switch (phase) {
1049 		case COUNTING:
1050 			task = JGitText.get().countingObjects;
1051 			break;
1052 		case GETTING_SIZES:
1053 			task = JGitText.get().searchForSizes;
1054 			break;
1055 		case FINDING_SOURCES:
1056 			task = JGitText.get().searchForReuse;
1057 			break;
1058 		case COMPRESSING:
1059 			task = JGitText.get().compressingObjects;
1060 			break;
1061 		case WRITING:
1062 			task = JGitText.get().writingObjects;
1063 			break;
1064 		case BUILDING_BITMAPS:
1065 			task = JGitText.get().buildingBitmaps;
1066 			break;
1067 		default:
1068 			throw new IllegalArgumentException(
1069 					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
1070 		}
1071 		monitor.beginTask(task, (int) cnt);
1072 	}
1073 
	/** End the currently running task on the given progress monitor. */
	private void endPhase(ProgressMonitor monitor) {
		monitor.endTask();
	}
1077 
1078 	/**
1079 	 * Write the prepared pack to the supplied stream.
1080 	 * <p>
1081 	 * Called after
1082 	 * {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set, Set)} or
1083 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
1084 	 * <p>
1085 	 * Performs delta search if enabled and writes the pack stream.
1086 	 * <p>
1087 	 * All reused objects data checksum (Adler32/CRC32) is computed and
1088 	 * validated against existing checksum.
1089 	 *
1090 	 * @param compressMonitor
1091 	 *            progress monitor to report object compression work.
1092 	 * @param writeMonitor
1093 	 *            progress monitor to report the number of objects written.
1094 	 * @param packStream
1095 	 *            output stream of pack data. The stream should be buffered by
1096 	 *            the caller. The caller is responsible for closing the stream.
1097 	 * @throws java.io.IOException
1098 	 *             an error occurred reading a local object's data to include in
1099 	 *             the pack, or writing compressed object data to the output
1100 	 *             stream.
1101 	 * @throws WriteAbortedException
1102 	 *             the write operation is aborted by
1103 	 *             {@link org.eclipse.jgit.transport.ObjectCountCallback} .
1104 	 */
1105 	public void writePack(ProgressMonitor compressMonitor,
1106 			ProgressMonitor writeMonitor, OutputStream packStream)
1107 			throws IOException {
1108 		if (compressMonitor == null)
1109 			compressMonitor = NullProgressMonitor.INSTANCE;
1110 		if (writeMonitor == null)
1111 			writeMonitor = NullProgressMonitor.INSTANCE;
1112 
1113 		excludeInPacks = null;
1114 		excludeInPackLast = null;
1115 
1116 		boolean needSearchForReuse = reuseSupport != null && (
1117 				   reuseDeltas
1118 				|| config.isReuseObjects()
1119 				|| !cachedPacks.isEmpty());
1120 
1121 		if (compressMonitor instanceof BatchingProgressMonitor) {
1122 			long delay = 1000;
1123 			if (needSearchForReuse && config.isDeltaCompress())
1124 				delay = 500;
1125 			((BatchingProgressMonitor) compressMonitor).setDelayStart(
1126 					delay,
1127 					TimeUnit.MILLISECONDS);
1128 		}
1129 
1130 		if (needSearchForReuse)
1131 			searchForReuse(compressMonitor);
1132 		if (config.isDeltaCompress())
1133 			searchForDeltas(compressMonitor);
1134 
1135 		crc32 = new CRC32();
1136 		final PackOutputStreamrage/pack/PackOutputStream.html#PackOutputStream">PackOutputStream out = new PackOutputStream(
1137 			writeMonitor,
1138 			isIndexDisabled()
1139 				? packStream
1140 				: new CheckedOutputStream(packStream, crc32),
1141 			this);
1142 
1143 		long objCnt = getObjectCount();
1144 		stats.totalObjects = objCnt;
1145 		if (callback != null)
1146 			callback.setObjectCount(objCnt);
1147 		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
1148 		long writeStart = System.currentTimeMillis();
1149 		try {
1150 			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
1151 			out.flush();
1152 
1153 			writeObjects(out);
1154 			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
1155 				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1156 					if (typeStat == null)
1157 						continue;
1158 					stats.thinPackBytes += typeStat.bytes;
1159 				}
1160 			}
1161 
1162 			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
1163 			for (CachedPack pack : cachedPacks) {
1164 				long deltaCnt = pack.getDeltaCount();
1165 				stats.reusedObjects += pack.getObjectCount();
1166 				stats.reusedDeltas += deltaCnt;
1167 				stats.totalDeltas += deltaCnt;
1168 				reuseSupport.copyPackAsIs(out, pack);
1169 			}
1170 			writeChecksum(out);
1171 			out.flush();
1172 		} finally {
1173 			stats.timeWriting = System.currentTimeMillis() - writeStart;
1174 			stats.depth = depth;
1175 
1176 			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1177 				if (typeStat == null)
1178 					continue;
1179 				typeStat.cntDeltas += typeStat.reusedDeltas;
1180 				stats.reusedObjects += typeStat.reusedObjects;
1181 				stats.reusedDeltas += typeStat.reusedDeltas;
1182 				stats.totalDeltas += typeStat.cntDeltas;
1183 			}
1184 		}
1185 
1186 		stats.totalBytes = out.length();
1187 		reader.close();
1188 		endPhase(writeMonitor);
1189 	}
1190 
1191 	/**
1192 	 * Get statistics of what this PackWriter did in order to create the final
1193 	 * pack stream.
1194 	 *
1195 	 * @return description of what this PackWriter did in order to create the
1196 	 *         final pack stream. This should only be invoked after the calls to
1197 	 *         create the pack/index/bitmap have completed.
1198 	 */
1199 	public PackStatistics getStatistics() {
1200 		return new PackStatistics(stats);
1201 	}
1202 
	/**
	 * Get snapshot of the current state of this PackWriter.
	 *
	 * @return snapshot of the current state of this PackWriter.
	 */
	public State getState() {
		return state.snapshot();
	}
1211 
1212 	/**
1213 	 * {@inheritDoc}
1214 	 * <p>
1215 	 * Release all resources used by this writer.
1216 	 */
1217 	@Override
1218 	public void close() {
1219 		reader.close();
1220 		if (myDeflater != null) {
1221 			myDeflater.end();
1222 			myDeflater = null;
1223 		}
1224 		instances.remove(selfRef);
1225 	}
1226 
1227 	private void searchForReuse(ProgressMonitor monitor) throws IOException {
1228 		long cnt = 0;
1229 		cnt += objectsLists[OBJ_COMMIT].size();
1230 		cnt += objectsLists[OBJ_TREE].size();
1231 		cnt += objectsLists[OBJ_BLOB].size();
1232 		cnt += objectsLists[OBJ_TAG].size();
1233 
1234 		long start = System.currentTimeMillis();
1235 		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
1236 		if (cnt <= 4096) {
1237 			// For small object counts, do everything as one list.
1238 			BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
1239 			tmp.addAll(objectsLists[OBJ_TAG]);
1240 			tmp.addAll(objectsLists[OBJ_COMMIT]);
1241 			tmp.addAll(objectsLists[OBJ_TREE]);
1242 			tmp.addAll(objectsLists[OBJ_BLOB]);
1243 			searchForReuse(monitor, tmp);
1244 			if (pruneCurrentObjectList) {
1245 				// If the list was pruned, we need to re-prune the main lists.
1246 				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
1247 				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
1248 				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
1249 				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
1250 			}
1251 		} else {
1252 			searchForReuse(monitor, objectsLists[OBJ_TAG]);
1253 			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
1254 			searchForReuse(monitor, objectsLists[OBJ_TREE]);
1255 			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
1256 		}
1257 		endPhase(monitor);
1258 		stats.timeSearchingForReuse = System.currentTimeMillis() - start;
1259 
1260 		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
1261 			cutDeltaChains(objectsLists[OBJ_TREE]);
1262 			cutDeltaChains(objectsLists[OBJ_BLOB]);
1263 		}
1264 	}
1265 
1266 	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
1267 			throws IOException, MissingObjectException {
1268 		pruneCurrentObjectList = false;
1269 		reuseSupport.selectObjectRepresentation(this, monitor, list);
1270 		if (pruneCurrentObjectList)
1271 			pruneEdgesFromObjectList(list);
1272 	}
1273 
	/**
	 * Limit reused delta chains to the configured maximum depth.
	 * <p>
	 * Walks the chain of delta bases under each object, recording chain
	 * lengths, and re-selects a whole-object representation for any base
	 * that is itself a delta once the configured maximum depth is reached.
	 *
	 * @param list
	 *            objects whose delta chains should be measured and cut.
	 * @throws IOException
	 *             a replacement representation could not be selected.
	 */
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				// A base whose recorded length already exceeds our walk depth
				// was measured by an earlier (deeper) iteration; stop here.
				if (d < b.getChainLength())
					break;
				b.setChainLength(++d);
				if (d >= max && b.isDeltaRepresentation()) {
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		if (config.isDeltaCompress()) {
			// NOTE(review): chain length appears to be scratch state shared
			// with the delta search phase — cleared only when that phase
			// will run; confirm against ObjectToPack.
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1296 
1297 	private void searchForDeltas(ProgressMonitor monitor)
1298 			throws MissingObjectException, IncorrectObjectTypeException,
1299 			IOException {
1300 		// Commits and annotated tags tend to have too many differences to
1301 		// really benefit from delta compression. Consequently just don't
1302 		// bother examining those types here.
1303 		//
1304 		ObjectToPack[] list = new ObjectToPack[
1305 				  objectsLists[OBJ_TREE].size()
1306 				+ objectsLists[OBJ_BLOB].size()
1307 				+ edgeObjects.size()];
1308 		int cnt = 0;
1309 		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
1310 		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
1311 		if (cnt == 0)
1312 			return;
1313 		int nonEdgeCnt = cnt;
1314 
1315 		// Queue up any edge objects that we might delta against.  We won't
1316 		// be sending these as we assume the other side has them, but we need
1317 		// them in the search phase below.
1318 		//
1319 		for (ObjectToPack eo : edgeObjects) {
1320 			eo.setWeight(0);
1321 			list[cnt++] = eo;
1322 		}
1323 
1324 		// Compute the sizes of the objects so we can do a proper sort.
1325 		// We let the reader skip missing objects if it chooses. For
1326 		// some readers this can be a huge win. We detect missing objects
1327 		// by having set the weights above to 0 and allowing the delta
1328 		// search code to discover the missing object and skip over it, or
1329 		// abort with an exception if we actually had to have it.
1330 		//
1331 		final long sizingStart = System.currentTimeMillis();
1332 		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
1333 		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
1334 				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
1335 		try {
1336 			final long limit = Math.min(
1337 					config.getBigFileThreshold(),
1338 					Integer.MAX_VALUE);
1339 			for (;;) {
1340 				try {
1341 					if (!sizeQueue.next())
1342 						break;
1343 				} catch (MissingObjectException notFound) {
1344 					monitor.update(1);
1345 					if (ignoreMissingUninteresting) {
1346 						ObjectToPack otp = sizeQueue.getCurrent();
1347 						if (otp != null && otp.isEdge()) {
1348 							otp.setDoNotDelta();
1349 							continue;
1350 						}
1351 
1352 						otp = objectsMap.get(notFound.getObjectId());
1353 						if (otp != null && otp.isEdge()) {
1354 							otp.setDoNotDelta();
1355 							continue;
1356 						}
1357 					}
1358 					throw notFound;
1359 				}
1360 
1361 				ObjectToPack otp = sizeQueue.getCurrent();
1362 				if (otp == null)
1363 					otp = objectsMap.get(sizeQueue.getObjectId());
1364 
1365 				long sz = sizeQueue.getSize();
1366 				if (DeltaIndex.BLKSZ < sz && sz < limit)
1367 					otp.setWeight((int) sz);
1368 				else
1369 					otp.setDoNotDelta(); // too small, or too big
1370 				monitor.update(1);
1371 			}
1372 		} finally {
1373 			sizeQueue.release();
1374 		}
1375 		endPhase(monitor);
1376 		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;
1377 
1378 		// Sort the objects by path hash so like files are near each other,
1379 		// and then by size descending so that bigger files are first. This
1380 		// applies "Linus' Law" which states that newer files tend to be the
1381 		// bigger ones, because source files grow and hardly ever shrink.
1382 		//
1383 		Arrays.sort(list, 0, cnt, new Comparator<ObjectToPack>() {
1384 			@Override
1385 			public int compare(ObjectToPack"../../../../../../org/eclipse/jgit/internal/storage/pack/ObjectToPack.html#ObjectToPack">ObjectToPack a, ObjectToPack b) {
1386 				int cmp = (a.isDoNotDelta() ? 1 : 0)
1387 						- (b.isDoNotDelta() ? 1 : 0);
1388 				if (cmp != 0)
1389 					return cmp;
1390 
1391 				cmp = a.getType() - b.getType();
1392 				if (cmp != 0)
1393 					return cmp;
1394 
1395 				cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
1396 				if (cmp != 0)
1397 					return cmp;
1398 
1399 				cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
1400 				if (cmp != 0)
1401 					return cmp;
1402 
1403 				cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
1404 				if (cmp != 0)
1405 					return cmp;
1406 
1407 				return b.getWeight() - a.getWeight();
1408 			}
1409 		});
1410 
1411 		// Above we stored the objects we cannot delta onto the end.
1412 		// Remove them from the list so we don't waste time on them.
1413 		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
1414 			if (!list[cnt - 1].isEdge())
1415 				nonEdgeCnt--;
1416 			cnt--;
1417 		}
1418 		if (cnt == 0)
1419 			return;
1420 
1421 		final long searchStart = System.currentTimeMillis();
1422 		searchForDeltas(monitor, list, cnt);
1423 		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
1424 		stats.timeCompressing = System.currentTimeMillis() - searchStart;
1425 
1426 		for (int i = 0; i < cnt; i++)
1427 			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
1428 				stats.deltasFound++;
1429 	}
1430 
1431 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1432 		for (ObjectToPack otp : objectsLists[type]) {
1433 			if (otp.isDoNotDelta()) // delta is disabled for this path
1434 				continue;
1435 			if (otp.isDeltaRepresentation()) // already reusing a delta
1436 				continue;
1437 			otp.setWeight(0);
1438 			list[cnt++] = otp;
1439 		}
1440 		return cnt;
1441 	}
1442 
1443 	private void reselectNonDelta(ObjectToPack otp) throws IOException {
1444 		otp.clearDeltaBase();
1445 		otp.clearReuseAsIs();
1446 		boolean old = reuseDeltas;
1447 		reuseDeltas = false;
1448 		reuseSupport.selectObjectRepresentation(this,
1449 				NullProgressMonitor.INSTANCE,
1450 				Collections.singleton(otp));
1451 		reuseDeltas = old;
1452 	}
1453 
1454 	private void searchForDeltas(final ProgressMonitor monitor,
1455 			final ObjectToPack[] list, final int cnt)
1456 			throws MissingObjectException, IncorrectObjectTypeException,
1457 			LargeObjectException, IOException {
1458 		int threads = config.getThreads();
1459 		if (threads == 0)
1460 			threads = Runtime.getRuntime().availableProcessors();
1461 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1462 			singleThreadDeltaSearch(monitor, list, cnt);
1463 		else
1464 			parallelDeltaSearch(monitor, list, cnt, threads);
1465 	}
1466 
1467 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1468 			ObjectToPack[] list, int cnt) throws IOException {
1469 		long totalWeight = 0;
1470 		for (int i = 0; i < cnt; i++) {
1471 			ObjectToPack o = list[i];
1472 			totalWeight += DeltaTask.getAdjustedWeight(o);
1473 		}
1474 
1475 		long bytesPerUnit = 1;
1476 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1477 			bytesPerUnit <<= 10;
1478 		int cost = (int) (totalWeight / bytesPerUnit);
1479 		if (totalWeight % bytesPerUnit != 0)
1480 			cost++;
1481 
1482 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1483 		new DeltaWindow(config, new DeltaCache(config), reader,
1484 				monitor, bytesPerUnit,
1485 				list, 0, cnt).search();
1486 		endPhase(monitor);
1487 	}
1488 
1489 	private void parallelDeltaSearch(ProgressMonitor monitor,
1490 			ObjectToPack[] list, int cnt, int threads) throws IOException {
1491 		DeltaCache dc = new ThreadSafeDeltaCache(config);
1492 		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
1493 		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
1494 				reader, dc, pm,
1495 				list, 0, cnt);
1496 		taskBlock.partitionTasks();
1497 		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
1498 		pm.startWorkers(taskBlock.tasks.size());
1499 
1500 		Executor executor = config.getExecutor();
1501 		final List<Throwable> errors =
1502 				Collections.synchronizedList(new ArrayList<Throwable>(threads));
1503 		if (executor instanceof ExecutorService) {
1504 			// Caller supplied us a service, use it directly.
1505 			runTasks((ExecutorService) executor, pm, taskBlock, errors);
1506 		} else if (executor == null) {
1507 			// Caller didn't give us a way to run the tasks, spawn up a
1508 			// temporary thread pool and make sure it tears down cleanly.
1509 			ExecutorService pool = Executors.newFixedThreadPool(threads);
1510 			try {
1511 				runTasks(pool, pm, taskBlock, errors);
1512 			} finally {
1513 				pool.shutdown();
1514 				for (;;) {
1515 					try {
1516 						if (pool.awaitTermination(60, TimeUnit.SECONDS))
1517 							break;
1518 					} catch (InterruptedException e) {
1519 						throw new IOException(
1520 								JGitText.get().packingCancelledDuringObjectsWriting);
1521 					}
1522 				}
1523 			}
1524 		} else {
1525 			// The caller gave us an executor, but it might not do
1526 			// asynchronous execution.  Wrap everything and hope it
1527 			// can schedule these for us.
1528 			for (DeltaTask task : taskBlock.tasks) {
1529 				executor.execute(new Runnable() {
1530 					@Override
1531 					public void run() {
1532 						try {
1533 							task.call();
1534 						} catch (Throwable failure) {
1535 							errors.add(failure);
1536 						}
1537 					}
1538 				});
1539 			}
1540 			try {
1541 				pm.waitForCompletion();
1542 			} catch (InterruptedException ie) {
1543 				// We can't abort the other tasks as we have no handle.
1544 				// Cross our fingers and just break out anyway.
1545 				//
1546 				throw new IOException(
1547 						JGitText.get().packingCancelledDuringObjectsWriting);
1548 			}
1549 		}
1550 
1551 		// If any task threw an error, try to report it back as
1552 		// though we weren't using a threaded search algorithm.
1553 		//
1554 		if (!errors.isEmpty()) {
1555 			Throwable err = errors.get(0);
1556 			if (err instanceof Error)
1557 				throw (Error) err;
1558 			if (err instanceof RuntimeException)
1559 				throw (RuntimeException) err;
1560 			if (err instanceof IOException)
1561 				throw (IOException) err;
1562 
1563 			throw new IOException(err.getMessage(), err);
1564 		}
1565 		endPhase(monitor);
1566 	}
1567 
1568 	private static void runTasks(ExecutorService pool,
1569 			ThreadSafeProgressMonitor pm,
1570 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1571 		List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
1572 		for (DeltaTask task : tb.tasks)
1573 			futures.add(pool.submit(task));
1574 
1575 		try {
1576 			pm.waitForCompletion();
1577 			for (Future<?> f : futures) {
1578 				try {
1579 					f.get();
1580 				} catch (ExecutionException failed) {
1581 					errors.add(failed.getCause());
1582 				}
1583 			}
1584 		} catch (InterruptedException ie) {
1585 			for (Future<?> f : futures)
1586 				f.cancel(true);
1587 			throw new IOException(
1588 					JGitText.get().packingCancelledDuringObjectsWriting);
1589 		}
1590 	}
1591 
1592 	private void writeObjects(PackOutputStream out) throws IOException {
1593 		writeObjects(out, objectsLists[OBJ_COMMIT]);
1594 		writeObjects(out, objectsLists[OBJ_TAG]);
1595 		writeObjects(out, objectsLists[OBJ_TREE]);
1596 		writeObjects(out, objectsLists[OBJ_BLOB]);
1597 	}
1598 
1599 	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
1600 			throws IOException {
1601 		if (list.isEmpty())
1602 			return;
1603 
1604 		typeStats = stats.objectTypes[list.get(0).getType()];
1605 		long beginOffset = out.length();
1606 
1607 		if (reuseSupport != null) {
1608 			reuseSupport.writeObjects(out, list);
1609 		} else {
1610 			for (ObjectToPack otp : list)
1611 				out.writeObject(otp);
1612 		}
1613 
1614 		typeStats.bytes += out.length() - beginOffset;
1615 		typeStats.cntObjects = list.size();
1616 	}
1617 
1618 	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
1619 		if (!otp.isWritten())
1620 			writeObjectImpl(out, otp);
1621 	}
1622 
	/**
	 * Write one object, emitting its delta base first when required.
	 * <p>
	 * Detects delta-chain cycles via the want-write flag, retries reuse when
	 * the stored representation vanished mid-write, and falls back to
	 * deflating the object itself when reuse is not possible.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            object to write.
	 * @throws IOException
	 *             object data could not be read or written.
	 */
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		while (otp.isReuseAsIs()) {
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				// Retry is only safe if nothing was emitted yet for this
				// object; otherwise the pack stream is already corrupt.
				if (otp.getOffset() == out.length()) {
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				} else {
					// Object writing already started, we cannot recover.
					//
					CorruptObjectException coe;
					coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
					coe.initCause(gone);
					throw coe;
				}
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation())
			writeDeltaObjectDeflate(out, otp);
		else
			writeWholeObjectDeflate(out, otp);
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1681 
1682 	private void writeBase(PackOutputStream out, ObjectToPack base)
1683 			throws IOException {
1684 		if (base != null && !base.isWritten() && !base.isEdge())
1685 			writeObjectImpl(out, base);
1686 	}
1687 
1688 	private void writeWholeObjectDeflate(PackOutputStream out,
1689 			final ObjectToPack otp) throws IOException {
1690 		final Deflater deflater = deflater();
1691 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1692 
1693 		crc32.reset();
1694 		otp.setOffset(out.length());
1695 		out.writeHeader(otp, ldr.getSize());
1696 
1697 		deflater.reset();
1698 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1699 		ldr.copyTo(dst);
1700 		dst.finish();
1701 	}
1702 
	/**
	 * Write an object as a delta, preferring a cached pre-compressed delta
	 * when one is available and recomputing (then deflating) it otherwise.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            object to write; its delta base is emitted first if needed.
	 * @throws IOException
	 *             object data could not be read or written.
	 */
	private void writeDeltaObjectDeflate(PackOutputStream out,
			final ObjectToPack otp) throws IOException {
		writeBase(out, otp.getDeltaBase());

		crc32.reset();
		otp.setOffset(out.length());

		// Fast path: a compressed delta retained from the search phase can
		// be copied out directly without recomputing or re-deflating.
		DeltaCache.Ref ref = otp.popCachedDelta();
		if (ref != null) {
			byte[] zbuf = ref.get();
			if (zbuf != null) {
				out.writeHeader(otp, otp.getCachedSize());
				out.write(zbuf);
				typeStats.cntDeltas++;
				typeStats.deltaBytes += out.length() - otp.getOffset();
				return;
			}
		}

		// Slow path: recompute the delta and deflate it into the stream.
		try (TemporaryBuffer.Heap delta = delta(otp)) {
			out.writeHeader(otp, delta.length());

			Deflater deflater = deflater();
			deflater.reset();
			DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
			delta.writeTo(dst, null);
			dst.finish();
		}
		typeStats.cntDeltas++;
		typeStats.deltaBytes += out.length() - otp.getOffset();
	}
1734 
1735 	private TemporaryBuffer.Heap delta(ObjectToPack otp)
1736 			throws IOException {
1737 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1738 		byte[] res = buffer(otp);
1739 
1740 		// We never would have proposed this pair if the delta would be
1741 		// larger than the unpacked version of the object. So using it
1742 		// as our buffer limit is valid: we will never reach it.
1743 		//
1744 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1745 		index.encode(delta, res);
1746 		return delta;
1747 	}
1748 
	// Inflate an object into memory using this writer's config and reader.
	private byte[] buffer(AnyObjectId objId) throws IOException {
		return buffer(config, reader, objId);
	}
1752 
1753 	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
1754 			throws IOException {
1755 		// PackWriter should have already pruned objects that
1756 		// are above the big file threshold, so our chances of
1757 		// the object being below it are very good. We really
1758 		// shouldn't be here, unless the implementation is odd.
1759 
1760 		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
1761 	}
1762 
1763 	private Deflater deflater() {
1764 		if (myDeflater == null)
1765 			myDeflater = new Deflater(config.getCompressionLevel());
1766 		return myDeflater;
1767 	}
1768 
	// Append the pack trailer: the digest of all bytes written so far.
	// A copy is kept in packcsum for later use by this writer.
	private void writeChecksum(PackOutputStream out) throws IOException {
		packcsum = out.getDigest();
		out.write(packcsum);
	}
1773 
	/**
	 * Enumerate every object that must be written into the pack.
	 * <p>
	 * Uses the bitmap index when available; otherwise performs a full
	 * commit walk followed by an object walk, recording commits, trees,
	 * blobs and tags into the writer's object lists. Also gathers the
	 * candidate base trees for thin-pack edge detection and updates the
	 * counting statistics.
	 *
	 * @param countingMonitor
	 *            progress feedback for the counting phase.
	 * @param walker
	 *            the walk used to traverse the object graph.
	 * @param want
	 *            objects the client asked for.
	 * @param have
	 *            objects the client already has.
	 * @param noBitmaps
	 *            commits that must not be selected for bitmap creation.
	 * @throws IOException
	 *             an object could not be read from the repository.
	 */
	private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
			@NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		final long countingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);

		// Snapshot the endpoints into the statistics before the walk
		// mutates any flags.
		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
		excludeFromBitmapSelection = noBitmaps;

		// Bitmaps can only be built for full (non-shallow, non-thin,
		// unfiltered) packs.
		canBuildBitmaps = config.isBuildBitmaps()
				&& !shallowPack
				&& have.isEmpty()
				&& (excludeInPacks == null || excludeInPacks.length == 0);
		if (!shallowPack && useBitmaps) {
			BitmapIndex bitmapIndex = reader.getBitmapIndex();
			if (bitmapIndex != null) {
				// Fast path: compute the object set from bitmaps and skip
				// the graph walk entirely.
				BitmapWalker bitmapWalker = new BitmapWalker(
						walker, bitmapIndex, countingMonitor);
				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
				endPhase(countingMonitor);
				stats.timeCounting = System.currentTimeMillis() - countingStart;
				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
				return;
			}
		}

		List<ObjectId> all = new ArrayList<>(want.size() + have.size());
		all.addAll(want);
		all.addAll(have);

		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$

		walker.carry(include);

		int haveEst = have.size();
		if (have.isEmpty()) {
			walker.sort(RevSort.COMMIT_TIME_DESC);
		} else {
			walker.sort(RevSort.TOPO);
			if (thin)
				walker.sort(RevSort.BOUNDARY, true);
		}

		List<RevObject> wantObjs = new ArrayList<>(want.size());
		List<RevObject> haveObjs = new ArrayList<>(haveEst);
		List<RevTag> wantTags = new ArrayList<>(want.size());

		// Retrieve the RevWalk's versions of "want" and "have" objects to
		// maintain any state previously set in the RevWalk.
		AsyncRevObjectQueue q = walker.parseAny(all, true);
		try {
			for (;;) {
				try {
					RevObject o = q.next();
					if (o == null)
						break;
					if (have.contains(o))
						haveObjs.add(o);
					if (want.contains(o)) {
						o.add(include);
						wantObjs.add(o);
						if (o instanceof RevTag)
							wantTags.add((RevTag) o);
					}
				} catch (MissingObjectException e) {
					// A missing "have" can be tolerated when configured;
					// a missing "want" is always fatal.
					if (ignoreMissingUninteresting
							&& have.contains(e.getObjectId()))
						continue;
					throw e;
				}
			}
		} finally {
			q.release();
		}

		// Pre-parse the targets of wanted annotated tags so later walk
		// steps see fully parsed objects.
		if (!wantTags.isEmpty()) {
			all = new ArrayList<>(wantTags.size());
			for (RevTag tag : wantTags)
				all.add(tag.getObject());
			q = walker.parseAny(all, true);
			try {
				while (q.next() != null) {
					// Just need to pop the queue item to parse the object.
				}
			} finally {
				q.release();
			}
		}

		if (walker instanceof DepthWalk.ObjectWalk) {
			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
			for (RevObject obj : wantObjs) {
				depthWalk.markRoot(obj);
			}
			// Mark the tree objects associated with "have" commits as
			// uninteresting to avoid writing redundant blobs. A normal RevWalk
			// lazily propagates the "uninteresting" state from a commit to its
			// tree during the walk, but DepthWalks can terminate early so
			// preemptively propagate that state here.
			for (RevObject obj : haveObjs) {
				if (obj instanceof RevCommit) {
					RevTree t = ((RevCommit) obj).getTree();
					depthWalk.markUninteresting(t);
				}
			}

			if (unshallowObjects != null) {
				for (ObjectId id : unshallowObjects) {
					depthWalk.markUnshallow(walker.parseAny(id));
				}
			}
		} else {
			for (RevObject obj : wantObjs)
				walker.markStart(obj);
		}
		for (RevObject obj : haveObjs)
			walker.markUninteresting(obj);

		// Phase 1: walk commits. Interesting commits are queued; trees of
		// uninteresting (boundary) commits are collected as thin-pack
		// delta base candidates, capped near the delta search window size.
		final int maxBases = config.getDeltaSearchWindowSize();
		Set<RevTree> baseTrees = new HashSet<>();
		BlockList<RevCommit> commits = new BlockList<>();
		Set<ObjectId> roots = new HashSet<>();
		RevCommit c;
		while ((c = walker.next()) != null) {
			if (exclude(c))
				continue;
			if (c.has(RevFlag.UNINTERESTING)) {
				if (baseTrees.size() <= maxBases)
					baseTrees.add(c.getTree());
				continue;
			}

			commits.add(c);
			if (c.getParentCount() == 0) {
				roots.add(c.copy());
			}
			countingMonitor.update(1);
		}
		stats.rootCommits = Collections.unmodifiableSet(roots);

		if (shallowPack) {
			for (RevCommit cmit : commits) {
				addObject(cmit, 0);
			}
		} else {
			// Add commits parent-before-child where possible, so readers
			// encounter bases early; the "added" flag prevents duplicates.
			int commitCnt = 0;
			boolean putTagTargets = false;
			for (RevCommit cmit : commits) {
				if (!cmit.has(added)) {
					cmit.add(added);
					addObject(cmit, 0);
					commitCnt++;
				}

				for (int i = 0; i < cmit.getParentCount(); i++) {
					RevCommit p = cmit.getParent(i);
					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
							&& !exclude(p)) {
						p.add(added);
						addObject(p, 0);
						commitCnt++;
					}
				}

				// On large walks, pull in tag targets once after enough
				// commits have been queued so they pack near the front.
				if (!putTagTargets && 4096 < commitCnt) {
					for (ObjectId id : tagTargets) {
						RevObject obj = walker.lookupOrNull(id);
						if (obj instanceof RevCommit
								&& obj.has(include)
								&& !obj.has(RevFlag.UNINTERESTING)
								&& !obj.has(added)) {
							obj.add(added);
							addObject(obj, 0);
						}
					}
					putTagTargets = true;
				}
			}
		}
		commits = null;

		// Phase 2: walk trees/blobs/tags. For thin packs also feed each
		// path into the base search so edge objects can serve as bases.
		if (thin && !baseTrees.isEmpty()) {
			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
					objectsMap, edgeObjects, reader);
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;

				int pathHash = walker.getPathHashCode();
				byte[] pathBuf = walker.getPathBuffer();
				int pathLen = walker.getPathLength();
				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
				filterAndAddObject(o, o.getType(), pathHash, want);
				countingMonitor.update(1);
			}
		} else {
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;
				filterAndAddObject(o, o.getType(), walker.getPathHashCode(), want);
				countingMonitor.update(1);
			}
		}

		for (CachedPack pack : cachedPacks)
			countingMonitor.update((int) pack.getObjectCount());
		endPhase(countingMonitor);
		stats.timeCounting = System.currentTimeMillis() - countingStart;
		// -1 marks that bitmaps were not used for this counting phase.
		stats.bitmapIndexMisses = -1;
	}
1993 
	// Compute the object set from the bitmap index: everything reachable
	// from "want" minus everything reachable from "have", without walking
	// the object graph.
	private void findObjectsToPackUsingBitmaps(
			BitmapWalker bitmapWalker, Set<? extends ObjectId> want,
			Set<? extends ObjectId> have)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
				false);
		// The objects that must go into the pack.
		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);

		// Cached packs can satisfy part of the need set; the call also
		// removes their objects from needBitmap.
		if (useCachedPacks && reuseSupport != null && !reuseValidate
				&& (excludeInPacks == null || excludeInPacks.length == 0))
			cachedPacks.addAll(
					reuseSupport.getCachedPacksAndUpdate(needBitmap));

		for (BitmapObject obj : needBitmap) {
			ObjectId objectId = obj.getObjectId();
			if (exclude(objectId)) {
				needBitmap.remove(objectId);
				continue;
			}
			// Path hash 0: bitmaps carry no path information.
			filterAndAddObject(objectId, obj.getType(), 0, want);
		}

		// For thin packs remember the client's objects so deltas may be
		// based against them.
		if (thin)
			haveObjects = haveBitmap;
	}
2021 
2022 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
2023 		final int size = list.size();
2024 		int src = 0;
2025 		int dst = 0;
2026 
2027 		for (; src < size; src++) {
2028 			ObjectToPack obj = list.get(src);
2029 			if (obj.isEdge())
2030 				continue;
2031 			if (dst != src)
2032 				list.set(dst, obj);
2033 			dst++;
2034 		}
2035 
2036 		while (dst < list.size())
2037 			list.remove(list.size() - 1);
2038 	}
2039 
2040 	/**
2041 	 * Include one object to the output file.
2042 	 * <p>
2043 	 * Objects are written in the order they are added. If the same object is
2044 	 * added twice, it may be written twice, creating a larger than necessary
2045 	 * file.
2046 	 *
2047 	 * @param object
2048 	 *            the object to add.
2049 	 * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
2050 	 *             the object is an unsupported type.
2051 	 */
2052 	public void addObject(RevObject object)
2053 			throws IncorrectObjectTypeException {
2054 		if (!exclude(object))
2055 			addObject(object, 0);
2056 	}
2057 
	// Convenience overload: derive the pack object type from the RevObject.
	private void addObject(RevObject object, int pathHashCode) {
		addObject(object, object.getType(), pathHashCode);
	}
2061 
2062 	private void addObject(
2063 			final AnyObjectId src, final int type, final int pathHashCode) {
2064 		final ObjectToPack otp;
2065 		if (reuseSupport != null)
2066 			otp = reuseSupport.newObjectToPack(src, type);
2067 		else
2068 			otp = new ObjectToPack(src, type);
2069 		otp.setPathHash(pathHashCode);
2070 		objectsLists[type].add(otp);
2071 		objectsMap.add(otp);
2072 	}
2073 
2074 	// Adds the given object as an object to be packed, first performing
2075 	// filtering on blobs at or exceeding a given size.
2076 	private void filterAndAddObject(@NonNull AnyObjectId src, int type,
2077 			int pathHashCode, @NonNull Set<? extends AnyObjectId> want)
2078 			throws IOException {
2079 
2080 		// Check if this object needs to be rejected, doing the cheaper
2081 		// checks first.
2082 		boolean reject = filterBlobLimit >= 0 &&
2083 			type == OBJ_BLOB &&
2084 			!want.contains(src) &&
2085 			reader.getObjectSize(src, OBJ_BLOB) > filterBlobLimit;
2086 		if (!reject) {
2087 			addObject(src, type, pathHashCode);
2088 		}
2089 	}
2090 
2091 	private boolean exclude(AnyObjectId objectId) {
2092 		if (excludeInPacks == null)
2093 			return false;
2094 		if (excludeInPackLast.contains(objectId))
2095 			return true;
2096 		for (ObjectIdSet idx : excludeInPacks) {
2097 			if (idx.contains(objectId)) {
2098 				excludeInPackLast = idx;
2099 				return true;
2100 			}
2101 		}
2102 		return false;
2103 	}
2104 
2105 	/**
2106 	 * Select an object representation for this writer.
2107 	 * <p>
2108 	 * An {@link org.eclipse.jgit.lib.ObjectReader} implementation should invoke
2109 	 * this method once for each representation available for an object, to
2110 	 * allow the writer to find the most suitable one for the output.
2111 	 *
2112 	 * @param otp
2113 	 *            the object being packed.
2114 	 * @param next
2115 	 *            the next available representation from the repository.
2116 	 */
2117 	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
2118 		int nFmt = next.getFormat();
2119 
2120 		if (!cachedPacks.isEmpty()) {
2121 			if (otp.isEdge())
2122 				return;
2123 			if ((nFmt == PACK_WHOLE) | (nFmt == PACK_DELTA)) {
2124 				for (CachedPack pack : cachedPacks) {
2125 					if (pack.hasObject(otp, next)) {
2126 						otp.setEdge();
2127 						otp.clearDeltaBase();
2128 						otp.clearReuseAsIs();
2129 						pruneCurrentObjectList = true;
2130 						return;
2131 					}
2132 				}
2133 			}
2134 		}
2135 
2136 		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
2137 			ObjectId baseId = next.getDeltaBase();
2138 			ObjectToPack ptr = objectsMap.get(baseId);
2139 			if (ptr != null && !ptr.isEdge()) {
2140 				otp.setDeltaBase(ptr);
2141 				otp.setReuseAsIs();
2142 			} else if (thin && have(ptr, baseId)) {
2143 				otp.setDeltaBase(baseId);
2144 				otp.setReuseAsIs();
2145 			} else {
2146 				otp.clearDeltaBase();
2147 				otp.clearReuseAsIs();
2148 			}
2149 		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
2150 			int nWeight = next.getWeight();
2151 			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
2152 				// We've chosen another PACK_WHOLE format for this object,
2153 				// choose the one that has the smaller compressed size.
2154 				//
2155 				if (otp.getWeight() <= nWeight)
2156 					return;
2157 			}
2158 			otp.clearDeltaBase();
2159 			otp.setReuseAsIs();
2160 			otp.setWeight(nWeight);
2161 		} else {
2162 			otp.clearDeltaBase();
2163 			otp.clearReuseAsIs();
2164 		}
2165 
2166 		otp.setDeltaAttempted(reuseDeltas & next.wasDeltaAttempted());
2167 		otp.select(next);
2168 	}
2169 
2170 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
2171 		return (ptr != null && ptr.isEdge())
2172 				|| (haveObjects != null && haveObjects.contains(objectId));
2173 	}
2174 
	/**
	 * Prepares the bitmaps to be written to the bitmap index file.
	 * <p>
	 * Bitmaps can be used to speed up fetches and clones by storing the entire
	 * object graph at selected commits. Writing a bitmap index is an optional
	 * feature that not all pack users may require.
	 * <p>
	 * Called after {@link #writeIndex(OutputStream)}.
	 * <p>
	 * To reduce memory internal state is cleared during this method, rendering
	 * the PackWriter instance useless for anything further than a call to write
	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
	 *
	 * @param pm
	 *            progress monitor to report bitmap building work.
	 * @return whether a bitmap index may be written.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
		// Bitmaps require a full pack whose object count fits an int.
		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
				|| !cachedPacks.isEmpty())
			return false;

		if (pm == null)
			pm = NullProgressMonitor.INSTANCE;

		// From here on internal state is consumed and released to save
		// memory; the writer can no longer produce pack data afterwards.
		int numCommits = objectsLists[OBJ_COMMIT].size();
		List<ObjectToPack> byName = sortByName();
		sortedByName = null;
		objectsLists = null;
		objectsMap = null;
		writeBitmaps = new PackBitmapIndexBuilder(byName);
		byName = null;

		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
				reader, writeBitmaps, pm, stats.interestingObjects, config);

		Collection<PackWriterBitmapPreparer.BitmapCommit> selectedCommits = bitmapPreparer
				.selectCommits(numCommits, excludeFromBitmapSelection);

		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());

		BitmapWalker walker = bitmapPreparer.newBitmapWalker();
		AnyObjectId last = null;
		for (PackWriterBitmapPreparer.BitmapCommit cmit : selectedCommits) {
			// A reused walker accumulates reachability from the previous
			// commit; otherwise start a fresh walk.
			if (!cmit.isReuseWalker()) {
				walker = bitmapPreparer.newBitmapWalker();
			}
			BitmapBuilder bitmap = walker.findObjects(
					Collections.singleton(cmit), null, false);

			// Sanity check: when the walker was reused, the previous
			// commit must be contained in the new bitmap.
			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
				throw new IllegalStateException(MessageFormat.format(
						JGitText.get().bitmapMissingObject, cmit.name(),
						last.name()));
			last = cmit;
			writeBitmaps.addBitmap(cmit, bitmap.build(), cmit.getFlags());

			pm.update(1);
		}

		endPhase(pm);
		return true;
	}
2240 
2241 	private boolean reuseDeltaFor(ObjectToPack otp) {
2242 		int type = otp.getType();
2243 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2244 			return true;
2245 		if (type == OBJ_COMMIT)
2246 			return reuseDeltaCommits;
2247 		if (type == OBJ_TAG)
2248 			return false;
2249 		return true;
2250 	}
2251 
	// Live, mutable view of the writer's progress, sampled by monitoring
	// code via snapshot(). Reads the outer writer's objectsLists and config.
	private class MutableState {
		/** Estimated size of a single ObjectToPack instance. */
		// Assume 64-bit pointers, since this is just an estimate.
		private static final long OBJECT_TO_PACK_SIZE =
				(2 * 8)               // Object header
				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
				+ (8 + 8)             // PackedObjectInfo fields
				+ 8                   // ObjectIdOwnerMap fields
				+ 40                  // AnyObjectId fields
				+ 8;                  // Reference in BlockList

		// Upper bound on memory the delta search may use, fixed at
		// construction from the configuration.
		private final long totalDeltaSearchBytes;

		// volatile: written by the packing thread, read by snapshot()
		// callers on other threads.
		private volatile PackingPhase phase;

		MutableState() {
			phase = PackingPhase.COUNTING;
			if (config.isDeltaCompress()) {
				int threads = config.getThreads();
				if (threads <= 0)
					threads = Runtime.getRuntime().availableProcessors();
				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
						+ config.getBigFileThreshold();
			} else
				totalDeltaSearchBytes = 0;
		}

		// Capture an immutable snapshot of the current phase and an
		// estimate of memory in use.
		State snapshot() {
			long objCnt = 0;
			// objectsLists may be nulled by prepareBitmapIndex(); read it
			// once into a local before dereferencing.
			BlockList<ObjectToPack>[] lists = objectsLists;
			if (lists != null) {
				objCnt += lists[OBJ_COMMIT].size();
				objCnt += lists[OBJ_TREE].size();
				objCnt += lists[OBJ_BLOB].size();
				objCnt += lists[OBJ_TAG].size();
				// Exclude CachedPacks.
			}

			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
			PackingPhase curr = phase;
			// Delta search memory only counts while compression runs.
			if (curr == PackingPhase.COMPRESSING)
				bytesUsed += totalDeltaSearchBytes;
			return new State(curr, bytesUsed);
		}
	}
2297 
2298 	/** Possible states that a PackWriter can be in. */
2299 	public static enum PackingPhase {
2300 		/** Counting objects phase. */
2301 		COUNTING,
2302 
2303 		/** Getting sizes phase. */
2304 		GETTING_SIZES,
2305 
2306 		/** Finding sources phase. */
2307 		FINDING_SOURCES,
2308 
2309 		/** Compressing objects phase. */
2310 		COMPRESSING,
2311 
2312 		/** Writing objects phase. */
2313 		WRITING,
2314 
2315 		/** Building bitmaps phase. */
2316 		BUILDING_BITMAPS;
2317 	}
2318 
2319 	/** Summary of the current state of a PackWriter. */
2320 	public class State {
2321 		private final PackingPhase phase;
2322 
2323 		private final long bytesUsed;
2324 
2325 		State(PackingPhase phase, long bytesUsed) {
2326 			this.phase = phase;
2327 			this.bytesUsed = bytesUsed;
2328 		}
2329 
2330 		/** @return the PackConfig used to build the writer. */
2331 		public PackConfig getConfig() {
2332 			return config;
2333 		}
2334 
2335 		/** @return the current phase of the writer. */
2336 		public PackingPhase getPhase() {
2337 			return phase;
2338 		}
2339 
2340 		/** @return an estimate of the total memory used by the writer. */
2341 		public long estimateBytesUsed() {
2342 			return bytesUsed;
2343 		}
2344 
2345 		@SuppressWarnings("nls")
2346 		@Override
2347 		public String toString() {
2348 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2349 		}
2350 	}
2351 }