View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.pack;
46  
47  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
48  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
49  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
50  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
51  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
52  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
53  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
54  
55  import java.io.IOException;
56  import java.io.OutputStream;
57  import java.lang.ref.WeakReference;
58  import java.security.MessageDigest;
59  import java.text.MessageFormat;
60  import java.util.ArrayList;
61  import java.util.Arrays;
62  import java.util.Collection;
63  import java.util.Collections;
64  import java.util.Comparator;
65  import java.util.HashSet;
66  import java.util.Iterator;
67  import java.util.List;
68  import java.util.Map;
69  import java.util.NoSuchElementException;
70  import java.util.Set;
71  import java.util.concurrent.ConcurrentHashMap;
72  import java.util.concurrent.ExecutionException;
73  import java.util.concurrent.Executor;
74  import java.util.concurrent.ExecutorService;
75  import java.util.concurrent.Executors;
76  import java.util.concurrent.Future;
77  import java.util.concurrent.TimeUnit;
78  import java.util.zip.CRC32;
79  import java.util.zip.CheckedOutputStream;
80  import java.util.zip.Deflater;
81  import java.util.zip.DeflaterOutputStream;
82  
83  import org.eclipse.jgit.annotations.NonNull;
84  import org.eclipse.jgit.errors.CorruptObjectException;
85  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
86  import org.eclipse.jgit.errors.LargeObjectException;
87  import org.eclipse.jgit.errors.MissingObjectException;
88  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
89  import org.eclipse.jgit.internal.JGitText;
90  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
91  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
92  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
93  import org.eclipse.jgit.lib.AnyObjectId;
94  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
95  import org.eclipse.jgit.lib.BatchingProgressMonitor;
96  import org.eclipse.jgit.lib.BitmapIndex;
97  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
98  import org.eclipse.jgit.lib.BitmapObject;
99  import org.eclipse.jgit.lib.Constants;
100 import org.eclipse.jgit.lib.NullProgressMonitor;
101 import org.eclipse.jgit.lib.ObjectId;
102 import org.eclipse.jgit.lib.ObjectIdOwnerMap;
103 import org.eclipse.jgit.lib.ObjectIdSet;
104 import org.eclipse.jgit.lib.ObjectLoader;
105 import org.eclipse.jgit.lib.ObjectReader;
106 import org.eclipse.jgit.lib.ProgressMonitor;
107 import org.eclipse.jgit.lib.Repository;
108 import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
109 import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
110 import org.eclipse.jgit.revwalk.DepthWalk;
111 import org.eclipse.jgit.revwalk.ObjectWalk;
112 import org.eclipse.jgit.revwalk.RevCommit;
113 import org.eclipse.jgit.revwalk.RevFlag;
114 import org.eclipse.jgit.revwalk.RevObject;
115 import org.eclipse.jgit.revwalk.RevSort;
116 import org.eclipse.jgit.revwalk.RevTag;
117 import org.eclipse.jgit.revwalk.RevTree;
118 import org.eclipse.jgit.storage.pack.PackConfig;
119 import org.eclipse.jgit.storage.pack.PackStatistics;
120 import org.eclipse.jgit.transport.ObjectCountCallback;
121 import org.eclipse.jgit.transport.WriteAbortedException;
122 import org.eclipse.jgit.util.BlockList;
123 import org.eclipse.jgit.util.TemporaryBuffer;
124 
125 /**
126  * <p>
127  * PackWriter class is responsible for generating pack files from specified set
128  * of objects from repository. This implementation produces pack files in format
129  * version 2.
130  * </p>
131  * <p>
132  * Source of objects may be specified in two ways:
133  * <ul>
134  * <li>(usually) by providing sets of interesting and uninteresting objects in
135  * repository - all interesting objects and their ancestors except uninteresting
136  * objects and their ancestors will be included in pack, or</li>
137  * <li>by providing an iterator of {@link RevObject} specifying the exact list and
138  * order of objects in pack</li>
139  * </ul>
140  * <p>
141  * Typical usage consists of creating an instance, configuring options,
142  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
143  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
144  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
145  * pack is being stored as a file the matching index can be written out after
146  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
147  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
148  * followed by {@link #writeBitmapIndex(OutputStream)}.
149  * </p>
150  * <p>
151  * Class provides a set of configurable options and {@link ProgressMonitor}
152  * support, as operations may take a long time for big repositories. Deltas
153  * searching algorithm is <b>NOT IMPLEMENTED</b> yet - this implementation
154  * relies only on deltas and objects reuse.
155  * </p>
156  * <p>
157  * This class is not thread safe. It is intended to be used in one thread as a
158  * single pass to produce one pack. Invoking methods multiple times or out of
159  * order is not supported as internal data structures are destroyed during
160  * certain phases to save memory when packing large repositories.
161  * </p>
162  */
163 public class PackWriter implements AutoCloseable {
	/** Version of the pack file format produced by this writer (pack v2). */
	private static final int PACK_VERSION_GENERATED = 2;

	/** Empty set of objects for {@code preparePack()}. */
	public static final Set<ObjectId> NONE = Collections.emptySet();

	// Registry of all live writers, keyed by weak reference so abandoned
	// writers can still be garbage collected; cleared entries are pruned
	// lazily while iterating (see instancesIterable). The Boolean value is
	// only a placeholder (always TRUE).
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<>();
171 
	/**
	 * Live view over {@link #instances}: iteration resolves each weak
	 * reference, yielding only writers that are still strongly reachable and
	 * removing entries whose referent has been garbage collected.
	 */
	private static final Iterable<PackWriter> instancesIterable = new Iterable<PackWriter>() {
		@Override
		public Iterator<PackWriter> iterator() {
			return new Iterator<PackWriter>() {
				private final Iterator<WeakReference<PackWriter>> it =
						instances.keySet().iterator();
				// Next live writer, pre-fetched by hasNext(); null when
				// nothing is buffered yet.
				private PackWriter next;

				@Override
				public boolean hasNext() {
					// A writer already buffered from a prior call?
					if (next != null)
						return true;
					// Scan forward to the next reference that is still
					// live, dropping cleared entries from the map.
					while (it.hasNext()) {
						WeakReference<PackWriter> ref = it.next();
						next = ref.get();
						if (next != null)
							return true;
						it.remove();
					}
					return false;
				}

				@Override
				public PackWriter next() {
					if (hasNext()) {
						PackWriter result = next;
						next = null;
						return result;
					}
					throw new NoSuchElementException();
				}

				@Override
				public void remove() {
					// Only internal pruning of dead references is allowed;
					// callers may not remove writers from the registry.
					throw new UnsupportedOperationException();
				}
			};
		}
	};
211 
	/**
	 * @return a live view of all allocated, non-released PackWriter
	 *         instances; iteration skips (and prunes) writers that have
	 *         already been garbage collected.
	 */
	public static Iterable<PackWriter> getInstances() {
		return instancesIterable;
	}
216 
	/**
	 * Objects to pack, partitioned by object type. Indexed by the
	 * {@code OBJ_*} type constants; slot 0 is never populated and stays
	 * null.
	 */
	@SuppressWarnings("unchecked")
	BlockList<ObjectToPack> objectsLists[] = new BlockList[OBJ_TAG + 1];
	{
		// Only the four real object types get a list; index 0 stays null.
		objectsLists[OBJ_COMMIT] = new BlockList<>();
		objectsLists[OBJ_TREE] = new BlockList<>();
		objectsLists[OBJ_BLOB] = new BlockList<>();
		objectsLists[OBJ_TAG] = new BlockList<>();
	}
225 
	/** Lookup map over every object added to the pack, including edges. */
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	// Cached packs appended to the output; when non-empty, index creation
	// is disabled (see isIndexDisabled()) and getObjectSet() refuses to run.
	private List<CachedPack> cachedPacks = new ArrayList<>(2);

	// Objects pointed at by annotated tags; see setTagTargets().
	private Set<ObjectId> tagTargets = Collections.emptySet();

	// Object sets excluded from the output; grown by excludeObjects().
	private ObjectIdSet[] excludeInPacks;

	// First/last-matched exclude set; presumably an MRU cache consulted
	// elsewhere — confirm against the exclusion check site.
	private ObjectIdSet excludeInPackLast;

	// Lazily allocated deflater; NOTE(review): presumably released in
	// close(), which is outside this chunk — confirm.
	private Deflater myDeflater;

	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	final PackConfig config;

	// Statistics gathered while packing.
	private final PackStatistics.Accumulator stats;

	private final MutableState state;

	// Weak self-reference registered in the static instances map.
	private final WeakReference<PackWriter> selfRef;

	private PackStatistics.ObjectType.Accumulator typeStats;

	// Objects sorted by name; built lazily (used by computeName()).
	private List<ObjectToPack> sortedByName;

	// Pack checksum; NOTE(review): presumably the SHA-1 trailer of the
	// written pack — confirm in writePack().
	private byte packcsum[];

	private boolean deltaBaseAsOffset;

	private boolean reuseDeltas;

	private boolean reuseDeltaCommits;

	private boolean reuseValidate;

	private boolean thin;

	private boolean useCachedPacks;

	private boolean useBitmaps;

	// Missing "uninteresting" objects are tolerated by default.
	private boolean ignoreMissingUninteresting = true;

	private boolean pruneCurrentObjectList;

	private boolean shallowPack;

	private boolean canBuildBitmaps;

	private boolean indexDisabled;

	// Maximum history depth; meaningful only when shallowPack is true.
	private int depth;

	// Commits formerly shallow on the client, now being extended.
	private Collection<? extends ObjectId> unshallowObjects;

	// Builder for the optional bitmap index.
	private PackBitmapIndexBuilder writeBitmaps;

	private CRC32 crc32;

	// Optional callback informed of the object count before writing.
	private ObjectCountCallback callback;
296 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Uses a default {@link PackConfig} derived from the repository's
	 * configuration. Objects for packing are specified in
	 * {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 */
	public PackWriter(final Repository repo) {
		this(repo, repo.newObjectReader());
	}
309 
	/**
	 * Create a writer to load objects from the specified reader.
	 * <p>
	 * Uses a default {@link PackConfig}. Objects for packing are specified
	 * in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(final ObjectReader reader) {
		this(new PackConfig(), reader);
	}
322 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Uses a {@link PackConfig} derived from the repository's configuration.
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(final Repository repo, final ObjectReader reader) {
		this(new PackConfig(repo), reader);
	}
337 
338 	/**
339 	 * Create writer with a specified configuration.
340 	 * <p>
341 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
342 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
343 	 *
344 	 * @param config
345 	 *            configuration for the pack writer.
346 	 * @param reader
347 	 *            reader to read from the repository with.
348 	 */
349 	public PackWriter(final PackConfig config, final ObjectReader reader) {
350 		this.config = config;
351 		this.reader = reader;
352 		if (reader instanceof ObjectReuseAsIs)
353 			reuseSupport = ((ObjectReuseAsIs) reader);
354 		else
355 			reuseSupport = null;
356 
357 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
358 		reuseDeltas = config.isReuseDeltas();
359 		reuseValidate = true; // be paranoid by default
360 		stats = new PackStatistics.Accumulator();
361 		state = new MutableState();
362 		selfRef = new WeakReference<>(this);
363 		instances.put(selfRef, Boolean.TRUE);
364 	}
365 
366 	/**
367 	 * Set the {@code ObjectCountCallback}.
368 	 * <p>
369 	 * It should be set before calling
370 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
371 	 *
372 	 * @param callback
373 	 *            the callback to set
374 	 *
375 	 * @return this object for chaining.
376 	 */
377 	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
378 		this.callback = callback;
379 		return this;
380 	}
381 
382 	/**
383 	 * Records the set of shallow commits in the client.
384 	 *
385 	 * @param clientShallowCommits
386 	 *            the shallow commits in the client
387 	 */
388 	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
389 		stats.clientShallowCommits = Collections
390 				.unmodifiableSet(new HashSet<>(clientShallowCommits));
391 	}
392 
393 	/**
394 	 * Check whether writer can store delta base as an offset (new style
395 	 * reducing pack size) or should store it as an object id (legacy style,
396 	 * compatible with old readers).
397 	 *
398 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
399 	 *
400 	 * @return true if delta base is stored as an offset; false if it is stored
401 	 *         as an object id.
402 	 */
403 	public boolean isDeltaBaseAsOffset() {
404 		return deltaBaseAsOffset;
405 	}
406 
407 	/**
408 	 * Set writer delta base format. Delta base can be written as an offset in a
409 	 * pack file (new approach reducing file size) or as an object id (legacy
410 	 * approach, compatible with old readers).
411 	 *
412 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
413 	 *
414 	 * @param deltaBaseAsOffset
415 	 *            boolean indicating whether delta base can be stored as an
416 	 *            offset.
417 	 */
418 	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
419 		this.deltaBaseAsOffset = deltaBaseAsOffset;
420 	}
421 
422 	/**
423 	 * Check if the writer will reuse commits that are already stored as deltas.
424 	 *
425 	 * @return true if the writer would reuse commits stored as deltas, assuming
426 	 *         delta reuse is already enabled.
427 	 */
428 	public boolean isReuseDeltaCommits() {
429 		return reuseDeltaCommits;
430 	}
431 
432 	/**
433 	 * Set the writer to reuse existing delta versions of commits.
434 	 *
435 	 * @param reuse
436 	 *            if true, the writer will reuse any commits stored as deltas.
437 	 *            By default the writer does not reuse delta commits.
438 	 */
439 	public void setReuseDeltaCommits(boolean reuse) {
440 		reuseDeltaCommits = reuse;
441 	}
442 
443 	/**
444 	 * Check if the writer validates objects before copying them.
445 	 *
446 	 * @return true if validation is enabled; false if the reader will handle
447 	 *         object validation as a side-effect of it consuming the output.
448 	 */
449 	public boolean isReuseValidatingObjects() {
450 		return reuseValidate;
451 	}
452 
453 	/**
454 	 * Enable (or disable) object validation during packing.
455 	 *
456 	 * @param validate
457 	 *            if true the pack writer will validate an object before it is
458 	 *            put into the output. This additional validation work may be
459 	 *            necessary to avoid propagating corruption from one local pack
460 	 *            file to another local pack file.
461 	 */
462 	public void setReuseValidatingObjects(boolean validate) {
463 		reuseValidate = validate;
464 	}
465 
466 	/** @return true if this writer is producing a thin pack. */
467 	public boolean isThin() {
468 		return thin;
469 	}
470 
471 	/**
472 	 * @param packthin
473 	 *            a boolean indicating whether writer may pack objects with
474 	 *            delta base object not within set of objects to pack, but
475 	 *            belonging to party repository (uninteresting/boundary) as
476 	 *            determined by set; this kind of pack is used only for
477 	 *            transport; true - to produce thin pack, false - otherwise.
478 	 */
479 	public void setThin(final boolean packthin) {
480 		thin = packthin;
481 	}
482 
483 	/** @return true to reuse cached packs. If true index creation isn't available. */
484 	public boolean isUseCachedPacks() {
485 		return useCachedPacks;
486 	}
487 
488 	/**
489 	 * @param useCached
490 	 *            if set to true and a cached pack is present, it will be
491 	 *            appended onto the end of a thin-pack, reducing the amount of
492 	 *            working set space and CPU used by PackWriter. Enabling this
493 	 *            feature prevents PackWriter from creating an index for the
494 	 *            newly created pack, so its only suitable for writing to a
495 	 *            network client, where the client will make the index.
496 	 */
497 	public void setUseCachedPacks(boolean useCached) {
498 		useCachedPacks = useCached;
499 	}
500 
501 	/** @return true to use bitmaps for ObjectWalks, if available. */
502 	public boolean isUseBitmaps() {
503 		return useBitmaps;
504 	}
505 
506 	/**
507 	 * @param useBitmaps
508 	 *            if set to true, bitmaps will be used when preparing a pack.
509 	 */
510 	public void setUseBitmaps(boolean useBitmaps) {
511 		this.useBitmaps = useBitmaps;
512 	}
513 
514 	/** @return true if the index file cannot be created by this PackWriter. */
515 	public boolean isIndexDisabled() {
516 		return indexDisabled || !cachedPacks.isEmpty();
517 	}
518 
519 	/**
520 	 * @param noIndex
521 	 *            true to disable creation of the index file.
522 	 */
523 	public void setIndexDisabled(boolean noIndex) {
524 		this.indexDisabled = noIndex;
525 	}
526 
527 	/**
528 	 * @return true to ignore objects that are uninteresting and also not found
529 	 *         on local disk; false to throw a {@link MissingObjectException}
530 	 *         out of {@link #preparePack(ProgressMonitor, Set, Set)} if an
531 	 *         uninteresting object is not in the source repository. By default,
532 	 *         true, permitting gracefully ignoring of uninteresting objects.
533 	 */
534 	public boolean isIgnoreMissingUninteresting() {
535 		return ignoreMissingUninteresting;
536 	}
537 
538 	/**
539 	 * @param ignore
540 	 *            true if writer should ignore non existing uninteresting
541 	 *            objects during construction set of objects to pack; false
542 	 *            otherwise - non existing uninteresting objects may cause
543 	 *            {@link MissingObjectException}
544 	 */
545 	public void setIgnoreMissingUninteresting(final boolean ignore) {
546 		ignoreMissingUninteresting = ignore;
547 	}
548 
549 	/**
550 	 * Set the tag targets that should be hoisted earlier during packing.
551 	 * <p>
552 	 * Callers may put objects into this set before invoking any of the
553 	 * preparePack methods to influence where an annotated tag's target is
554 	 * stored within the resulting pack. Typically these will be clustered
555 	 * together, and hoisted earlier in the file even if they are ancient
556 	 * revisions, allowing readers to find tag targets with better locality.
557 	 *
558 	 * @param objects
559 	 *            objects that annotated tags point at.
560 	 */
561 	public void setTagTargets(Set<ObjectId> objects) {
562 		tagTargets = objects;
563 	}
564 
565 	/**
566 	 * Configure this pack for a shallow clone.
567 	 *
568 	 * @param depth
569 	 *            maximum depth of history to return. 1 means return only the
570 	 *            "wants".
571 	 * @param unshallow
572 	 *            objects which used to be shallow on the client, but are being
573 	 *            extended as part of this fetch
574 	 */
575 	public void setShallowPack(int depth,
576 			Collection<? extends ObjectId> unshallow) {
577 		this.shallowPack = true;
578 		this.depth = depth;
579 		this.unshallowObjects = unshallow;
580 	}
581 
582 	/**
583 	 * Returns objects number in a pack file that was created by this writer.
584 	 *
585 	 * @return number of objects in pack.
586 	 * @throws IOException
587 	 *             a cached pack cannot supply its object count.
588 	 */
589 	public long getObjectCount() throws IOException {
590 		if (stats.totalObjects == 0) {
591 			long objCnt = 0;
592 
593 			objCnt += objectsLists[OBJ_COMMIT].size();
594 			objCnt += objectsLists[OBJ_TREE].size();
595 			objCnt += objectsLists[OBJ_BLOB].size();
596 			objCnt += objectsLists[OBJ_TAG].size();
597 
598 			for (CachedPack pack : cachedPacks)
599 				objCnt += pack.getObjectCount();
600 			return objCnt;
601 		}
602 		return stats.totalObjects;
603 	}
604 
605 	/**
606 	 * Returns the object ids in the pack file that was created by this writer.
607 	 * <p>
608 	 * This method can only be invoked after
609 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
610 	 * been invoked and completed successfully.
611 	 *
612 	 * @return set of objects in pack.
613 	 * @throws IOException
614 	 *             a cached pack cannot supply its object ids.
615 	 */
616 	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
617 			throws IOException {
618 		if (!cachedPacks.isEmpty())
619 			throw new IOException(
620 					JGitText.get().cachedPacksPreventsListingObjects);
621 
622 		if (writeBitmaps != null) {
623 			return writeBitmaps.getObjectSet();
624 		}
625 
626 		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
627 		for (BlockList<ObjectToPack> objList : objectsLists) {
628 			if (objList != null) {
629 				for (ObjectToPack otp : objList)
630 					r.add(new ObjectIdOwnerMap.Entry(otp) {
631 						// A new entry that copies the ObjectId
632 					});
633 			}
634 		}
635 		return r;
636 	}
637 
638 	/**
639 	 * Add a pack index whose contents should be excluded from the result.
640 	 *
641 	 * @param idx
642 	 *            objects in this index will not be in the output pack.
643 	 */
644 	public void excludeObjects(ObjectIdSet idx) {
645 		if (excludeInPacks == null) {
646 			excludeInPacks = new ObjectIdSet[] { idx };
647 			excludeInPackLast = idx;
648 		} else {
649 			int cnt = excludeInPacks.length;
650 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
651 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
652 			newList[cnt] = idx;
653 			excludeInPacks = newList;
654 		}
655 	}
656 
657 	/**
658 	 * Prepare the list of objects to be written to the pack stream.
659 	 * <p>
660 	 * Iterator <b>exactly</b> determines which objects are included in a pack
661 	 * and order they appear in pack (except that objects order by type is not
662 	 * needed at input). This order should conform general rules of ordering
663 	 * objects in git - by recency and path (type and delta-base first is
664 	 * internally secured) and responsibility for guaranteeing this order is on
665 	 * a caller side. Iterator must return each id of object to write exactly
666 	 * once.
667 	 * </p>
668 	 *
669 	 * @param objectsSource
670 	 *            iterator of object to store in a pack; order of objects within
671 	 *            each type is important, ordering by type is not needed;
672 	 *            allowed types for objects are {@link Constants#OBJ_COMMIT},
673 	 *            {@link Constants#OBJ_TREE}, {@link Constants#OBJ_BLOB} and
674 	 *            {@link Constants#OBJ_TAG}; objects returned by iterator may be
675 	 *            later reused by caller as object id and type are internally
676 	 *            copied in each iteration.
677 	 * @throws IOException
678 	 *             when some I/O problem occur during reading objects.
679 	 */
680 	public void preparePack(@NonNull Iterator<RevObject> objectsSource)
681 			throws IOException {
682 		while (objectsSource.hasNext()) {
683 			addObject(objectsSource.next());
684 		}
685 	}
686 
687 	/**
688 	 * Prepare the list of objects to be written to the pack stream.
689 	 * <p>
690 	 * Basing on these 2 sets, another set of objects to put in a pack file is
691 	 * created: this set consists of all objects reachable (ancestors) from
692 	 * interesting objects, except uninteresting objects and their ancestors.
693 	 * This method uses class {@link ObjectWalk} extensively to find out that
694 	 * appropriate set of output objects and their optimal order in output pack.
695 	 * Order is consistent with general git in-pack rules: sort by object type,
696 	 * recency, path and delta-base first.
697 	 * </p>
698 	 *
699 	 * @param countingMonitor
700 	 *            progress during object enumeration.
701 	 * @param want
702 	 *            collection of objects to be marked as interesting (start
703 	 *            points of graph traversal). Must not be {@code null}.
704 	 * @param have
705 	 *            collection of objects to be marked as uninteresting (end
706 	 *            points of graph traversal). Pass {@link #NONE} if all objects
707 	 *            reachable from {@code want} are desired, such as when serving
708 	 *            a clone.
709 	 * @throws IOException
710 	 *             when some I/O problem occur during reading objects.
711 	 */
712 	public void preparePack(ProgressMonitor countingMonitor,
713 			@NonNull Set<? extends ObjectId> want,
714 			@NonNull Set<? extends ObjectId> have) throws IOException {
715 		preparePack(countingMonitor,
716 				want, have, Collections.<ObjectId> emptySet());
717 	}
718 
719 	/**
720 	 * Prepare the list of objects to be written to the pack stream.
721 	 * <p>
722 	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
723 	 * specifying commits that should not be walked past ("shallow" commits).
724 	 * The caller is responsible for filtering out commits that should not
725 	 * be shallow any more ("unshallow" commits as in {@link #setShallowPack})
726 	 * from the shallow set.
727 	 *
728 	 * @param countingMonitor
729 	 *            progress during object enumeration.
730 	 * @param want
731 	 *            objects of interest, ancestors of which will be included in
732 	 *            the pack. Must not be {@code null}.
733 	 * @param have
734 	 *            objects whose ancestors (up to and including
735 	 *            {@code shallow} commits) do not need to be included in the
736 	 *            pack because they are already available from elsewhere.
737 	 *            Must not be {@code null}.
738 	 * @param shallow
739 	 *            commits indicating the boundary of the history marked with
740 	 *            {@code have}. Shallow commits have parents but those
741 	 *            parents are considered not to be already available.
742 	 *            Parents of {@code shallow} commits and earlier generations
743 	 *            will be included in the pack if requested by {@code want}.
744 	 *            Must not be {@code null}.
745 	 * @throws IOException
746 	 *            an I/O problem occured while reading objects.
747 	 */
748 	public void preparePack(ProgressMonitor countingMonitor,
749 			@NonNull Set<? extends ObjectId> want,
750 			@NonNull Set<? extends ObjectId> have,
751 			@NonNull Set<? extends ObjectId> shallow) throws IOException {
752 		try (ObjectWalk ow = getObjectWalk()) {
753 			ow.assumeShallow(shallow);
754 			preparePack(countingMonitor, ow, want, have);
755 		}
756 	}
757 
758 	private ObjectWalk getObjectWalk() {
759 		return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
760 				: new ObjectWalk(reader);
761 	}
762 
763 	/**
764 	 * Prepare the list of objects to be written to the pack stream.
765 	 * <p>
766 	 * Basing on these 2 sets, another set of objects to put in a pack file is
767 	 * created: this set consists of all objects reachable (ancestors) from
768 	 * interesting objects, except uninteresting objects and their ancestors.
769 	 * This method uses class {@link ObjectWalk} extensively to find out that
770 	 * appropriate set of output objects and their optimal order in output pack.
771 	 * Order is consistent with general git in-pack rules: sort by object type,
772 	 * recency, path and delta-base first.
773 	 * </p>
774 	 *
775 	 * @param countingMonitor
776 	 *            progress during object enumeration.
777 	 * @param walk
778 	 *            ObjectWalk to perform enumeration.
779 	 * @param interestingObjects
780 	 *            collection of objects to be marked as interesting (start
781 	 *            points of graph traversal). Must not be {@code null}.
782 	 * @param uninterestingObjects
783 	 *            collection of objects to be marked as uninteresting (end
784 	 *            points of graph traversal). Pass {@link #NONE} if all objects
785 	 *            reachable from {@code want} are desired, such as when serving
786 	 *            a clone.
787 	 * @throws IOException
788 	 *             when some I/O problem occur during reading objects.
789 	 */
790 	public void preparePack(ProgressMonitor countingMonitor,
791 			@NonNull ObjectWalk walk,
792 			@NonNull Set<? extends ObjectId> interestingObjects,
793 			@NonNull Set<? extends ObjectId> uninterestingObjects)
794 			throws IOException {
795 		if (countingMonitor == null)
796 			countingMonitor = NullProgressMonitor.INSTANCE;
797 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
798 			throw new IllegalArgumentException(
799 					JGitText.get().shallowPacksRequireDepthWalk);
800 		findObjectsToPack(countingMonitor, walk, interestingObjects,
801 				uninterestingObjects);
802 	}
803 
804 	/**
805 	 * Determine if the pack file will contain the requested object.
806 	 *
807 	 * @param id
808 	 *            the object to test the existence of.
809 	 * @return true if the object will appear in the output pack file.
810 	 * @throws IOException
811 	 *             a cached pack cannot be examined.
812 	 */
813 	public boolean willInclude(final AnyObjectId id) throws IOException {
814 		ObjectToPack obj = objectsMap.get(id);
815 		return obj != null && !obj.isEdge();
816 	}
817 
818 	/**
819 	 * Lookup the ObjectToPack object for a given ObjectId.
820 	 *
821 	 * @param id
822 	 *            the object to find in the pack.
823 	 * @return the object we are packing, or null.
824 	 */
825 	public ObjectToPack get(AnyObjectId id) {
826 		ObjectToPack obj = objectsMap.get(id);
827 		return obj != null && !obj.isEdge() ? obj : null;
828 	}
829 
830 	/**
831 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
832 	 * pack, as used to name a pack file in repository.
833 	 *
834 	 * @return ObjectId representing SHA-1 name of a pack that was created.
835 	 */
836 	public ObjectId computeName() {
837 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
838 		final MessageDigest md = Constants.newMessageDigest();
839 		for (ObjectToPack otp : sortByName()) {
840 			otp.copyRawTo(buf, 0);
841 			md.update(buf, 0, OBJECT_ID_LENGTH);
842 		}
843 		return ObjectId.fromRaw(md.digest());
844 	}
845 
846 	/**
847 	 * Returns the index format version that will be written.
848 	 * <p>
849 	 * This method can only be invoked after
850 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
851 	 * been invoked and completed successfully.
852 	 *
853 	 * @return the index format version.
854 	 */
855 	public int getIndexVersion() {
856 		int indexVersion = config.getIndexVersion();
857 		if (indexVersion <= 0) {
858 			for (BlockList<ObjectToPack> objs : objectsLists)
859 				indexVersion = Math.max(indexVersion,
860 						PackIndexWriter.oldestPossibleFormat(objs));
861 		}
862 		return indexVersion;
863 	}
864 
865 	/**
866 	 * Create an index file to match the pack file just written.
867 	 * <p>
868 	 * Called after
869 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
870 	 * <p>
871 	 * Writing an index is only required for local pack storage. Packs sent on
872 	 * the network do not need to create an index.
873 	 *
874 	 * @param indexStream
875 	 *            output for the index data. Caller is responsible for closing
876 	 *            this stream.
877 	 * @throws IOException
878 	 *             the index data could not be written to the supplied stream.
879 	 */
880 	public void writeIndex(final OutputStream indexStream) throws IOException {
881 		if (isIndexDisabled())
882 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
883 
884 		long writeStart = System.currentTimeMillis();
885 		final PackIndexWriter iw = PackIndexWriter.createVersion(
886 				indexStream, getIndexVersion());
887 		iw.write(sortByName(), packcsum);
888 		stats.timeWriting += System.currentTimeMillis() - writeStart;
889 	}
890 
891 	/**
892 	 * Create a bitmap index file to match the pack file just written.
893 	 * <p>
894 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
895 	 *
896 	 * @param bitmapIndexStream
897 	 *            output for the bitmap index data. Caller is responsible for
898 	 *            closing this stream.
899 	 * @throws IOException
900 	 *             the index data could not be written to the supplied stream.
901 	 */
902 	public void writeBitmapIndex(final OutputStream bitmapIndexStream)
903 			throws IOException {
904 		if (writeBitmaps == null)
905 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
906 
907 		long writeStart = System.currentTimeMillis();
908 		final PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
909 		iw.write(writeBitmaps, packcsum);
910 		stats.timeWriting += System.currentTimeMillis() - writeStart;
911 	}
912 
913 	private List<ObjectToPack> sortByName() {
914 		if (sortedByName == null) {
915 			int cnt = 0;
916 			cnt += objectsLists[OBJ_COMMIT].size();
917 			cnt += objectsLists[OBJ_TREE].size();
918 			cnt += objectsLists[OBJ_BLOB].size();
919 			cnt += objectsLists[OBJ_TAG].size();
920 
921 			sortedByName = new BlockList<>(cnt);
922 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
923 			sortedByName.addAll(objectsLists[OBJ_TREE]);
924 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
925 			sortedByName.addAll(objectsLists[OBJ_TAG]);
926 			Collections.sort(sortedByName);
927 		}
928 		return sortedByName;
929 	}
930 
931 	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
932 			long cnt) {
933 		state.phase = phase;
934 		String task;
935 		switch (phase) {
936 		case COUNTING:
937 			task = JGitText.get().countingObjects;
938 			break;
939 		case GETTING_SIZES:
940 			task = JGitText.get().searchForSizes;
941 			break;
942 		case FINDING_SOURCES:
943 			task = JGitText.get().searchForReuse;
944 			break;
945 		case COMPRESSING:
946 			task = JGitText.get().compressingObjects;
947 			break;
948 		case WRITING:
949 			task = JGitText.get().writingObjects;
950 			break;
951 		case BUILDING_BITMAPS:
952 			task = JGitText.get().buildingBitmaps;
953 			break;
954 		default:
955 			throw new IllegalArgumentException(
956 					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
957 		}
958 		monitor.beginTask(task, (int) cnt);
959 	}
960 
	/**
	 * Finish the task most recently started via
	 * {@code beginPhase(PackingPhase, ProgressMonitor, long)}.
	 *
	 * @param monitor
	 *            the monitor whose current task is completed.
	 */
	private void endPhase(ProgressMonitor monitor) {
		monitor.endTask();
	}
964 
965 	/**
966 	 * Write the prepared pack to the supplied stream.
967 	 * <p>
968 	 * Called after {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set)}
969 	 * or {@link #preparePack(ProgressMonitor, Set, Set)}.
970 	 * <p>
971 	 * Performs delta search if enabled and writes the pack stream.
972 	 * <p>
973 	 * All reused objects data checksum (Adler32/CRC32) is computed and
974 	 * validated against existing checksum.
975 	 *
976 	 * @param compressMonitor
977 	 *            progress monitor to report object compression work.
978 	 * @param writeMonitor
979 	 *            progress monitor to report the number of objects written.
980 	 * @param packStream
981 	 *            output stream of pack data. The stream should be buffered by
982 	 *            the caller. The caller is responsible for closing the stream.
983 	 * @throws IOException
984 	 *             an error occurred reading a local object's data to include in
985 	 *             the pack, or writing compressed object data to the output
986 	 *             stream.
	 * @throws WriteAbortedException
	 *             the write operation is aborted by
	 *             {@link ObjectCountCallback}.
990 	 */
	public void writePack(ProgressMonitor compressMonitor,
			ProgressMonitor writeMonitor, OutputStream packStream)
			throws IOException {
		// Null monitors are replaced by no-op instances.
		if (compressMonitor == null)
			compressMonitor = NullProgressMonitor.INSTANCE;
		if (writeMonitor == null)
			writeMonitor = NullProgressMonitor.INSTANCE;

		excludeInPacks = null;
		excludeInPackLast = null;

		// Reuse search only applies when a reuse provider is available and
		// some form of reuse (deltas, whole objects, cached packs) is on.
		boolean needSearchForReuse = reuseSupport != null && (
				   reuseDeltas
				|| config.isReuseObjects()
				|| !cachedPacks.isEmpty());

		if (compressMonitor instanceof BatchingProgressMonitor) {
			// Delay initial output; use a shorter delay when both reuse
			// search and delta compression will run back to back.
			long delay = 1000;
			if (needSearchForReuse && config.isDeltaCompress())
				delay = 500;
			((BatchingProgressMonitor) compressMonitor).setDelayStart(
					delay,
					TimeUnit.MILLISECONDS);
		}

		if (needSearchForReuse)
			searchForReuse(compressMonitor);
		if (config.isDeltaCompress())
			searchForDeltas(compressMonitor);

		// When an index will be written the pack bytes also flow through a
		// CRC32 checksum; otherwise write straight to the caller's stream.
		crc32 = new CRC32();
		final PackOutputStream out = new PackOutputStream(
			writeMonitor,
			isIndexDisabled()
				? packStream
				: new CheckedOutputStream(packStream, crc32),
			this);

		long objCnt = getObjectCount();
		stats.totalObjects = objCnt;
		if (callback != null)
			callback.setObjectCount(objCnt);
		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
		long writeStart = System.currentTimeMillis();
		try {
			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
			out.flush();

			writeObjects(out);
			// A thin pack (edges present) or appended cached packs means
			// the bytes written so far are the "thin" portion of the pack.
			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
					if (typeStat == null)
						continue;
					stats.thinPackBytes += typeStat.bytes;
				}
			}

			// Append cached packs verbatim and fold their counts in.
			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
			for (CachedPack pack : cachedPacks) {
				long deltaCnt = pack.getDeltaCount();
				stats.reusedObjects += pack.getObjectCount();
				stats.reusedDeltas += deltaCnt;
				stats.totalDeltas += deltaCnt;
				reuseSupport.copyPackAsIs(out, pack);
			}
			writeChecksum(out);
			out.flush();
		} finally {
			// Record timing and roll per-type counters into the totals
			// even when writing aborts part way through.
			stats.timeWriting = System.currentTimeMillis() - writeStart;
			stats.depth = depth;

			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
				if (typeStat == null)
					continue;
				typeStat.cntDeltas += typeStat.reusedDeltas;
				stats.reusedObjects += typeStat.reusedObjects;
				stats.reusedDeltas += typeStat.reusedDeltas;
				stats.totalDeltas += typeStat.cntDeltas;
			}
		}

		stats.totalBytes = out.length();
		reader.close();
		endPhase(writeMonitor);
	}
1076 
1077 	/**
1078 	 * @return description of what this PackWriter did in order to create the
1079 	 *         final pack stream. This should only be invoked after the calls to
1080 	 *         create the pack/index/bitmap have completed.
1081 	 */
	public PackStatistics getStatistics() {
		// Wrap the accumulated statistics in a fresh PackStatistics for
		// the caller rather than exposing the internal accumulator.
		return new PackStatistics(stats);
	}
1085 
1086 	/** @return snapshot of the current state of this PackWriter. */
	public State getState() {
		// Snapshot so the returned State does not track later changes.
		return state.snapshot();
	}
1090 
1091 	/**
1092 	 * Release all resources used by this writer.
1093 	 */
1094 	@Override
1095 	public void close() {
1096 		reader.close();
1097 		if (myDeflater != null) {
1098 			myDeflater.end();
1099 			myDeflater = null;
1100 		}
1101 		instances.remove(selfRef);
1102 	}
1103 
1104 	private void searchForReuse(ProgressMonitor monitor) throws IOException {
1105 		long cnt = 0;
1106 		cnt += objectsLists[OBJ_COMMIT].size();
1107 		cnt += objectsLists[OBJ_TREE].size();
1108 		cnt += objectsLists[OBJ_BLOB].size();
1109 		cnt += objectsLists[OBJ_TAG].size();
1110 
1111 		long start = System.currentTimeMillis();
1112 		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
1113 		if (cnt <= 4096) {
1114 			// For small object counts, do everything as one list.
1115 			BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
1116 			tmp.addAll(objectsLists[OBJ_TAG]);
1117 			tmp.addAll(objectsLists[OBJ_COMMIT]);
1118 			tmp.addAll(objectsLists[OBJ_TREE]);
1119 			tmp.addAll(objectsLists[OBJ_BLOB]);
1120 			searchForReuse(monitor, tmp);
1121 			if (pruneCurrentObjectList) {
1122 				// If the list was pruned, we need to re-prune the main lists.
1123 				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
1124 				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
1125 				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
1126 				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
1127 			}
1128 		} else {
1129 			searchForReuse(monitor, objectsLists[OBJ_TAG]);
1130 			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
1131 			searchForReuse(monitor, objectsLists[OBJ_TREE]);
1132 			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
1133 		}
1134 		endPhase(monitor);
1135 		stats.timeSearchingForReuse = System.currentTimeMillis() - start;
1136 
1137 		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
1138 			cutDeltaChains(objectsLists[OBJ_TREE]);
1139 			cutDeltaChains(objectsLists[OBJ_BLOB]);
1140 		}
1141 	}
1142 
1143 	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
1144 			throws IOException, MissingObjectException {
1145 		pruneCurrentObjectList = false;
1146 		reuseSupport.selectObjectRepresentation(this, monitor, list);
1147 		if (pruneCurrentObjectList)
1148 			pruneEdgesFromObjectList(list);
1149 	}
1150 
	/**
	 * Limit reused delta chains to the configured maximum depth.
	 * <p>
	 * Walks each object's base chain, tracking depth in the bases'
	 * chain-length fields; any base found at or beyond the maximum depth
	 * that is itself a delta is re-selected as a non-delta.
	 *
	 * @param list
	 *            objects whose delta chains should be examined.
	 * @throws IOException
	 *             re-selecting a non-delta representation failed.
	 */
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				// A base already marked with a longer chain was reached
				// through a deeper path before; no need to continue.
				if (d < b.getChainLength())
					break;
				b.setChainLength(++d);
				if (d >= max && b.isDeltaRepresentation()) {
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		// Chain lengths double as state for the later delta search; clear
		// them when delta compression will run.
		if (config.isDeltaCompress()) {
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1173 
	/**
	 * Select delta candidates, size them, order them, and run the search.
	 *
	 * @param monitor
	 *            progress feedback for the sizing and compressing phases.
	 * @throws MissingObjectException
	 *             a required object was not found by the reader.
	 * @throws IncorrectObjectTypeException
	 *             an object had an unexpected type.
	 * @throws IOException
	 *             object data could not be read.
	 */
	private void searchForDeltas(ProgressMonitor monitor)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// Commits and annotated tags tend to have too many differences to
		// really benefit from delta compression. Consequently just don't
		// bother examining those types here.
		//
		ObjectToPack[] list = new ObjectToPack[
				  objectsLists[OBJ_TREE].size()
				+ objectsLists[OBJ_BLOB].size()
				+ edgeObjects.size()];
		int cnt = 0;
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
		if (cnt == 0)
			return;
		int nonEdgeCnt = cnt;

		// Queue up any edge objects that we might delta against.  We won't
		// be sending these as we assume the other side has them, but we need
		// them in the search phase below.
		//
		for (ObjectToPack eo : edgeObjects) {
			eo.setWeight(0);
			list[cnt++] = eo;
		}

		// Compute the sizes of the objects so we can do a proper sort.
		// We let the reader skip missing objects if it chooses. For
		// some readers this can be a huge win. We detect missing objects
		// by having set the weights above to 0 and allowing the delta
		// search code to discover the missing object and skip over it, or
		// abort with an exception if we actually had to have it.
		//
		final long sizingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
		try {
			final long limit = Math.min(
					config.getBigFileThreshold(),
					Integer.MAX_VALUE);
			for (;;) {
				try {
					if (!sizeQueue.next())
						break;
				} catch (MissingObjectException notFound) {
					monitor.update(1);
					// Missing edge objects are tolerated when configured:
					// mark them do-not-delta instead of failing.
					if (ignoreMissingUninteresting) {
						ObjectToPack otp = sizeQueue.getCurrent();
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}

						otp = objectsMap.get(notFound.getObjectId());
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}
					}
					throw notFound;
				}

				ObjectToPack otp = sizeQueue.getCurrent();
				if (otp == null)
					otp = objectsMap.get(sizeQueue.getObjectId());

				// Objects outside (BLKSZ, limit) are excluded from deltas:
				// too small to gain, or too big to index.
				long sz = sizeQueue.getSize();
				if (DeltaIndex.BLKSZ < sz && sz < limit)
					otp.setWeight((int) sz);
				else
					otp.setDoNotDelta(); // too small, or too big
				monitor.update(1);
			}
		} finally {
			sizeQueue.release();
		}
		endPhase(monitor);
		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;

		// Sort the objects by path hash so like files are near each other,
		// and then by size descending so that bigger files are first. This
		// applies "Linus' Law" which states that newer files tend to be the
		// bigger ones, because source files grow and hardly ever shrink.
		//
		Arrays.sort(list, 0, cnt, new Comparator<ObjectToPack>() {
			@Override
			public int compare(ObjectToPack a, ObjectToPack b) {
				// do-not-delta objects sort last so they can be trimmed.
				int cmp = (a.isDoNotDelta() ? 1 : 0)
						- (b.isDoNotDelta() ? 1 : 0);
				if (cmp != 0)
					return cmp;

				cmp = a.getType() - b.getType();
				if (cmp != 0)
					return cmp;

				// The unsigned shift keeps both operands non-negative, so
				// this subtraction cannot overflow.
				cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
				if (cmp != 0)
					return cmp;

				cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
				if (cmp != 0)
					return cmp;

				// Edge objects sort before non-edges within a group.
				cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
				if (cmp != 0)
					return cmp;

				return b.getWeight() - a.getWeight();
			}
		});

		// Above we stored the objects we cannot delta onto the end.
		// Remove them from the list so we don't waste time on them.
		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
			if (!list[cnt - 1].isEdge())
				nonEdgeCnt--;
			cnt--;
		}
		if (cnt == 0)
			return;

		final long searchStart = System.currentTimeMillis();
		searchForDeltas(monitor, list, cnt);
		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
		stats.timeCompressing = System.currentTimeMillis() - searchStart;

		for (int i = 0; i < cnt; i++)
			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
				stats.deltasFound++;
	}
1307 
1308 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1309 		for (ObjectToPack otp : objectsLists[type]) {
1310 			if (otp.isDoNotDelta()) // delta is disabled for this path
1311 				continue;
1312 			if (otp.isDeltaRepresentation()) // already reusing a delta
1313 				continue;
1314 			otp.setWeight(0);
1315 			list[cnt++] = otp;
1316 		}
1317 		return cnt;
1318 	}
1319 
1320 	private void reselectNonDelta(ObjectToPack otp) throws IOException {
1321 		otp.clearDeltaBase();
1322 		otp.clearReuseAsIs();
1323 		boolean old = reuseDeltas;
1324 		reuseDeltas = false;
1325 		reuseSupport.selectObjectRepresentation(this,
1326 				NullProgressMonitor.INSTANCE,
1327 				Collections.singleton(otp));
1328 		reuseDeltas = old;
1329 	}
1330 
1331 	private void searchForDeltas(final ProgressMonitor monitor,
1332 			final ObjectToPack[] list, final int cnt)
1333 			throws MissingObjectException, IncorrectObjectTypeException,
1334 			LargeObjectException, IOException {
1335 		int threads = config.getThreads();
1336 		if (threads == 0)
1337 			threads = Runtime.getRuntime().availableProcessors();
1338 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1339 			singleThreadDeltaSearch(monitor, list, cnt);
1340 		else
1341 			parallelDeltaSearch(monitor, list, cnt, threads);
1342 	}
1343 
1344 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1345 			ObjectToPack[] list, int cnt) throws IOException {
1346 		long totalWeight = 0;
1347 		for (int i = 0; i < cnt; i++) {
1348 			ObjectToPack o = list[i];
1349 			totalWeight += DeltaTask.getAdjustedWeight(o);
1350 		}
1351 
1352 		long bytesPerUnit = 1;
1353 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1354 			bytesPerUnit <<= 10;
1355 		int cost = (int) (totalWeight / bytesPerUnit);
1356 		if (totalWeight % bytesPerUnit != 0)
1357 			cost++;
1358 
1359 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1360 		new DeltaWindow(config, new DeltaCache(config), reader,
1361 				monitor, bytesPerUnit,
1362 				list, 0, cnt).search();
1363 		endPhase(monitor);
1364 	}
1365 
	/**
	 * Run the delta search across multiple threads.
	 * <p>
	 * Tasks are executed on the configured executor when one is supplied;
	 * otherwise a temporary fixed thread pool is created and torn down
	 * here. Failures from worker threads are collected and re-thrown as if
	 * the search had been single-threaded.
	 *
	 * @param monitor
	 *            progress feedback for the compressing phase.
	 * @param list
	 *            candidate objects; only the first {@code cnt} are used.
	 * @param cnt
	 *            number of candidates in {@code list}.
	 * @param threads
	 *            number of worker threads to partition work for.
	 * @throws IOException
	 *             a worker failed, or the search was interrupted.
	 */
	private void parallelDeltaSearch(ProgressMonitor monitor,
			ObjectToPack[] list, int cnt, int threads) throws IOException {
		DeltaCache dc = new ThreadSafeDeltaCache(config);
		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
				reader, dc, pm,
				list, 0, cnt);
		taskBlock.partitionTasks();
		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
		pm.startWorkers(taskBlock.tasks.size());

		Executor executor = config.getExecutor();
		final List<Throwable> errors =
				Collections.synchronizedList(new ArrayList<Throwable>(threads));
		if (executor instanceof ExecutorService) {
			// Caller supplied us a service, use it directly.
			runTasks((ExecutorService) executor, pm, taskBlock, errors);
		} else if (executor == null) {
			// Caller didn't give us a way to run the tasks, spawn up a
			// temporary thread pool and make sure it tears down cleanly.
			ExecutorService pool = Executors.newFixedThreadPool(threads);
			try {
				runTasks(pool, pm, taskBlock, errors);
			} finally {
				pool.shutdown();
				for (;;) {
					try {
						if (pool.awaitTermination(60, TimeUnit.SECONDS))
							break;
					} catch (InterruptedException e) {
						// NOTE(review): the interrupt status is dropped
						// here; consider Thread.currentThread().interrupt()
						// and chaining 'e' as the cause.
						throw new IOException(
								JGitText.get().packingCancelledDuringObjectsWriting);
					}
				}
			}
		} else {
			// The caller gave us an executor, but it might not do
			// asynchronous execution.  Wrap everything and hope it
			// can schedule these for us.
			for (final DeltaTask task : taskBlock.tasks) {
				executor.execute(new Runnable() {
					@Override
					public void run() {
						try {
							task.call();
						} catch (Throwable failure) {
							errors.add(failure);
						}
					}
				});
			}
			try {
				pm.waitForCompletion();
			} catch (InterruptedException ie) {
				// We can't abort the other tasks as we have no handle.
				// Cross our fingers and just break out anyway.
				//
				throw new IOException(
						JGitText.get().packingCancelledDuringObjectsWriting);
			}
		}

		// If any task threw an error, try to report it back as
		// though we weren't using a threaded search algorithm.
		//
		if (!errors.isEmpty()) {
			Throwable err = errors.get(0);
			if (err instanceof Error)
				throw (Error) err;
			if (err instanceof RuntimeException)
				throw (RuntimeException) err;
			if (err instanceof IOException)
				throw (IOException) err;

			IOException fail = new IOException(err.getMessage());
			fail.initCause(err);
			throw fail;
		}
		endPhase(monitor);
	}
1446 
1447 	private static void runTasks(ExecutorService pool,
1448 			ThreadSafeProgressMonitor pm,
1449 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1450 		List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
1451 		for (DeltaTask task : tb.tasks)
1452 			futures.add(pool.submit(task));
1453 
1454 		try {
1455 			pm.waitForCompletion();
1456 			for (Future<?> f : futures) {
1457 				try {
1458 					f.get();
1459 				} catch (ExecutionException failed) {
1460 					errors.add(failed.getCause());
1461 				}
1462 			}
1463 		} catch (InterruptedException ie) {
1464 			for (Future<?> f : futures)
1465 				f.cancel(true);
1466 			throw new IOException(
1467 					JGitText.get().packingCancelledDuringObjectsWriting);
1468 		}
1469 	}
1470 
1471 	private void writeObjects(PackOutputStream out) throws IOException {
1472 		writeObjects(out, objectsLists[OBJ_COMMIT]);
1473 		writeObjects(out, objectsLists[OBJ_TAG]);
1474 		writeObjects(out, objectsLists[OBJ_TREE]);
1475 		writeObjects(out, objectsLists[OBJ_BLOB]);
1476 	}
1477 
1478 	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
1479 			throws IOException {
1480 		if (list.isEmpty())
1481 			return;
1482 
1483 		typeStats = stats.objectTypes[list.get(0).getType()];
1484 		long beginOffset = out.length();
1485 
1486 		if (reuseSupport != null) {
1487 			reuseSupport.writeObjects(out, list);
1488 		} else {
1489 			for (ObjectToPack otp : list)
1490 				out.writeObject(otp);
1491 		}
1492 
1493 		typeStats.bytes += out.length() - beginOffset;
1494 		typeStats.cntObjects = list.size();
1495 	}
1496 
1497 	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
1498 		if (!otp.isWritten())
1499 			writeObjectImpl(out, otp);
1500 	}
1501 
	/**
	 * Write one object, preferring as-is reuse when available.
	 * <p>
	 * Detects delta-chain cycles via the want-write mark, writes any delta
	 * base first, and falls back to deflating the object when its stored
	 * representation disappears before any bytes were emitted.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            object to emit.
	 * @throws IOException
	 *             object data could not be read or written.
	 */
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		while (otp.isReuseAsIs()) {
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			// Copy the stored representation verbatim; crc32 records the
			// checksum of the emitted bytes for the index.
			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				if (otp.getOffset() == out.length()) {
					// Nothing was emitted yet: pick a fresh representation
					// and retry the loop.
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				} else {
					// Object writing already started, we cannot recover.
					//
					CorruptObjectException coe;
					coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
					coe.initCause(gone);
					throw coe;
				}
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation())
			writeDeltaObjectDeflate(out, otp);
		else
			writeWholeObjectDeflate(out, otp);
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1560 
1561 	private void writeBase(PackOutputStream out, ObjectToPack base)
1562 			throws IOException {
1563 		if (base != null && !base.isWritten() && !base.isEdge())
1564 			writeObjectImpl(out, base);
1565 	}
1566 
1567 	private void writeWholeObjectDeflate(PackOutputStream out,
1568 			final ObjectToPack otp) throws IOException {
1569 		final Deflater deflater = deflater();
1570 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1571 
1572 		crc32.reset();
1573 		otp.setOffset(out.length());
1574 		out.writeHeader(otp, ldr.getSize());
1575 
1576 		deflater.reset();
1577 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1578 		ldr.copyTo(dst);
1579 		dst.finish();
1580 	}
1581 
	/**
	 * Write one object as a delta against its base.
	 * <p>
	 * Uses an already-compressed cached delta when one is still available;
	 * otherwise computes the delta and deflates it into the stream.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            object to emit as a delta.
	 * @throws IOException
	 *             object data could not be read or written.
	 */
	private void writeDeltaObjectDeflate(PackOutputStream out,
			final ObjectToPack otp) throws IOException {
		writeBase(out, otp.getDeltaBase());

		crc32.reset();
		otp.setOffset(out.length());

		// Fast path: the delta search may have cached the compressed
		// delta bytes; the reference can expire, so check for null.
		DeltaCache.Ref ref = otp.popCachedDelta();
		if (ref != null) {
			byte[] zbuf = ref.get();
			if (zbuf != null) {
				out.writeHeader(otp, otp.getCachedSize());
				out.write(zbuf);
				typeStats.cntDeltas++;
				typeStats.deltaBytes += out.length() - otp.getOffset();
				return;
			}
		}

		// Slow path: recompute the delta and deflate it inline.
		try (TemporaryBuffer.Heap delta = delta(otp)) {
			out.writeHeader(otp, delta.length());

			Deflater deflater = deflater();
			deflater.reset();
			DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
			delta.writeTo(dst, null);
			dst.finish();
		}
		typeStats.cntDeltas++;
		typeStats.deltaBytes += out.length() - otp.getOffset();
	}
1613 
1614 	private TemporaryBuffer.Heap delta(final ObjectToPack otp)
1615 			throws IOException {
1616 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1617 		byte[] res = buffer(otp);
1618 
1619 		// We never would have proposed this pair if the delta would be
1620 		// larger than the unpacked version of the object. So using it
1621 		// as our buffer limit is valid: we will never reach it.
1622 		//
1623 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1624 		index.encode(delta, res);
1625 		return delta;
1626 	}
1627 
	/**
	 * Read an object's full content using this writer's config and reader.
	 *
	 * @param objId
	 *            object to load.
	 * @return the inflated object bytes.
	 * @throws IOException
	 *             the object could not be read.
	 */
	private byte[] buffer(AnyObjectId objId) throws IOException {
		return buffer(config, reader, objId);
	}
1631 
1632 	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
1633 			throws IOException {
1634 		// PackWriter should have already pruned objects that
1635 		// are above the big file threshold, so our chances of
1636 		// the object being below it are very good. We really
1637 		// shouldn't be here, unless the implementation is odd.
1638 
1639 		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
1640 	}
1641 
1642 	private Deflater deflater() {
1643 		if (myDeflater == null)
1644 			myDeflater = new Deflater(config.getCompressionLevel());
1645 		return myDeflater;
1646 	}
1647 
	/**
	 * Append the pack trailer: the digest of all preceding pack bytes.
	 * The digest is retained in {@code packcsum} for later use by
	 * {@code writeIndex} and {@code writeBitmapIndex}.
	 *
	 * @param out
	 *            the stream whose digest is finalized and written.
	 * @throws IOException
	 *             the trailer could not be written.
	 */
	private void writeChecksum(PackOutputStream out) throws IOException {
		packcsum = out.getDigest();
		out.write(packcsum);
	}
1652 
1653 	private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
1654 			@NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
1655 			@NonNull Set<? extends ObjectId> have) throws IOException {
1656 		final long countingStart = System.currentTimeMillis();
1657 		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);
1658 
1659 		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
1660 		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
1661 
1662 		canBuildBitmaps = config.isBuildBitmaps()
1663 				&& !shallowPack
1664 				&& have.isEmpty()
1665 				&& (excludeInPacks == null || excludeInPacks.length == 0);
1666 		if (!shallowPack && useBitmaps) {
1667 			BitmapIndex bitmapIndex = reader.getBitmapIndex();
1668 			if (bitmapIndex != null) {
1669 				PackWriterBitmapWalker bitmapWalker = new PackWriterBitmapWalker(
1670 						walker, bitmapIndex, countingMonitor);
1671 				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
1672 				endPhase(countingMonitor);
1673 				stats.timeCounting = System.currentTimeMillis() - countingStart;
1674 				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
1675 				return;
1676 			}
1677 		}
1678 
1679 		List<ObjectId> all = new ArrayList<>(want.size() + have.size());
1680 		all.addAll(want);
1681 		all.addAll(have);
1682 
1683 		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
1684 		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$
1685 
1686 		walker.carry(include);
1687 
1688 		int haveEst = have.size();
1689 		if (have.isEmpty()) {
1690 			walker.sort(RevSort.COMMIT_TIME_DESC);
1691 		} else {
1692 			walker.sort(RevSort.TOPO);
1693 			if (thin)
1694 				walker.sort(RevSort.BOUNDARY, true);
1695 		}
1696 
1697 		List<RevObject> wantObjs = new ArrayList<>(want.size());
1698 		List<RevObject> haveObjs = new ArrayList<>(haveEst);
1699 		List<RevTag> wantTags = new ArrayList<>(want.size());
1700 
1701 		// Retrieve the RevWalk's versions of "want" and "have" objects to
1702 		// maintain any state previously set in the RevWalk.
1703 		AsyncRevObjectQueue q = walker.parseAny(all, true);
1704 		try {
1705 			for (;;) {
1706 				try {
1707 					RevObject o = q.next();
1708 					if (o == null)
1709 						break;
1710 					if (have.contains(o))
1711 						haveObjs.add(o);
1712 					if (want.contains(o)) {
1713 						o.add(include);
1714 						wantObjs.add(o);
1715 						if (o instanceof RevTag)
1716 							wantTags.add((RevTag) o);
1717 					}
1718 				} catch (MissingObjectException e) {
1719 					if (ignoreMissingUninteresting
1720 							&& have.contains(e.getObjectId()))
1721 						continue;
1722 					throw e;
1723 				}
1724 			}
1725 		} finally {
1726 			q.release();
1727 		}
1728 
1729 		if (!wantTags.isEmpty()) {
1730 			all = new ArrayList<>(wantTags.size());
1731 			for (RevTag tag : wantTags)
1732 				all.add(tag.getObject());
1733 			q = walker.parseAny(all, true);
1734 			try {
1735 				while (q.next() != null) {
1736 					// Just need to pop the queue item to parse the object.
1737 				}
1738 			} finally {
1739 				q.release();
1740 			}
1741 		}
1742 
1743 		if (walker instanceof DepthWalk.ObjectWalk) {
1744 			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
1745 			for (RevObject obj : wantObjs) {
1746 				depthWalk.markRoot(obj);
1747 			}
1748 			// Mark the tree objects associated with "have" commits as
1749 			// uninteresting to avoid writing redundant blobs. A normal RevWalk
1750 			// lazily propagates the "uninteresting" state from a commit to its
1751 			// tree during the walk, but DepthWalks can terminate early so
1752 			// preemptively propagate that state here.
1753 			for (RevObject obj : haveObjs) {
1754 				if (obj instanceof RevCommit) {
1755 					RevTree t = ((RevCommit) obj).getTree();
1756 					depthWalk.markUninteresting(t);
1757 				}
1758 			}
1759 
1760 			if (unshallowObjects != null) {
1761 				for (ObjectId id : unshallowObjects) {
1762 					depthWalk.markUnshallow(walker.parseAny(id));
1763 				}
1764 			}
1765 		} else {
1766 			for (RevObject obj : wantObjs)
1767 				walker.markStart(obj);
1768 		}
1769 		for (RevObject obj : haveObjs)
1770 			walker.markUninteresting(obj);
1771 
1772 		final int maxBases = config.getDeltaSearchWindowSize();
1773 		Set<RevTree> baseTrees = new HashSet<>();
1774 		BlockList<RevCommit> commits = new BlockList<>();
1775 		Set<ObjectId> roots = new HashSet<>();
1776 		RevCommit c;
1777 		while ((c = walker.next()) != null) {
1778 			if (exclude(c))
1779 				continue;
1780 			if (c.has(RevFlag.UNINTERESTING)) {
1781 				if (baseTrees.size() <= maxBases)
1782 					baseTrees.add(c.getTree());
1783 				continue;
1784 			}
1785 
1786 			commits.add(c);
1787 			if (c.getParentCount() == 0) {
1788 				roots.add(c.copy());
1789 			}
1790 			countingMonitor.update(1);
1791 		}
1792 		stats.rootCommits = Collections.unmodifiableSet(roots);
1793 
1794 		if (shallowPack) {
1795 			for (RevCommit cmit : commits) {
1796 				addObject(cmit, 0);
1797 			}
1798 		} else {
1799 			int commitCnt = 0;
1800 			boolean putTagTargets = false;
1801 			for (RevCommit cmit : commits) {
1802 				if (!cmit.has(added)) {
1803 					cmit.add(added);
1804 					addObject(cmit, 0);
1805 					commitCnt++;
1806 				}
1807 
1808 				for (int i = 0; i < cmit.getParentCount(); i++) {
1809 					RevCommit p = cmit.getParent(i);
1810 					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
1811 							&& !exclude(p)) {
1812 						p.add(added);
1813 						addObject(p, 0);
1814 						commitCnt++;
1815 					}
1816 				}
1817 
1818 				if (!putTagTargets && 4096 < commitCnt) {
1819 					for (ObjectId id : tagTargets) {
1820 						RevObject obj = walker.lookupOrNull(id);
1821 						if (obj instanceof RevCommit
1822 								&& obj.has(include)
1823 								&& !obj.has(RevFlag.UNINTERESTING)
1824 								&& !obj.has(added)) {
1825 							obj.add(added);
1826 							addObject(obj, 0);
1827 						}
1828 					}
1829 					putTagTargets = true;
1830 				}
1831 			}
1832 		}
1833 		commits = null;
1834 
1835 		if (thin && !baseTrees.isEmpty()) {
1836 			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
1837 					objectsMap, edgeObjects, reader);
1838 			RevObject o;
1839 			while ((o = walker.nextObject()) != null) {
1840 				if (o.has(RevFlag.UNINTERESTING))
1841 					continue;
1842 				if (exclude(o))
1843 					continue;
1844 
1845 				int pathHash = walker.getPathHashCode();
1846 				byte[] pathBuf = walker.getPathBuffer();
1847 				int pathLen = walker.getPathLength();
1848 				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
1849 				addObject(o, pathHash);
1850 				countingMonitor.update(1);
1851 			}
1852 		} else {
1853 			RevObject o;
1854 			while ((o = walker.nextObject()) != null) {
1855 				if (o.has(RevFlag.UNINTERESTING))
1856 					continue;
1857 				if (exclude(o))
1858 					continue;
1859 				addObject(o, walker.getPathHashCode());
1860 				countingMonitor.update(1);
1861 			}
1862 		}
1863 
1864 		for (CachedPack pack : cachedPacks)
1865 			countingMonitor.update((int) pack.getObjectCount());
1866 		endPhase(countingMonitor);
1867 		stats.timeCounting = System.currentTimeMillis() - countingStart;
1868 		stats.bitmapIndexMisses = -1;
1869 	}
1870 
	/**
	 * Enumerate the objects to pack using the repository's bitmap index.
	 * <p>
	 * Computes, as bitmaps, the set of objects reachable from {@code want}
	 * but not from {@code have}, and queues every object in that difference
	 * (minus any excluded objects) for packing.
	 *
	 * @param bitmapWalker
	 *            walker backed by the repository's bitmap index.
	 * @param want
	 *            objects the pack must make reachable.
	 * @param have
	 *            objects assumed to already exist on the receiving side.
	 * @throws MissingObjectException
	 *             a referenced object was not found.
	 * @throws IncorrectObjectTypeException
	 *             an object had an unexpected type.
	 * @throws IOException
	 *             reading from the object database failed.
	 */
	private void findObjectsToPackUsingBitmaps(
			PackWriterBitmapWalker bitmapWalker, Set<? extends ObjectId> want,
			Set<? extends ObjectId> have)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// NOTE(review): the trailing boolean passed to findObjects appears to
		// control tolerance of missing objects (true for "have", false for
		// "want") — confirm against PackWriterBitmapWalker before relying on
		// this.
		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
		bitmapWalker.reset();
		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
				false);
		// Objects that must actually be written: wanted but not already held.
		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);

		// Cached packs may be appended verbatim; per its name,
		// getCachedPacksAndUpdate presumably also removes those packs'
		// objects from needBitmap — verify against the reuse support impl.
		if (useCachedPacks && reuseSupport != null && !reuseValidate
				&& (excludeInPacks == null || excludeInPacks.length == 0))
			cachedPacks.addAll(
					reuseSupport.getCachedPacksAndUpdate(needBitmap));

		for (BitmapObject obj : needBitmap) {
			ObjectId objectId = obj.getObjectId();
			if (exclude(objectId)) {
				// Keep the bitmap consistent with the queued object set.
				needBitmap.remove(objectId);
				continue;
			}
			addObject(objectId, obj.getType(), 0);
		}

		// For thin packs remember what the receiver has, so select() can
		// reuse deltas whose base is not sent (see have()).
		if (thin)
			haveObjects = haveBitmap;
	}
1899 
1900 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
1901 		final int size = list.size();
1902 		int src = 0;
1903 		int dst = 0;
1904 
1905 		for (; src < size; src++) {
1906 			ObjectToPack obj = list.get(src);
1907 			if (obj.isEdge())
1908 				continue;
1909 			if (dst != src)
1910 				list.set(dst, obj);
1911 			dst++;
1912 		}
1913 
1914 		while (dst < list.size())
1915 			list.remove(list.size() - 1);
1916 	}
1917 
1918 	/**
1919 	 * Include one object to the output file.
1920 	 * <p>
1921 	 * Objects are written in the order they are added. If the same object is
1922 	 * added twice, it may be written twice, creating a larger than necessary
1923 	 * file.
1924 	 *
1925 	 * @param object
1926 	 *            the object to add.
1927 	 * @throws IncorrectObjectTypeException
1928 	 *             the object is an unsupported type.
1929 	 */
1930 	public void addObject(final RevObject object)
1931 			throws IncorrectObjectTypeException {
1932 		if (!exclude(object))
1933 			addObject(object, 0);
1934 	}
1935 
	// Convenience overload: derive the pack entry type from the object itself.
	private void addObject(final RevObject object, final int pathHashCode) {
		addObject(object, object.getType(), pathHashCode);
	}
1939 
1940 	private void addObject(
1941 			final AnyObjectId src, final int type, final int pathHashCode) {
1942 		final ObjectToPack otp;
1943 		if (reuseSupport != null)
1944 			otp = reuseSupport.newObjectToPack(src, type);
1945 		else
1946 			otp = new ObjectToPack(src, type);
1947 		otp.setPathHash(pathHashCode);
1948 		objectsLists[type].add(otp);
1949 		objectsMap.add(otp);
1950 	}
1951 
1952 	private boolean exclude(AnyObjectId objectId) {
1953 		if (excludeInPacks == null)
1954 			return false;
1955 		if (excludeInPackLast.contains(objectId))
1956 			return true;
1957 		for (ObjectIdSet idx : excludeInPacks) {
1958 			if (idx.contains(objectId)) {
1959 				excludeInPackLast = idx;
1960 				return true;
1961 			}
1962 		}
1963 		return false;
1964 	}
1965 
1966 	/**
1967 	 * Select an object representation for this writer.
1968 	 * <p>
1969 	 * An {@link ObjectReader} implementation should invoke this method once for
1970 	 * each representation available for an object, to allow the writer to find
1971 	 * the most suitable one for the output.
1972 	 *
1973 	 * @param otp
1974 	 *            the object being packed.
1975 	 * @param next
1976 	 *            the next available representation from the repository.
1977 	 */
1978 	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
1979 		int nFmt = next.getFormat();
1980 
1981 		if (!cachedPacks.isEmpty()) {
1982 			if (otp.isEdge())
1983 				return;
1984 			if ((nFmt == PACK_WHOLE) | (nFmt == PACK_DELTA)) {
1985 				for (CachedPack pack : cachedPacks) {
1986 					if (pack.hasObject(otp, next)) {
1987 						otp.setEdge();
1988 						otp.clearDeltaBase();
1989 						otp.clearReuseAsIs();
1990 						pruneCurrentObjectList = true;
1991 						return;
1992 					}
1993 				}
1994 			}
1995 		}
1996 
1997 		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
1998 			ObjectId baseId = next.getDeltaBase();
1999 			ObjectToPack ptr = objectsMap.get(baseId);
2000 			if (ptr != null && !ptr.isEdge()) {
2001 				otp.setDeltaBase(ptr);
2002 				otp.setReuseAsIs();
2003 			} else if (thin && have(ptr, baseId)) {
2004 				otp.setDeltaBase(baseId);
2005 				otp.setReuseAsIs();
2006 			} else {
2007 				otp.clearDeltaBase();
2008 				otp.clearReuseAsIs();
2009 			}
2010 		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
2011 			int nWeight = next.getWeight();
2012 			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
2013 				// We've chosen another PACK_WHOLE format for this object,
2014 				// choose the one that has the smaller compressed size.
2015 				//
2016 				if (otp.getWeight() <= nWeight)
2017 					return;
2018 			}
2019 			otp.clearDeltaBase();
2020 			otp.setReuseAsIs();
2021 			otp.setWeight(nWeight);
2022 		} else {
2023 			otp.clearDeltaBase();
2024 			otp.clearReuseAsIs();
2025 		}
2026 
2027 		otp.setDeltaAttempted(reuseDeltas & next.wasDeltaAttempted());
2028 		otp.select(next);
2029 	}
2030 
2031 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
2032 		return (ptr != null && ptr.isEdge())
2033 				|| (haveObjects != null && haveObjects.contains(objectId));
2034 	}
2035 
2036 	/**
2037 	 * Prepares the bitmaps to be written to the bitmap index file.
2038 	 * <p>
2039 	 * Bitmaps can be used to speed up fetches and clones by storing the entire
2040 	 * object graph at selected commits. Writing a bitmap index is an optional
2041 	 * feature that not all pack users may require.
2042 	 * <p>
2043 	 * Called after {@link #writeIndex(OutputStream)}.
2044 	 * <p>
2045 	 * To reduce memory internal state is cleared during this method, rendering
2046 	 * the PackWriter instance useless for anything further than a call to write
2047 	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
2048 	 *
2049 	 * @param pm
2050 	 *            progress monitor to report bitmap building work.
2051 	 * @return whether a bitmap index may be written.
2052 	 * @throws IOException
2053 	 *             when some I/O problem occur during reading objects.
2054 	 */
2055 	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
2056 		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
2057 				|| !cachedPacks.isEmpty())
2058 			return false;
2059 
2060 		if (pm == null)
2061 			pm = NullProgressMonitor.INSTANCE;
2062 
2063 		int numCommits = objectsLists[OBJ_COMMIT].size();
2064 		List<ObjectToPack> byName = sortByName();
2065 		sortedByName = null;
2066 		objectsLists = null;
2067 		objectsMap = null;
2068 		writeBitmaps = new PackBitmapIndexBuilder(byName);
2069 		byName = null;
2070 
2071 		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
2072 				reader, writeBitmaps, pm, stats.interestingObjects, config);
2073 
2074 		Collection<PackWriterBitmapPreparer.BitmapCommit> selectedCommits =
2075 				bitmapPreparer.selectCommits(numCommits);
2076 
2077 		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());
2078 
2079 		PackWriterBitmapWalker walker = bitmapPreparer.newBitmapWalker();
2080 		AnyObjectId last = null;
2081 		for (PackWriterBitmapPreparer.BitmapCommit cmit : selectedCommits) {
2082 			if (cmit.isReuseWalker())
2083 				walker.reset();
2084 			else
2085 				walker = bitmapPreparer.newBitmapWalker();
2086 
2087 			BitmapBuilder bitmap = walker.findObjects(
2088 					Collections.singleton(cmit), null, false);
2089 
2090 			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
2091 				throw new IllegalStateException(MessageFormat.format(
2092 						JGitText.get().bitmapMissingObject, cmit.name(),
2093 						last.name()));
2094 			last = cmit;
2095 			writeBitmaps.addBitmap(cmit, bitmap.build(), cmit.getFlags());
2096 
2097 			pm.update(1);
2098 		}
2099 
2100 		endPhase(pm);
2101 		return true;
2102 	}
2103 
2104 	private boolean reuseDeltaFor(ObjectToPack otp) {
2105 		int type = otp.getType();
2106 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2107 			return true;
2108 		if (type == OBJ_COMMIT)
2109 			return reuseDeltaCommits;
2110 		if (type == OBJ_TAG)
2111 			return false;
2112 		return true;
2113 	}
2114 
2115 	/**
2116 	 * Summary of how PackWriter created the pack.
2117 	 *
2118 	 * @deprecated Use {@link PackStatistics} instead.
2119 	 */
2120 	@Deprecated
2121 	public static class Statistics {
2122 		/** Statistics about a single class of object. */
2123 		public static class ObjectType {
2124 			// All requests are forwarded to this object.
2125 			private PackStatistics.ObjectType objectType;
2126 
2127 			/**
2128 			 * Wraps an
2129 			 * {@link org.eclipse.jgit.storage.pack.PackStatistics.ObjectType}
2130 			 * instance to maintain backwards compatibility with existing API.
2131 			 *
2132 			 * @param type
2133 			 *            the wrapped instance
2134 			 */
2135 			public ObjectType(PackStatistics.ObjectType type) {
2136 				objectType = type;
2137 			}
2138 
2139 			/**
2140 			 * @return total number of objects output. This total includes the
2141 			 *         value of {@link #getDeltas()}.
2142 			 */
2143 			public long getObjects() {
2144 				return objectType.getObjects();
2145 			}
2146 
2147 			/**
2148 			 * @return total number of deltas output. This may be lower than the
2149 			 *         actual number of deltas if a cached pack was reused.
2150 			 */
2151 			public long getDeltas() {
2152 				return objectType.getDeltas();
2153 			}
2154 
2155 			/**
2156 			 * @return number of objects whose existing representation was
2157 			 *         reused in the output. This count includes
2158 			 *         {@link #getReusedDeltas()}.
2159 			 */
2160 			public long getReusedObjects() {
2161 				return objectType.getReusedObjects();
2162 			}
2163 
2164 			/**
2165 			 * @return number of deltas whose existing representation was reused
2166 			 *         in the output, as their base object was also output or
2167 			 *         was assumed present for a thin pack. This may be lower
2168 			 *         than the actual number of reused deltas if a cached pack
2169 			 *         was reused.
2170 			 */
2171 			public long getReusedDeltas() {
2172 				return objectType.getReusedDeltas();
2173 			}
2174 
2175 			/**
2176 			 * @return total number of bytes written. This size includes the
2177 			 *         object headers as well as the compressed data. This size
2178 			 *         also includes all of {@link #getDeltaBytes()}.
2179 			 */
2180 			public long getBytes() {
2181 				return objectType.getBytes();
2182 			}
2183 
2184 			/**
2185 			 * @return number of delta bytes written. This size includes the
2186 			 *         object headers for the delta objects.
2187 			 */
2188 			public long getDeltaBytes() {
2189 				return objectType.getDeltaBytes();
2190 			}
2191 		}
2192 
2193 		// All requests are forwarded to this object.
2194 		private PackStatistics statistics;
2195 
2196 		/**
2197 		 * Wraps a {@link PackStatistics} object to maintain backwards
2198 		 * compatibility with existing API.
2199 		 *
2200 		 * @param stats
2201 		 *            the wrapped PackStatitics object
2202 		 */
2203 		public Statistics(PackStatistics stats) {
2204 			statistics = stats;
2205 		}
2206 
2207 		/**
2208 		 * @return unmodifiable collection of objects to be included in the
2209 		 *         pack. May be null if the pack was hand-crafted in a unit
2210 		 *         test.
2211 		 */
2212 		public Set<ObjectId> getInterestingObjects() {
2213 			return statistics.getInterestingObjects();
2214 		}
2215 
2216 		/**
2217 		 * @return unmodifiable collection of objects that should be excluded
2218 		 *         from the pack, as the peer that will receive the pack already
2219 		 *         has these objects.
2220 		 */
2221 		public Set<ObjectId> getUninterestingObjects() {
2222 			return statistics.getUninterestingObjects();
2223 		}
2224 
2225 		/**
2226 		 * @return unmodifiable collection of the cached packs that were reused
2227 		 *         in the output, if any were selected for reuse.
2228 		 */
2229 		public Collection<CachedPack> getReusedPacks() {
2230 			return statistics.getReusedPacks();
2231 		}
2232 
2233 		/**
2234 		 * @return number of objects in the output pack that went through the
2235 		 *         delta search process in order to find a potential delta base.
2236 		 */
2237 		public int getDeltaSearchNonEdgeObjects() {
2238 			return statistics.getDeltaSearchNonEdgeObjects();
2239 		}
2240 
2241 		/**
2242 		 * @return number of objects in the output pack that went through delta
2243 		 *         base search and found a suitable base. This is a subset of
2244 		 *         {@link #getDeltaSearchNonEdgeObjects()}.
2245 		 */
2246 		public int getDeltasFound() {
2247 			return statistics.getDeltasFound();
2248 		}
2249 
2250 		/**
2251 		 * @return total number of objects output. This total includes the value
2252 		 *         of {@link #getTotalDeltas()}.
2253 		 */
2254 		public long getTotalObjects() {
2255 			return statistics.getTotalObjects();
2256 		}
2257 
2258 		/**
2259 		 * @return the count of objects that needed to be discovered through an
2260 		 *         object walk because they were not found in bitmap indices.
2261 		 *         Returns -1 if no bitmap indices were found.
2262 		 */
2263 		public long getBitmapIndexMisses() {
2264 			return statistics.getBitmapIndexMisses();
2265 		}
2266 
2267 		/**
2268 		 * @return total number of deltas output. This may be lower than the
2269 		 *         actual number of deltas if a cached pack was reused.
2270 		 */
2271 		public long getTotalDeltas() {
2272 			return statistics.getTotalDeltas();
2273 		}
2274 
2275 		/**
2276 		 * @return number of objects whose existing representation was reused in
2277 		 *         the output. This count includes {@link #getReusedDeltas()}.
2278 		 */
2279 		public long getReusedObjects() {
2280 			return statistics.getReusedObjects();
2281 		}
2282 
2283 		/**
2284 		 * @return number of deltas whose existing representation was reused in
2285 		 *         the output, as their base object was also output or was
2286 		 *         assumed present for a thin pack. This may be lower than the
2287 		 *         actual number of reused deltas if a cached pack was reused.
2288 		 */
2289 		public long getReusedDeltas() {
2290 			return statistics.getReusedDeltas();
2291 		}
2292 
2293 		/**
2294 		 * @return total number of bytes written. This size includes the pack
2295 		 *         header, trailer, thin pack, and reused cached pack(s).
2296 		 */
2297 		public long getTotalBytes() {
2298 			return statistics.getTotalBytes();
2299 		}
2300 
2301 		/**
2302 		 * @return size of the thin pack in bytes, if a thin pack was generated.
2303 		 *         A thin pack is created when the client already has objects
2304 		 *         and some deltas are created against those objects, or if a
2305 		 *         cached pack is being used and some deltas will reference
2306 		 *         objects in the cached pack. This size does not include the
2307 		 *         pack header or trailer.
2308 		 */
2309 		public long getThinPackBytes() {
2310 			return statistics.getThinPackBytes();
2311 		}
2312 
2313 		/**
2314 		 * @param typeCode
2315 		 *            object type code, e.g. OBJ_COMMIT or OBJ_TREE.
2316 		 * @return information about this type of object in the pack.
2317 		 */
2318 		public ObjectType byObjectType(int typeCode) {
2319 			return new ObjectType(statistics.byObjectType(typeCode));
2320 		}
2321 
2322 		/** @return true if the resulting pack file was a shallow pack. */
2323 		public boolean isShallow() {
2324 			return statistics.isShallow();
2325 		}
2326 
2327 		/** @return depth (in commits) the pack includes if shallow. */
2328 		public int getDepth() {
2329 			return statistics.getDepth();
2330 		}
2331 
2332 		/**
2333 		 * @return time in milliseconds spent enumerating the objects that need
2334 		 *         to be included in the output. This time includes any restarts
2335 		 *         that occur when a cached pack is selected for reuse.
2336 		 */
2337 		public long getTimeCounting() {
2338 			return statistics.getTimeCounting();
2339 		}
2340 
2341 		/**
2342 		 * @return time in milliseconds spent matching existing representations
2343 		 *         against objects that will be transmitted, or that the client
2344 		 *         can be assumed to already have.
2345 		 */
2346 		public long getTimeSearchingForReuse() {
2347 			return statistics.getTimeSearchingForReuse();
2348 		}
2349 
2350 		/**
2351 		 * @return time in milliseconds spent finding the sizes of all objects
2352 		 *         that will enter the delta compression search window. The
2353 		 *         sizes need to be known to better match similar objects
2354 		 *         together and improve delta compression ratios.
2355 		 */
2356 		public long getTimeSearchingForSizes() {
2357 			return statistics.getTimeSearchingForSizes();
2358 		}
2359 
2360 		/**
2361 		 * @return time in milliseconds spent on delta compression. This is
2362 		 *         observed wall-clock time and does not accurately track CPU
2363 		 *         time used when multiple threads were used to perform the
2364 		 *         delta compression.
2365 		 */
2366 		public long getTimeCompressing() {
2367 			return statistics.getTimeCompressing();
2368 		}
2369 
2370 		/**
2371 		 * @return time in milliseconds spent writing the pack output, from
2372 		 *         start of header until end of trailer. The transfer speed can
2373 		 *         be approximated by dividing {@link #getTotalBytes()} by this
2374 		 *         value.
2375 		 */
2376 		public long getTimeWriting() {
2377 			return statistics.getTimeWriting();
2378 		}
2379 
2380 		/** @return total time spent processing this pack. */
2381 		public long getTimeTotal() {
2382 			return statistics.getTimeTotal();
2383 		}
2384 
2385 		/**
2386 		 * @return get the average output speed in terms of bytes-per-second.
2387 		 *         {@code getTotalBytes() / (getTimeWriting() / 1000.0)}.
2388 		 */
2389 		public double getTransferRate() {
2390 			return statistics.getTransferRate();
2391 		}
2392 
2393 		/** @return formatted message string for display to clients. */
2394 		public String getMessage() {
2395 			return statistics.getMessage();
2396 		}
2397 	}
2398 
	// Mutable view of the writer's progress, sampled by snapshot() to build
	// immutable State objects for monitoring.
	private class MutableState {
		/** Estimated size of a single ObjectToPack instance. */
		// Assume 64-bit pointers, since this is just an estimate.
		private static final long OBJECT_TO_PACK_SIZE =
				(2 * 8)               // Object header
				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
				+ (8 + 8)             // PackedObjectInfo fields
				+ 8                   // ObjectIdOwnerMap fields
				+ 40                  // AnyObjectId fields
				+ 8;                  // Reference in BlockList

		// Estimated bytes the delta search may consume across all threads,
		// computed once in the constructor from the pack configuration.
		private final long totalDeltaSearchBytes;

		// volatile so snapshot() observes phase changes made by the thread
		// driving the writer.
		private volatile PackingPhase phase;

		MutableState() {
			phase = PackingPhase.COUNTING;
			if (config.isDeltaCompress()) {
				int threads = config.getThreads();
				if (threads <= 0)
					threads = Runtime.getRuntime().availableProcessors();
				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
						+ config.getBigFileThreshold();
			} else
				totalDeltaSearchBytes = 0;
		}

		// Capture the current phase and a memory-use estimate. The object
		// lists may be nulled concurrently (see prepareBitmapIndex), so read
		// the array reference once into a local before using it.
		State snapshot() {
			long objCnt = 0;
			BlockList<ObjectToPack>[] lists = objectsLists;
			if (lists != null) {
				objCnt += lists[OBJ_COMMIT].size();
				objCnt += lists[OBJ_TREE].size();
				objCnt += lists[OBJ_BLOB].size();
				objCnt += lists[OBJ_TAG].size();
				// Exclude CachedPacks.
			}

			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
			PackingPhase curr = phase;
			if (curr == PackingPhase.COMPRESSING)
				bytesUsed += totalDeltaSearchBytes;
			return new State(curr, bytesUsed);
		}
	}
2444 
2445 	/** Possible states that a PackWriter can be in. */
2446 	public static enum PackingPhase {
2447 		/** Counting objects phase. */
2448 		COUNTING,
2449 
2450 		/** Getting sizes phase. */
2451 		GETTING_SIZES,
2452 
2453 		/** Finding sources phase. */
2454 		FINDING_SOURCES,
2455 
2456 		/** Compressing objects phase. */
2457 		COMPRESSING,
2458 
2459 		/** Writing objects phase. */
2460 		WRITING,
2461 
2462 		/** Building bitmaps phase. */
2463 		BUILDING_BITMAPS;
2464 	}
2465 
2466 	/** Summary of the current state of a PackWriter. */
2467 	public class State {
2468 		private final PackingPhase phase;
2469 
2470 		private final long bytesUsed;
2471 
2472 		State(PackingPhase phase, long bytesUsed) {
2473 			this.phase = phase;
2474 			this.bytesUsed = bytesUsed;
2475 		}
2476 
2477 		/** @return the PackConfig used to build the writer. */
2478 		public PackConfig getConfig() {
2479 			return config;
2480 		}
2481 
2482 		/** @return the current phase of the writer. */
2483 		public PackingPhase getPhase() {
2484 			return phase;
2485 		}
2486 
2487 		/** @return an estimate of the total memory used by the writer. */
2488 		public long estimateBytesUsed() {
2489 			return bytesUsed;
2490 		}
2491 
2492 		@SuppressWarnings("nls")
2493 		@Override
2494 		public String toString() {
2495 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2496 		}
2497 	}
2498 }