PackWriter.java

  1. /*
  2.  * Copyright (C) 2008-2010, Google Inc.
  3.  * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com> and others
  4.  *
  5.  * This program and the accompanying materials are made available under the
  6.  * terms of the Eclipse Distribution License v. 1.0 which is available at
  7.  * https://www.eclipse.org/org/documents/edl-v10.php.
  8.  *
  9.  * SPDX-License-Identifier: BSD-3-Clause
  10.  */

  11. package org.eclipse.jgit.internal.storage.pack;

  12. import static java.util.Objects.requireNonNull;
  13. import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
  14. import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
  15. import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
  16. import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
  17. import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
  18. import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
  19. import static org.eclipse.jgit.lib.Constants.OBJ_TREE;

  20. import java.io.IOException;
  21. import java.io.OutputStream;
  22. import java.lang.ref.WeakReference;
  23. import java.security.MessageDigest;
  24. import java.text.MessageFormat;
  25. import java.util.ArrayList;
  26. import java.util.Arrays;
  27. import java.util.Collection;
  28. import java.util.Collections;
  29. import java.util.HashMap;
  30. import java.util.HashSet;
  31. import java.util.Iterator;
  32. import java.util.List;
  33. import java.util.Map;
  34. import java.util.NoSuchElementException;
  35. import java.util.Set;
  36. import java.util.concurrent.ConcurrentHashMap;
  37. import java.util.concurrent.ExecutionException;
  38. import java.util.concurrent.Executor;
  39. import java.util.concurrent.ExecutorService;
  40. import java.util.concurrent.Executors;
  41. import java.util.concurrent.Future;
  42. import java.util.concurrent.TimeUnit;
  43. import java.util.zip.CRC32;
  44. import java.util.zip.CheckedOutputStream;
  45. import java.util.zip.Deflater;
  46. import java.util.zip.DeflaterOutputStream;

  47. import org.eclipse.jgit.annotations.NonNull;
  48. import org.eclipse.jgit.annotations.Nullable;
  49. import org.eclipse.jgit.errors.CorruptObjectException;
  50. import org.eclipse.jgit.errors.IncorrectObjectTypeException;
  51. import org.eclipse.jgit.errors.LargeObjectException;
  52. import org.eclipse.jgit.errors.MissingObjectException;
  53. import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
  54. import org.eclipse.jgit.internal.JGitText;
  55. import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
  56. import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
  57. import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
  58. import org.eclipse.jgit.lib.AnyObjectId;
  59. import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
  60. import org.eclipse.jgit.lib.BatchingProgressMonitor;
  61. import org.eclipse.jgit.lib.BitmapIndex;
  62. import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
  63. import org.eclipse.jgit.lib.BitmapObject;
  64. import org.eclipse.jgit.lib.Constants;
  65. import org.eclipse.jgit.lib.NullProgressMonitor;
  66. import org.eclipse.jgit.lib.ObjectId;
  67. import org.eclipse.jgit.lib.ObjectIdOwnerMap;
  68. import org.eclipse.jgit.lib.ObjectIdSet;
  69. import org.eclipse.jgit.lib.ObjectLoader;
  70. import org.eclipse.jgit.lib.ObjectReader;
  71. import org.eclipse.jgit.lib.ProgressMonitor;
  72. import org.eclipse.jgit.lib.Repository;
  73. import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
  74. import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
  75. import org.eclipse.jgit.revwalk.BitmapWalker;
  76. import org.eclipse.jgit.revwalk.DepthWalk;
  77. import org.eclipse.jgit.revwalk.ObjectWalk;
  78. import org.eclipse.jgit.revwalk.RevCommit;
  79. import org.eclipse.jgit.revwalk.RevFlag;
  80. import org.eclipse.jgit.revwalk.RevObject;
  81. import org.eclipse.jgit.revwalk.RevSort;
  82. import org.eclipse.jgit.revwalk.RevTag;
  83. import org.eclipse.jgit.revwalk.RevTree;
  84. import org.eclipse.jgit.storage.pack.PackConfig;
  85. import org.eclipse.jgit.storage.pack.PackStatistics;
  86. import org.eclipse.jgit.transport.FilterSpec;
  87. import org.eclipse.jgit.transport.ObjectCountCallback;
  88. import org.eclipse.jgit.transport.PacketLineOut;
  89. import org.eclipse.jgit.transport.WriteAbortedException;
  90. import org.eclipse.jgit.util.BlockList;
  91. import org.eclipse.jgit.util.TemporaryBuffer;

  92. /**
  93.  * <p>
  94.  * The PackWriter class is responsible for generating pack files from a
  95.  * specified set of objects in a repository. This implementation produces
  96.  * pack files in format version 2.
  97.  * </p>
  98.  * <p>
  99.  * The source of objects may be specified in two ways:
  100.  * <ul>
  101.  * <li>(usually) by providing sets of interesting and uninteresting objects in
  102.  * the repository - all interesting objects and their ancestors, except the
  103.  * uninteresting objects and their ancestors, will be included in the pack, or</li>
  104.  * <li>by providing an iterator of {@link org.eclipse.jgit.revwalk.RevObject}
  105.  * specifying the exact list and order of objects in the pack</li>
  106.  * </ul>
  107.  * <p>
  108.  * Typical usage consists of creating an instance, configuring options,
  109.  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
  110.  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
  111.  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
  112.  * pack is being stored as a file the matching index can be written out after
  113.  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
  114.  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
  115.  * followed by {@link #writeBitmapIndex(OutputStream)}.
  116.  * </p>
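 * <p>
 * A minimal usage sketch (illustrative only; {@code repo}, {@code wants},
 * {@code packPath} and {@code idxPath} are caller-supplied placeholders, and
 * error handling is omitted):
 * </p>
 *
 * <pre>
 * {@code
 * try (PackWriter pw = new PackWriter(repo)) {
 *     pw.preparePack(NullProgressMonitor.INSTANCE, wants, PackWriter.NONE);
 *     try (OutputStream packOut = new BufferedOutputStream(
 *             Files.newOutputStream(packPath))) {
 *         pw.writePack(NullProgressMonitor.INSTANCE,
 *                 NullProgressMonitor.INSTANCE, packOut);
 *     }
 *     try (OutputStream idxOut = new BufferedOutputStream(
 *             Files.newOutputStream(idxPath))) {
 *         pw.writeIndex(idxOut);
 *     }
 * }
 * }
 * </pre>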
  117.  * <p>
  118.  * The class provides a set of configurable options and
  119.  * {@link org.eclipse.jgit.lib.ProgressMonitor} support, as operations may take
  120.  * a long time for big repositories. A delta search is performed only when
  121.  * delta compression is enabled in the configuration; otherwise this
  122.  * implementation relies on reuse of existing deltas and objects.
  123.  * </p>
  124.  * <p>
  125.  * This class is not thread safe. It is intended to be used in one thread as a
  126.  * single pass to produce one pack. Invoking methods multiple times or out of
  127.  * order is not supported as internal data structures are destroyed during
  128.  * certain phases to save memory when packing large repositories.
  129.  * </p>
  130.  */
  131. public class PackWriter implements AutoCloseable {
  132.     private static final int PACK_VERSION_GENERATED = 2;

  133.     /** Empty set of objects for {@code preparePack()}. */
  134.     public static final Set<ObjectId> NONE = Collections.emptySet();

  135.     private static final Map<WeakReference<PackWriter>, Boolean> instances =
  136.             new ConcurrentHashMap<>();

  137.     private static final Iterable<PackWriter> instancesIterable = () -> new Iterator<PackWriter>() {

  138.         private final Iterator<WeakReference<PackWriter>> it = instances
  139.                 .keySet().iterator();

  140.         private PackWriter next;

  141.         @Override
  142.         public boolean hasNext() {
  143.             if (next != null) {
  144.                 return true;
  145.             }
  146.             while (it.hasNext()) {
  147.                 WeakReference<PackWriter> ref = it.next();
  148.                 next = ref.get();
  149.                 if (next != null) {
  150.                     return true;
  151.                 }
  152.                 it.remove();
  153.             }
  154.             return false;
  155.         }

  156.         @Override
  157.         public PackWriter next() {
  158.             if (hasNext()) {
  159.                 PackWriter result = next;
  160.                 next = null;
  161.                 return result;
  162.             }
  163.             throw new NoSuchElementException();
  164.         }

  165.         @Override
  166.         public void remove() {
  167.             throw new UnsupportedOperationException();
  168.         }
  169.     };

  170.     /**
  171.      * Get all allocated, non-released PackWriter instances.
  172.      *
  173.      * @return all allocated, non-released PackWriter instances.
  174.      */
  175.     public static Iterable<PackWriter> getInstances() {
  176.         return instancesIterable;
  177.     }

  178.     @SuppressWarnings("unchecked")
  179.     BlockList<ObjectToPack>[] objectsLists = new BlockList[OBJ_TAG + 1];
  180.     {
  181.         objectsLists[OBJ_COMMIT] = new BlockList<>();
  182.         objectsLists[OBJ_TREE] = new BlockList<>();
  183.         objectsLists[OBJ_BLOB] = new BlockList<>();
  184.         objectsLists[OBJ_TAG] = new BlockList<>();
  185.     }

  186.     private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

  187.     // edge objects for thin packs
  188.     private List<ObjectToPack> edgeObjects = new BlockList<>();

  189.     // Objects the client is known to have already.
  190.     private BitmapBuilder haveObjects;

  191.     private List<CachedPack> cachedPacks = new ArrayList<>(2);

  192.     private Set<ObjectId> tagTargets = NONE;

  193.     private Set<? extends ObjectId> excludeFromBitmapSelection = NONE;

  194.     private ObjectIdSet[] excludeInPacks;

  195.     private ObjectIdSet excludeInPackLast;

  196.     private Deflater myDeflater;

  197.     private final ObjectReader reader;

  198.     /** {@link #reader} recast to the reuse interface, if it supports it. */
  199.     private final ObjectReuseAsIs reuseSupport;

  200.     final PackConfig config;

  201.     private final PackStatistics.Accumulator stats;

  202.     private final MutableState state;

  203.     private final WeakReference<PackWriter> selfRef;

  204.     private PackStatistics.ObjectType.Accumulator typeStats;

  205.     private List<ObjectToPack> sortedByName;

  206.     private byte[] packcsum;

  207.     private boolean deltaBaseAsOffset;

  208.     private boolean reuseDeltas;

  209.     private boolean reuseDeltaCommits;

  210.     private boolean reuseValidate;

  211.     private boolean thin;

  212.     private boolean useCachedPacks;

  213.     private boolean useBitmaps;

  214.     private boolean ignoreMissingUninteresting = true;

  215.     private boolean pruneCurrentObjectList;

  216.     private boolean shallowPack;

  217.     private boolean canBuildBitmaps;

  218.     private boolean indexDisabled;

  219.     private int depth;

  220.     private Collection<? extends ObjectId> unshallowObjects;

  221.     private PackBitmapIndexBuilder writeBitmaps;

  222.     private CRC32 crc32;

  223.     private ObjectCountCallback callback;

  224.     private FilterSpec filterSpec = FilterSpec.NO_FILTER;

  225.     private PackfileUriConfig packfileUriConfig;

  226.     /**
  227.      * Create writer for specified repository.
  228.      * <p>
  229.      * Objects for packing are specified in {@link #preparePack(Iterator)} or
  230.      * {@link #preparePack(ProgressMonitor, Set, Set)}.
  231.      *
  232.      * @param repo
  233.      *            repository where objects are stored.
  234.      */
  235.     public PackWriter(Repository repo) {
  236.         this(repo, repo.newObjectReader());
  237.     }

  238.     /**
  239.      * Create a writer to load objects from the specified reader.
  240.      * <p>
  241.      * Objects for packing are specified in {@link #preparePack(Iterator)} or
  242.      * {@link #preparePack(ProgressMonitor, Set, Set)}.
  243.      *
  244.      * @param reader
  245.      *            reader to read from the repository with.
  246.      */
  247.     public PackWriter(ObjectReader reader) {
  248.         this(new PackConfig(), reader);
  249.     }

  250.     /**
  251.      * Create writer for specified repository.
  252.      * <p>
  253.      * Objects for packing are specified in {@link #preparePack(Iterator)} or
  254.      * {@link #preparePack(ProgressMonitor, Set, Set)}.
  255.      *
  256.      * @param repo
  257.      *            repository where objects are stored.
  258.      * @param reader
  259.      *            reader to read from the repository with.
  260.      */
  261.     public PackWriter(Repository repo, ObjectReader reader) {
  262.         this(new PackConfig(repo), reader);
  263.     }

  264.     /**
  265.      * Create writer with a specified configuration.
  266.      * <p>
  267.      * Objects for packing are specified in {@link #preparePack(Iterator)} or
  268.      * {@link #preparePack(ProgressMonitor, Set, Set)}.
  269.      *
  270.      * @param config
  271.      *            configuration for the pack writer.
  272.      * @param reader
  273.      *            reader to read from the repository with.
  274.      */
  275.     public PackWriter(PackConfig config, ObjectReader reader) {
  276.         this(config, reader, null);
  277.     }

  278.     /**
  279.      * Create writer with a specified configuration.
  280.      * <p>
  281.      * Objects for packing are specified in {@link #preparePack(Iterator)} or
  282.      * {@link #preparePack(ProgressMonitor, Set, Set)}.
  283.      *
  284.      * @param config
  285.      *            configuration for the pack writer.
  286.      * @param reader
  287.      *            reader to read from the repository with.
  288.      * @param statsAccumulator
  289.      *            accumulator for statistics
  290.      */
  291.     public PackWriter(PackConfig config, final ObjectReader reader,
  292.             @Nullable PackStatistics.Accumulator statsAccumulator) {
  293.         this.config = config;
  294.         this.reader = reader;
  295.         if (reader instanceof ObjectReuseAsIs)
  296.             reuseSupport = ((ObjectReuseAsIs) reader);
  297.         else
  298.             reuseSupport = null;

  299.         deltaBaseAsOffset = config.isDeltaBaseAsOffset();
  300.         reuseDeltas = config.isReuseDeltas();
  301.         reuseValidate = true; // be paranoid by default
  302.         stats = statsAccumulator != null ? statsAccumulator
  303.                 : new PackStatistics.Accumulator();
  304.         state = new MutableState();
  305.         selfRef = new WeakReference<>(this);
  306.         instances.put(selfRef, Boolean.TRUE);
  307.     }

  308.     /**
  309.      * Set the {@code ObjectCountCallback}.
  310.      * <p>
  311.      * It should be set before calling
  312.      * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
  313.      *
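     * <p>
     * For illustration, a minimal sketch; {@code ObjectCountCallback} declares a
     * single method, so a lambda suffices. The callback may throw
     * {@link org.eclipse.jgit.transport.WriteAbortedException} to cancel the
     * write; this sketch only observes the count:
     *
     * <pre>
     * {@code
     * writer.setObjectCountCallback(count -> {
     *     // invoked with the total object count before any object is written
     *     System.out.println("pack will contain " + count + " objects");
     * });
     * }
     * </pre>
     *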
  314.      * @param callback
  315.      *            the callback to set
  316.      * @return this object for chaining.
  317.      */
  318.     public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
  319.         this.callback = callback;
  320.         return this;
  321.     }

  322.     /**
  323.      * Records the set of shallow commits in the client.
  324.      *
  325.      * @param clientShallowCommits
  326.      *            the shallow commits in the client
  327.      */
  328.     public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
  329.         stats.clientShallowCommits = Collections
  330.                 .unmodifiableSet(new HashSet<>(clientShallowCommits));
  331.     }

  332.     /**
  333.      * Check whether the writer can store a delta base as an offset (new style,
  334.      * reducing pack size) or should store it as an object id (legacy style,
  335.      * compatible with old readers).
  336.      *
  337.      * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
  338.      *
  339.      * @return true if delta base is stored as an offset; false if it is stored
  340.      *         as an object id.
  341.      */
  342.     public boolean isDeltaBaseAsOffset() {
  343.         return deltaBaseAsOffset;
  344.     }

  345.     /**
  346.      * Set the writer's delta base format. The delta base can be written as an
  347.      * offset in a pack file (new approach, reducing file size) or as an object
  348.      * id (legacy approach, compatible with old readers).
  349.      *
  350.      * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
  351.      *
  352.      * @param deltaBaseAsOffset
  353.      *            boolean indicating whether delta base can be stored as an
  354.      *            offset.
  355.      */
  356.     public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
  357.         this.deltaBaseAsOffset = deltaBaseAsOffset;
  358.     }

  359.     /**
  360.      * Check if the writer will reuse commits that are already stored as deltas.
  361.      *
  362.      * @return true if the writer would reuse commits stored as deltas, assuming
  363.      *         delta reuse is already enabled.
  364.      */
  365.     public boolean isReuseDeltaCommits() {
  366.         return reuseDeltaCommits;
  367.     }

  368.     /**
  369.      * Set the writer to reuse existing delta versions of commits.
  370.      *
  371.      * @param reuse
  372.      *            if true, the writer will reuse any commits stored as deltas.
  373.      *            By default the writer does not reuse delta commits.
  374.      */
  375.     public void setReuseDeltaCommits(boolean reuse) {
  376.         reuseDeltaCommits = reuse;
  377.     }

  378.     /**
  379.      * Check if the writer validates objects before copying them.
  380.      *
  381.      * @return true if validation is enabled; false if the reader will handle
  382.      *         object validation as a side-effect of it consuming the output.
  383.      */
  384.     public boolean isReuseValidatingObjects() {
  385.         return reuseValidate;
  386.     }

  387.     /**
  388.      * Enable (or disable) object validation during packing.
  389.      *
  390.      * @param validate
  391.      *            if true the pack writer will validate an object before it is
  392.      *            put into the output. This additional validation work may be
  393.      *            necessary to avoid propagating corruption from one local pack
  394.      *            file to another local pack file.
  395.      */
  396.     public void setReuseValidatingObjects(boolean validate) {
  397.         reuseValidate = validate;
  398.     }

  399.     /**
  400.      * Whether this writer is producing a thin pack.
  401.      *
  402.      * @return true if this writer is producing a thin pack.
  403.      */
  404.     public boolean isThin() {
  405.         return thin;
  406.     }

  407.     /**
  408.      * Whether the writer may pack objects whose delta base is not within the
  409.      * set of objects to pack.
  410.      *
  411.      * @param packthin
  412.      *            a boolean indicating whether the writer may pack objects whose
  413.      *            delta base is not within the set of objects to pack but belongs
  414.      *            to the receiving party's repository (uninteresting/boundary) as
  415.      *            determined by the set; this kind of pack is used only for
  416.      *            transport; true to produce a thin pack, false otherwise.
  417.      */
  418.     public void setThin(boolean packthin) {
  419.         thin = packthin;
  420.     }

  421.     /**
  422.      * Whether to reuse cached packs.
  423.      *
  424.      * @return {@code true} to reuse cached packs. If true, index creation is
  425.      *         not available.
  426.      */
  427.     public boolean isUseCachedPacks() {
  428.         return useCachedPacks;
  429.     }

  430.     /**
  431.      * Whether to use cached packs
  432.      *
  433.      * @param useCached
  434.      *            if set to {@code true} and a cached pack is present, it will
  435.      *            be appended onto the end of a thin-pack, reducing the amount
  436.      *            of working set space and CPU used by PackWriter. Enabling this
  437.      *            feature prevents PackWriter from creating an index for the
  438.      *            newly created pack, so it is only suitable for writing to a
  439.      *            network client, where the client will make the index.
  440.      */
  441.     public void setUseCachedPacks(boolean useCached) {
  442.         useCachedPacks = useCached;
  443.     }

  444.     /**
  445.      * Whether to use bitmaps
  446.      *
  447.      * @return {@code true} to use bitmaps for ObjectWalks, if available.
  448.      */
  449.     public boolean isUseBitmaps() {
  450.         return useBitmaps;
  451.     }

  452.     /**
  453.      * Whether to use bitmaps
  454.      *
  455.      * @param useBitmaps
  456.      *            if set to true, bitmaps will be used when preparing a pack.
  457.      */
  458.     public void setUseBitmaps(boolean useBitmaps) {
  459.         this.useBitmaps = useBitmaps;
  460.     }

  461.     /**
  462.      * Whether the index file cannot be created by this PackWriter.
  463.      *
  464.      * @return {@code true} if the index file cannot be created by this
  465.      *         PackWriter.
  466.      */
  467.     public boolean isIndexDisabled() {
  468.         return indexDisabled || !cachedPacks.isEmpty();
  469.     }

  470.     /**
  471.      * Whether to disable creation of the index file.
  472.      *
  473.      * @param noIndex
  474.      *            {@code true} to disable creation of the index file.
  475.      */
  476.     public void setIndexDisabled(boolean noIndex) {
  477.         this.indexDisabled = noIndex;
  478.     }

  479.     /**
  480.      * Whether to ignore missing uninteresting objects
  481.      *
  482.      * @return {@code true} to ignore objects that are uninteresting and also
  483.      *         not found on local disk; false to throw a
  484.      *         {@link org.eclipse.jgit.errors.MissingObjectException} out of
  485.      *         {@link #preparePack(ProgressMonitor, Set, Set)} if an
  486.      *         uninteresting object is not in the source repository. By default,
  487.      *         true, permitting uninteresting objects to be gracefully ignored.
  488.      */
  489.     public boolean isIgnoreMissingUninteresting() {
  490.         return ignoreMissingUninteresting;
  491.     }

  492.     /**
  493.      * Whether the writer should ignore non-existing uninteresting objects
  494.      *
  495.      * @param ignore
  496.      *            {@code true} if the writer should ignore non-existing
  497.      *            uninteresting objects while constructing the set of objects to
  498.      *            pack; false otherwise - non-existing uninteresting objects may
  499.      *            cause a {@link org.eclipse.jgit.errors.MissingObjectException}
  500.      */
  501.     public void setIgnoreMissingUninteresting(boolean ignore) {
  502.         ignoreMissingUninteresting = ignore;
  503.     }

  504.     /**
  505.      * Set the tag targets that should be hoisted earlier during packing.
  506.      * <p>
  507.      * Callers may put objects into this set before invoking any of the
  508.      * preparePack methods to influence where an annotated tag's target is
  509.      * stored within the resulting pack. Typically these will be clustered
  510.      * together, and hoisted earlier in the file even if they are ancient
  511.      * revisions, allowing readers to find tag targets with better locality.
  512.      *
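     * <p>
     * For illustration, a sketch that peels all tag refs of a repository and
     * registers their targets ({@code repo} and {@code writer} are
     * caller-supplied placeholders; exception handling is omitted):
     *
     * <pre>
     * {@code
     * Set<ObjectId> tagTargets = new HashSet<>();
     * for (Ref r : repo.getRefDatabase().getRefsByPrefix(Constants.R_TAGS)) {
     *     Ref peeled = repo.getRefDatabase().peel(r);
     *     ObjectId target = peeled.getPeeledObjectId();
     *     tagTargets.add(target != null ? target : peeled.getObjectId());
     * }
     * writer.setTagTargets(tagTargets);
     * }
     * </pre>
     *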
  513.      * @param objects
  514.      *            objects that annotated tags point at.
  515.      */
  516.     public void setTagTargets(Set<ObjectId> objects) {
  517.         tagTargets = objects;
  518.     }

  519.     /**
  520.      * Configure this pack for a shallow clone.
  521.      *
  522.      * @param depth
  523.      *            maximum depth of history to return. 1 means return only the
  524.      *            "wants".
  525.      * @param unshallow
  526.      *            objects which used to be shallow on the client, but are being
  527.      *            extended as part of this fetch
  528.      */
  529.     public void setShallowPack(int depth,
  530.             Collection<? extends ObjectId> unshallow) {
  531.         this.shallowPack = true;
  532.         this.depth = depth;
  533.         this.unshallowObjects = unshallow;
  534.     }

  535.     /**
  536.      * @param filter the filter which indicates what and what not this writer
  537.      *            should include
  538.      */
  539.     public void setFilterSpec(@NonNull FilterSpec filter) {
  540.         filterSpec = requireNonNull(filter);
  541.     }

  542.     /**
  543.      * @param config configuration related to packfile URIs
  544.      * @since 5.5
  545.      */
  546.     public void setPackfileUriConfig(PackfileUriConfig config) {
  547.         packfileUriConfig = config;
  548.     }

  549.     /**
  550.      * Returns the number of objects in the pack file that was created by this writer.
  551.      *
  552.      * @return number of objects in pack.
  553.      * @throws java.io.IOException
  554.      *             a cached pack cannot supply its object count.
  555.      */
  556.     public long getObjectCount() throws IOException {
  557.         if (stats.totalObjects == 0) {
  558.             long objCnt = 0;

  559.             objCnt += objectsLists[OBJ_COMMIT].size();
  560.             objCnt += objectsLists[OBJ_TREE].size();
  561.             objCnt += objectsLists[OBJ_BLOB].size();
  562.             objCnt += objectsLists[OBJ_TAG].size();

  563.             for (CachedPack pack : cachedPacks)
  564.                 objCnt += pack.getObjectCount();
  565.             return objCnt;
  566.         }
  567.         return stats.totalObjects;
  568.     }

  569.     private long getUnoffloadedObjectCount() throws IOException {
  570.         long objCnt = 0;

  571.         objCnt += objectsLists[OBJ_COMMIT].size();
  572.         objCnt += objectsLists[OBJ_TREE].size();
  573.         objCnt += objectsLists[OBJ_BLOB].size();
  574.         objCnt += objectsLists[OBJ_TAG].size();

  575.         for (CachedPack pack : cachedPacks) {
  576.             CachedPackUriProvider.PackInfo packInfo =
  577.                 packfileUriConfig.cachedPackUriProvider.getInfo(
  578.                     pack, packfileUriConfig.protocolsSupported);
  579.             if (packInfo == null) {
  580.                 objCnt += pack.getObjectCount();
  581.             }
  582.         }

  583.         return objCnt;
  584.     }

  585.     /**
  586.      * Returns the object ids in the pack file that was created by this writer.
  587.      * <p>
  588.      * This method can only be invoked after
  589.      * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
  590.      * been invoked and completed successfully.
  591.      *
  592.      * @return set of objects in pack.
  593.      * @throws java.io.IOException
  594.      *             a cached pack cannot supply its object ids.
  595.      */
  596.     public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
  597.             throws IOException {
  598.         if (!cachedPacks.isEmpty())
  599.             throw new IOException(
  600.                     JGitText.get().cachedPacksPreventsListingObjects);

  601.         if (writeBitmaps != null) {
  602.             return writeBitmaps.getObjectSet();
  603.         }

  604.         ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
  605.         for (BlockList<ObjectToPack> objList : objectsLists) {
  606.             if (objList != null) {
  607.                 for (ObjectToPack otp : objList)
  608.                     r.add(new ObjectIdOwnerMap.Entry(otp) {
  609.                         // A new entry that copies the ObjectId
  610.                     });
  611.             }
  612.         }
  613.         return r;
  614.     }

  615.     /**
  616.      * Add a pack index whose contents should be excluded from the result.
  617.      *
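     * <p>
     * For illustration, a sketch assuming the caller already holds the ids to
     * leave out in a plain {@code Set} ({@code loadPackedIds()} is a
     * hypothetical caller-supplied helper); {@link org.eclipse.jgit.lib.ObjectIdSet}
     * declares a single {@code contains} method, so a method reference suffices:
     *
     * <pre>
     * {@code
     * Set<ObjectId> alreadyPacked = loadPackedIds(); // hypothetical helper
     * writer.excludeObjects(alreadyPacked::contains);
     * }
     * </pre>
     *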
  618.      * @param idx
  619.      *            objects in this index will not be in the output pack.
  620.      */
  621.     public void excludeObjects(ObjectIdSet idx) {
  622.         if (excludeInPacks == null) {
  623.             excludeInPacks = new ObjectIdSet[] { idx };
  624.             excludeInPackLast = idx;
  625.         } else {
  626.             int cnt = excludeInPacks.length;
  627.             ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
  628.             System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
  629.             newList[cnt] = idx;
  630.             excludeInPacks = newList;
  631.         }
  632.     }

  633.     /**
  634.      * Prepare the list of objects to be written to the pack stream.
  635.      * <p>
  636.      * The iterator <b>exactly</b> determines which objects are included in the
  637.      * pack and the order in which they appear (except that ordering objects by
  638.      * type is not needed at input). This order should conform to the general
  639.      * rules of ordering objects in git - by recency and path (type and
  640.      * delta-base first is handled internally) - and the caller is responsible
  641.      * for guaranteeing this order. The iterator must return each object id to
  642.      * be written exactly once.
  643.      * </p>
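     * <p>
     * For illustration, a minimal sketch that packs one explicitly chosen
     * commit and its root tree ({@code reader} and {@code commitId} are
     * caller-supplied placeholders; blobs reachable from the tree are not
     * added here, so the resulting pack is not self-contained):
     *
     * <pre>
     * {@code
     * List<RevObject> objects = new ArrayList<>();
     * try (RevWalk rw = new RevWalk(reader)) {
     *     RevCommit c = rw.parseCommit(commitId);
     *     objects.add(c);
     *     objects.add(c.getTree());
     * }
     * writer.preparePack(objects.iterator());
     * }
     * </pre>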
  644.      *
  645.      * @param objectsSource
  646.      *            iterator of objects to store in the pack; the order of objects
  647.      *            within each type is important, while ordering by type is not needed;
  648.      *            allowed types for objects are
  649.      *            {@link org.eclipse.jgit.lib.Constants#OBJ_COMMIT},
  650.      *            {@link org.eclipse.jgit.lib.Constants#OBJ_TREE},
  651.      *            {@link org.eclipse.jgit.lib.Constants#OBJ_BLOB} and
  652.      *            {@link org.eclipse.jgit.lib.Constants#OBJ_TAG}; objects
  653.      *            returned by the iterator may later be reused by the caller, as
  654.      *            the object id and type are copied internally in each iteration.
  655.      * @throws java.io.IOException
  656.      *             when an I/O problem occurs while reading objects.
  657.      */
  658.     public void preparePack(@NonNull Iterator<RevObject> objectsSource)
  659.             throws IOException {
  660.         while (objectsSource.hasNext()) {
  661.             addObject(objectsSource.next());
  662.         }
  663.     }

  664.     /**
  665.      * Prepare the list of objects to be written to the pack stream.
  666.      *
  667.      * <p>
  668.      * PackWriter will concat and write out the specified packs as-is.
  669.      *
  670.      * @param c
  671.      *            cached packs to be written.
  672.      */
  673.     public void preparePack(Collection<? extends CachedPack> c) {
  674.         cachedPacks.addAll(c);
  675.     }

  676.     /**
  677.      * Prepare the list of objects to be written to the pack stream.
  678.      * <p>
  679.      * Based on these two sets, another set of objects to put in the pack file
  680.      * is created: this set consists of all objects reachable from the
  681.      * interesting objects, except the uninteresting objects and their ancestors.
  682.      * This method uses {@link org.eclipse.jgit.revwalk.ObjectWalk}
  683.      * extensively to find the appropriate set of output objects and their
  684.      * optimal order in the output pack. The order is consistent with general git
  685.      * in-pack rules: sorted by object type, recency, path, and delta-base first.
  686.      * </p>
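     * <p>
     * For illustration, a sketch of an incremental fetch where the client
     * already has {@code oldTip} and asks for {@code newTip}; both ids are
     * caller-supplied placeholders, and real progress monitors may replace
     * {@code NullProgressMonitor.INSTANCE}:
     *
     * <pre>
     * {@code
     * Set<ObjectId> want = Collections.singleton(newTip);
     * Set<ObjectId> have = Collections.singleton(oldTip);
     * writer.preparePack(NullProgressMonitor.INSTANCE, want, have);
     * }
     * </pre>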
  687.      *
  688.      * @param countingMonitor
  689.      *            progress during object enumeration.
  690.      * @param want
  691.      *            collection of objects to be marked as interesting (start
  692.      *            points of graph traversal). Must not be {@code null}.
  693.      * @param have
  694.      *            collection of objects to be marked as uninteresting (end
  695.      *            points of graph traversal). Pass {@link #NONE} if all objects
  696.      *            reachable from {@code want} are desired, such as when serving
  697.      *            a clone.
  698.      * @throws java.io.IOException
  699.      *             when an I/O problem occurs while reading objects.
  700.      */
  701.     public void preparePack(ProgressMonitor countingMonitor,
  702.             @NonNull Set<? extends ObjectId> want,
  703.             @NonNull Set<? extends ObjectId> have) throws IOException {
  704.         preparePack(countingMonitor, want, have, NONE, NONE);
  705.     }

  706.     /**
  707.      * Prepare the list of objects to be written to the pack stream.
  708.      * <p>
  709.      * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
  710.      * specifying commits that should not be walked past ("shallow" commits).
  711.      * The caller is responsible for filtering out commits that should not be
  712.      * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
  713.      * the shallow set.
  714.      *
  715.      * @param countingMonitor
  716.      *            progress during object enumeration.
  717.      * @param want
  718.      *            objects of interest, ancestors of which will be included in
  719.      *            the pack. Must not be {@code null}.
  720.      * @param have
  721.      *            objects whose ancestors (up to and including {@code shallow}
  722.      *            commits) do not need to be included in the pack because they
  723.      *            are already available from elsewhere. Must not be
  724.      *            {@code null}.
  725.      * @param shallow
  726.      *            commits indicating the boundary of the history marked with
  727.      *            {@code have}. Shallow commits have parents but those parents
  728.      *            are considered not to be already available. Parents of
  729.      *            {@code shallow} commits and earlier generations will be
  730.      *            included in the pack if requested by {@code want}. Must not be
  731.      *            {@code null}.
  732.      * @throws java.io.IOException
  733.      *             an I/O problem occurred while reading objects.
  734.      */
  735.     public void preparePack(ProgressMonitor countingMonitor,
  736.             @NonNull Set<? extends ObjectId> want,
  737.             @NonNull Set<? extends ObjectId> have,
  738.             @NonNull Set<? extends ObjectId> shallow) throws IOException {
  739.         preparePack(countingMonitor, want, have, shallow, NONE);
  740.     }

  741.     /**
  742.      * Prepare the list of objects to be written to the pack stream.
  743.      * <p>
  744.      * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
  745.      * specifying commits that should not be walked past ("shallow" commits).
  746.      * The caller is responsible for filtering out commits that should not be
  747.      * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
  748.      * the shallow set.
  749.      *
  750.      * @param countingMonitor
  751.      *            progress during object enumeration.
  752.      * @param want
  753.      *            objects of interest, ancestors of which will be included in
  754.      *            the pack. Must not be {@code null}.
  755.      * @param have
  756.      *            objects whose ancestors (up to and including {@code shallow}
  757.      *            commits) do not need to be included in the pack because they
  758.      *            are already available from elsewhere. Must not be
  759.      *            {@code null}.
  760.      * @param shallow
  761.      *            commits indicating the boundary of the history marked with
  762.      *            {@code have}. Shallow commits have parents but those parents
  763.      *            are considered not to be already available. Parents of
  764.      *            {@code shallow} commits and earlier generations will be
  765.      *            included in the pack if requested by {@code want}. Must not be
  766.      *            {@code null}.
  767.      * @param noBitmaps
  768.      *            collection of objects to be excluded from bitmap commit
  769.      *            selection.
  770.      * @throws java.io.IOException
  771.      *             an I/O problem occurred while reading objects.
  772.      */
  773.     public void preparePack(ProgressMonitor countingMonitor,
  774.             @NonNull Set<? extends ObjectId> want,
  775.             @NonNull Set<? extends ObjectId> have,
  776.             @NonNull Set<? extends ObjectId> shallow,
  777.             @NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
  778.         try (ObjectWalk ow = getObjectWalk()) {
  779.             ow.assumeShallow(shallow);
  780.             preparePack(countingMonitor, ow, want, have, noBitmaps);
  781.         }
  782.     }

  783.     private ObjectWalk getObjectWalk() {
  784.         return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
  785.                 : new ObjectWalk(reader);
  786.     }

  787.     /**
  788.      * A visitation policy which uses the depth at which the object is seen to
  789.      * decide if re-traversal is necessary. In particular, if the object has
  790.      * already been visited at this depth or shallower, it is not necessary to
  791.      * re-visit at this depth.
  792.      */
  793.     private static class DepthAwareVisitationPolicy
  794.             implements ObjectWalk.VisitationPolicy {
  795.         private final Map<ObjectId, Integer> lowestDepthVisited = new HashMap<>();

  796.         private final ObjectWalk walk;

  797.         DepthAwareVisitationPolicy(ObjectWalk walk) {
  798.             this.walk = requireNonNull(walk);
  799.         }

  800.         @Override
  801.         public boolean shouldVisit(RevObject o) {
  802.             Integer lastDepth = lowestDepthVisited.get(o);
  803.             if (lastDepth == null) {
  804.                 return true;
  805.             }
  806.             return walk.getTreeDepth() < lastDepth.intValue();
  807.         }

  808.         @Override
  809.         public void visited(RevObject o) {
  810.             lowestDepthVisited.put(o, Integer.valueOf(walk.getTreeDepth()));
  811.         }
  812.     }

  813.     /**
  814.      * Prepare the list of objects to be written to the pack stream.
  815.      * <p>
  816.      * Based on these two sets, another set of objects to put in the pack file
  817.      * is created: this set consists of all objects reachable from the
  818.      * interesting objects, except the uninteresting objects and their ancestors.
  819.      * This method uses {@link org.eclipse.jgit.revwalk.ObjectWalk}
  820.      * extensively to find the appropriate set of output objects and their
  821.      * optimal order in the output pack. The order is consistent with general git
  822.      * in-pack rules: sorted by object type, recency, path, and delta-base first.
  823.      * </p>
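     * <p>
     * For illustration, a sketch that supplies its own walk; with no shallow
     * commits or bitmap exclusions this is equivalent to
     * {@link #preparePack(ProgressMonitor, Set, Set)} ({@code reader},
     * {@code want} and {@code have} are caller-supplied placeholders):
     *
     * <pre>
     * {@code
     * try (ObjectWalk ow = new ObjectWalk(reader)) {
     *     writer.preparePack(NullProgressMonitor.INSTANCE, ow, want, have,
     *             PackWriter.NONE);
     * }
     * }
     * </pre>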
  824.      *
  825.      * @param countingMonitor
  826.      *            progress during object enumeration.
  827.      * @param walk
  828.      *            ObjectWalk to perform enumeration.
  829.      * @param interestingObjects
  830.      *            collection of objects to be marked as interesting (start
  831.      *            points of graph traversal). Must not be {@code null}.
  832.      * @param uninterestingObjects
  833.      *            collection of objects to be marked as uninteresting (end
  834.      *            points of graph traversal). Pass {@link #NONE} if all objects
  835.      *            reachable from {@code want} are desired, such as when serving
  836.      *            a clone.
  837.      * @param noBitmaps
  838.      *            collection of objects to be excluded from bitmap commit
  839.      *            selection.
  840.      * @throws java.io.IOException
  841.      *             when an I/O problem occurs while reading objects.
  842.      */
  843.     public void preparePack(ProgressMonitor countingMonitor,
  844.             @NonNull ObjectWalk walk,
  845.             @NonNull Set<? extends ObjectId> interestingObjects,
  846.             @NonNull Set<? extends ObjectId> uninterestingObjects,
  847.             @NonNull Set<? extends ObjectId> noBitmaps)
  848.             throws IOException {
  849.         if (countingMonitor == null)
  850.             countingMonitor = NullProgressMonitor.INSTANCE;
  851.         if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
  852.             throw new IllegalArgumentException(
  853.                     JGitText.get().shallowPacksRequireDepthWalk);
  854.         if (filterSpec.getTreeDepthLimit() >= 0) {
  855.             walk.setVisitationPolicy(new DepthAwareVisitationPolicy(walk));
  856.         }
  857.         findObjectsToPack(countingMonitor, walk, interestingObjects,
  858.                 uninterestingObjects, noBitmaps);
  859.     }

  860.     /**
  861.      * Determine if the pack file will contain the requested object.
  862.      *
  863.      * @param id
  864.      *            the object to test the existence of.
  865.      * @return true if the object will appear in the output pack file.
  866.      * @throws java.io.IOException
  867.      *             a cached pack cannot be examined.
  868.      */
  869.     public boolean willInclude(AnyObjectId id) throws IOException {
  870.         ObjectToPack obj = objectsMap.get(id);
  871.         return obj != null && !obj.isEdge();
  872.     }

  873.     /**
  874.      * Lookup the ObjectToPack object for a given ObjectId.
  875.      *
  876.      * @param id
  877.      *            the object to find in the pack.
  878.      * @return the object we are packing, or null.
  879.      */
  880.     public ObjectToPack get(AnyObjectId id) {
  881.         ObjectToPack obj = objectsMap.get(id);
  882.         return obj != null && !obj.isEdge() ? obj : null;
  883.     }

  884.     /**
  885.      * Computes the SHA-1 of the lexicographically sorted object ids written in
  886.      * this pack, as used to name a pack file in a repository.
  887.      *
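     * <p>
     * For illustration, deriving the conventional pack and index file names
     * from the computed id ({@code dir} is a caller-supplied directory):
     *
     * <pre>
     * {@code
     * ObjectId name = writer.computeName();
     * File packFile = new File(dir, "pack-" + name.name() + ".pack");
     * File idxFile = new File(dir, "pack-" + name.name() + ".idx");
     * }
     * </pre>
     *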
  888.      * @return ObjectId representing SHA-1 name of a pack that was created.
  889.      */
  890.     public ObjectId computeName() {
  891.         final byte[] buf = new byte[OBJECT_ID_LENGTH];
  892.         final MessageDigest md = Constants.newMessageDigest();
  893.         for (ObjectToPack otp : sortByName()) {
  894.             otp.copyRawTo(buf, 0);
  895.             md.update(buf, 0, OBJECT_ID_LENGTH);
  896.         }
  897.         return ObjectId.fromRaw(md.digest());
  898.     }

  899.     /**
  900.      * Returns the index format version that will be written.
  901.      * <p>
  902.      * This method can only be invoked after
  903.      * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
  904.      * been invoked and completed successfully.
  905.      *
  906.      * @return the index format version.
  907.      */
  908.     public int getIndexVersion() {
  909.         int indexVersion = config.getIndexVersion();
  910.         if (indexVersion <= 0) {
  911.             for (BlockList<ObjectToPack> objs : objectsLists)
  912.                 indexVersion = Math.max(indexVersion,
  913.                         PackIndexWriter.oldestPossibleFormat(objs));
  914.         }
  915.         return indexVersion;
  916.     }

  917.     /**
  918.      * Create an index file to match the pack file just written.
  919.      * <p>
  920.      * Called after
  921.      * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
  922.      * <p>
  923.      * Writing an index is only required for local pack storage. Packs sent on
  924.      * the network do not need to create an index.
  925.      *
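     * <p>
     * For illustration, a sketch that stores the index next to the pack using
     * {@link #computeName()} for the file name ({@code dir} is a
     * caller-supplied directory; the pack itself must have been written
     * first):
     *
     * <pre>
     * {@code
     * File idx = new File(dir, "pack-" + writer.computeName().name() + ".idx");
     * try (OutputStream out = new BufferedOutputStream(
     *         new FileOutputStream(idx))) {
     *     writer.writeIndex(out);
     * }
     * }
     * </pre>
     *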
  926.      * @param indexStream
  927.      *            output for the index data. Caller is responsible for closing
  928.      *            this stream.
  929.      * @throws java.io.IOException
  930.      *             the index data could not be written to the supplied stream.
  931.      */
  932.     public void writeIndex(OutputStream indexStream) throws IOException {
  933.         if (isIndexDisabled())
  934.             throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);

  935.         long writeStart = System.currentTimeMillis();
  936.         final PackIndexWriter iw = PackIndexWriter.createVersion(
  937.                 indexStream, getIndexVersion());
  938.         iw.write(sortByName(), packcsum);
  939.         stats.timeWriting += System.currentTimeMillis() - writeStart;
  940.     }

  941.     /**
  942.      * Create a bitmap index file to match the pack file just written.
  943.      * <p>
  944.      * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
  945.      *
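     * <p>
     * For illustration, a sketch where {@code bitmapOut} is a caller-supplied
     * stream; {@link #prepareBitmapIndex(ProgressMonitor)} returns {@code false}
     * when bitmaps cannot be built, in which case nothing should be written:
     *
     * <pre>
     * {@code
     * if (writer.prepareBitmapIndex(NullProgressMonitor.INSTANCE)) {
     *     writer.writeBitmapIndex(bitmapOut);
     * }
     * }
     * </pre>
     *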
  946.      * @param bitmapIndexStream
  947.      *            output for the bitmap index data. Caller is responsible for
  948.      *            closing this stream.
  949.      * @throws java.io.IOException
  950.      *             the index data could not be written to the supplied stream.
  951.      */
  952.     public void writeBitmapIndex(OutputStream bitmapIndexStream)
  953.             throws IOException {
  954.         if (writeBitmaps == null)
  955.             throw new IOException(JGitText.get().bitmapsMustBePrepared);

  956.         long writeStart = System.currentTimeMillis();
  957.         final PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
  958.         iw.write(writeBitmaps, packcsum);
  959.         stats.timeWriting += System.currentTimeMillis() - writeStart;
  960.     }

  961.     private List<ObjectToPack> sortByName() {
  962.         if (sortedByName == null) {
  963.             int cnt = 0;
  964.             cnt += objectsLists[OBJ_COMMIT].size();
  965.             cnt += objectsLists[OBJ_TREE].size();
  966.             cnt += objectsLists[OBJ_BLOB].size();
  967.             cnt += objectsLists[OBJ_TAG].size();

  968.             sortedByName = new BlockList<>(cnt);
  969.             sortedByName.addAll(objectsLists[OBJ_COMMIT]);
  970.             sortedByName.addAll(objectsLists[OBJ_TREE]);
  971.             sortedByName.addAll(objectsLists[OBJ_BLOB]);
  972.             sortedByName.addAll(objectsLists[OBJ_TAG]);
  973.             Collections.sort(sortedByName);
  974.         }
  975.         return sortedByName;
  976.     }

  977.     private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
  978.             long cnt) {
  979.         state.phase = phase;
  980.         String task;
  981.         switch (phase) {
  982.         case COUNTING:
  983.             task = JGitText.get().countingObjects;
  984.             break;
  985.         case GETTING_SIZES:
  986.             task = JGitText.get().searchForSizes;
  987.             break;
  988.         case FINDING_SOURCES:
  989.             task = JGitText.get().searchForReuse;
  990.             break;
  991.         case COMPRESSING:
  992.             task = JGitText.get().compressingObjects;
  993.             break;
  994.         case WRITING:
  995.             task = JGitText.get().writingObjects;
  996.             break;
  997.         case BUILDING_BITMAPS:
  998.             task = JGitText.get().buildingBitmaps;
  999.             break;
  1000.         default:
  1001.             throw new IllegalArgumentException(
  1002.                     MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
  1003.         }
  1004.         monitor.beginTask(task, (int) cnt);
  1005.     }

  1006.     private void endPhase(ProgressMonitor monitor) {
  1007.         monitor.endTask();
  1008.     }

  1009.     /**
  1010.      * Write the prepared pack to the supplied stream.
  1011.      * <p>
  1012.      * Called after
  1013.      * {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set, Set)} or
  1014.      * {@link #preparePack(ProgressMonitor, Set, Set)}.
  1015.      * <p>
  1016.      * Performs delta search if enabled and writes the pack stream.
  1017.      * <p>
  1018.      * For all reused objects, the data checksum (Adler32/CRC32) is computed and
  1019.      * validated against the existing checksum.
  1020.      *
  1021.      * @param compressMonitor
  1022.      *            progress monitor to report object compression work.
  1023.      * @param writeMonitor
  1024.      *            progress monitor to report the number of objects written.
  1025.      * @param packStream
  1026.      *            output stream of pack data. The stream should be buffered by
  1027.      *            the caller. The caller is responsible for closing the stream.
  1028.      * @throws java.io.IOException
  1029.      *             an error occurred reading a local object's data to include in
  1030.      *             the pack, or writing compressed object data to the output
  1031.      *             stream.
  1032.      * @throws WriteAbortedException
  1033.      *             the write operation is aborted by
  1034.      *             {@link org.eclipse.jgit.transport.ObjectCountCallback} .
  1035.      */
  1036.     public void writePack(ProgressMonitor compressMonitor,
  1037.             ProgressMonitor writeMonitor, OutputStream packStream)
  1038.             throws IOException {
  1039.         if (compressMonitor == null)
  1040.             compressMonitor = NullProgressMonitor.INSTANCE;
  1041.         if (writeMonitor == null)
  1042.             writeMonitor = NullProgressMonitor.INSTANCE;

  1043.         excludeInPacks = null;
  1044.         excludeInPackLast = null;

  1045.         boolean needSearchForReuse = reuseSupport != null && (
  1046.                    reuseDeltas
  1047.                 || config.isReuseObjects()
  1048.                 || !cachedPacks.isEmpty());

  1049.         if (compressMonitor instanceof BatchingProgressMonitor) {
  1050.             long delay = 1000;
  1051.             if (needSearchForReuse && config.isDeltaCompress())
  1052.                 delay = 500;
  1053.             ((BatchingProgressMonitor) compressMonitor).setDelayStart(
  1054.                     delay,
  1055.                     TimeUnit.MILLISECONDS);
  1056.         }

  1057.         if (needSearchForReuse)
  1058.             searchForReuse(compressMonitor);
  1059.         if (config.isDeltaCompress())
  1060.             searchForDeltas(compressMonitor);

  1061.         crc32 = new CRC32();
  1062.         final PackOutputStream out = new PackOutputStream(
  1063.             writeMonitor,
  1064.             isIndexDisabled()
  1065.                 ? packStream
  1066.                 : new CheckedOutputStream(packStream, crc32),
  1067.             this);

  1068.         long objCnt = packfileUriConfig == null ? getObjectCount() :
  1069.             getUnoffloadedObjectCount();
  1070.         stats.totalObjects = objCnt;
  1071.         if (callback != null)
  1072.             callback.setObjectCount(objCnt);
  1073.         beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
  1074.         long writeStart = System.currentTimeMillis();
  1075.         try {
  1076.             List<CachedPack> unwrittenCachedPacks;

  1077.             if (packfileUriConfig != null) {
  1078.                 unwrittenCachedPacks = new ArrayList<>();
  1079.                 CachedPackUriProvider p = packfileUriConfig.cachedPackUriProvider;
  1080.                 PacketLineOut o = packfileUriConfig.pckOut;

  1081.                 o.writeString("packfile-uris\n"); //$NON-NLS-1$
  1082.                 for (CachedPack pack : cachedPacks) {
  1083.                     CachedPackUriProvider.PackInfo packInfo = p.getInfo(
  1084.                             pack, packfileUriConfig.protocolsSupported);
  1085.                     if (packInfo != null) {
  1086.                         o.writeString(packInfo.getHash() + ' ' +
  1087.                                 packInfo.getUri() + '\n');
  1088.                         stats.offloadedPackfiles += 1;
  1089.                         stats.offloadedPackfileSize += packInfo.getSize();
  1090.                     } else {
  1091.                         unwrittenCachedPacks.add(pack);
  1092.                     }
  1093.                 }
  1094.                 packfileUriConfig.pckOut.writeDelim();
  1095.                 packfileUriConfig.pckOut.writeString("packfile\n"); //$NON-NLS-1$
  1096.             } else {
  1097.                 unwrittenCachedPacks = cachedPacks;
  1098.             }

  1099.             out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
  1100.             out.flush();

  1101.             writeObjects(out);
  1102.             if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
  1103.                 for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
  1104.                     if (typeStat == null)
  1105.                         continue;
  1106.                     stats.thinPackBytes += typeStat.bytes;
  1107.                 }
  1108.             }

  1109.             stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
  1110.             for (CachedPack pack : unwrittenCachedPacks) {
  1111.                 long deltaCnt = pack.getDeltaCount();
  1112.                 stats.reusedObjects += pack.getObjectCount();
  1113.                 stats.reusedDeltas += deltaCnt;
  1114.                 stats.totalDeltas += deltaCnt;
  1115.                 reuseSupport.copyPackAsIs(out, pack);
  1116.             }
  1117.             writeChecksum(out);
  1118.             out.flush();
  1119.         } finally {
  1120.             stats.timeWriting = System.currentTimeMillis() - writeStart;
  1121.             stats.depth = depth;

  1122.             for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
  1123.                 if (typeStat == null)
  1124.                     continue;
  1125.                 typeStat.cntDeltas += typeStat.reusedDeltas;
  1126.                 stats.reusedObjects += typeStat.reusedObjects;
  1127.                 stats.reusedDeltas += typeStat.reusedDeltas;
  1128.                 stats.totalDeltas += typeStat.cntDeltas;
  1129.             }
  1130.         }

  1131.         stats.totalBytes = out.length();
  1132.         reader.close();
  1133.         endPhase(writeMonitor);
  1134.     }

  1135.     /**
  1136.      * Get statistics of what this PackWriter did in order to create the final
  1137.      * pack stream.
  1138.      *
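     * <p>
     * For illustration (the accessors shown are standard
     * {@link org.eclipse.jgit.storage.pack.PackStatistics} getters):
     *
     * <pre>
     * {@code
     * PackStatistics s = writer.getStatistics();
     * long objects = s.getTotalObjects();
     * long bytes = s.getTotalBytes();
     * }
     * </pre>
     *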
  1139.      * @return description of what this PackWriter did in order to create the
  1140.      *         final pack stream. This should only be invoked after the calls to
  1141.      *         create the pack/index/bitmap have completed.
  1142.      */
  1143.     public PackStatistics getStatistics() {
  1144.         return new PackStatistics(stats);
  1145.     }

  1146.     /**
  1147.      * Get snapshot of the current state of this PackWriter.
  1148.      *
  1149.      * @return snapshot of the current state of this PackWriter.
  1150.      */
  1151.     public State getState() {
  1152.         return state.snapshot();
  1153.     }

  1154.     /**
  1155.      * {@inheritDoc}
  1156.      * <p>
  1157.      * Release all resources used by this writer.
  1158.      */
  1159.     @Override
  1160.     public void close() {
  1161.         reader.close();
  1162.         if (myDeflater != null) {
  1163.             myDeflater.end();
  1164.             myDeflater = null;
  1165.         }
  1166.         instances.remove(selfRef);
  1167.     }

  1168.     private void searchForReuse(ProgressMonitor monitor) throws IOException {
  1169.         long cnt = 0;
  1170.         cnt += objectsLists[OBJ_COMMIT].size();
  1171.         cnt += objectsLists[OBJ_TREE].size();
  1172.         cnt += objectsLists[OBJ_BLOB].size();
  1173.         cnt += objectsLists[OBJ_TAG].size();

  1174.         long start = System.currentTimeMillis();
  1175.         beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
  1176.         if (cnt <= 4096) {
  1177.             // For small object counts, do everything as one list.
  1178.             BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
  1179.             tmp.addAll(objectsLists[OBJ_TAG]);
  1180.             tmp.addAll(objectsLists[OBJ_COMMIT]);
  1181.             tmp.addAll(objectsLists[OBJ_TREE]);
  1182.             tmp.addAll(objectsLists[OBJ_BLOB]);
  1183.             searchForReuse(monitor, tmp);
  1184.             if (pruneCurrentObjectList) {
  1185.                 // If the list was pruned, we need to re-prune the main lists.
  1186.                 pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
  1187.                 pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
  1188.                 pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
  1189.                 pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
  1190.             }
  1191.         } else {
  1192.             searchForReuse(monitor, objectsLists[OBJ_TAG]);
  1193.             searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
  1194.             searchForReuse(monitor, objectsLists[OBJ_TREE]);
  1195.             searchForReuse(monitor, objectsLists[OBJ_BLOB]);
  1196.         }
  1197.         endPhase(monitor);
  1198.         stats.timeSearchingForReuse = System.currentTimeMillis() - start;

  1199.         if (config.isReuseDeltas() && config.getCutDeltaChains()) {
  1200.             cutDeltaChains(objectsLists[OBJ_TREE]);
  1201.             cutDeltaChains(objectsLists[OBJ_BLOB]);
  1202.         }
  1203.     }

  1204.     private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
  1205.             throws IOException, MissingObjectException {
  1206.         pruneCurrentObjectList = false;
  1207.         reuseSupport.selectObjectRepresentation(this, monitor, list);
  1208.         if (pruneCurrentObjectList)
  1209.             pruneEdgesFromObjectList(list);
  1210.     }

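            // Walk each reused delta chain from its tip, recording chain
            // lengths; once a chain reaches the configured maximum depth the
            // offending base is reselected as a non-delta so the written pack
            // never exceeds that depth.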
  1211.     private void cutDeltaChains(BlockList<ObjectToPack> list)
  1212.             throws IOException {
  1213.         int max = config.getMaxDeltaDepth();
  1214.         for (int idx = list.size() - 1; idx >= 0; idx--) {
  1215.             int d = 0;
  1216.             ObjectToPack b = list.get(idx).getDeltaBase();
  1217.             while (b != null) {
  1218.                 if (d < b.getChainLength())
  1219.                     break;
  1220.                 b.setChainLength(++d);
  1221.                 if (d >= max && b.isDeltaRepresentation()) {
  1222.                     reselectNonDelta(b);
  1223.                     break;
  1224.                 }
  1225.                 b = b.getDeltaBase();
  1226.             }
  1227.         }
  1228.         if (config.isDeltaCompress()) {
  1229.             for (ObjectToPack otp : list)
  1230.                 otp.clearChainLength();
  1231.         }
  1232.     }

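            // Delta compression phase: collect the trees and blobs that still
            // need a delta, size them, sort them so similar objects sit next
            // to each other, and run the window-based delta search. Edge
            // objects are added only as potential delta bases.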
  1233.     private void searchForDeltas(ProgressMonitor monitor)
  1234.             throws MissingObjectException, IncorrectObjectTypeException,
  1235.             IOException {
  1236.         // Commits and annotated tags tend to have too many differences to
  1237.         // really benefit from delta compression. Consequently just don't
  1238.         // bother examining those types here.
  1239.         //
  1240.         ObjectToPack[] list = new ObjectToPack[
  1241.                   objectsLists[OBJ_TREE].size()
  1242.                 + objectsLists[OBJ_BLOB].size()
  1243.                 + edgeObjects.size()];
  1244.         int cnt = 0;
  1245.         cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
  1246.         cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
  1247.         if (cnt == 0)
  1248.             return;
  1249.         int nonEdgeCnt = cnt;

  1250.         // Queue up any edge objects that we might delta against.  We won't
  1251.         // be sending these as we assume the other side has them, but we need
  1252.         // them in the search phase below.
  1253.         //
  1254.         for (ObjectToPack eo : edgeObjects) {
  1255.             eo.setWeight(0);
  1256.             list[cnt++] = eo;
  1257.         }

  1258.         // Compute the sizes of the objects so we can do a proper sort.
  1259.         // We let the reader skip missing objects if it chooses. For
  1260.         // some readers this can be a huge win. We detect missing objects
  1261.         // by having set the weights above to 0 and allowing the delta
  1262.         // search code to discover the missing object and skip over it, or
  1263.         // abort with an exception if we actually had to have it.
  1264.         //
  1265.         final long sizingStart = System.currentTimeMillis();
  1266.         beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
  1267.         AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
  1268.                 Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
  1269.         try {
  1270.             final long limit = Math.min(
  1271.                     config.getBigFileThreshold(),
  1272.                     Integer.MAX_VALUE);
  1273.             for (;;) {
  1274.                 try {
  1275.                     if (!sizeQueue.next())
  1276.                         break;
  1277.                 } catch (MissingObjectException notFound) {
  1278.                     monitor.update(1);
  1279.                     if (ignoreMissingUninteresting) {
  1280.                         ObjectToPack otp = sizeQueue.getCurrent();
  1281.                         if (otp != null && otp.isEdge()) {
  1282.                             otp.setDoNotDelta();
  1283.                             continue;
  1284.                         }

  1285.                         otp = objectsMap.get(notFound.getObjectId());
  1286.                         if (otp != null && otp.isEdge()) {
  1287.                             otp.setDoNotDelta();
  1288.                             continue;
  1289.                         }
  1290.                     }
  1291.                     throw notFound;
  1292.                 }

  1293.                 ObjectToPack otp = sizeQueue.getCurrent();
  1294.                 if (otp == null)
  1295.                     otp = objectsMap.get(sizeQueue.getObjectId());

  1296.                 long sz = sizeQueue.getSize();
  1297.                 if (DeltaIndex.BLKSZ < sz && sz < limit)
  1298.                     otp.setWeight((int) sz);
  1299.                 else
  1300.                     otp.setDoNotDelta(); // too small, or too big
  1301.                 monitor.update(1);
  1302.             }
  1303.         } finally {
  1304.             sizeQueue.release();
  1305.         }
  1306.         endPhase(monitor);
  1307.         stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;

  1308.         // Sort the objects by path hash so like files are near each other,
  1309.         // and then by size descending so that bigger files are first. This
  1310.         // applies "Linus' Law" which states that newer files tend to be the
  1311.         // bigger ones, because source files grow and hardly ever shrink.
  1312.         //
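                // The resulting sort key is: deltable objects before
                // do-not-delta ones, then object type, then path hash, then
                // edge objects before non-edge ones, and finally weight
                // descending.
                //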
  1313.         Arrays.sort(list, 0, cnt, (ObjectToPack a, ObjectToPack b) -> {
  1314.             int cmp = (a.isDoNotDelta() ? 1 : 0) - (b.isDoNotDelta() ? 1 : 0);
  1315.             if (cmp != 0) {
  1316.                 return cmp;
  1317.             }

  1318.             cmp = a.getType() - b.getType();
  1319.             if (cmp != 0) {
  1320.                 return cmp;
  1321.             }

  1322.             cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
  1323.             if (cmp != 0) {
  1324.                 return cmp;
  1325.             }

  1326.             cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
  1327.             if (cmp != 0) {
  1328.                 return cmp;
  1329.             }

  1330.             cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
  1331.             if (cmp != 0) {
  1332.                 return cmp;
  1333.             }

  1334.             return b.getWeight() - a.getWeight();
  1335.         });

  1336.         // Above we stored the objects we cannot delta onto the end.
  1337.         // Remove them from the list so we don't waste time on them.
  1338.         while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
  1339.             if (!list[cnt - 1].isEdge())
  1340.                 nonEdgeCnt--;
  1341.             cnt--;
  1342.         }
  1343.         if (cnt == 0)
  1344.             return;

  1345.         final long searchStart = System.currentTimeMillis();
  1346.         searchForDeltas(monitor, list, cnt);
  1347.         stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
  1348.         stats.timeCompressing = System.currentTimeMillis() - searchStart;

  1349.         for (int i = 0; i < cnt; i++)
  1350.             if (!list[i].isEdge() && list[i].isDeltaRepresentation())
  1351.                 stats.deltasFound++;
  1352.     }

  1353.     private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
  1354.         for (ObjectToPack otp : objectsLists[type]) {
  1355.             if (otp.isDoNotDelta()) // delta is disabled for this path
  1356.                 continue;
  1357.             if (otp.isDeltaRepresentation()) // already reusing a delta
  1358.                 continue;
  1359.             otp.setWeight(0);
  1360.             list[cnt++] = otp;
  1361.         }
  1362.         return cnt;
  1363.     }

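            // Drop the currently selected delta representation and ask
            // reuseSupport for a replacement, with delta reuse temporarily
            // disabled so the new choice cannot be a delta again.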
  1364.     private void reselectNonDelta(ObjectToPack otp) throws IOException {
  1365.         otp.clearDeltaBase();
  1366.         otp.clearReuseAsIs();
  1367.         boolean old = reuseDeltas;
  1368.         reuseDeltas = false;
  1369.         reuseSupport.selectObjectRepresentation(this,
  1370.                 NullProgressMonitor.INSTANCE,
  1371.                 Collections.singleton(otp));
  1372.         reuseDeltas = old;
  1373.     }

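            // Choose between single-threaded and parallel delta search. A
            // configured thread count of 0 means "use all available
            // processors"; inputs no larger than one search window stay
            // single-threaded.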
  1374.     private void searchForDeltas(final ProgressMonitor monitor,
  1375.             final ObjectToPack[] list, final int cnt)
  1376.             throws MissingObjectException, IncorrectObjectTypeException,
  1377.             LargeObjectException, IOException {
  1378.         int threads = config.getThreads();
  1379.         if (threads == 0)
  1380.             threads = Runtime.getRuntime().availableProcessors();
  1381.         if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
  1382.             singleThreadDeltaSearch(monitor, list, cnt);
  1383.         else
  1384.             parallelDeltaSearch(monitor, list, cnt, threads);
  1385.     }

  1386.     private void singleThreadDeltaSearch(ProgressMonitor monitor,
  1387.             ObjectToPack[] list, int cnt) throws IOException {
  1388.         long totalWeight = 0;
  1389.         for (int i = 0; i < cnt; i++) {
  1390.             ObjectToPack o = list[i];
  1391.             totalWeight += DeltaTask.getAdjustedWeight(o);
  1392.         }

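                // Scale the progress meter so its total stays below
                // DeltaTask.MAX_METER: each reported unit stands for
                // bytesPerUnit bytes of adjusted weight, rounded up.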
  1393.         long bytesPerUnit = 1;
  1394.         while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
  1395.             bytesPerUnit <<= 10;
  1396.         int cost = (int) (totalWeight / bytesPerUnit);
  1397.         if (totalWeight % bytesPerUnit != 0)
  1398.             cost++;

  1399.         beginPhase(PackingPhase.COMPRESSING, monitor, cost);
  1400.         new DeltaWindow(config, new DeltaCache(config), reader,
  1401.                 monitor, bytesPerUnit,
  1402.                 list, 0, cnt).search();
  1403.         endPhase(monitor);
  1404.     }

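            // Partition the object list into independent DeltaTask blocks and
            // run them on the caller-supplied executor if one was given,
            // otherwise on a temporary fixed-size thread pool that is shut
            // down afterwards. Failures from worker threads are collected and
            // rethrown as if the search had been single-threaded.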
  1405.     @SuppressWarnings("Finally")
  1406.     private void parallelDeltaSearch(ProgressMonitor monitor,
  1407.             ObjectToPack[] list, int cnt, int threads) throws IOException {
  1408.         DeltaCache dc = new ThreadSafeDeltaCache(config);
  1409.         ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
  1410.         DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
  1411.                 reader, dc, pm,
  1412.                 list, 0, cnt);
  1413.         taskBlock.partitionTasks();
  1414.         beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
  1415.         pm.startWorkers(taskBlock.tasks.size());

  1416.         Executor executor = config.getExecutor();
  1417.         final List<Throwable> errors =
  1418.                 Collections.synchronizedList(new ArrayList<>(threads));
  1419.         if (executor instanceof ExecutorService) {
  1420.             // Caller supplied us a service, use it directly.
  1421.             runTasks((ExecutorService) executor, pm, taskBlock, errors);
  1422.         } else if (executor == null) {
  1423.             // Caller didn't give us a way to run the tasks, spawn up a
  1424.             // temporary thread pool and make sure it tears down cleanly.
  1425.             ExecutorService pool = Executors.newFixedThreadPool(threads);
  1426.             Throwable e1 = null;
  1427.             try {
  1428.                 runTasks(pool, pm, taskBlock, errors);
  1429.             } catch (Exception e) {
  1430.                 e1 = e;
  1431.             } finally {
  1432.                 pool.shutdown();
  1433.                 for (;;) {
  1434.                     try {
  1435.                         if (pool.awaitTermination(60, TimeUnit.SECONDS)) {
  1436.                             break;
  1437.                         }
  1438.                     } catch (InterruptedException e) {
  1439.                         if (e1 != null) {
  1440.                             e.addSuppressed(e1);
  1441.                         }
  1442.                         throw new IOException(JGitText
  1443.                                 .get().packingCancelledDuringObjectsWriting, e);
  1444.                     }
  1445.                 }
  1446.             }
  1447.         } else {
  1448.             // The caller gave us an executor, but it might not do
  1449.             // asynchronous execution.  Wrap everything and hope it
  1450.             // can schedule these for us.
  1451.             for (DeltaTask task : taskBlock.tasks) {
  1452.                 executor.execute(() -> {
  1453.                     try {
  1454.                         task.call();
  1455.                     } catch (Throwable failure) {
  1456.                         errors.add(failure);
  1457.                     }
  1458.                 });
  1459.             }
  1460.             try {
  1461.                 pm.waitForCompletion();
  1462.             } catch (InterruptedException ie) {
  1463.                 // We can't abort the other tasks as we have no handle.
  1464.                 // Cross our fingers and just break out anyway.
  1465.                 //
  1466.                 throw new IOException(
  1467.                         JGitText.get().packingCancelledDuringObjectsWriting,
  1468.                         ie);
  1469.             }
  1470.         }

  1471.         // If any task threw an error, try to report it back as
  1472.         // though we weren't using a threaded search algorithm.
  1473.         //
  1474.         if (!errors.isEmpty()) {
  1475.             Throwable err = errors.get(0);
  1476.             if (err instanceof Error)
  1477.                 throw (Error) err;
  1478.             if (err instanceof RuntimeException)
  1479.                 throw (RuntimeException) err;
  1480.             if (err instanceof IOException)
  1481.                 throw (IOException) err;

  1482.             throw new IOException(err.getMessage(), err);
  1483.         }
  1484.         endPhase(monitor);
  1485.     }

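            // Submit every task, wait for all workers to report completion,
            // and collect any ExecutionException causes so the caller can
            // rethrow them. An interrupt cancels the outstanding futures.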
  1486.     private static void runTasks(ExecutorService pool,
  1487.             ThreadSafeProgressMonitor pm,
  1488.             DeltaTask.Block tb, List<Throwable> errors) throws IOException {
  1489.         List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
  1490.         for (DeltaTask task : tb.tasks)
  1491.             futures.add(pool.submit(task));

  1492.         try {
  1493.             pm.waitForCompletion();
  1494.             for (Future<?> f : futures) {
  1495.                 try {
  1496.                     f.get();
  1497.                 } catch (ExecutionException failed) {
  1498.                     errors.add(failed.getCause());
  1499.                 }
  1500.             }
  1501.         } catch (InterruptedException ie) {
  1502.             for (Future<?> f : futures)
  1503.                 f.cancel(true);
  1504.             throw new IOException(
  1505.                     JGitText.get().packingCancelledDuringObjectsWriting, ie);
  1506.         }
  1507.     }

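            // Write objects grouped by type, in the order commits, tags,
            // trees, then blobs.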
  1508.     private void writeObjects(PackOutputStream out) throws IOException {
  1509.         writeObjects(out, objectsLists[OBJ_COMMIT]);
  1510.         writeObjects(out, objectsLists[OBJ_TAG]);
  1511.         writeObjects(out, objectsLists[OBJ_TREE]);
  1512.         writeObjects(out, objectsLists[OBJ_BLOB]);
  1513.     }

  1514.     private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
  1515.             throws IOException {
  1516.         if (list.isEmpty())
  1517.             return;

  1518.         typeStats = stats.objectTypes[list.get(0).getType()];
  1519.         long beginOffset = out.length();

  1520.         if (reuseSupport != null) {
  1521.             reuseSupport.writeObjects(out, list);
  1522.         } else {
  1523.             for (ObjectToPack otp : list)
  1524.                 out.writeObject(otp);
  1525.         }

  1526.         typeStats.bytes += out.length() - beginOffset;
  1527.         typeStats.cntObjects = list.size();
  1528.     }

  1529.     void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
  1530.         if (!otp.isWritten())
  1531.             writeObjectImpl(out, otp);
  1532.     }

  1533.     private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
  1534.             throws IOException {
  1535.         if (otp.wantWrite()) {
  1536.             // A cycle exists in this delta chain. This should only occur if a
  1537.             // selected object representation disappeared during writing
  1538.             // (for example due to a concurrent repack) and a different base
  1539.             // was chosen, forcing a cycle. Select something other than a
  1540.             // delta, and write this object.
  1541.             reselectNonDelta(otp);
  1542.         }
  1543.         otp.markWantWrite();

  1544.         while (otp.isReuseAsIs()) {
  1545.             writeBase(out, otp.getDeltaBase());
  1546.             if (otp.isWritten())
  1547.                 return; // Delta chain cycle caused this to write already.

  1548.             crc32.reset();
  1549.             otp.setOffset(out.length());
  1550.             try {
  1551.                 reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
  1552.                 out.endObject();
  1553.                 otp.setCRC((int) crc32.getValue());
  1554.                 typeStats.reusedObjects++;
  1555.                 if (otp.isDeltaRepresentation()) {
  1556.                     typeStats.reusedDeltas++;
  1557.                     typeStats.deltaBytes += out.length() - otp.getOffset();
  1558.                 }
  1559.                 return;
  1560.             } catch (StoredObjectRepresentationNotAvailableException gone) {
  1561.                 if (otp.getOffset() == out.length()) {
  1562.                     otp.setOffset(0);
  1563.                     otp.clearDeltaBase();
  1564.                     otp.clearReuseAsIs();
  1565.                     reuseSupport.selectObjectRepresentation(this,
  1566.                             NullProgressMonitor.INSTANCE,
  1567.                             Collections.singleton(otp));
  1568.                     continue;
  1569.                 }
  1570.                 // Object writing already started, we cannot recover.
  1571.                 //
  1572.                 CorruptObjectException coe;
  1573.                 coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
  1574.                 coe.initCause(gone);
  1575.                 throw coe;
  1576.             }
  1577.         }

  1578.         // If we reached here, reuse wasn't possible.
  1579.         //
  1580.         if (otp.isDeltaRepresentation()) {
  1581.             writeDeltaObjectDeflate(out, otp);
  1582.         } else {
  1583.             writeWholeObjectDeflate(out, otp);
  1584.         }
  1585.         out.endObject();
  1586.         otp.setCRC((int) crc32.getValue());
  1587.     }

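            // Ensure a delta base that lives inside this pack is written
            // before any delta that refers to it; edge bases are assumed to
            // exist on the receiving side and are never written.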
  1588.     private void writeBase(PackOutputStream out, ObjectToPack base)
  1589.             throws IOException {
  1590.         if (base != null && !base.isWritten() && !base.isEdge())
  1591.             writeObjectImpl(out, base);
  1592.     }

  1593.     private void writeWholeObjectDeflate(PackOutputStream out,
  1594.             final ObjectToPack otp) throws IOException {
  1595.         final Deflater deflater = deflater();
  1596.         final ObjectLoader ldr = reader.open(otp, otp.getType());

  1597.         crc32.reset();
  1598.         otp.setOffset(out.length());
  1599.         out.writeHeader(otp, ldr.getSize());

  1600.         deflater.reset();
  1601.         DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
  1602.         ldr.copyTo(dst);
  1603.         dst.finish();
  1604.     }

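            // Prefer a delta cached (already deflated) during the search
            // phase; otherwise re-encode the delta against its base and
            // deflate it now.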
  1605.     private void writeDeltaObjectDeflate(PackOutputStream out,
  1606.             final ObjectToPack otp) throws IOException {
  1607.         writeBase(out, otp.getDeltaBase());

  1608.         crc32.reset();
  1609.         otp.setOffset(out.length());

  1610.         DeltaCache.Ref ref = otp.popCachedDelta();
  1611.         if (ref != null) {
  1612.             byte[] zbuf = ref.get();
  1613.             if (zbuf != null) {
  1614.                 out.writeHeader(otp, otp.getCachedSize());
  1615.                 out.write(zbuf);
  1616.                 typeStats.cntDeltas++;
  1617.                 typeStats.deltaBytes += out.length() - otp.getOffset();
  1618.                 return;
  1619.             }
  1620.         }

  1621.         try (TemporaryBuffer.Heap delta = delta(otp)) {
  1622.             out.writeHeader(otp, delta.length());

  1623.             Deflater deflater = deflater();
  1624.             deflater.reset();
  1625.             DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
  1626.             delta.writeTo(dst, null);
  1627.             dst.finish();
  1628.         }
  1629.         typeStats.cntDeltas++;
  1630.         typeStats.deltaBytes += out.length() - otp.getOffset();
  1631.     }

  1632.     private TemporaryBuffer.Heap delta(ObjectToPack otp)
  1633.             throws IOException {
  1634.         DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
  1635.         byte[] res = buffer(otp);

  1636.         // We never would have proposed this pair if the delta would be
  1637.         // larger than the unpacked version of the object. So using it
  1638.         // as our buffer limit is valid: we will never reach it.
  1639.         //
  1640.         TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
  1641.         index.encode(delta, res);
  1642.         return delta;
  1643.     }

  1644.     private byte[] buffer(AnyObjectId objId) throws IOException {
  1645.         return buffer(config, reader, objId);
  1646.     }

  1647.     static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
  1648.             throws IOException {
  1649.         // PackWriter should have already pruned objects that
  1650.         // are above the big file threshold, so our chances of
  1651.         // the object being below it are very good. We really
  1652.         // shouldn't be here, unless the implementation is odd.

  1653.         return or.open(objId).getCachedBytes(config.getBigFileThreshold());
  1654.     }

  1655.     private Deflater deflater() {
  1656.         if (myDeflater == null)
  1657.             myDeflater = new Deflater(config.getCompressionLevel());
  1658.         return myDeflater;
  1659.     }

  1660.     private void writeChecksum(PackOutputStream out) throws IOException {
  1661.         packcsum = out.getDigest();
  1662.         out.write(packcsum);
  1663.     }

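            // Counting phase: determine exactly which objects go into the
            // pack. If a bitmap index is available the object set is computed
            // from bitmaps; otherwise a RevWalk enumerates commits reachable
            // from "want" but not "have", followed by their trees and blobs,
            // applying the exclusion sets and filter spec along the way.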
  1664.     private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
  1665.             @NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
  1666.             @NonNull Set<? extends ObjectId> have,
  1667.             @NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
  1668.         final long countingStart = System.currentTimeMillis();
  1669.         beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);

  1670.         stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
  1671.         stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
  1672.         excludeFromBitmapSelection = noBitmaps;

  1673.         canBuildBitmaps = config.isBuildBitmaps()
  1674.                 && !shallowPack
  1675.                 && have.isEmpty()
  1676.                 && (excludeInPacks == null || excludeInPacks.length == 0);
  1677.         if (!shallowPack && useBitmaps) {
  1678.             BitmapIndex bitmapIndex = reader.getBitmapIndex();
  1679.             if (bitmapIndex != null) {
  1680.                 BitmapWalker bitmapWalker = new BitmapWalker(
  1681.                         walker, bitmapIndex, countingMonitor);
  1682.                 findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
  1683.                 endPhase(countingMonitor);
  1684.                 stats.timeCounting = System.currentTimeMillis() - countingStart;
  1685.                 stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
  1686.                 return;
  1687.             }
  1688.         }

  1689.         List<ObjectId> all = new ArrayList<>(want.size() + have.size());
  1690.         all.addAll(want);
  1691.         all.addAll(have);

  1692.         final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
  1693.         final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$

  1694.         walker.carry(include);

  1695.         int haveEst = have.size();
  1696.         if (have.isEmpty()) {
  1697.             walker.sort(RevSort.COMMIT_TIME_DESC);
  1698.         } else {
  1699.             walker.sort(RevSort.TOPO);
  1700.             if (thin)
  1701.                 walker.sort(RevSort.BOUNDARY, true);
  1702.         }

  1703.         List<RevObject> wantObjs = new ArrayList<>(want.size());
  1704.         List<RevObject> haveObjs = new ArrayList<>(haveEst);
  1705.         List<RevTag> wantTags = new ArrayList<>(want.size());

  1706.         // Retrieve the RevWalk's versions of "want" and "have" objects to
  1707.         // maintain any state previously set in the RevWalk.
  1708.         AsyncRevObjectQueue q = walker.parseAny(all, true);
  1709.         try {
  1710.             for (;;) {
  1711.                 try {
  1712.                     RevObject o = q.next();
  1713.                     if (o == null)
  1714.                         break;
  1715.                     if (have.contains(o))
  1716.                         haveObjs.add(o);
  1717.                     if (want.contains(o)) {
  1718.                         o.add(include);
  1719.                         wantObjs.add(o);
  1720.                         if (o instanceof RevTag)
  1721.                             wantTags.add((RevTag) o);
  1722.                     }
  1723.                 } catch (MissingObjectException e) {
  1724.                     if (ignoreMissingUninteresting
  1725.                             && have.contains(e.getObjectId()))
  1726.                         continue;
  1727.                     throw e;
  1728.                 }
  1729.             }
  1730.         } finally {
  1731.             q.release();
  1732.         }

  1733.         if (!wantTags.isEmpty()) {
  1734.             all = new ArrayList<>(wantTags.size());
  1735.             for (RevTag tag : wantTags)
  1736.                 all.add(tag.getObject());
  1737.             q = walker.parseAny(all, true);
  1738.             try {
  1739.                 while (q.next() != null) {
  1740.                     // Just need to pop the queue item to parse the object.
  1741.                 }
  1742.             } finally {
  1743.                 q.release();
  1744.             }
  1745.         }

  1746.         if (walker instanceof DepthWalk.ObjectWalk) {
  1747.             DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
  1748.             for (RevObject obj : wantObjs) {
  1749.                 depthWalk.markRoot(obj);
  1750.             }
  1751.             // Mark the tree objects associated with "have" commits as
  1752.             // uninteresting to avoid writing redundant blobs. A normal RevWalk
  1753.             // lazily propagates the "uninteresting" state from a commit to its
  1754.             // tree during the walk, but DepthWalks can terminate early so
  1755.             // preemptively propagate that state here.
  1756.             for (RevObject obj : haveObjs) {
  1757.                 if (obj instanceof RevCommit) {
  1758.                     RevTree t = ((RevCommit) obj).getTree();
  1759.                     depthWalk.markUninteresting(t);
  1760.                 }
  1761.             }

  1762.             if (unshallowObjects != null) {
  1763.                 for (ObjectId id : unshallowObjects) {
  1764.                     depthWalk.markUnshallow(walker.parseAny(id));
  1765.                 }
  1766.             }
  1767.         } else {
  1768.             for (RevObject obj : wantObjs)
  1769.                 walker.markStart(obj);
  1770.         }
  1771.         for (RevObject obj : haveObjs)
  1772.             walker.markUninteresting(obj);

  1773.         final int maxBases = config.getDeltaSearchWindowSize();
  1774.         Set<RevTree> baseTrees = new HashSet<>();
  1775.         BlockList<RevCommit> commits = new BlockList<>();
  1776.         Set<ObjectId> roots = new HashSet<>();
  1777.         RevCommit c;
  1778.         while ((c = walker.next()) != null) {
  1779.             if (exclude(c))
  1780.                 continue;
  1781.             if (c.has(RevFlag.UNINTERESTING)) {
  1782.                 if (baseTrees.size() <= maxBases)
  1783.                     baseTrees.add(c.getTree());
  1784.                 continue;
  1785.             }

  1786.             commits.add(c);
  1787.             if (c.getParentCount() == 0) {
  1788.                 roots.add(c.copy());
  1789.             }
  1790.             countingMonitor.update(1);
  1791.         }
  1792.         stats.rootCommits = Collections.unmodifiableSet(roots);

  1793.         if (shallowPack) {
  1794.             for (RevCommit cmit : commits) {
  1795.                 addObject(cmit, 0);
  1796.             }
  1797.         } else {
  1798.             int commitCnt = 0;
  1799.             boolean putTagTargets = false;
  1800.             for (RevCommit cmit : commits) {
  1801.                 if (!cmit.has(added)) {
  1802.                     cmit.add(added);
  1803.                     addObject(cmit, 0);
  1804.                     commitCnt++;
  1805.                 }

  1806.                 for (int i = 0; i < cmit.getParentCount(); i++) {
  1807.                     RevCommit p = cmit.getParent(i);
  1808.                     if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
  1809.                             && !exclude(p)) {
  1810.                         p.add(added);
  1811.                         addObject(p, 0);
  1812.                         commitCnt++;
  1813.                     }
  1814.                 }

  1815.                 if (!putTagTargets && 4096 < commitCnt) {
  1816.                     for (ObjectId id : tagTargets) {
  1817.                         RevObject obj = walker.lookupOrNull(id);
  1818.                         if (obj instanceof RevCommit
  1819.                                 && obj.has(include)
  1820.                                 && !obj.has(RevFlag.UNINTERESTING)
  1821.                                 && !obj.has(added)) {
  1822.                             obj.add(added);
  1823.                             addObject(obj, 0);
  1824.                         }
  1825.                     }
  1826.                     putTagTargets = true;
  1827.                 }
  1828.             }
  1829.         }
  1830.         commits = null;

  1831.         if (thin && !baseTrees.isEmpty()) {
  1832.             BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
  1833.                     objectsMap, edgeObjects, reader);
  1834.             RevObject o;
  1835.             while ((o = walker.nextObject()) != null) {
  1836.                 if (o.has(RevFlag.UNINTERESTING))
  1837.                     continue;
  1838.                 if (exclude(o))
  1839.                     continue;

  1840.                 int pathHash = walker.getPathHashCode();
  1841.                 byte[] pathBuf = walker.getPathBuffer();
  1842.                 int pathLen = walker.getPathLength();
  1843.                 bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
  1844.                 if (!depthSkip(o, walker)) {
  1845.                     filterAndAddObject(o, o.getType(), pathHash, want);
  1846.                 }
  1847.                 countingMonitor.update(1);
  1848.             }
  1849.         } else {
  1850.             RevObject o;
  1851.             while ((o = walker.nextObject()) != null) {
  1852.                 if (o.has(RevFlag.UNINTERESTING))
  1853.                     continue;
  1854.                 if (exclude(o))
  1855.                     continue;
  1856.                 if (!depthSkip(o, walker)) {
  1857.                     filterAndAddObject(o, o.getType(), walker.getPathHashCode(),
  1858.                                        want);
  1859.                 }
  1860.                 countingMonitor.update(1);
  1861.             }
  1862.         }

  1863.         for (CachedPack pack : cachedPacks)
  1864.             countingMonitor.update((int) pack.getObjectCount());
  1865.         endPhase(countingMonitor);
  1866.         stats.timeCounting = System.currentTimeMillis() - countingStart;
  1867.         stats.bitmapIndexMisses = -1;
  1868.     }

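            // Bitmap-based counting: needBitmap = want AND NOT have. Objects
            // already covered by reusable cached packs may be subtracted from
            // that set, and for thin packs the "have" bitmap is kept so delta
            // bases present on the remote side can be recognized later.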
  1869.     private void findObjectsToPackUsingBitmaps(
  1870.             BitmapWalker bitmapWalker, Set<? extends ObjectId> want,
  1871.             Set<? extends ObjectId> have)
  1872.             throws MissingObjectException, IncorrectObjectTypeException,
  1873.             IOException {
  1874.         BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
  1875.         BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
  1876.                 false);
  1877.         BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);

  1878.         if (useCachedPacks && reuseSupport != null && !reuseValidate
  1879.                 && (excludeInPacks == null || excludeInPacks.length == 0))
  1880.             cachedPacks.addAll(
  1881.                     reuseSupport.getCachedPacksAndUpdate(needBitmap));

  1882.         for (BitmapObject obj : needBitmap) {
  1883.             ObjectId objectId = obj.getObjectId();
  1884.             if (exclude(objectId)) {
  1885.                 needBitmap.remove(objectId);
  1886.                 continue;
  1887.             }
  1888.             filterAndAddObject(objectId, obj.getType(), 0, want);
  1889.         }

  1890.         if (thin)
  1891.             haveObjects = haveBitmap;
  1892.     }

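            // Compact the list in place, keeping only non-edge objects, then
            // trim the now-unused tail entries.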
  1893.     private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
  1894.         final int size = list.size();
  1895.         int src = 0;
  1896.         int dst = 0;

  1897.         for (; src < size; src++) {
  1898.             ObjectToPack obj = list.get(src);
  1899.             if (obj.isEdge())
  1900.                 continue;
  1901.             if (dst != src)
  1902.                 list.set(dst, obj);
  1903.             dst++;
  1904.         }

  1905.         while (dst < list.size())
  1906.             list.remove(list.size() - 1);
  1907.     }

  1908.     /**
  1909.      * Include one object in the output file.
  1910.      * <p>
  1911.      * Objects are written in the order they are added. If the same object is
  1912.      * added twice, it may be written twice, creating a larger than necessary
  1913.      * file.
  1914.      *
  1915.      * @param object
  1916.      *            the object to add.
  1917.      * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
  1918.      *             the object is an unsupported type.
  1919.      */
  1920.     public void addObject(RevObject object)
  1921.             throws IncorrectObjectTypeException {
  1922.         if (!exclude(object))
  1923.             addObject(object, 0);
  1924.     }

  1925.     private void addObject(RevObject object, int pathHashCode) {
  1926.         addObject(object, object.getType(), pathHashCode);
  1927.     }

  1928.     private void addObject(
  1929.             final AnyObjectId src, final int type, final int pathHashCode) {
  1930.         final ObjectToPack otp;
  1931.         if (reuseSupport != null)
  1932.             otp = reuseSupport.newObjectToPack(src, type);
  1933.         else
  1934.             otp = new ObjectToPack(src, type);
  1935.         otp.setPathHash(pathHashCode);
  1936.         objectsLists[type].add(otp);
  1937.         objectsMap.add(otp);
  1938.     }

  1939.     /**
  1940.      * Determines if the object should be omitted from the pack as a result of
  1941.      * its depth (probably because of the tree:&lt;depth&gt; filter).
  1942.      * <p>
  1943.      * Causes {@code walker} to skip traversing the current tree, which ought to
  1944.      * have just started traversal, assuming this method is called as soon as a
  1945.      * new depth is reached.
  1946.      * <p>
  1947.      * This method increments the {@code treesTraversed} statistic.
  1948.      *
  1949.      * @param obj
  1950.      *            the object to check whether it should be omitted.
  1951.      * @param walker
  1952.      *            the walker being used for traversal.
  1953.      * @return whether the given object should be skipped.
  1954.      */
  1955.     private boolean depthSkip(@NonNull RevObject obj, ObjectWalk walker) {
  1956.         long treeDepth = walker.getTreeDepth();

  1957.         // Check if this object needs to be rejected because it is a tree or
  1958.         // blob that is too deep from the root tree.

  1959.         // A blob is considered one level deeper than the tree that contains it.
  1960.         if (obj.getType() == OBJ_BLOB) {
  1961.             treeDepth++;
  1962.         } else {
  1963.             stats.treesTraversed++;
  1964.         }

  1965.         if (filterSpec.getTreeDepthLimit() < 0 ||
  1966.             treeDepth <= filterSpec.getTreeDepthLimit()) {
  1967.             return false;
  1968.         }

  1969.         walker.skipTree();
  1970.         return true;
  1971.     }

  1972.     // Adds the given object as an object to be packed, first filtering out
  1973.     // blobs that exceed the configured blob size limit.
  1974.     private void filterAndAddObject(@NonNull AnyObjectId src, int type,
  1975.             int pathHashCode, @NonNull Set<? extends AnyObjectId> want)
  1976.             throws IOException {

  1977.         // Check if this object needs to be rejected, doing the cheaper
  1978.         // checks first.
  1979.         boolean reject =
  1980.             (!filterSpec.allowsType(type) && !want.contains(src)) ||
  1981.             (filterSpec.getBlobLimit() >= 0 &&
  1982.                 type == OBJ_BLOB &&
  1983.                 !want.contains(src) &&
  1984.                 reader.getObjectSize(src, OBJ_BLOB) > filterSpec.getBlobLimit());
  1985.         if (!reject) {
  1986.             addObject(src, type, pathHashCode);
  1987.         }
  1988.     }

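            // Check the exclusion indexes, remembering the last index that
            // matched so runs of objects from the same pack hit it first.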
  1989.     private boolean exclude(AnyObjectId objectId) {
  1990.         if (excludeInPacks == null)
  1991.             return false;
  1992.         if (excludeInPackLast.contains(objectId))
  1993.             return true;
  1994.         for (ObjectIdSet idx : excludeInPacks) {
  1995.             if (idx.contains(objectId)) {
  1996.                 excludeInPackLast = idx;
  1997.                 return true;
  1998.             }
  1999.         }
  2000.         return false;
  2001.     }

  2002.     /**
  2003.      * Select an object representation for this writer.
  2004.      * <p>
  2005.      * An {@link org.eclipse.jgit.lib.ObjectReader} implementation should invoke
  2006.      * this method once for each representation available for an object, to
  2007.      * allow the writer to find the most suitable one for the output.
  2008.      *
  2009.      * @param otp
  2010.      *            the object being packed.
  2011.      * @param next
  2012.      *            the next available representation from the repository.
  2013.      */
  2014.     public void select(ObjectToPack otp, StoredObjectRepresentation next) {
  2015.         int nFmt = next.getFormat();

  2016.         if (!cachedPacks.isEmpty()) {
  2017.             if (otp.isEdge())
  2018.                 return;
  2019.             if (nFmt == PACK_WHOLE || nFmt == PACK_DELTA) {
  2020.                 for (CachedPack pack : cachedPacks) {
  2021.                     if (pack.hasObject(otp, next)) {
  2022.                         otp.setEdge();
  2023.                         otp.clearDeltaBase();
  2024.                         otp.clearReuseAsIs();
  2025.                         pruneCurrentObjectList = true;
  2026.                         return;
  2027.                     }
  2028.                 }
  2029.             }
  2030.         }

  2031.         if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
  2032.             ObjectId baseId = next.getDeltaBase();
  2033.             ObjectToPack ptr = objectsMap.get(baseId);
  2034.             if (ptr != null && !ptr.isEdge()) {
  2035.                 otp.setDeltaBase(ptr);
  2036.                 otp.setReuseAsIs();
  2037.             } else if (thin && have(ptr, baseId)) {
  2038.                 otp.setDeltaBase(baseId);
  2039.                 otp.setReuseAsIs();
  2040.             } else {
  2041.                 otp.clearDeltaBase();
  2042.                 otp.clearReuseAsIs();
  2043.             }
  2044.         } else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
  2045.             int nWeight = next.getWeight();
  2046.             if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
  2047.                 // We've chosen another PACK_WHOLE format for this object,
  2048.                 // We've chosen another PACK_WHOLE format for this object;
  2049.                 // choose the one that has the smaller compressed size.
  2050.                 if (otp.getWeight() <= nWeight)
  2051.                     return;
  2052.             }
  2053.             otp.clearDeltaBase();
  2054.             otp.setReuseAsIs();
  2055.             otp.setWeight(nWeight);
  2056.         } else {
  2057.             otp.clearDeltaBase();
  2058.             otp.clearReuseAsIs();
  2059.         }

  2060.         otp.setDeltaAttempted(reuseDeltas && next.wasDeltaAttempted());
  2061.         otp.select(next);
  2062.     }

  2063.     private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
  2064.         return (ptr != null && ptr.isEdge())
  2065.                 || (haveObjects != null && haveObjects.contains(objectId));
  2066.     }

  2067.     /**
  2068.      * Prepares the bitmaps to be written to the bitmap index file.
  2069.      * <p>
  2070.      * Bitmaps can be used to speed up fetches and clones by storing the entire
  2071.      * object graph at selected commits. Writing a bitmap index is an optional
  2072.      * feature that not all pack users may require.
  2073.      * <p>
  2074.      * Called after {@link #writeIndex(OutputStream)}.
  2075.      * <p>
  2076.      * To reduce memory, internal state is cleared during this method, rendering
  2077.      * the PackWriter instance useless for anything further than a call to write
  2078.      * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
  2079.      *
  2080.      * @param pm
  2081.      *            progress monitor to report bitmap building work.
  2082.      * @return whether a bitmap index may be written.
  2083.      * @throws java.io.IOException
  2084.      *             when an I/O problem occurs while reading objects.
  2085.      */
  2086.     public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
  2087.         if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
  2088.                 || !cachedPacks.isEmpty())
  2089.             return false;

  2090.         if (pm == null)
  2091.             pm = NullProgressMonitor.INSTANCE;

  2092.         int numCommits = objectsLists[OBJ_COMMIT].size();
  2093.         List<ObjectToPack> byName = sortByName();
  2094.         sortedByName = null;
  2095.         objectsLists = null;
  2096.         objectsMap = null;
  2097.         writeBitmaps = new PackBitmapIndexBuilder(byName);
  2098.         byName = null;

  2099.         PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
  2100.                 reader, writeBitmaps, pm, stats.interestingObjects, config);

  2101.         Collection<BitmapCommit> selectedCommits = bitmapPreparer
  2102.                 .selectCommits(numCommits, excludeFromBitmapSelection);

  2103.         beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());

  2104.         BitmapWalker walker = bitmapPreparer.newBitmapWalker();
  2105.         AnyObjectId last = null;
  2106.         for (BitmapCommit cmit : selectedCommits) {
  2107.             if (!cmit.isReuseWalker()) {
  2108.                 walker = bitmapPreparer.newBitmapWalker();
  2109.             }
  2110.             BitmapBuilder bitmap = walker.findObjects(
  2111.                     Collections.singleton(cmit), null, false);

  2112.             if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
  2113.                 throw new IllegalStateException(MessageFormat.format(
  2114.                         JGitText.get().bitmapMissingObject, cmit.name(),
  2115.                         last.name()));
  2116.             last = BitmapCommit.copyFrom(cmit).build();
  2117.             writeBitmaps.processBitmapForWrite(cmit, bitmap.build(),
  2118.                     cmit.getFlags());

  2119.             // The bitmap walker should stop when the walk hits the previous
  2120.             // commit, which saves time.
  2121.             walker.setPrevCommit(last);
  2122.             walker.setPrevBitmap(bitmap);

  2123.             pm.update(1);
  2124.         }

  2125.         endPhase(pm);
  2126.         return true;
  2127.     }
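            /*
             * A minimal usage sketch (not part of this class), assuming a
             * Repository "repo", ObjectId sets "wants" and "haves", and one
             * OutputStream per artifact ("packOut", "idxOut", "bitmapOut");
             * the overload choices below are an assumption, so verify them
             * against the public API before relying on this:
             *
             *   try (PackWriter pw = new PackWriter(repo)) {
             *       pw.preparePack(NullProgressMonitor.INSTANCE, wants, haves);
             *       pw.writePack(NullProgressMonitor.INSTANCE,
             *               NullProgressMonitor.INSTANCE, packOut);
             *       pw.writeIndex(idxOut);
             *       if (pw.prepareBitmapIndex(NullProgressMonitor.INSTANCE)) {
             *           pw.writeBitmapIndex(bitmapOut);
             *       }
             *       PackStatistics stats = pw.getStatistics();
             *   }
             */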

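            // Git object type codes are OBJ_COMMIT=1, OBJ_TREE=2, OBJ_BLOB=3
            // and OBJ_TAG=4, so the 0x2 bit test below matches exactly trees
            // and blobs, the types that routinely benefit from delta reuse.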
  2128.     private boolean reuseDeltaFor(ObjectToPack otp) {
  2129.         int type = otp.getType();
  2130.         if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
  2131.             return true;
  2132.         if (type == OBJ_COMMIT)
  2133.             return reuseDeltaCommits;
  2134.         if (type == OBJ_TAG)
  2135.             return false;
  2136.         return true;
  2137.     }

  2138.     private class MutableState {
  2139.         /** Estimated size of a single ObjectToPack instance. */
  2140.         // Assume 64-bit pointers, since this is just an estimate.
  2141.         private static final long OBJECT_TO_PACK_SIZE =
  2142.                 (2 * 8)               // Object header
  2143.                 + (2 * 8) + (2 * 8)   // ObjectToPack fields
  2144.                 + (8 + 8)             // PackedObjectInfo fields
  2145.                 + 8                   // ObjectIdOwnerMap fields
  2146.                 + 40                  // AnyObjectId fields
  2147.                 + 8;                  // Reference in BlockList

  2148.         private final long totalDeltaSearchBytes;

  2149.         private volatile PackingPhase phase;

  2150.         MutableState() {
  2151.             phase = PackingPhase.COUNTING;
  2152.             if (config.isDeltaCompress()) {
  2153.                 int threads = config.getThreads();
  2154.                 if (threads <= 0)
  2155.                     threads = Runtime.getRuntime().availableProcessors();
  2156.                 totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
  2157.                         + config.getBigFileThreshold();
  2158.             } else
  2159.                 totalDeltaSearchBytes = 0;
  2160.         }

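                // Estimate memory as OBJECT_TO_PACK_SIZE per queued object,
                // plus the delta search budget while the writer is in the
                // COMPRESSING phase.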
  2161.         State snapshot() {
  2162.             long objCnt = 0;
  2163.             BlockList<ObjectToPack>[] lists = objectsLists;
  2164.             if (lists != null) {
  2165.                 objCnt += lists[OBJ_COMMIT].size();
  2166.                 objCnt += lists[OBJ_TREE].size();
  2167.                 objCnt += lists[OBJ_BLOB].size();
  2168.                 objCnt += lists[OBJ_TAG].size();
  2169.                 // Exclude CachedPacks.
  2170.             }

  2171.             long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
  2172.             PackingPhase curr = phase;
  2173.             if (curr == PackingPhase.COMPRESSING)
  2174.                 bytesUsed += totalDeltaSearchBytes;
  2175.             return new State(curr, bytesUsed);
  2176.         }
  2177.     }

  2178.     /** Possible states that a PackWriter can be in. */
  2179.     public enum PackingPhase {
  2180.         /** Counting objects phase. */
  2181.         COUNTING,

  2182.         /** Getting sizes phase. */
  2183.         GETTING_SIZES,

  2184.         /** Finding sources phase. */
  2185.         FINDING_SOURCES,

  2186.         /** Compressing objects phase. */
  2187.         COMPRESSING,

  2188.         /** Writing objects phase. */
  2189.         WRITING,

  2190.         /** Building bitmaps phase. */
  2191.         BUILDING_BITMAPS;
  2192.     }

  2193.     /** Summary of the current state of a PackWriter. */
  2194.     public class State {
  2195.         private final PackingPhase phase;

  2196.         private final long bytesUsed;

  2197.         State(PackingPhase phase, long bytesUsed) {
  2198.             this.phase = phase;
  2199.             this.bytesUsed = bytesUsed;
  2200.         }

  2201.         /** @return the PackConfig used to build the writer. */
  2202.         public PackConfig getConfig() {
  2203.             return config;
  2204.         }

  2205.         /** @return the current phase of the writer. */
  2206.         public PackingPhase getPhase() {
  2207.             return phase;
  2208.         }

  2209.         /** @return an estimate of the total memory used by the writer. */
  2210.         public long estimateBytesUsed() {
  2211.             return bytesUsed;
  2212.         }

  2213.         @SuppressWarnings("nls")
  2214.         @Override
  2215.         public String toString() {
  2216.             return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
  2217.         }
  2218.     }

  2219.     /**
  2220.      * Configuration related to the packfile URI feature.
  2221.      *
  2222.      * @since 5.5
  2223.      */
  2224.     public static class PackfileUriConfig {
  2225.         @NonNull
  2226.         private final PacketLineOut pckOut;

  2227.         @NonNull
  2228.         private final Collection<String> protocolsSupported;

  2229.         @NonNull
  2230.         private final CachedPackUriProvider cachedPackUriProvider;

  2231.         /**
  2232.          * @param pckOut where to write "packfile-uri" lines to (should
  2233.          *     output to the same stream as the one passed to
  2234.          *     PackWriter#writePack)
  2235.          * @param protocolsSupported list of protocols supported (e.g. "https")
  2236.          * @param cachedPackUriProvider provider of URIs corresponding
  2237.          *     to cached packs
  2238.          * @since 5.5
  2239.          */
  2240.         public PackfileUriConfig(@NonNull PacketLineOut pckOut,
  2241.                 @NonNull Collection<String> protocolsSupported,
  2242.                 @NonNull CachedPackUriProvider cachedPackUriProvider) {
  2243.             this.pckOut = pckOut;
  2244.             this.protocolsSupported = protocolsSupported;
  2245.             this.cachedPackUriProvider = cachedPackUriProvider;
  2246.         }
  2247.     }
  2248. }