/*
 * Copyright (C) 2011, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.dfs;

import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.COMPACT;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC_REST;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC_TXN;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.INSERT;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.RECEIVE;
import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
import static org.eclipse.jgit.internal.storage.pack.PackWriter.NONE;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.EnumSet;
import java.util.GregorianCalendar;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;

import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
import org.eclipse.jgit.internal.storage.file.PackIndex;
import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.internal.storage.pack.PackWriter;
import org.eclipse.jgit.internal.storage.reftree.RefTreeNames;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.NullProgressMonitor;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdSet;
import org.eclipse.jgit.lib.ProgressMonitor;
import org.eclipse.jgit.lib.Ref;
import org.eclipse.jgit.lib.RefDatabase;
import org.eclipse.jgit.revwalk.RevWalk;
import org.eclipse.jgit.storage.pack.PackConfig;
import org.eclipse.jgit.storage.pack.PackStatistics;
import org.eclipse.jgit.util.SystemReader;
import org.eclipse.jgit.util.io.CountingOutputStream;

/**
 * Repack and garbage collect a repository.
 *
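 * <p>
 * A minimal usage sketch; {@code repository} stands in for an already open
 * {@link DfsRepository} supplied by the caller:
 *
 * <pre>
 * DfsGarbageCollector gc = new DfsGarbageCollector(repository);
 * gc.setGarbageTtl(1, TimeUnit.DAYS); // matches the default TTL
 * gc.pack(NullProgressMonitor.INSTANCE);
 * </pre>
 */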
public class DfsGarbageCollector {
	private final DfsRepository repo;
	private final RefDatabase refdb;
	private final DfsObjDatabase objdb;

	private final List<DfsPackDescription> newPackDesc;

	private final List<PackStatistics> newPackStats;

	private final List<ObjectIdSet> newPackObj;

	private DfsReader ctx;

	private PackConfig packConfig;

	// See packIsCoalesceableGarbage(), below, for how these two variables
	// interact.
	private long coalesceGarbageLimit = 50 << 20;
	private long garbageTtlMillis = TimeUnit.DAYS.toMillis(1);

	private long startTimeMillis;
	private List<DfsPackFile> packsBefore;
	private List<DfsPackFile> expiredGarbagePacks;

	private Set<ObjectId> allHeadsAndTags;
	private Set<ObjectId> allTags;
	private Set<ObjectId> nonHeads;
	private Set<ObjectId> txnHeads;
	private Set<ObjectId> tagTargets;

	/**
	 * Initialize a garbage collector.
	 *
	 * @param repository
	 *            the repository whose objects will be read and repacked.
	 */
	public DfsGarbageCollector(DfsRepository repository) {
		repo = repository;
		refdb = repo.getRefDatabase();
		objdb = repo.getObjectDatabase();
		newPackDesc = new ArrayList<>(4);
		newPackStats = new ArrayList<>(4);
		newPackObj = new ArrayList<>(4);

		packConfig = new PackConfig(repo);
		packConfig.setIndexVersion(2);
	}

	/** @return configuration used to generate the new pack file. */
	public PackConfig getPackConfig() {
		return packConfig;
	}

	/**
	 * @param newConfig
	 *            the new configuration to use when creating the pack file.
	 * @return {@code this}
	 */
	public DfsGarbageCollector setPackConfig(PackConfig newConfig) {
		packConfig = newConfig;
		return this;
	}

	/** @return garbage packs smaller than this size will be repacked. */
	public long getCoalesceGarbageLimit() {
		return coalesceGarbageLimit;
	}

	/**
	 * Set the byte size limit for garbage packs to be repacked.
	 * <p>
	 * Any UNREACHABLE_GARBAGE pack smaller than this limit will be repacked at
	 * the end of the run. This allows the garbage collector to coalesce
	 * unreachable objects into a single file.
	 * <p>
	 * If an UNREACHABLE_GARBAGE pack is already larger than this limit it will
	 * be left alone by the garbage collector. This avoids unnecessary disk IO
	 * reading and copying the objects.
	 * <p>
	 * If limit is set to 0, coalescing of UNREACHABLE_GARBAGE packs is
	 * disabled.<br>
	 * If limit is set to {@link Long#MAX_VALUE}, everything is coalesced.
	 * <p>
	 * Keeping unreachable garbage prevents race conditions with repository
	 * changes that may suddenly need an object whose only copy was stored in
	 * the UNREACHABLE_GARBAGE pack.
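	 * <p>
	 * Example (illustrative values; {@code gc} is a collector instance):
	 *
	 * <pre>
	 * gc.setCoalesceGarbageLimit(0); // disable coalescing
	 * gc.setCoalesceGarbageLimit(Long.MAX_VALUE); // coalesce all garbage packs
	 * </pre>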
	 *
	 * @param limit
	 *            size in bytes.
	 * @return {@code this}
	 */
	public DfsGarbageCollector setCoalesceGarbageLimit(long limit) {
		coalesceGarbageLimit = limit;
		return this;
	}

	/**
	 * @return garbage packs older than this limit (in milliseconds) will be
	 *         pruned as part of the garbage collection process if the value is
	 *         > 0, otherwise garbage packs are retained.
	 */
	public long getGarbageTtlMillis() {
		return garbageTtlMillis;
	}

	/**
	 * Set the time to live for garbage objects.
	 * <p>
	 * Any UNREACHABLE_GARBAGE older than this limit will be pruned at the end
	 * of the run.
	 * <p>
	 * If timeToLiveMillis is set to 0, UNREACHABLE_GARBAGE purging is disabled.
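	 * <p>
	 * For example (illustrative values; {@code gc} is a collector instance):
	 *
	 * <pre>
	 * gc.setGarbageTtl(12, TimeUnit.HOURS); // prune garbage after 12 hours
	 * gc.setGarbageTtl(0, TimeUnit.MILLISECONDS); // never prune garbage
	 * </pre>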
	 *
	 * @param ttl
	 *            time to live, expressed in {@code unit}.
	 * @param unit
	 *            the time unit of {@code ttl}.
	 * @return {@code this}
	 */
	public DfsGarbageCollector setGarbageTtl(long ttl, TimeUnit unit) {
		garbageTtlMillis = unit.toMillis(ttl);
		return this;
	}

	/**
	 * Create a single new pack file containing all of the live objects.
	 * <p>
	 * This method safely decides which packs can be expired after the new pack
	 * is created by validating the references have not been modified in an
	 * incompatible way.
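	 * <p>
	 * A sketch of the retry contract (caller-side handling is assumed, not
	 * provided by this class):
	 *
	 * <pre>
	 * if (!gc.pack(NullProgressMonitor.INSTANCE)) {
	 * 	// A concurrent reference update raced with this repack;
	 * 	// run the collector again later.
	 * }
	 * </pre>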
	 *
	 * @param pm
	 *            progress monitor to receive updates on; packing may take a
	 *            while, depending on the size of the repository.
	 * @return true if the repack was successful without race conditions. False
	 *         if a race condition was detected and the repack should be run
	 *         again later.
	 * @throws IOException
	 *             a new pack cannot be created.
	 */
	public boolean pack(ProgressMonitor pm) throws IOException {
		if (pm == null)
			pm = NullProgressMonitor.INSTANCE;
		if (packConfig.getIndexVersion() != 2)
			throw new IllegalStateException(
					JGitText.get().supportOnlyPackIndexVersion2);

		startTimeMillis = SystemReader.getInstance().getCurrentTime();
		ctx = objdb.newReader();
		try {
			refdb.refresh();
			objdb.clearCache();

			Collection<Ref> refsBefore = getAllRefs();
			readPacksBefore();

			Set<ObjectId> allHeads = new HashSet<>();
			allHeadsAndTags = new HashSet<>();
			allTags = new HashSet<>();
			nonHeads = new HashSet<>();
			txnHeads = new HashSet<>();
			tagTargets = new HashSet<>();
			for (Ref ref : refsBefore) {
				if (ref.isSymbolic() || ref.getObjectId() == null) {
					continue;
				}
				if (isHead(ref)) {
					allHeads.add(ref.getObjectId());
				} else if (isTag(ref)) {
					allTags.add(ref.getObjectId());
				} else if (RefTreeNames.isRefTree(refdb, ref.getName())) {
					txnHeads.add(ref.getObjectId());
				} else {
					nonHeads.add(ref.getObjectId());
				}
				if (ref.getPeeledObjectId() != null) {
					tagTargets.add(ref.getPeeledObjectId());
				}
			}
			// Don't exclude tags that are also branch tips.
			allTags.removeAll(allHeads);
			allHeadsAndTags.addAll(allHeads);
			allHeadsAndTags.addAll(allTags);

			// Hoist all branch tips and tags earlier in the pack file
			tagTargets.addAll(allHeadsAndTags);

			boolean rollback = true;
			try {
				packHeads(pm);
				packRest(pm);
				packRefTreeGraph(pm);
				packGarbage(pm);
				objdb.commitPack(newPackDesc, toPrune());
				rollback = false;
				return true;
			} finally {
				if (rollback)
					objdb.rollbackPack(newPackDesc);
			}
		} finally {
			ctx.close();
		}
	}

	private Collection<Ref> getAllRefs() throws IOException {
		Collection<Ref> refs = refdb.getRefs(RefDatabase.ALL).values();
		List<Ref> addl = refdb.getAdditionalRefs();
		if (!addl.isEmpty()) {
			List<Ref> all = new ArrayList<>(refs.size() + addl.size());
			all.addAll(refs);
			// add additional refs which start with refs/
			for (Ref r : addl) {
				if (r.getName().startsWith(Constants.R_REFS)) {
					all.add(r);
				}
			}
			return all;
		}
		return refs;
	}

	private void readPacksBefore() throws IOException {
		DfsPackFile[] packs = objdb.getPacks();
		packsBefore = new ArrayList<>(packs.length);
		expiredGarbagePacks = new ArrayList<>(packs.length);

		long now = SystemReader.getInstance().getCurrentTime();
		for (DfsPackFile p : packs) {
			DfsPackDescription d = p.getPackDescription();
			if (d.getPackSource() != UNREACHABLE_GARBAGE) {
				packsBefore.add(p);
			} else if (packIsExpiredGarbage(d, now)) {
				expiredGarbagePacks.add(p);
			} else if (packIsCoalesceableGarbage(d, now)) {
				packsBefore.add(p);
			}
		}
	}

	private boolean packIsExpiredGarbage(DfsPackDescription d, long now) {
		// Consider the garbage pack as expired when it's older than
		// garbagePackTtl. This check gives concurrent inserter threads
		// sufficient time to identify an object is not in the graph and should
		// have a new copy written, rather than relying on something from an
		// UNREACHABLE_GARBAGE pack.
		return d.getPackSource() == UNREACHABLE_GARBAGE
				&& garbageTtlMillis > 0
				&& now - d.getLastModified() >= garbageTtlMillis;
	}

	private boolean packIsCoalesceableGarbage(DfsPackDescription d, long now) {
		// An UNREACHABLE_GARBAGE pack can be coalesced if its size is less
		// than the coalesceGarbageLimit and either the garbageTtl is zero or
		// the pack was created within the current time slot (a single
		// calendar day when the garbageTtl is more than one day, otherwise
		// one third of the garbageTtl).
		//
		// When the garbageTtl is more than 24 hours, garbage packs created
		// within a single calendar day are coalesced together. This makes the
		// effective ttl of a garbage pack up to garbageTtl + 23:59 and limits
		// the number of garbage packs to a maximum of garbageTtl_in_days + 1
		// (assuming all of them are smaller than coalesceGarbageLimit).
		//
		// When the garbageTtl is less than or equal to 24 hours, garbage
		// packs created within the same one-third slice of the garbageTtl are
		// coalesced together. This makes the effective ttl of a garbage pack
		// up to garbageTtl + (garbageTtl / 3) and limits the number of
		// garbage packs to a maximum of 4 (assuming all of them are smaller
		// than coalesceGarbageLimit).
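		//
		// Worked example (illustrative numbers): with a garbageTtl of 12
		// hours the slot width is 4 hours. A GC run at 05:00 is in slot 1
		// (04:00-08:00) and coalesces garbage packs written the same day in
		// that slot, while a pack written at 03:59 (slot 0) is left alone.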

		if (d.getPackSource() != UNREACHABLE_GARBAGE
				|| d.getFileSize(PackExt.PACK) >= coalesceGarbageLimit) {
			return false;
		}

		if (garbageTtlMillis == 0) {
			return true;
		}

		long lastModified = d.getLastModified();
		long dayStartLastModified = dayStartInMillis(lastModified);
		long dayStartToday = dayStartInMillis(now);

		if (dayStartLastModified != dayStartToday) {
			return false; // this pack was not created today.
		}

		if (garbageTtlMillis > TimeUnit.DAYS.toMillis(1)) {
			return true; // ttl is more than one day and the pack was created today.
		}

		long timeInterval = garbageTtlMillis / 3;
		if (timeInterval == 0) {
			return false; // ttl is too small, don't try to coalesce.
		}

		long modifiedTimeSlot = (lastModified - dayStartLastModified) / timeInterval;
		long presentTimeSlot = (now - dayStartToday) / timeInterval;
		return modifiedTimeSlot == presentTimeSlot;
	}

	private static long dayStartInMillis(long timeInMillis) {
		Calendar cal = new GregorianCalendar(
				SystemReader.getInstance().getTimeZone());
		cal.setTimeInMillis(timeInMillis);
		cal.set(Calendar.HOUR_OF_DAY, 0);
		cal.set(Calendar.MINUTE, 0);
		cal.set(Calendar.SECOND, 0);
		cal.set(Calendar.MILLISECOND, 0);
		return cal.getTimeInMillis();
	}

	/** @return all of the source packs that fed into this compaction. */
	public List<DfsPackDescription> getSourcePacks() {
		return toPrune();
	}

	/** @return new packs created by this compaction. */
	public List<DfsPackDescription> getNewPacks() {
		return newPackDesc;
	}

	/** @return statistics corresponding to the {@link #getNewPacks()}. */
	public List<PackStatistics> getNewPackStatistics() {
		return newPackStats;
	}

	private List<DfsPackDescription> toPrune() {
		int cnt = packsBefore.size();
		List<DfsPackDescription> all = new ArrayList<>(cnt);
		for (DfsPackFile pack : packsBefore) {
			all.add(pack.getPackDescription());
		}
		for (DfsPackFile pack : expiredGarbagePacks) {
			all.add(pack.getPackDescription());
		}
		return all;
	}

	private void packHeads(ProgressMonitor pm) throws IOException {
		if (allHeadsAndTags.isEmpty())
			return;

		try (PackWriter pw = newPackWriter()) {
			pw.setTagTargets(tagTargets);
			pw.preparePack(pm, allHeadsAndTags, NONE, NONE, allTags);
			if (0 < pw.getObjectCount())
				writePack(GC, pw, pm,
						estimateGcPackSize(INSERT, RECEIVE, COMPACT, GC));
		}
	}

	private void packRest(ProgressMonitor pm) throws IOException {
		if (nonHeads.isEmpty())
			return;

		try (PackWriter pw = newPackWriter()) {
			for (ObjectIdSet packedObjs : newPackObj)
				pw.excludeObjects(packedObjs);
			pw.preparePack(pm, nonHeads, allHeadsAndTags);
			if (0 < pw.getObjectCount())
				writePack(GC_REST, pw, pm,
						estimateGcPackSize(INSERT, RECEIVE, COMPACT, GC_REST));
		}
	}

	private void packRefTreeGraph(ProgressMonitor pm) throws IOException {
		if (txnHeads.isEmpty())
			return;

		try (PackWriter pw = newPackWriter()) {
			for (ObjectIdSet packedObjs : newPackObj)
				pw.excludeObjects(packedObjs);
			pw.preparePack(pm, txnHeads, NONE);
			if (0 < pw.getObjectCount())
				writePack(GC_TXN, pw, pm, 0 /* unknown pack size */);
		}
	}

	private void packGarbage(ProgressMonitor pm) throws IOException {
		PackConfig cfg = new PackConfig(packConfig);
		cfg.setReuseDeltas(true);
		cfg.setReuseObjects(true);
		cfg.setDeltaCompress(false);
		cfg.setBuildBitmaps(false);

		try (PackWriter pw = new PackWriter(cfg, ctx);
				RevWalk pool = new RevWalk(ctx)) {
			pw.setDeltaBaseAsOffset(true);
			pw.setReuseDeltaCommits(true);
			pm.beginTask(JGitText.get().findingGarbage, objectsBefore());
			long estimatedPackSize = 12 + 20; // header and trailer sizes.
			for (DfsPackFile oldPack : packsBefore) {
				PackIndex oldIdx = oldPack.getPackIndex(ctx);
				PackReverseIndex oldRevIdx = oldPack.getReverseIdx(ctx);
				long maxOffset = oldPack.getPackDescription().getFileSize(PACK)
						- 20; // pack size - trailer size.
				for (PackIndex.MutableEntry ent : oldIdx) {
					pm.update(1);
					ObjectId id = ent.toObjectId();
					if (pool.lookupOrNull(id) != null || anyPackHas(id))
						continue;

					long offset = ent.getOffset();
					int type = oldPack.getObjectType(ctx, offset);
					pw.addObject(pool.lookupAny(id, type));
					long objSize = oldRevIdx.findNextOffset(offset, maxOffset)
							- offset;
					estimatedPackSize += objSize;
				}
			}
			pm.endTask();
			if (0 < pw.getObjectCount())
				writePack(UNREACHABLE_GARBAGE, pw, pm, estimatedPackSize);
		}
	}

	private boolean anyPackHas(AnyObjectId id) {
		for (ObjectIdSet packedObjs : newPackObj)
			if (packedObjs.contains(id))
				return true;
		return false;
	}

	private static boolean isHead(Ref ref) {
		return ref.getName().startsWith(Constants.R_HEADS);
	}

	private static boolean isTag(Ref ref) {
		return ref.getName().startsWith(Constants.R_TAGS);
	}

	private int objectsBefore() {
		int cnt = 0;
		for (DfsPackFile p : packsBefore)
			cnt += p.getPackDescription().getObjectCount();
		return cnt;
	}

	private PackWriter newPackWriter() {
		PackWriter pw = new PackWriter(packConfig, ctx);
		pw.setDeltaBaseAsOffset(true);
		pw.setReuseDeltaCommits(false);
		return pw;
	}

	private long estimateGcPackSize(PackSource first, PackSource... rest) {
		EnumSet<PackSource> sourceSet = EnumSet.of(first, rest);
		// Every pack file contains 12 bytes of header and 20 bytes of trailer.
		// Include the final pack file header and trailer size here and ignore
		// the same from individual pack files.
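		// For example, two eligible source packs of 1000 and 2000 bytes
		// yield an estimate of 32 + (1000 - 32) + (2000 - 32) = 2968 bytes.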
		long size = 32;
		for (DfsPackDescription pack : getSourcePacks()) {
			if (sourceSet.contains(pack.getPackSource())) {
				size += pack.getFileSize(PACK) - 32;
			}
		}
		return size;
	}

	private DfsPackDescription writePack(PackSource source, PackWriter pw,
			ProgressMonitor pm, long estimatedPackSize) throws IOException {
		DfsPackDescription pack = repo.getObjectDatabase().newPack(source,
				estimatedPackSize);
		newPackDesc.add(pack);

		try (DfsOutputStream out = objdb.writeFile(pack, PACK)) {
			pw.writePack(pm, pm, out);
			pack.addFileExt(PACK);
		}

		try (CountingOutputStream cnt =
				new CountingOutputStream(objdb.writeFile(pack, INDEX))) {
			pw.writeIndex(cnt);
			pack.addFileExt(INDEX);
			pack.setFileSize(INDEX, cnt.getCount());
			pack.setIndexVersion(pw.getIndexVersion());
		}

		if (pw.prepareBitmapIndex(pm)) {
			try (CountingOutputStream cnt = new CountingOutputStream(
					objdb.writeFile(pack, BITMAP_INDEX))) {
				pw.writeBitmapIndex(cnt);
				pack.addFileExt(BITMAP_INDEX);
				pack.setFileSize(BITMAP_INDEX, cnt.getCount());
			}
		}

		PackStatistics stats = pw.getStatistics();
		pack.setPackStats(stats);
		pack.setLastModified(startTimeMillis);
		newPackStats.add(stats);
		newPackObj.add(pw.getObjectSet());

		DfsBlockCache.getInstance().getOrCreate(pack, null);
		return pack;
	}
}