/*
 * Copyright (C) 2008-2011, Google Inc.
 * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.dfs;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.stream.LongStream;

import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.internal.storage.pack.PackExt;
/**
 * Caches slices of a
 * {@link org.eclipse.jgit.internal.storage.dfs.BlockBasedFile} in memory for
 * faster read access.
 * <p>
 * The DfsBlockCache serves as a Java based "buffer cache", loading segments of
 * a BlockBasedFile into the JVM heap prior to use. As JGit often wants to do
 * reads of only tiny slices of a file, the DfsBlockCache tries to smooth out
 * these tiny reads into larger block-sized IO operations.
 * <p>
 * Whenever a cache miss occurs, loading is invoked by exactly one thread for
 * the given <code>(DfsStreamKey,position)</code> key tuple. This is ensured by
 * an array of locks, with the tuple hashed to a lock instance.
 * <p>
 * It is too expensive during object access to be accurate with a least
 * recently used (LRU) algorithm. Strictly ordering every read is a lot of
 * overhead that typically doesn't yield a corresponding benefit to the
 * application. This cache implements a clock replacement algorithm, giving
 * each block one chance to have been accessed during a sweep of the cache to
 * save itself from eviction.
 * <p>
 * Entities created by the cache are held under hard references, preventing the
 * Java VM from clearing anything. Blocks are discarded by the replacement
 * algorithm when adding a new block would cause the cache to exceed its
 * configured maximum size.
 * <p>
 * The key tuple is passed through to methods as a pair of parameters rather
 * than as a single Object, thus reducing the transient memory allocations of
 * callers. It is more efficient to avoid the allocation, as we can't be 100%
 * sure that a JIT would be able to stack-allocate a key tuple.
 * <p>
 * The internal hash table does not expand at runtime; instead it is fixed in
 * size at cache creation time. The internal lock table used to gate load
 * invocations is also fixed in size.
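 * <p>
 * A minimal configuration sketch, assuming the fluent setters of
 * {@link DfsBlockCacheConfig} (the sizes are illustrative only):
 *
 * <pre>{@code
 * DfsBlockCache.reconfigure(new DfsBlockCacheConfig()
 * 		.setBlockSize(64 * 1024) // must be a power of 2
 * 		.setBlockLimit(256 * 1024 * 1024));
 * DfsBlockCache cache = DfsBlockCache.getInstance();
 * }</pre>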
 */
public final class DfsBlockCache {
	private static volatile DfsBlockCache cache;

	static {
		reconfigure(new DfsBlockCacheConfig());
	}

	/**
	 * Modify the configuration of the window cache.
	 * <p>
	 * The new configuration is applied immediately, and the existing cache is
	 * cleared.
	 *
	 * @param cfg
	 *            the new window cache configuration.
	 * @throws java.lang.IllegalArgumentException
	 *             the cache configuration contains one or more invalid
	 *             settings, usually too low of a limit.
	 */
	public static void reconfigure(DfsBlockCacheConfig cfg) {
		cache = new DfsBlockCache(cfg);
	}

	/**
	 * Get the currently active DfsBlockCache.
	 *
	 * @return the currently active DfsBlockCache.
	 */
	public static DfsBlockCache getInstance() {
		return cache;
	}

	/** Number of entries in {@link #table}. */
	private final int tableSize;

	/** Hash bucket directory; entries are chained below. */
	private final AtomicReferenceArray<HashEntry> table;

	/**
	 * Locks to prevent concurrent loads of the same (PackFile,position)
	 * block. The number of locks is
	 * {@link DfsBlockCacheConfig#getConcurrencyLevel()}, which caps the
	 * overall number of concurrent block loads.
	 */
	private final ReentrantLock[] loadLocks;

	/**
	 * A separate pool of locks to prevent concurrent loads of the same index
	 * or bitmap from a PackFile.
	 */
	private final ReentrantLock[] refLocks;

	/** Maximum number of bytes the cache should hold. */
	private final long maxBytes;

	/** Pack files smaller than this size can be copied through the cache. */
	private final long maxStreamThroughCache;

	/**
	 * Suggested block size to read from pack files in.
	 * <p>
	 * If a pack file does not have a native block size, this size will be used.
	 * <p>
	 * If a pack file has a native size, a whole multiple of the native size
	 * will be used until it matches this size.
	 * <p>
	 * The value for blockSize must be a power of 2.
	 */
	private final int blockSize;

	/** As {@link #blockSize} is a power of 2, bits to shift for a / blockSize. */
	private final int blockSizeShift;

	/**
	 * Number of times a block was found in the cache, per pack file extension.
	 */
	private final AtomicReference<AtomicLong[]> statHit;

	/**
	 * Number of times a block was not found, and had to be loaded, per pack
	 * file extension.
	 */
	private final AtomicReference<AtomicLong[]> statMiss;

	/**
	 * Number of blocks evicted due to cache being full, per pack file
	 * extension.
	 */
	private final AtomicReference<AtomicLong[]> statEvict;

	/**
	 * Number of bytes currently loaded in the cache, per pack file extension.
	 */
	private final AtomicReference<AtomicLong[]> liveBytes;

	/** Protects the clock and its related data. */
	private final ReentrantLock clockLock;

	/**
	 * Consumer of the time, in milliseconds, spent waiting to acquire a
	 * reference lock. May be used to build a metric.
	 */
	private final Consumer<Long> refLockWaitTime;

	/** Current position of the clock. */
	private Ref clockHand;

	@SuppressWarnings("unchecked")
	private DfsBlockCache(DfsBlockCacheConfig cfg) {
		tableSize = tableSize(cfg);
		if (tableSize < 1) {
			throw new IllegalArgumentException(JGitText.get().tSizeMustBeGreaterOrEqual1);
		}

		table = new AtomicReferenceArray<>(tableSize);
		loadLocks = new ReentrantLock[cfg.getConcurrencyLevel()];
		for (int i = 0; i < loadLocks.length; i++) {
			loadLocks[i] = new ReentrantLock(true /* fair */);
		}
		refLocks = new ReentrantLock[cfg.getConcurrencyLevel()];
		for (int i = 0; i < refLocks.length; i++) {
			refLocks[i] = new ReentrantLock(true /* fair */);
		}

		maxBytes = cfg.getBlockLimit();
		maxStreamThroughCache = (long) (maxBytes * cfg.getStreamRatio());
		blockSize = cfg.getBlockSize();
		blockSizeShift = Integer.numberOfTrailingZeros(blockSize);

		clockLock = new ReentrantLock(true /* fair */);
		String none = ""; //$NON-NLS-1$
		clockHand = new Ref<>(
				DfsStreamKey.of(new DfsRepositoryDescription(none), none, null),
				-1, 0, null);
		clockHand.next = clockHand;

		statHit = new AtomicReference<>(newCounters());
		statMiss = new AtomicReference<>(newCounters());
		statEvict = new AtomicReference<>(newCounters());
		liveBytes = new AtomicReference<>(newCounters());

		refLockWaitTime = cfg.getRefLockWaitTimeConsumer();
	}

	boolean shouldCopyThroughCache(long length) {
		return length <= maxStreamThroughCache;
	}

	/**
	 * Get total number of bytes in the cache, per pack file extension.
	 *
	 * @return total number of bytes in the cache, per pack file extension.
	 */
	public long[] getCurrentSize() {
		return getStatVals(liveBytes);
	}

	/**
	 * Get 0..100, defining how full the cache is.
	 *
	 * @return 0..100, defining how full the cache is.
	 */
	public long getFillPercentage() {
		return LongStream.of(getCurrentSize()).sum() * 100 / maxBytes;
	}

	/**
	 * Get number of requests for items in the cache, per pack file extension.
	 *
	 * @return number of requests for items in the cache, per pack file
	 *         extension.
	 */
	public long[] getHitCount() {
		return getStatVals(statHit);
	}

	/**
	 * Get number of requests for items not in the cache, per pack file
	 * extension.
	 *
	 * @return number of requests for items not in the cache, per pack file
	 *         extension.
	 */
	public long[] getMissCount() {
		return getStatVals(statMiss);
	}

	/**
	 * Get total number of requests (hit + miss), per pack file extension.
	 *
	 * @return total number of requests (hit + miss), per pack file extension.
	 */
	public long[] getTotalRequestCount() {
		AtomicLong[] hit = statHit.get();
		AtomicLong[] miss = statMiss.get();
		long[] cnt = new long[Math.max(hit.length, miss.length)];
		for (int i = 0; i < hit.length; i++) {
			cnt[i] += hit[i].get();
		}
		for (int i = 0; i < miss.length; i++) {
			cnt[i] += miss[i].get();
		}
		return cnt;
	}

	/**
	 * Get cache hit ratios, scaled to 0..100, per pack file extension; e.g. 3
	 * hits and 1 miss yield a ratio of 75.
	 *
	 * @return hit ratios, scaled to 0..100, per pack file extension.
	 */
	public long[] getHitRatio() {
		AtomicLong[] hit = statHit.get();
		AtomicLong[] miss = statMiss.get();
		long[] ratio = new long[Math.max(hit.length, miss.length)];
		for (int i = 0; i < ratio.length; i++) {
			if (i >= hit.length) {
				ratio[i] = 0;
			} else if (i >= miss.length) {
				ratio[i] = 100;
			} else {
				long hitVal = hit[i].get();
				long missVal = miss[i].get();
				long total = hitVal + missVal;
				ratio[i] = total == 0 ? 0 : hitVal * 100 / total;
			}
		}
		return ratio;
	}

	/**
	 * Get number of evictions performed due to cache being full, per pack file
	 * extension.
	 *
	 * @return number of evictions performed due to cache being full, per pack
	 *         file extension.
	 */
	public long[] getEvictions() {
		return getStatVals(statEvict);
	}

	/**
	 * Quickly check if the cache contains block 0 of the given stream.
	 * <p>
	 * This can be useful for sophisticated pre-read algorithms to quickly
	 * determine if a file is likely already in cache, especially small
	 * reftables which may be smaller than a typical DFS block size.
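	 * <p>
	 * A hypothetical pre-read check (sketch only):
	 *
	 * <pre>{@code
	 * if (!DfsBlockCache.getInstance().hasBlock0(key)) {
	 * 	// schedule an asynchronous prefetch of the file
	 * }
	 * }</pre>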
	 *
	 * @param key
	 *            the file to check.
	 * @return true if block 0 (the first block) is in the cache.
	 */
	public boolean hasBlock0(DfsStreamKey key) {
		HashEntry e1 = table.get(slot(key, 0));
		DfsBlock v = scan(e1, key, 0);
		return v != null && v.contains(key, 0);
	}

	private int hash(int packHash, long off) {
		return packHash + (int) (off >>> blockSizeShift);
	}

	int getBlockSize() {
		return blockSize;
	}

	private static int tableSize(DfsBlockCacheConfig cfg) {
		final int wsz = cfg.getBlockSize();
		final long limit = cfg.getBlockLimit();
		if (wsz <= 0) {
			throw new IllegalArgumentException(JGitText.get().invalidWindowSize);
		}
		if (limit < wsz) {
			throw new IllegalArgumentException(JGitText.get().windowSizeMustBeLesserThanLimit);
		}
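		// Allocate roughly 2.5 hash buckets per block the limit can hold;
		// e.g. a 256 MiB limit with 64 KiB blocks gives 4096 blocks and a
		// table of 10240 buckets.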
		return (int) Math.min(5 * (limit / wsz) / 2, Integer.MAX_VALUE);
	}

	/**
	 * Look up a cached object, creating and loading it if it doesn't exist.
	 *
	 * @param file
	 *            the pack that "contains" the cached object.
	 * @param position
	 *            offset within {@code file} of the object.
	 * @param ctx
	 *            current thread's reader.
	 * @param fileChannel
	 *            supplier for channel to read {@code file}.
	 * @return the object reference.
	 * @throws IOException
	 *             the reference was not in the cache and could not be loaded.
	 */
	DfsBlock getOrLoad(BlockBasedFile file, long position, DfsReader ctx,
			ReadableChannelSupplier fileChannel) throws IOException {
		final long requestedPosition = position;
		position = file.alignToBlock(position);

		DfsStreamKey key = file.key;
		int slot = slot(key, position);
		HashEntry e1 = table.get(slot);
		DfsBlock v = scan(e1, key, position);
		if (v != null && v.contains(key, requestedPosition)) {
			ctx.stats.blockCacheHit++;
			getStat(statHit, key).incrementAndGet();
			return v;
		}

		reserveSpace(blockSize, key);
		ReentrantLock regionLock = lockFor(key, position);
		regionLock.lock();
		try {
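			// Recheck the bucket while holding the load lock; another
			// thread may have loaded this block while we waited.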
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				v = scan(e2, key, position);
				if (v != null) {
					ctx.stats.blockCacheHit++;
					getStat(statHit, key).incrementAndGet();
					creditSpace(blockSize, key);
					return v;
				}
			}

			getStat(statMiss, key).incrementAndGet();
			boolean credit = true;
			try {
				v = file.readOneBlock(position, ctx, fileChannel.get());
				credit = false;
			} finally {
				if (credit) {
					creditSpace(blockSize, key);
				}
			}
			if (position != v.start) {
				// The file discovered its blockSize and adjusted.
				position = v.start;
				slot = slot(key, position);
				e2 = table.get(slot);
			}

			Ref<DfsBlock> ref = new Ref<>(key, position, v.size(), v);
			ref.hot = true;
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n)) {
					break;
				}
				e2 = table.get(slot);
			}
			addToClock(ref, blockSize - v.size());
		} finally {
			regionLock.unlock();
		}

		// If the block size changed from the default, it is possible the block
		// that was loaded is the wrong block for the requested position.
		if (v.contains(file.key, requestedPosition)) {
			return v;
		}
		return getOrLoad(file, requestedPosition, ctx, fileChannel);
	}

	@SuppressWarnings("unchecked")
	private void reserveSpace(long reserve, DfsStreamKey key) {
		clockLock.lock();
		try {
			long live = LongStream.of(getCurrentSize()).sum() + reserve;
			if (maxBytes < live) {
				Ref prev = clockHand;
				Ref hand = clockHand.next;
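				// Sweep the clock: blocks touched since the last pass get a
				// second chance (hot bit cleared); cold blocks are evicted
				// until the cache fits under maxBytes.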
				do {
					if (hand.hot) {
						// Value was recently touched. Clear
						// hot and give it another chance.
						hand.hot = false;
						prev = hand;
						hand = hand.next;
						continue;
					} else if (prev == hand) {
						break;
					}

					// No recent access since last scan, kill
					// value and remove from clock.
					Ref dead = hand;
					hand = hand.next;
					prev.next = hand;
					dead.next = null;
					dead.value = null;
					live -= dead.size;
					getStat(liveBytes, dead.key).addAndGet(-dead.size);
					getStat(statEvict, dead.key).incrementAndGet();
				} while (maxBytes < live);
				clockHand = prev;
			}
			getStat(liveBytes, key).addAndGet(reserve);
		} finally {
			clockLock.unlock();
		}
	}

	private void creditSpace(long credit, DfsStreamKey key) {
		clockLock.lock();
		try {
			getStat(liveBytes, key).addAndGet(-credit);
		} finally {
			clockLock.unlock();
		}
	}

	@SuppressWarnings("unchecked")
	private void addToClock(Ref ref, long credit) {
		clockLock.lock();
		try {
			if (credit != 0) {
				getStat(liveBytes, ref.key).addAndGet(-credit);
			}
			Ref ptr = clockHand;
			ref.next = ptr.next;
			ptr.next = ref;
			clockHand = ref;
		} finally {
			clockLock.unlock();
		}
	}

	void put(DfsBlock v) {
		put(v.stream, v.start, v.size(), v);
	}

	/**
	 * Look up a cached object, creating and loading it if it doesn't exist.
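	 * <p>
	 * A sketch of a caller-supplied loader, where {@code cache},
	 * {@code readIndex} and {@code sizeOf} are hypothetical:
	 *
	 * <pre>{@code
	 * Ref<PackIndex> idx = cache.getOrLoadRef(key, 0, () -> {
	 * 	PackIndex index = readIndex();
	 * 	return new Ref<>(key, 0, sizeOf(index), index);
	 * });
	 * }</pre>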
	 *
	 * @param key
	 *            the stream key of the pack.
	 * @param position
	 *            the position in the stream identified by {@code key};
	 *            usually 0.
	 * @param loader
	 *            the function to load the reference.
	 * @return the object reference.
	 * @throws IOException
	 *             the reference was not in the cache and could not be loaded.
	 */
	<T> Ref<T> getOrLoadRef(
			DfsStreamKey key, long position, RefLoader<T> loader)
			throws IOException {
		int slot = slot(key, position);
		HashEntry e1 = table.get(slot);
		Ref<T> ref = scanRef(e1, key, position);
		if (ref != null) {
			getStat(statHit, key).incrementAndGet();
			return ref;
		}

		ReentrantLock regionLock = lockForRef(key);
		long lockStart = System.currentTimeMillis();
		regionLock.lock();
		try {
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				ref = scanRef(e2, key, position);
				if (ref != null) {
					getStat(statHit, key).incrementAndGet();
					return ref;
				}
			}

			if (refLockWaitTime != null) {
				refLockWaitTime.accept(
						Long.valueOf(System.currentTimeMillis() - lockStart));
			}
			getStat(statMiss, key).incrementAndGet();
			ref = loader.load();
			ref.hot = true;
			// Reserve after loading to get the size of the object
			reserveSpace(ref.size, key);
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n)) {
					break;
				}
				e2 = table.get(slot);
			}
			addToClock(ref, 0);
		} finally {
			regionLock.unlock();
		}
		return ref;
	}

	<T> Ref<T> putRef(DfsStreamKey key, long size, T v) {
		return put(key, 0, size, v);
	}

	<T> Ref<T> put(DfsStreamKey key, long pos, long size, T v) {
		int slot = slot(key, pos);
		HashEntry e1 = table.get(slot);
		Ref<T> ref = scanRef(e1, key, pos);
		if (ref != null) {
			return ref;
		}

		reserveSpace(size, key);
		ReentrantLock regionLock = lockFor(key, pos);
		regionLock.lock();
		try {
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				ref = scanRef(e2, key, pos);
				if (ref != null) {
					creditSpace(size, key);
					return ref;
				}
			}

			ref = new Ref<>(key, pos, size, v);
			ref.hot = true;
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n)) {
					break;
				}
				e2 = table.get(slot);
			}
			addToClock(ref, 0);
		} finally {
			regionLock.unlock();
		}
		return ref;
	}

	boolean contains(DfsStreamKey key, long position) {
		return scan(table.get(slot(key, position)), key, position) != null;
	}

	@SuppressWarnings("unchecked")
	<T> T get(DfsStreamKey key, long position) {
		T val = (T) scan(table.get(slot(key, position)), key, position);
		if (val == null) {
			getStat(statMiss, key).incrementAndGet();
		} else {
			getStat(statHit, key).incrementAndGet();
		}
		return val;
	}

	private <T> T scan(HashEntry n, DfsStreamKey key, long position) {
		Ref<T> r = scanRef(n, key, position);
		return r != null ? r.get() : null;
	}

	@SuppressWarnings("unchecked")
	private <T> Ref<T> scanRef(HashEntry n, DfsStreamKey key, long position) {
		for (; n != null; n = n.next) {
			Ref<T> r = n.ref;
			if (r.position == position && r.key.equals(key)) {
				return r.get() != null ? r : null;
			}
		}
		return null;
	}

	private int slot(DfsStreamKey key, long position) {
		return (hash(key.hash, position) >>> 1) % tableSize;
	}

	private ReentrantLock lockFor(DfsStreamKey key, long position) {
		return loadLocks[(hash(key.hash, position) >>> 1) % loadLocks.length];
	}

	private ReentrantLock lockForRef(DfsStreamKey key) {
		return refLocks[(key.hash >>> 1) % refLocks.length];
	}

	private static AtomicLong[] newCounters() {
		AtomicLong[] ret = new AtomicLong[PackExt.values().length];
		for (int i = 0; i < ret.length; i++) {
			ret[i] = new AtomicLong();
		}
		return ret;
	}

	private static AtomicLong getStat(AtomicReference<AtomicLong[]> stats,
			DfsStreamKey key) {
		int pos = key.packExtPos;
		while (true) {
			AtomicLong[] vals = stats.get();
			if (pos < vals.length) {
				return vals[pos];
			}
			AtomicLong[] expect = vals;
			vals = new AtomicLong[Math.max(pos + 1, PackExt.values().length)];
			System.arraycopy(expect, 0, vals, 0, expect.length);
			for (int i = expect.length; i < vals.length; i++) {
				vals[i] = new AtomicLong();
			}
			if (stats.compareAndSet(expect, vals)) {
				return vals[pos];
			}
		}
	}

	private static long[] getStatVals(AtomicReference<AtomicLong[]> stat) {
		AtomicLong[] stats = stat.get();
		long[] cnt = new long[stats.length];
		for (int i = 0; i < stats.length; i++) {
			cnt[i] = stats[i].get();
		}
		return cnt;
	}

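	// Prune chain entries whose Ref has been evicted from the clock
	// (eviction nulls ref.next), reusing the unchanged tail when possible.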
	private static HashEntry clean(HashEntry top) {
		while (top != null && top.ref.next == null) {
			top = top.next;
		}
		if (top == null) {
			return null;
		}
		HashEntry n = clean(top.next);
		return n == top.next ? top : new HashEntry(n, top.ref);
	}

	private static final class HashEntry {
		/** Next entry in the hash table's chain list. */
		final HashEntry next;

		/** The referenced object. */
		final Ref ref;

		HashEntry(HashEntry n, Ref r) {
			next = n;
			ref = r;
		}
	}

	static final class Ref<T> {
		final DfsStreamKey key;
		final long position;
		final long size;
		volatile T value;
		Ref next;
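		/** Clock "second chance" bit; set on access, cleared by the sweep. */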
		volatile boolean hot;

		Ref(DfsStreamKey key, long position, long size, T v) {
			this.key = key;
			this.position = position;
			this.size = size;
			this.value = v;
		}

		T get() {
			T v = value;
			if (v != null) {
				hot = true;
			}
			return v;
		}

		boolean has() {
			return value != null;
		}
	}

	@FunctionalInterface
	interface RefLoader<T> {
		Ref<T> load() throws IOException;
	}

	/**
	 * Supplier for a readable channel.
	 */
	@FunctionalInterface
	interface ReadableChannelSupplier {
		/**
		 * @return the channel.
		 * @throws IOException
		 *             if the channel could not be opened.
		 */
		ReadableChannel get() throws IOException;
	}
}