1   /*
2    * Copyright (C) 2008-2009, Google Inc.
3    * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.file;
46  
47  import java.io.IOException;
48  import java.lang.ref.ReferenceQueue;
49  import java.lang.ref.SoftReference;
50  import java.util.Collections;
51  import java.util.Map;
52  import java.util.Random;
53  import java.util.concurrent.ConcurrentHashMap;
54  import java.util.concurrent.ConcurrentLinkedQueue;
55  import java.util.concurrent.atomic.AtomicLong;
56  import java.util.concurrent.atomic.AtomicReferenceArray;
57  import java.util.concurrent.atomic.LongAdder;
58  import java.util.concurrent.locks.ReentrantLock;
59  import java.util.stream.Collectors;
60  
61  import org.eclipse.jgit.annotations.NonNull;
62  import org.eclipse.jgit.internal.JGitText;
63  import org.eclipse.jgit.storage.file.WindowCacheConfig;
64  import org.eclipse.jgit.storage.file.WindowCacheStats;
65  import org.eclipse.jgit.util.Monitoring;
66  
67  /**
68   * Caches slices of a {@link org.eclipse.jgit.internal.storage.file.PackFile} in
69   * memory for faster read access.
70   * <p>
71   * The WindowCache serves as a Java-based "buffer cache", loading segments of a
72   * PackFile into the JVM heap prior to use. As JGit often wants to do reads of
73   * only tiny slices of a file, the WindowCache tries to smooth out these tiny
74   * reads into larger block-sized IO operations.
75   * <p>
76   * Whenever a cache miss occurs, {@link #load(PackFile, long)} is invoked by
77   * exactly one thread for the given <code>(PackFile,position)</code> key tuple.
78   * This is ensured by an array of locks, with the tuple hashed to a lock
79   * instance.
80   * <p>
81   * During a miss, older entries are evicted from the cache so long as
82   * {@link #isFull()} returns true.
83   * <p>
84   * It's too expensive during object access to be 100% accurate with a least
85   * recently used (LRU) algorithm. Strictly ordering every read is a lot of
86   * overhead that typically doesn't yield a corresponding benefit to the
87   * application.
88   * <p>
89   * This cache implements a loose LRU policy by randomly picking a window
90   * comprising roughly 10% of the cache, and evicting the oldest accessed entry
91   * within that window.
92   * <p>
93   * Entities created by the cache are held under SoftReferences if the option
94   * {@code core.packedGitUseStrongRefs} is set to {@code false} in the git config
95   * (the default), or if it is disabled via
96   * {@link WindowCacheConfig#setPackedGitUseStrongRefs(boolean)}. This permits the
97   * Java runtime's garbage collector to evict entries when heap memory gets low.
98   * Most JREs implement a loose least recently used algorithm for this eviction.
99   * When this option is set to {@code true}, strong references are used, which
100  * means the Java garbage collector cannot evict entries from the WindowCache to
101  * reclaim memory. On the other hand, this provides more predictable performance
102  * since the cache isn't flushed when used heap approaches the maximum heap size.
103  * <p>
104  * The internal hash table does not expand at runtime; instead it is fixed in
105  * size at cache creation time. The internal lock table used to gate load
106  * invocations is also fixed in size.
107  * <p>
108  * The key tuple is passed through to methods as a pair of parameters rather
109  * than as a single Object, thus reducing the transient memory allocations of
110  * callers. It is more efficient to avoid the allocation, as we can't be 100%
111  * sure that a JIT would be able to stack-allocate a key tuple.
112  * <p>
113  * This cache follows these implementation rules:
114  * <ul>
115  * <li>{@link #load(PackFile, long)} is invoked by at most one thread at a time
116  * for a given <code>(PackFile,position)</code> tuple.</li>
117  * <li>For every <code>load()</code> invocation there is exactly one
118  * {@link #createRef(PackFile, long, ByteWindow)} invocation to wrap a
119  * SoftReference or a StrongReference around the cached entity.</li>
120  * <li>For every Reference created by <code>createRef()</code> there will be
121  * exactly one call to {@link #clear(PageRef)} to clean up any resources associated
122  * with the (now expired) cached entity.</li>
123  * </ul>
124  * <p>
125  * Therefore, it is safe to perform resource accounting increments during the
126  * {@link #load(PackFile, long)} or
127  * {@link #createRef(PackFile, long, ByteWindow)} methods, and matching
128  * decrements during {@link #clear(PageRef)}. Implementors may need to override
129  * {@link #createRef(PackFile, long, ByteWindow)} in order to embed additional
130  * accounting information into an implementation specific
131  * {@link org.eclipse.jgit.internal.storage.file.WindowCache.PageRef} subclass, as
132  * the cached entity may have already been evicted by the JRE's garbage
133  * collector.
134  * <p>
135  * To maintain concurrency under high workloads, only one thread at a time
136  * performs the eviction work, while other threads can continue to insert new
137  * objects in parallel. This means that the cache can temporarily exceed its limit,
138  * especially if the nominated eviction thread is being starved relative to the
139  * other threads.
140  */
141 public class WindowCache {
142 
143 	/**
144 	 * Record statistics for a cache
145 	 */
146 	static interface StatsRecorder {
147 		/**
148 		 * Record cache hits. Called when the cache returns a cached entry.
149 		 *
150 		 * @param count
151 		 *            number of cache hits to record
152 		 */
153 		void recordHits(int count);
154 
155 		/**
156 		 * Record cache misses. Called when the cache returns an entry which had
157 		 * to be loaded.
158 		 *
159 		 * @param count
160 		 *            number of cache misses to record
161 		 */
162 		void recordMisses(int count);
163 
164 		/**
165 		 * Record a successful load of a cache entry
166 		 *
167 		 * @param loadTimeNanos
168 		 *            time to load a cache entry
169 		 */
170 		void recordLoadSuccess(long loadTimeNanos);
171 
172 		/**
173 		 * Record a failed load of a cache entry
174 		 *
175 		 * @param loadTimeNanos
176 		 *            time used trying to load a cache entry
177 		 */
178 		void recordLoadFailure(long loadTimeNanos);
179 
180 		/**
181 		 * Record cache evictions due to the cache's eviction strategy
182 		 *
183 		 * @param count
184 		 *            number of evictions to record
185 		 */
186 		void recordEvictions(int count);
187 
188 		/**
189 		 * Record files opened by cache
190 		 *
191 		 * @param delta
192 		 *            delta of number of files opened by cache
193 		 */
194 		void recordOpenFiles(int delta);
195 
196 		/**
197 		 * Record cached bytes
198 		 *
199 		 * @param pack
200 		 *            pack file the bytes are read from
201 		 *
202 		 * @param delta
203 		 *            delta of cached bytes
204 		 */
205 		void recordOpenBytes(PackFile pack, int delta);
206 
207 		/**
208 		 * Returns a snapshot of this recorder's stats. Note that this may be an
209 		 * inconsistent view, as it may be interleaved with update operations.
210 		 *
211 		 * @return a snapshot of this recorder's stats
212 		 */
213 		@NonNull
214 		WindowCacheStats getStats();
215 	}
216 
217 	static class StatsRecorderImpl
218 			implements StatsRecorder, WindowCacheStats {
219 		private final LongAdder hitCount;
220 		private final LongAdder missCount;
221 		private final LongAdder loadSuccessCount;
222 		private final LongAdder loadFailureCount;
223 		private final LongAdder totalLoadTime;
224 		private final LongAdder evictionCount;
225 		private final LongAdder openFileCount;
226 		private final LongAdder openByteCount;
227 		private final Map<String, LongAdder> openByteCountPerRepository;
228 
229 		/**
230 		 * Constructs an instance with all counts initialized to zero.
231 		 */
232 		public StatsRecorderImpl() {
233 			hitCount = new LongAdder();
234 			missCount = new LongAdder();
235 			loadSuccessCount = new LongAdder();
236 			loadFailureCount = new LongAdder();
237 			totalLoadTime = new LongAdder();
238 			evictionCount = new LongAdder();
239 			openFileCount = new LongAdder();
240 			openByteCount = new LongAdder();
241 			openByteCountPerRepository = new ConcurrentHashMap<>();
242 		}
243 
244 		@Override
245 		public void recordHits(int count) {
246 			hitCount.add(count);
247 		}
248 
249 		@Override
250 		public void recordMisses(int count) {
251 			missCount.add(count);
252 		}
253 
254 		@Override
255 		public void recordLoadSuccess(long loadTimeNanos) {
256 			loadSuccessCount.increment();
257 			totalLoadTime.add(loadTimeNanos);
258 		}
259 
260 		@Override
261 		public void recordLoadFailure(long loadTimeNanos) {
262 			loadFailureCount.increment();
263 			totalLoadTime.add(loadTimeNanos);
264 		}
265 
266 		@Override
267 		public void recordEvictions(int count) {
268 			evictionCount.add(count);
269 		}
270 
271 		@Override
272 		public void recordOpenFiles(int delta) {
273 			openFileCount.add(delta);
274 		}
275 
276 		@Override
277 		public void recordOpenBytes(PackFile pack, int delta) {
278 			openByteCount.add(delta);
279 			String repositoryId = repositoryId(pack);
280 			LongAdder la = openByteCountPerRepository
281 					.computeIfAbsent(repositoryId, k -> new LongAdder());
282 			la.add(delta);
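			// When bytes are released, drop the repository's entry if its byte
			// count has reached zero, so unused repositories don't linger in
			// the map.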
283 			if (delta < 0) {
284 				openByteCountPerRepository.computeIfPresent(repositoryId,
285 						(k, v) -> v.longValue() == 0 ? null : v);
286 			}
287 		}
288 
289 		private static String repositoryId(PackFile pack) {
290 			// Use the repository's gitdir since the packfile doesn't know its
291 			// repository.
292 			return pack.getPackFile().getParentFile().getParentFile()
293 					.getParent();
294 		}
295 
296 		@Override
297 		public WindowCacheStats getStats() {
298 			return this;
299 		}
300 
301 		@Override
302 		public long getHitCount() {
303 			return hitCount.sum();
304 		}
305 
306 		@Override
307 		public long getMissCount() {
308 			return missCount.sum();
309 		}
310 
311 		@Override
312 		public long getLoadSuccessCount() {
313 			return loadSuccessCount.sum();
314 		}
315 
316 		@Override
317 		public long getLoadFailureCount() {
318 			return loadFailureCount.sum();
319 		}
320 
321 		@Override
322 		public long getEvictionCount() {
323 			return evictionCount.sum();
324 		}
325 
326 		@Override
327 		public long getTotalLoadTime() {
328 			return totalLoadTime.sum();
329 		}
330 
331 		@Override
332 		public long getOpenFileCount() {
333 			return openFileCount.sum();
334 		}
335 
336 		@Override
337 		public long getOpenByteCount() {
338 			return openByteCount.sum();
339 		}
340 
341 		@Override
342 		public void resetCounters() {
343 			hitCount.reset();
344 			missCount.reset();
345 			loadSuccessCount.reset();
346 			loadFailureCount.reset();
347 			totalLoadTime.reset();
348 			evictionCount.reset();
349 		}
350 
351 		@Override
352 		public Map<String, Long> getOpenByteCountPerRepository() {
353 			return Collections.unmodifiableMap(
354 					openByteCountPerRepository.entrySet().stream()
355 							.collect(Collectors.toMap(Map.Entry::getKey,
356 									e -> Long.valueOf(e.getValue().sum()),
357 									(u, v) -> v)));
358 		}
359 	}
360 
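	// Validates that the configured window size is a power of two of at least
	// 4096 bytes and returns its base-2 logarithm, the shift used below for
	// window alignment and hashing.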
361 	private static final int bits(int newSize) {
362 		if (newSize < 4096)
363 			throw new IllegalArgumentException(JGitText.get().invalidWindowSize);
364 		if (Integer.bitCount(newSize) != 1)
365 			throw new IllegalArgumentException(JGitText.get().windowSizeMustBePowerOf2);
366 		return Integer.numberOfTrailingZeros(newSize);
367 	}
368 
369 	private static final Random rng = new Random();
370 
371 	private static volatile WindowCache cache;
372 
373 	private static volatile int streamFileThreshold;
374 
375 	static {
376 		reconfigure(new WindowCacheConfig());
377 	}
378 
379 	/**
380 	 * Modify the configuration of the window cache.
381 	 * <p>
382 	 * The new configuration is applied immediately. If the new limits are
383 	 * smaller than what is currently cached, older entries will be purged
384 	 * as soon as possible to allow the cache to meet the new limit.
385 	 *
386 	 * @deprecated use {@code cfg.install()} to avoid internal reference.
387 	 * @param cfg
388 	 *            the new window cache configuration.
389 	 * @throws java.lang.IllegalArgumentException
390 	 *             the cache configuration contains one or more invalid
391 	 *             settings, usually too low of a limit.
392 	 */
393 	@Deprecated
394 	public static void reconfigure(WindowCacheConfig cfg) {
395 		final WindowCache nc = new WindowCache(cfg);
396 		final WindowCache oc = cache;
397 		if (oc != null)
398 			oc.removeAll();
399 		cache = nc;
400 		streamFileThreshold = cfg.getStreamFileThreshold();
401 		DeltaBaseCache.reconfigure(cfg);
402 	}
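
	// Illustrative sketch (not part of the original source): the non-deprecated
	// way to apply these settings is to populate a WindowCacheConfig and call
	// cfg.install(), as the @deprecated note above suggests. The values below
	// are arbitrary examples, not recommended settings.
	//
	//   WindowCacheConfig cfg = new WindowCacheConfig();
	//   cfg.setPackedGitLimit(64 * 1024 * 1024);    // total bytes to cache
	//   cfg.setPackedGitWindowSize(8 * 1024);       // bytes loaded per window
	//   cfg.setPackedGitOpenFiles(128);             // max concurrently open packs
	//   cfg.setPackedGitUseStrongRefs(false);       // allow GC-driven eviction
	//   cfg.install();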
403 
404 	static int getStreamFileThreshold() {
405 		return streamFileThreshold;
406 	}
407 
408 	/**
409 	 * @return the cached instance.
410 	 */
411 	public static WindowCache getInstance() {
412 		return cache;
413 	}
414 
415 	static final ByteWindow get(PackFile pack, long offset)
416 			throws IOException {
417 		final WindowCache c = cache;
418 		final ByteWindow r = c.getOrLoad(pack, c.toStart(offset));
419 		if (c != cache) {
420 			// The cache was reconfigured while we were using the old one
421 			// to load this window. The window is still valid, but our
422 			// cache may think it's still live. Ensure the window is removed
423 			// from the old cache so resources can be released.
424 			//
425 			c.removeAll();
426 		}
427 		return r;
428 	}
429 
430 	static final void purge(PackFile pack) {
431 		cache.removeAll(pack);
432 	}
433 
434 	/** Queue for cleaning up released and/or garbage collected windows. */
435 	private final CleanupQueue queue;
436 
437 	/** Number of entries in {@link #table}. */
438 	private final int tableSize;
439 
440 	/** Access clock for loose LRU. */
441 	private final AtomicLong clock;
442 
443 	/** Hash bucket directory; entries are chained below. */
444 	private final AtomicReferenceArray<Entry> table;
445 
446 	/** Locks to prevent concurrent loads for same (PackFile,position). */
447 	private final Lock[] locks;
448 
449 	/** Lock to elect the eviction thread after a load occurs. */
450 	private final ReentrantLock evictLock;
451 
452 	/** Number of {@link #table} buckets to scan for an eviction window. */
453 	private final int evictBatch;
454 
455 	private final int maxFiles;
456 
457 	private final long maxBytes;
458 
459 	private final boolean mmap;
460 
461 	private final int windowSizeShift;
462 
463 	private final int windowSize;
464 
465 	private final StatsRecorder statsRecorder;
466 
467 	private final StatsRecorderImpl mbean;
468 
469 	private boolean useStrongRefs;
470 
471 	private WindowCache(WindowCacheConfig cfg) {
472 		tableSize = tableSize(cfg);
473 		final int lockCount = lockCount(cfg);
474 		if (tableSize < 1)
475 			throw new IllegalArgumentException(JGitText.get().tSizeMustBeGreaterOrEqual1);
476 		if (lockCount < 1)
477 			throw new IllegalArgumentException(JGitText.get().lockCountMustBeGreaterOrEqual1);
478 
479 		clock = new AtomicLong(1);
480 		table = new AtomicReferenceArray<>(tableSize);
481 		locks = new Lock[lockCount];
482 		for (int i = 0; i < locks.length; i++)
483 			locks[i] = new Lock();
484 		evictLock = new ReentrantLock();
485 
486 		int eb = (int) (tableSize * .1);
487 		if (64 < eb)
488 			eb = 64;
489 		else if (eb < 4)
490 			eb = 4;
491 		if (tableSize < eb)
492 			eb = tableSize;
493 		evictBatch = eb;
494 
495 		maxFiles = cfg.getPackedGitOpenFiles();
496 		maxBytes = cfg.getPackedGitLimit();
497 		mmap = cfg.isPackedGitMMAP();
498 		windowSizeShift = bits(cfg.getPackedGitWindowSize());
499 		windowSize = 1 << windowSizeShift;
500 		useStrongRefs = cfg.isPackedGitUseStrongRefs();
501 		queue = useStrongRefs ? new StrongCleanupQueue(this)
502 				: new SoftCleanupQueue(this);
503 
504 		mbean = new StatsRecorderImpl();
505 		statsRecorder = mbean;
506 		Monitoring.registerMBean(mbean, "block_cache"); //$NON-NLS-1$
507 
508 		if (maxFiles < 1)
509 			throw new IllegalArgumentException(JGitText.get().openFilesMustBeAtLeast1);
510 		if (maxBytes < windowSize)
511 			throw new IllegalArgumentException(JGitText.get().windowSizeMustBeLesserThanLimit);
512 	}
513 
514 	/**
515 	 * @return cache statistics for the WindowCache
516 	 */
517 	public WindowCacheStats getStats() {
518 		return statsRecorder.getStats();
519 	}
520 
521 	/**
522 	 * Reset stats. Does not reset open bytes and open files stats.
523 	 */
524 	public void resetStats() {
525 		mbean.resetCounters();
526 	}
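
	// Illustrative sketch (not part of the original source): reading statistics
	// from the process-wide cache instance through the public API above.
	//
	//   WindowCacheStats stats = WindowCache.getInstance().getStats();
	//   long hits = stats.getHitCount();
	//   long misses = stats.getMissCount();
	//   long cachedBytes = stats.getOpenByteCount();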
527 
528 	private int hash(int packHash, long off) {
529 		return packHash + (int) (off >>> windowSizeShift);
530 	}
531 
532 	private ByteWindow load(PackFile pack, long offset) throws IOException {
533 		long startTime = System.nanoTime();
534 		if (pack.beginWindowCache())
535 			statsRecorder.recordOpenFiles(1);
536 		try {
537 			if (mmap)
538 				return pack.mmap(offset, windowSize);
539 			ByteArrayWindow w = pack.read(offset, windowSize);
540 			statsRecorder.recordLoadSuccess(System.nanoTime() - startTime);
541 			return w;
542 		} catch (IOException | RuntimeException | Error e) {
543 			close(pack);
544 			statsRecorder.recordLoadFailure(System.nanoTime() - startTime);
545 			throw e;
546 		} finally {
547 			statsRecorder.recordMisses(1);
548 		}
549 	}
550 
551 	private PageRef<ByteWindow> createRef(PackFile p, long o, ByteWindow v) {
552 		final PageRef<ByteWindow> ref = useStrongRefs
553 				? new StrongRef(p, o, v, queue)
554 				: new SoftRef(p, o, v, (SoftCleanupQueue) queue);
555 		statsRecorder.recordOpenBytes(ref.getPack(), ref.getSize());
556 		return ref;
557 	}
558 
559 	private void clear(PageRef<ByteWindow> ref) {
560 		statsRecorder.recordOpenBytes(ref.getPack(), -ref.getSize());
561 		statsRecorder.recordEvictions(1);
562 		close(ref.getPack());
563 	}
564 
565 	private void close(PackFile pack) {
566 		if (pack.endWindowCache()) {
567 			statsRecorder.recordOpenFiles(-1);
568 		}
569 	}
570 
571 	private boolean isFull() {
572 		return maxFiles < mbean.getOpenFileCount()
573 				|| maxBytes < mbean.getOpenByteCount();
574 	}
575 
576 	private long toStart(long offset) {
577 		return (offset >>> windowSizeShift) << windowSizeShift;
578 	}
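
	// Worked example (illustrative): with an 8 KiB window size, windowSizeShift
	// is 13, so toStart(20000) == (20000 >>> 13) << 13 == 16384, the byte offset
	// where the window containing position 20000 begins. hash() then mixes the
	// pack's hash with the window index (offset >>> 13) to pick a bucket.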
579 
580 	private static int tableSize(WindowCacheConfig cfg) {
581 		final int wsz = cfg.getPackedGitWindowSize();
582 		final long limit = cfg.getPackedGitLimit();
583 		if (wsz <= 0)
584 			throw new IllegalArgumentException(JGitText.get().invalidWindowSize);
585 		if (limit < wsz)
586 			throw new IllegalArgumentException(JGitText.get().windowSizeMustBeLesserThanLimit);
587 		return (int) Math.min(5 * (limit / wsz) / 2, 2000000000);
588 	}
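
	// Worked example (illustrative): assuming, for illustration, a 10 MiB
	// packedGitLimit and an 8 KiB packedGitWindowSize, limit / wsz == 1280
	// possible windows, so the table gets 5 * 1280 / 2 == 3200 buckets, i.e.
	// roughly 2.5 buckets per window the cache could hold at its byte limit.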
589 
590 	private static int lockCount(WindowCacheConfig cfg) {
591 		return Math.max(cfg.getPackedGitOpenFiles(), 32);
592 	}
593 
594 	/**
595 	 * Lookup a cached object, creating and loading it if it doesn't exist.
596 	 *
597 	 * @param pack
598 	 *            the pack that "contains" the cached object.
599 	 * @param position
600 	 *            offset within <code>pack</code> of the object.
601 	 * @return the object reference.
602 	 * @throws IOException
603 	 *             the object reference was not in the cache and could not be
604 	 *             obtained by {@link #load(PackFile, long)}.
605 	 */
606 	private ByteWindow getOrLoad(PackFile pack, long position)
607 			throws IOException {
608 		final int slot = slot(pack, position);
609 		final Entry e1 = table.get(slot);
610 		ByteWindow v = scan(e1, pack, position);
611 		if (v != null) {
612 			statsRecorder.recordHits(1);
613 			return v;
614 		}
615 
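		// Re-check the bucket after acquiring the load lock for this
		// (pack, position) tuple: another thread may have loaded and published
		// this window while we were waiting, in which case return it without
		// loading again.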
616 		synchronized (lock(pack, position)) {
617 			Entry e2 = table.get(slot);
618 			if (e2 != e1) {
619 				v = scan(e2, pack, position);
620 				if (v != null) {
621 					statsRecorder.recordHits(1);
622 					return v;
623 				}
624 			}
625 
626 			v = load(pack, position);
627 			final PageRef<ByteWindow> ref = createRef(pack, position, v);
628 			hit(ref);
629 			for (;;) {
630 				final Entry n = new Entry(clean(e2), ref);
631 				if (table.compareAndSet(slot, e2, n))
632 					break;
633 				e2 = table.get(slot);
634 			}
635 		}
636 
637 		if (evictLock.tryLock()) {
638 			try {
639 				gc();
640 				evict();
641 			} finally {
642 				evictLock.unlock();
643 			}
644 		}
645 
646 		return v;
647 	}
648 
649 	private ByteWindow scan(Entry n, PackFile pack, long position) {
650 		for (; n != null; n = n.next) {
651 			final PageRef<ByteWindow> r = n.ref;
652 			if (r.getPack() == pack && r.getPosition() == position) {
653 				final ByteWindow v = r.get();
654 				if (v != null) {
655 					hit(r);
656 					return v;
657 				}
658 				n.kill();
659 				break;
660 			}
661 		}
662 		return null;
663 	}
664 
665 	private void hit(PageRef r) {
666 		// We don't need to be 100% accurate here. It's sufficient that at least
667 		// one thread performs the increment. Any other concurrent access at
668 		// exactly the same time can simply use the same clock value.
669 		//
670 		// Consequently we attempt the set, but we don't try to recover should
671 		// it fail. This is why we don't use getAndIncrement() here.
672 		//
673 		final long c = clock.get();
674 		clock.compareAndSet(c, c + 1);
675 		r.setLastAccess(c);
676 	}
677 
678 	private void evict() {
679 		while (isFull()) {
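			// Sample a run of evictBatch consecutive buckets starting at a
			// random index and evict the least recently used live entry found
			// there; repeat while the cache remains over its limits.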
680 			int ptr = rng.nextInt(tableSize);
681 			Entry old = null;
682 			int slot = 0;
683 			for (int b = evictBatch - 1; b >= 0; b--, ptr++) {
684 				if (tableSize <= ptr)
685 					ptr = 0;
686 				for (Entry e = table.get(ptr); e != null; e = e.next) {
687 					if (e.dead)
688 						continue;
689 					if (old == null || e.ref.getLastAccess() < old.ref
690 							.getLastAccess()) {
691 						old = e;
692 						slot = ptr;
693 					}
694 				}
695 			}
696 			if (old != null) {
697 				old.kill();
698 				gc();
699 				final Entry e1 = table.get(slot);
700 				table.compareAndSet(slot, e1, clean(e1));
701 			}
702 		}
703 	}
704 
705 	/**
706 	 * Clear every entry from the cache.
707 	 * <p>
708 	 * This is a last-ditch effort to clear out the cache, such as before it
709 	 * gets replaced by another cache that is configured differently. This
710 	 * method tries to force every cached entry through {@link #clear(PageRef)} to
711 	 * ensure that resources are correctly accounted for and cleaned up by the
712 	 * subclass. A concurrent reader loading entries while this method is
713 	 * running may cause resource accounting failures.
714 	 */
715 	private void removeAll() {
716 		for (int s = 0; s < tableSize; s++) {
717 			Entry e1;
718 			do {
719 				e1 = table.get(s);
720 				for (Entry e = e1; e != null; e = e.next)
721 					e.kill();
722 			} while (!table.compareAndSet(s, e1, null));
723 		}
724 		gc();
725 	}
726 
727 	/**
728 	 * Clear all entries related to a single file.
729 	 * <p>
730 	 * Typically this method is invoked during {@link PackFile#close()}, when we
731 	 * know the pack is never going to be useful to us again (for example, it no
732 	 * longer exists on disk). A concurrent reader loading an entry from this
733 	 * same pack may cause the pack to become stuck in the cache anyway.
734 	 *
735 	 * @param pack
736 	 *            the file to purge all entries of.
737 	 */
738 	private void removeAll(PackFile pack) {
739 		for (int s = 0; s < tableSize; s++) {
740 			final Entry e1 = table.get(s);
741 			boolean hasDead = false;
742 			for (Entry e = e1; e != null; e = e.next) {
743 				if (e.ref.getPack() == pack) {
744 					e.kill();
745 					hasDead = true;
746 				} else if (e.dead)
747 					hasDead = true;
748 			}
749 			if (hasDead)
750 				table.compareAndSet(s, e1, clean(e1));
751 		}
752 		gc();
753 	}
754 
755 	private void gc() {
756 		queue.gc();
757 	}
758 
759 	private int slot(PackFile pack, long position) {
760 		return (hash(pack.hash, position) >>> 1) % tableSize;
761 	}
762 
763 	private Lock lock(PackFile pack, long position) {
764 		return locks[(hash(pack.hash, position) >>> 1) % locks.length];
765 	}
766 
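	// Rebuild a bucket chain without its dead entries. Dead entries at the head
	// are dropped (killing their refs), the remainder of the chain is cleaned
	// recursively, and an unchanged tail is reused rather than copied.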
767 	private static Entry clean(Entry top) {
768 		while (top != null && top.dead) {
769 			top.ref.kill();
770 			top = top.next;
771 		}
772 		if (top == null)
773 			return null;
774 		final Entry n = clean(top.next);
775 		return n == top.next ? top : new Entry(n, top.ref);
776 	}
777 
778 	private static class Entry {
779 		/** Next entry in the hash table's chain list. */
780 		final Entry next;
781 
782 		/** The referenced object. */
783 		final PageRef<ByteWindow> ref;
784 
785 		/**
786 		 * Marked true when ref.get() returns null and the ref is dead.
787 		 * <p>
788 		 * A true here indicates that the ref is no longer accessible, and that
789 		 * we therefore need to eventually purge this Entry object out of the
790 		 * bucket's chain.
791 		 */
792 		volatile boolean dead;
793 
794 		Entry(Entry n, PageRef<ByteWindow> r) {
795 			next = n;
796 			ref = r;
797 		}
798 
799 		final void kill() {
800 			dead = true;
801 			ref.kill();
802 		}
803 	}
804 
805 	private static interface PageRef<T> {
806 		/**
807 		 * Returns this reference object's referent. If this reference object
808 		 * has been cleared, either by the program or by the garbage collector,
809 		 * then this method returns <code>null</code>.
810 		 *
811 		 * @return The object to which this reference refers, or
812 		 *         <code>null</code> if this reference object has been cleared
813 		 */
814 		T get();
815 
816 		/**
817 		 * Kill this ref
818 		 *
819 		 * @return <code>true</code> if this reference object was successfully
820 		 *         killed; <code>false</code> if it was already killed
821 		 */
822 		boolean kill();
823 
824 		/**
825 		 * Get the packfile the referenced cache page is allocated for
826 		 *
827 		 * @return the packfile the referenced cache page is allocated for
828 		 */
829 		PackFile getPack();
830 
831 		/**
832 		 * Get the position of the referenced cache page in the packfile
833 		 *
834 		 * @return the position of the referenced cache page in the packfile
835 		 */
836 		long getPosition();
837 
838 		/**
839 		 * Get size of cache page
840 		 *
841 		 * @return size of cache page
842 		 */
843 		int getSize();
844 
845 		/**
846 		 * Get pseudo time of last access to this cache page
847 		 *
848 		 * @return pseudo time of last access to this cache page
849 		 */
850 		long getLastAccess();
851 
852 		/**
853 		 * Set pseudo time of last access to this cache page
854 		 *
855 		 * @param time
856 		 *            pseudo time of last access to this cache page
857 		 */
858 		void setLastAccess(long time);
859 
860 		/**
861 		 * Whether this is a strong reference.
862 		 * @return {@code true} if this is a strong reference
863 		 */
864 		boolean isStrongRef();
865 	}
866 
867 	/** A soft reference wrapped around a cached object. */
868 	private static class SoftRef extends SoftReference<ByteWindow>
869 			implements PageRef<ByteWindow> {
870 		private final PackFile pack;
871 
872 		private final long position;
873 
874 		private final int size;
875 
876 		private long lastAccess;
877 
878 		protected SoftRef(final PackFile pack, final long position,
879 				final ByteWindow v, final SoftCleanupQueue queue) {
880 			super(v, queue);
881 			this.pack = pack;
882 			this.position = position;
883 			this.size = v.size();
884 		}
885 
886 		@Override
887 		public PackFile getPack() {
888 			return pack;
889 		}
890 
891 		@Override
892 		public long getPosition() {
893 			return position;
894 		}
895 
896 		@Override
897 		public int getSize() {
898 			return size;
899 		}
900 
901 		@Override
902 		public long getLastAccess() {
903 			return lastAccess;
904 		}
905 
906 		@Override
907 		public void setLastAccess(long time) {
908 			this.lastAccess = time;
909 		}
910 
911 		@Override
912 		public boolean kill() {
913 			return enqueue();
914 		}
915 
916 		@Override
917 		public boolean isStrongRef() {
918 			return false;
919 		}
920 	}
921 
922 	/** A strong reference wrapped around a cached object. */
923 	private static class StrongRef implements PageRef<ByteWindow> {
924 		private ByteWindow referent;
925 
926 		private final PackFile pack;
927 
928 		private final long position;
929 
930 		private final int size;
931 
932 		private long lastAccess;
933 
934 		private CleanupQueue queue;
935 
936 		protected StrongRef(final PackFile pack, final long position,
937 				final ByteWindow v, final CleanupQueue queue) {
938 			this.pack = pack;
939 			this.position = position;
940 			this.referent = v;
941 			this.size = v.size();
942 			this.queue = queue;
943 		}
944 
945 		@Override
946 		public PackFile getPack() {
947 			return pack;
948 		}
949 
950 		@Override
951 		public long getPosition() {
952 			return position;
953 		}
954 
955 		@Override
956 		public int getSize() {
957 			return size;
958 		}
959 
960 		@Override
961 		public long getLastAccess() {
962 			return lastAccess;
963 		}
964 
965 		@Override
966 		public void setLastAccess(long time) {
967 			this.lastAccess = time;
968 		}
969 
970 		@Override
971 		public ByteWindow get() {
972 			return referent;
973 		}
974 
975 		@Override
976 		public boolean kill() {
977 			if (referent == null) {
978 				return false;
979 			}
980 			referent = null;
981 			return queue.enqueue(this);
982 		}
983 
984 		@Override
985 		public boolean isStrongRef() {
986 			return true;
987 		}
988 	}
989 
990 	private static interface CleanupQueue {
991 		boolean enqueue(PageRef<ByteWindow> r);
992 		void gc();
993 	}
994 
995 	private static class SoftCleanupQueue extends ReferenceQueue<ByteWindow>
996 			implements CleanupQueue {
997 		private final WindowCache wc;
998 
999 		SoftCleanupQueue(WindowCache cache) {
1000 			this.wc = cache;
1001 		}
1002 
1003 		@Override
1004 		public boolean enqueue(PageRef<ByteWindow> r) {
1005 			// No need to explicitly enqueue soft references; the JVM enqueues
1006 			// them once the garbage collector clears their referents.
1007 			return false;
1008 		}
1009 
1010 		@Override
1011 		public void gc() {
1012 			SoftRef r;
1013 			while ((r = (SoftRef) poll()) != null) {
1014 				wc.clear(r);
1015 
1016 				final int s = wc.slot(r.getPack(), r.getPosition());
1017 				final Entry e1 = wc.table.get(s);
1018 				for (Entry n = e1; n != null; n = n.next) {
1019 					if (n.ref == r) {
1020 						n.dead = true;
1021 						wc.table.compareAndSet(s, e1, clean(e1));
1022 						break;
1023 					}
1024 				}
1025 			}
1026 		}
1027 	}
1028 
1029 	private static class StrongCleanupQueue implements CleanupQueue {
1030 		private final WindowCache wc;
1031 
1032 		private final ConcurrentLinkedQueue<PageRef<ByteWindow>> queue = new ConcurrentLinkedQueue<>();
1033 
1034 		StrongCleanupQueue(WindowCache wc) {
1035 			this.wc = wc;
1036 		}
1037 
1038 		@Override
1039 		public boolean enqueue(PageRef<ByteWindow> r) {
1040 			if (queue.contains(r)) {
1041 				return false;
1042 			}
1043 			return queue.add(r);
1044 		}
1045 
1046 		@Override
1047 		public void gc() {
1048 			PageRef<ByteWindow> r;
1049 			while ((r = queue.poll()) != null) {
1050 				wc.clear(r);
1051 
1052 				final int s = wc.slot(r.getPack(), r.getPosition());
1053 				final Entry e1 = wc.table.get(s);
1054 				for (Entry n = e1; n != null; n = n.next) {
1055 					if (n.ref == r) {
1056 						n.dead = true;
1057 						wc.table.compareAndSet(s, e1, clean(e1));
1058 						break;
1059 					}
1060 				}
1061 			}
1062 		}
1063 	}
1064 
1065 	private static final class Lock {
1066 		// Used only for its implicit monitor.
1067 	}
1068 }