View Javadoc
1   /*
2    * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
3    * Copyright (C) 2010, Christian Halstrick <christian.halstrick@sap.com>
4    * Copyright (C) 2010, Matthias Sohn <matthias.sohn@sap.com>
5    * Copyright (C) 2012-2013, Robin Rosenberg
6    * and other copyright owners as documented in the project's IP log.
7    *
8    * This program and the accompanying materials are made available
9    * under the terms of the Eclipse Distribution License v1.0 which
10   * accompanies this distribution, is reproduced below, and is
11   * available at http://www.eclipse.org/org/documents/edl-v10.php
12   *
13   * All rights reserved.
14   *
15   * Redistribution and use in source and binary forms, with or
16   * without modification, are permitted provided that the following
17   * conditions are met:
18   *
19   * - Redistributions of source code must retain the above copyright
20   *   notice, this list of conditions and the following disclaimer.
21   *
22   * - Redistributions in binary form must reproduce the above
23   *   copyright notice, this list of conditions and the following
24   *   disclaimer in the documentation and/or other materials provided
25   *   with the distribution.
26   *
27   * - Neither the name of the Eclipse Foundation, Inc. nor the
28   *   names of its contributors may be used to endorse or promote
29   *   products derived from this software without specific prior
30   *   written permission.
31   *
32   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
33   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
34   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
37   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
39   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
40   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
41   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
42   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
43   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
44   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45   */
46  
47  package org.eclipse.jgit.treewalk;
48  
49  import java.io.ByteArrayInputStream;
50  import java.io.File;
51  import java.io.FileInputStream;
52  import java.io.FileNotFoundException;
53  import java.io.IOException;
54  import java.io.InputStream;
55  import java.nio.ByteBuffer;
56  import java.nio.CharBuffer;
57  import java.nio.charset.CharacterCodingException;
58  import java.nio.charset.CharsetEncoder;
59  import java.security.MessageDigest;
60  import java.text.MessageFormat;
61  import java.util.Arrays;
62  import java.util.Collections;
63  import java.util.Comparator;
64  
65  import org.eclipse.jgit.api.errors.FilterFailedException;
66  import org.eclipse.jgit.attributes.AttributesNode;
67  import org.eclipse.jgit.attributes.AttributesRule;
68  import org.eclipse.jgit.diff.RawText;
69  import org.eclipse.jgit.dircache.DirCache;
70  import org.eclipse.jgit.dircache.DirCacheEntry;
71  import org.eclipse.jgit.dircache.DirCacheIterator;
72  import org.eclipse.jgit.errors.CorruptObjectException;
73  import org.eclipse.jgit.errors.MissingObjectException;
74  import org.eclipse.jgit.errors.NoWorkTreeException;
75  import org.eclipse.jgit.ignore.FastIgnoreRule;
76  import org.eclipse.jgit.ignore.IgnoreNode;
77  import org.eclipse.jgit.internal.JGitText;
78  import org.eclipse.jgit.lib.Constants;
79  import org.eclipse.jgit.lib.CoreConfig;
80  import org.eclipse.jgit.lib.CoreConfig.CheckStat;
81  import org.eclipse.jgit.lib.CoreConfig.EolStreamType;
82  import org.eclipse.jgit.lib.CoreConfig.SymLinks;
83  import org.eclipse.jgit.lib.FileMode;
84  import org.eclipse.jgit.lib.ObjectId;
85  import org.eclipse.jgit.lib.ObjectLoader;
86  import org.eclipse.jgit.lib.ObjectReader;
87  import org.eclipse.jgit.lib.Repository;
88  import org.eclipse.jgit.submodule.SubmoduleWalk;
89  import org.eclipse.jgit.treewalk.TreeWalk.OperationType;
90  import org.eclipse.jgit.util.FS;
91  import org.eclipse.jgit.util.FS.ExecutionResult;
92  import org.eclipse.jgit.util.Holder;
93  import org.eclipse.jgit.util.IO;
94  import org.eclipse.jgit.util.Paths;
95  import org.eclipse.jgit.util.RawParseUtils;
96  import org.eclipse.jgit.util.io.AutoLFInputStream;
97  import org.eclipse.jgit.util.io.EolStreamTypeUtil;
98  
99  /**
100  * Walks a working directory tree as part of a {@link TreeWalk}.
101  * <p>
102  * Most applications will want to use the standard implementation of this
103  * iterator, {@link FileTreeIterator}, as that does all IO through the standard
104  * <code>java.io</code> package. Plugins for a Java based IDE may however wish
105  * to create their own implementations of this class to allow traversal of the
106  * IDE's project space, as well as benefit from any caching the IDE may have.
107  *
108  * @see FileTreeIterator
109  */
110 public abstract class WorkingTreeIterator extends AbstractTreeIterator {
	/** Maximum bytes of filter stdout/stderr captured for exception messages. */
	private static final int MAX_EXCEPTION_TEXT_SIZE = 10 * 1024;

	/** An empty entry array, suitable for {@link #init(Entry[])}. */
	protected static final Entry[] EOF = {};

	/** Size we perform file IO in if we have to read and hash a file. */
	static final int BUFFER_SIZE = 2048;

	/**
	 * Maximum size of files which may be read fully into memory for performance
	 * reasons.
	 */
	private static final long MAXIMUM_FILE_SIZE_TO_READ_FULLY = 65536;

	/** Inherited state of this iterator, describing working tree, etc. */
	private final IteratorState state;

	/** The {@link #idBuffer()} for the current entry. */
	private byte[] contentId;

	/** Index within {@link #entries} that {@link #contentId} came from. */
	private int contentIdFromPtr;

	/** List of entries obtained from the subclass. */
	private Entry[] entries;

	/** Total number of entries in {@link #entries} that are valid. */
	private int entryCnt;

	/** Current position within {@link #entries}. */
	private int ptr;

	/** If there is a .gitignore file present, the parsed rules from it. */
	private IgnoreNode ignoreNode;

	/**
	 * Cached clean filter command. A {@link Holder} is used in order to
	 * distinguish between "not cached yet" (null holder) and a cached value
	 * of null (no clean filter configured).
	 */
	private Holder<String> cleanFilterCommandHolder;

	/**
	 * Cached EOL stream type. A {@link Holder} is used in order to distinguish
	 * between "not cached yet" (null holder) and a cached value of null.
	 */
	private Holder<EolStreamType> eolStreamTypeHolder;

	/** Repository that is the root level being iterated over */
	protected Repository repository;

	/** Cached canonical length, initialized from {@link #idBuffer()} */
	private long canonLen = -1;

	/** The offset of the content id in {@link #idBuffer()} */
	private int contentIdOffset;
166 
	/**
	 * Create a new iterator with no parent.
	 *
	 * @param options
	 *            working tree options to be used
	 */
	protected WorkingTreeIterator(WorkingTreeOptions options) {
		super();
		state = new IteratorState(options);
	}

	/**
	 * Create a new iterator with no parent and a prefix.
	 * <p>
	 * The prefix path supplied is inserted in front of all paths generated by
	 * this iterator. It is intended to be used when an iterator is being
	 * created for a subsection of an overall repository and needs to be
	 * combined with other iterators that are created to run over the entire
	 * repository namespace.
	 *
	 * @param prefix
	 *            position of this iterator in the repository tree. The value
	 *            may be null or the empty string to indicate the prefix is the
	 *            root of the repository. A trailing slash ('/') is
	 *            automatically appended if the prefix does not end in '/'.
	 * @param options
	 *            working tree options to be used
	 */
	protected WorkingTreeIterator(final String prefix,
			WorkingTreeOptions options) {
		super(prefix);
		state = new IteratorState(options);
	}

	/**
	 * Create an iterator for a subtree of an existing iterator.
	 * <p>
	 * The subtree shares the parent's {@link IteratorState} and repository.
	 *
	 * @param p
	 *            parent tree iterator.
	 */
	protected WorkingTreeIterator(final WorkingTreeIterator p) {
		super(p);
		state = p.state;
		repository = p.repository;
	}
212 
213 	/**
214 	 * Initialize this iterator for the root level of a repository.
215 	 * <p>
216 	 * This method should only be invoked after calling {@link #init(Entry[])},
217 	 * and only for the root iterator.
218 	 *
219 	 * @param repo
220 	 *            the repository.
221 	 */
222 	protected void initRootIterator(Repository repo) {
223 		repository = repo;
224 		Entry entry;
225 		if (ignoreNode instanceof PerDirectoryIgnoreNode)
226 			entry = ((PerDirectoryIgnoreNode) ignoreNode).entry;
227 		else
228 			entry = null;
229 		ignoreNode = new RootIgnoreNode(entry, repo);
230 	}
231 
	/**
	 * Define the matching {@link DirCacheIterator}, to optimize ObjectIds.
	 *
	 * Once the DirCacheIterator has been set this iterator must only be
	 * advanced by the TreeWalk that is supplied, as it assumes that itself and
	 * the corresponding DirCacheIterator are positioned on the same file path
	 * whenever {@link #idBuffer()} is invoked.
	 *
	 * @param walk
	 *            the walk that will be advancing this iterator.
	 * @param treeId
	 *            index of the matching {@link DirCacheIterator}.
	 */
	public void setDirCacheIterator(TreeWalk walk, int treeId) {
		state.walk = walk;
		state.dirCacheTree = treeId;
	}
249 
250 	@Override
251 	public boolean hasId() {
252 		if (contentIdFromPtr == ptr)
253 			return true;
254 		return (mode & FileMode.TYPE_MASK) == FileMode.TYPE_FILE;
255 	}
256 
	@Override
	public byte[] idBuffer() {
		// Reuse the id already computed for this entry, if any.
		if (contentIdFromPtr == ptr)
			return contentId;

		if (state.walk != null) {
			// If there is a matching DirCacheIterator, we can reuse
			// its idBuffer, but only if we appear to be clean against
			// the cached index information for the path.
			//
			DirCacheIterator i = state.walk.getTree(state.dirCacheTree,
							DirCacheIterator.class);
			if (i != null) {
				DirCacheEntry ent = i.getDirCacheEntry();
				if (ent != null && compareMetadata(ent) == MetadataDiff.EQUAL) {
					contentIdOffset = i.idOffset();
					contentIdFromPtr = ptr;
					return contentId = i.idBuffer();
				}
				contentIdOffset = 0;
			} else {
				contentIdOffset = 0;
			}
		}
		// Otherwise compute the id from the working tree content itself.
		switch (mode & FileMode.TYPE_MASK) {
		case FileMode.TYPE_SYMLINK:
		case FileMode.TYPE_FILE:
			contentIdFromPtr = ptr;
			return contentId = idBufferBlob(entries[ptr]);
		case FileMode.TYPE_GITLINK:
			contentIdFromPtr = ptr;
			return contentId = idSubmodule(entries[ptr]);
		}
		// Trees and unrecognized modes have no content id here.
		return zeroid;
	}
292 
	/** This iterator walks a working tree, so always {@code true}. */
	@Override
	public boolean isWorkTree() {
		return true;
	}
297 
298 	/**
299 	 * Get submodule id for given entry.
300 	 *
301 	 * @param e
302 	 * @return non-null submodule id
303 	 */
304 	protected byte[] idSubmodule(Entry e) {
305 		if (repository == null)
306 			return zeroid;
307 		File directory;
308 		try {
309 			directory = repository.getWorkTree();
310 		} catch (NoWorkTreeException nwte) {
311 			return zeroid;
312 		}
313 		return idSubmodule(directory, e);
314 	}
315 
	/**
	 * Get submodule id using the repository at the location of the entry
	 * relative to the directory.
	 *
	 * @param directory
	 *            parent directory containing the submodule working tree
	 * @param e
	 *            entry naming the submodule directory
	 * @return non-null submodule id; the zero id if the submodule repository
	 *         cannot be opened or has no resolvable HEAD
	 */
	protected byte[] idSubmodule(File directory, Entry e) {
		final Repository submoduleRepo;
		try {
			submoduleRepo = SubmoduleWalk.getSubmoduleRepository(directory,
					e.getName());
		} catch (IOException exception) {
			// Cannot open the submodule repository; treat as unknown.
			return zeroid;
		}
		if (submoduleRepo == null)
			return zeroid;

		final ObjectId head;
		try {
			head = submoduleRepo.resolve(Constants.HEAD);
		} catch (IOException exception) {
			return zeroid;
		} finally {
			// Always release the submodule repository once HEAD was read.
			submoduleRepo.close();
		}
		if (head == null)
			return zeroid;
		final byte[] id = new byte[Constants.OBJECT_ID_LENGTH];
		head.copyRawTo(id, 0);
		return id;
	}
349 
	/** The ten ASCII digit bytes '0'-'9'. Presumably used when formatting the
	 * blob header length during hashing — TODO confirm against computeHash. */
	private static final byte[] digits = { '0', '1', '2', '3', '4', '5', '6',
			'7', '8', '9' };

	/** Encoded object-type string for {@link Constants#OBJ_BLOB} ("blob"). */
	private static final byte[] hblob = Constants
			.encodedTypeString(Constants.OBJ_BLOB);
355 
	/**
	 * Compute the blob id of the given entry by hashing its (possibly
	 * filtered) content.
	 *
	 * @param e
	 *            entry to hash
	 * @return object id of the content, or the zero id if it cannot be read
	 */
	private byte[] idBufferBlob(final Entry e) {
		try {
			final InputStream is = e.openInputStream();
			if (is == null)
				return zeroid;
			try {
				state.initializeDigestAndReadBuffer();

				final long len = e.getLength();
				InputStream filteredIs = possiblyFilteredInputStream(e, is, len,
						OperationType.CHECKIN_OP);
				// canonLen was set by possiblyFilteredInputStream above.
				return computeHash(filteredIs, canonLen);
			} finally {
				safeClose(is);
			}
		} catch (IOException err) {
			// Can't read the file? Don't report the failure either.
			return zeroid;
		}
	}
376 
	/**
	 * Convenience overload of
	 * {@link #possiblyFilteredInputStream(Entry, InputStream, long, OperationType)}
	 * without an explicit operation type.
	 */
	private InputStream possiblyFilteredInputStream(final Entry e,
			final InputStream is, final long len) throws IOException {
		return possiblyFilteredInputStream(e, is, len, null);

	}
382 
383 	private InputStream possiblyFilteredInputStream(final Entry e,
384 			final InputStream is, final long len, OperationType opType)
385 			throws IOException {
386 		if (getCleanFilterCommand() == null
387 				&& getEolStreamType(opType) == EolStreamType.DIRECT) {
388 			canonLen = len;
389 			return is;
390 		}
391 
392 		if (len <= MAXIMUM_FILE_SIZE_TO_READ_FULLY) {
393 			ByteBuffer rawbuf = IO.readWholeStream(is, (int) len);
394 			byte[] raw = rawbuf.array();
395 			int n = rawbuf.limit();
396 			if (!isBinary(raw, n)) {
397 				rawbuf = filterClean(raw, n, opType);
398 				raw = rawbuf.array();
399 				n = rawbuf.limit();
400 			}
401 			canonLen = n;
402 			return new ByteArrayInputStream(raw, 0, n);
403 		}
404 
405 		if (getCleanFilterCommand() == null && isBinary(e)) {
406 				canonLen = len;
407 				return is;
408 			}
409 
410 		final InputStream lenIs = filterClean(e.openInputStream(),
411 				opType);
412 		try {
413 			canonLen = computeLength(lenIs);
414 		} finally {
415 			safeClose(lenIs);
416 		}
417 		return filterClean(is, opType);
418 	}
419 
420 	private static void safeClose(final InputStream in) {
421 		try {
422 			in.close();
423 		} catch (IOException err2) {
424 			// Suppress any error related to closing an input
425 			// stream. We don't care, we should not have any
426 			// outstanding data to flush or anything like that.
427 		}
428 	}
429 
	/**
	 * @param content
	 *            buffer to scan
	 * @param sz
	 *            number of valid bytes in {@code content}
	 * @return true if the buffer appears to hold binary (non-text) data
	 */
	private static boolean isBinary(byte[] content, int sz) {
		return RawText.isBinary(content, sz);
	}

	/**
	 * @param entry
	 *            entry whose content should be sniffed
	 * @return true if the entry's content appears to be binary data
	 * @throws IOException
	 *             the entry cannot be read
	 */
	private static boolean isBinary(Entry entry) throws IOException {
		InputStream in = entry.openInputStream();
		try {
			return RawText.isBinary(in);
		} finally {
			safeClose(in);
		}
	}
442 
	/**
	 * Run the clean filter / EOL conversion over an in-memory buffer.
	 *
	 * @param src
	 *            raw content bytes
	 * @param n
	 *            hint for the expected output size, in bytes
	 * @param opType
	 *            operation type selecting the EOL conversion; may be null
	 * @return buffer holding the filtered content
	 * @throws IOException
	 *             the filter failed
	 */
	private ByteBuffer filterClean(byte[] src, int n, OperationType opType)
			throws IOException {
		InputStream in = new ByteArrayInputStream(src);
		try {
			return IO.readWholeStream(filterClean(in, opType), n);
		} finally {
			safeClose(in);
		}
	}

	/**
	 * Convenience overload of {@link #filterClean(InputStream, OperationType)}
	 * without an explicit operation type.
	 */
	private InputStream filterClean(InputStream in) throws IOException {
		return filterClean(in, null);
	}
456 
	/**
	 * Apply EOL conversion and, if configured, the external clean filter
	 * command to the given stream.
	 *
	 * @param in
	 *            raw content stream
	 * @param opType
	 *            operation type selecting the EOL conversion; may be null
	 * @return stream delivering the filtered content
	 * @throws IOException
	 *             the filter command could not be started or exited non-zero
	 */
	private InputStream filterClean(InputStream in, OperationType opType)
			throws IOException {
		in = handleAutoCRLF(in, opType);
		String filterCommand = getCleanFilterCommand();
		if (filterCommand != null) {
			// Run the configured clean filter in a shell, with GIT_DIR set
			// and the working tree as current directory, feeding it the
			// (EOL-converted) content on stdin.
			FS fs = repository.getFS();
			ProcessBuilder filterProcessBuilder = fs.runInShell(filterCommand,
					new String[0]);
			filterProcessBuilder.directory(repository.getWorkTree());
			filterProcessBuilder.environment().put(Constants.GIT_DIR_KEY,
					repository.getDirectory().getAbsolutePath());
			ExecutionResult result;
			try {
				result = fs.execute(filterProcessBuilder, in);
			} catch (IOException | InterruptedException e) {
				// Wrap so callers catching IOException still see the filter
				// failure details (command and path) as the cause.
				throw new IOException(new FilterFailedException(e,
						filterCommand, getEntryPathString()));
			}
			int rc = result.getRc();
			if (rc != 0) {
				// Non-zero exit: include truncated stdout/stderr captures in
				// the reported failure.
				throw new IOException(new FilterFailedException(rc,
						filterCommand, getEntryPathString(),
						result.getStdout().toByteArray(MAX_EXCEPTION_TEXT_SIZE),
						RawParseUtils.decode(result.getStderr()
								.toByteArray(MAX_EXCEPTION_TEXT_SIZE))));
			}
			return result.getStdout().openInputStream();
		}
		return in;
	}
487 
	/**
	 * Wrap the stream with the EOL conversion appropriate for the current
	 * entry and operation type.
	 *
	 * @param in
	 *            raw content stream
	 * @param opType
	 *            operation type selecting the EOL stream type; may be null
	 * @return the possibly wrapped stream
	 * @throws IOException
	 *             the EOL stream type cannot be determined
	 */
	private InputStream handleAutoCRLF(InputStream in, OperationType opType)
			throws IOException {
		return EolStreamTypeUtil.wrapInputStream(in, getEolStreamType(opType));
	}
492 
	/**
	 * Returns the working tree options used by this iterator.
	 *
	 * @return working tree options
	 */
	public WorkingTreeOptions getOptions() {
		return state.options;
	}

	@Override
	public int idOffset() {
		// Non-zero only when the id was borrowed from a DirCacheIterator
		// inside idBuffer().
		return contentIdOffset;
	}
506 
	@Override
	public void reset() {
		// Rewind to the first entry; nothing to do if already there.
		if (!first()) {
			ptr = 0;
			if (!eof())
				parseEntry();
		}
	}

	@Override
	public boolean first() {
		return ptr == 0;
	}

	@Override
	public boolean eof() {
		return ptr == entryCnt;
	}

	@Override
	public void next(final int delta) throws CorruptObjectException {
		ptr += delta;
		if (!eof()) {
			parseEntry();
		}
	}

	@Override
	public void back(final int delta) throws CorruptObjectException {
		// Callers never back up past the first entry, so no eof()-style
		// guard is needed here.
		ptr -= delta;
		parseEntry();
	}
539 
540 	private void parseEntry() {
541 		final Entry e = entries[ptr];
542 		mode = e.getMode().getBits();
543 
544 		final int nameLen = e.encodedNameLen;
545 		ensurePathCapacity(pathOffset + nameLen, pathOffset);
546 		System.arraycopy(e.encodedName, 0, path, pathOffset, nameLen);
547 		pathLen = pathOffset + nameLen;
548 		canonLen = -1;
549 		cleanFilterCommandHolder = null;
550 		eolStreamTypeHolder = null;
551 	}
552 
	/**
	 * Get the raw byte length of this entry.
	 *
	 * @return size of this file, in bytes.
	 */
	public long getEntryLength() {
		return current().getLength();
	}

	/**
	 * Get the filtered input length of this entry
	 *
	 * @return size of the content, in bytes
	 * @throws IOException
	 *             the content cannot be read or filtered
	 */
	public long getEntryContentLength() throws IOException {
		if (canonLen == -1) {
			long rawLen = getEntryLength();
			if (rawLen == 0)
				canonLen = 0;
			// NOTE(review): canonLen is unconditionally recomputed by
			// possiblyFilteredInputStream() below, even when rawLen == 0,
			// making the assignment above appear redundant; confirm whether
			// an early return was intended for empty files.
			InputStream is = current().openInputStream();
			try {
				// canonLen gets updated here
				possiblyFilteredInputStream(current(), is, current()
						.getLength());
			} finally {
				safeClose(is);
			}
		}
		return canonLen;
	}

	/**
	 * Get the last modified time of this entry.
	 *
	 * @return last modified time of this file, in milliseconds since the epoch
	 *         (Jan 1, 1970 UTC).
	 */
	public long getEntryLastModified() {
		return current().getLastModified();
	}
594 
595 	/**
596 	 * Obtain an input stream to read the file content.
597 	 * <p>
598 	 * Efficient implementations are not required. The caller will usually
599 	 * obtain the stream only once per entry, if at all.
600 	 * <p>
601 	 * The input stream should not use buffering if the implementation can avoid
602 	 * it. The caller will buffer as necessary to perform efficient block IO
603 	 * operations.
604 	 * <p>
605 	 * The caller will close the stream once complete.
606 	 *
607 	 * @return a stream to read from the file.
608 	 * @throws IOException
609 	 *             the file could not be opened for reading.
610 	 */
611 	public InputStream openEntryStream() throws IOException {
612 		InputStream rawis = current().openInputStream();
613 		if (getCleanFilterCommand() == null
614 				&& getEolStreamType() == EolStreamType.DIRECT)
615 			return rawis;
616 		else
617 			return filterClean(rawis);
618 	}
619 
	/**
	 * Determine if the current entry path is ignored by an ignore rule.
	 *
	 * @return true if the entry was ignored by an ignore rule file.
	 * @throws IOException
	 *             a relevant ignore rule file exists but cannot be read.
	 */
	public boolean isEntryIgnored() throws IOException {
		return isEntryIgnored(pathLen);
	}

	/**
	 * Determine if the entry path is ignored by an ignore rule.
	 *
	 * @param pLen
	 *            the length of the path in the path buffer.
	 * @return true if the entry is ignored by an ignore rule.
	 * @throws IOException
	 *             a relevant ignore rule file exists but cannot be read.
	 */
	protected boolean isEntryIgnored(final int pLen) throws IOException {
		return isEntryIgnored(pLen, mode, false);
	}
643 
	/**
	 * Determine if the entry path is ignored by an ignore rule. Consider
	 * possible rule negation from child iterator.
	 *
	 * @param pLen
	 *            the length of the path in the path buffer.
	 * @param fileMode
	 *            the original iterator file mode
	 * @param negatePrevious
	 *            true if the previous matching iterator rule was negation
	 * @return true if the entry is ignored by an ignore rule.
	 * @throws IOException
	 *             a relevant ignore rule file exists but cannot be read.
	 */
	private boolean isEntryIgnored(final int pLen, int fileMode,
			boolean negatePrevious)
			throws IOException {
		IgnoreNode rules = getIgnoreNode();
		if (rules != null) {
			// The ignore code wants path to start with a '/' if possible.
			// If we have the '/' in our path buffer because we are inside
			// a subdirectory include it in the range we convert to string.
			//
			int pOff = pathOffset;
			if (0 < pOff)
				pOff--;
			String p = TreeWalk.pathOf(path, pOff, pLen);
			switch (rules.isIgnored(p, FileMode.TREE.equals(fileMode),
					negatePrevious)) {
			case IGNORED:
				return true;
			case NOT_IGNORED:
				return false;
			case CHECK_PARENT:
				negatePrevious = false;
				break;
			case CHECK_PARENT_NEGATE_FIRST_MATCH:
				negatePrevious = true;
				break;
			}
		}
		// No decision at this level: delegate upward to the parent
		// directory's ignore rules, if any.
		if (parent instanceof WorkingTreeIterator)
			return ((WorkingTreeIterator) parent).isEntryIgnored(pLen, fileMode,
					negatePrevious);
		return false;
	}
690 
	/**
	 * Get the ignore rules of the current directory, lazily parsing a
	 * per-directory .gitignore on first use.
	 *
	 * @return the ignore node, or null when no rules apply here
	 * @throws IOException
	 *             the .gitignore file exists but cannot be read
	 */
	private IgnoreNode getIgnoreNode() throws IOException {
		if (ignoreNode instanceof PerDirectoryIgnoreNode)
			ignoreNode = ((PerDirectoryIgnoreNode) ignoreNode).load();
		return ignoreNode;
	}

	/**
	 * Retrieves the {@link AttributesNode} for the current entry.
	 *
	 * @return {@link AttributesNode} for the current entry.
	 * @throws IOException
	 *             if an error is raised while parsing the .gitattributes file
	 * @since 3.7
	 */
	public AttributesNode getEntryAttributesNode() throws IOException {
		if (attributesNode instanceof PerDirectoryAttributesNode)
			attributesNode = ((PerDirectoryAttributesNode) attributesNode)
					.load();
		return attributesNode;
	}
711 
712 	private static final Comparator<Entry> ENTRY_CMP = new Comparator<Entry>() {
713 		public int compare(Entry a, Entry b) {
714 			return Paths.compare(
715 					a.encodedName, 0, a.encodedNameLen, a.getMode().getBits(),
716 					b.encodedName, 0, b.encodedNameLen, b.getMode().getBits());
717 		}
718 	};
719 
	/**
	 * Constructor helper.
	 *
	 * @param list
	 *            files in the subtree of the work tree this iterator operates
	 *            on
	 */
	protected void init(final Entry[] list) {
		// Filter out nulls, . and .. as these are not valid tree entries,
		// also cache the encoded forms of the path names for efficient use
		// later on during sorting and iteration.
		//
		entries = list;
		int i, o;

		final CharsetEncoder nameEncoder = state.nameEncoder;
		for (i = 0, o = 0; i < entries.length; i++) {
			final Entry e = entries[i];
			if (e == null)
				continue;
			final String name = e.getName();
			if (".".equals(name) || "..".equals(name)) //$NON-NLS-1$ //$NON-NLS-2$
				continue;
			if (Constants.DOT_GIT.equals(name))
				continue;
			// .gitignore and .gitattributes stay in the entry list but are
			// also remembered for per-directory rule parsing.
			if (Constants.DOT_GIT_IGNORE.equals(name))
				ignoreNode = new PerDirectoryIgnoreNode(e);
			if (Constants.DOT_GIT_ATTRIBUTES.equals(name))
				attributesNode = new PerDirectoryAttributesNode(e);
			// Compact surviving entries toward the front of the array.
			if (i != o)
				entries[o] = e;
			e.encodeName(nameEncoder);
			o++;
		}
		entryCnt = o;
		// Sort into canonical git tree order.
		Arrays.sort(entries, 0, entryCnt, ENTRY_CMP);

		contentIdFromPtr = -1;
		ptr = 0;
		if (!eof())
			parseEntry();
		else if (pathLen == 0) // see bug 445363
			pathLen = pathOffset;
	}
764 
	/**
	 * Obtain the current entry from this iterator.
	 *
	 * @return the currently selected entry.
	 */
	protected Entry current() {
		return entries[ptr];
	}
773 
	/**
	 * The result of a metadata-comparison between the current entry and a
	 * {@link DirCacheEntry}
	 */
	public enum MetadataDiff {
		/**
		 * The entries are equal by metaData (mode, length,
		 * modification-timestamp) or the <code>assumeValid</code> attribute of
		 * the index entry is set
		 */
		EQUAL,

		/**
		 * The entries are not equal by metaData (mode, length) or the
		 * <code>isUpdateNeeded</code> attribute of the index entry is set
		 */
		DIFFER_BY_METADATA,

		/** index entry is smudged - can't use that entry for comparison */
		SMUDGED,

		/**
		 * The entries are equal by metaData (mode, length) but differ by
		 * modification-timestamp.
		 */
		DIFFER_BY_TIMESTAMP
	}
801 
802 	/**
803 	 * Is the file mode of the current entry different than the given raw mode?
804 	 *
805 	 * @param rawMode
806 	 * @return true if different, false otherwise
807 	 */
808 	public boolean isModeDifferent(final int rawMode) {
809 		// Determine difference in mode-bits of file and index-entry. In the
810 		// bitwise presentation of modeDiff we'll have a '1' when the two modes
811 		// differ at this position.
812 		int modeDiff = getEntryRawMode() ^ rawMode;
813 
814 		if (modeDiff == 0)
815 			return false;
816 
817 		// Do not rely on filemode differences in case of symbolic links
818 		if (getOptions().getSymLinks() == SymLinks.FALSE)
819 			if (FileMode.SYMLINK.equals(rawMode))
820 				return false;
821 
822 		// Ignore the executable file bits if WorkingTreeOptions tell me to
823 		// do so. Ignoring is done by setting the bits representing a
824 		// EXECUTABLE_FILE to '0' in modeDiff
825 		if (!state.options.isFileMode())
826 			modeDiff &= ~FileMode.EXECUTABLE_FILE.getBits();
827 		return modeDiff != 0;
828 	}
829 
	/**
	 * Compare the metadata (mode, length, modification-timestamp) of the
	 * current entry and a {@link DirCacheEntry}
	 *
	 * @param entry
	 *            the {@link DirCacheEntry} to compare with
	 * @return a {@link MetadataDiff} which tells whether and how the entries
	 *         metadata differ
	 */
	public MetadataDiff compareMetadata(DirCacheEntry entry) {
		if (entry.isAssumeValid())
			return MetadataDiff.EQUAL;

		if (entry.isUpdateNeeded())
			return MetadataDiff.DIFFER_BY_METADATA;

		if (!entry.isSmudged() && entry.getLength() != (int) getEntryLength())
			return MetadataDiff.DIFFER_BY_METADATA;

		if (isModeDifferent(entry.getRawMode()))
			return MetadataDiff.DIFFER_BY_METADATA;

		// Git under windows only stores seconds so we round the timestamp
		// Java gives us if it looks like the timestamp in index is seconds
		// only. Otherwise we compare the timestamp at millisecond precision,
		// unless core.checkstat is set to "minimal", in which case we only
		// compare the whole second part.
		long cacheLastModified = entry.getLastModified();
		long fileLastModified = getEntryLastModified();
		long lastModifiedMillis = fileLastModified % 1000;
		long cacheMillis = cacheLastModified % 1000;
		if (getOptions().getCheckStat() == CheckStat.MINIMAL) {
			fileLastModified = fileLastModified - lastModifiedMillis;
			cacheLastModified = cacheLastModified - cacheMillis;
		} else if (cacheMillis == 0)
			fileLastModified = fileLastModified - lastModifiedMillis;
		// Some Java version on Linux return whole seconds only even when
		// the file systems supports more precision.
		else if (lastModifiedMillis == 0)
			cacheLastModified = cacheLastModified - cacheMillis;

		if (fileLastModified != cacheLastModified)
			return MetadataDiff.DIFFER_BY_TIMESTAMP;
		else if (!entry.isSmudged())
			// The file is clean when you look at timestamps.
			return MetadataDiff.EQUAL;
		else
			return MetadataDiff.SMUDGED;
	}
879 
	/**
	 * Checks whether this entry differs from a given entry from the
	 * {@link DirCache}.
	 *
	 * File status information is used and if status is same we consider the
	 * file identical to the state in the working directory. Native git uses
	 * more stat fields than we have accessible in Java.
	 *
	 * @param entry
	 *            the entry from the dircache we want to compare against
	 * @param forceContentCheck
	 *            True if the actual file content should be checked if
	 *            modification time differs.
	 * @param reader
	 *            access to repository objects if necessary. Should not be null.
	 * @return true if content is most likely different.
	 * @throws IOException
	 *             repository objects cannot be accessed
	 * @since 3.3
	 */
	public boolean isModified(DirCacheEntry entry, boolean forceContentCheck,
			ObjectReader reader) throws IOException {
		if (entry == null)
			return !FileMode.MISSING.equals(getEntryFileMode());
		MetadataDiff diff = compareMetadata(entry);
		switch (diff) {
		case DIFFER_BY_TIMESTAMP:
			if (forceContentCheck)
				// But we are told to look at content even though timestamps
				// tell us about modification
				return contentCheck(entry, reader);
			else
				// We are told to assume a modification if timestamps differs
				return true;
		case SMUDGED:
			// The file is clean by timestamps but the entry was smudged.
			// Lets do a content check
			return contentCheck(entry, reader);
		case EQUAL:
			return false;
		case DIFFER_BY_METADATA:
			// Symlinks are content-checked because filemode differences on
			// links are not reliable (see isModeDifferent).
			if (mode == FileMode.SYMLINK.getBits())
				return contentCheck(entry, reader);
			return true;
		default:
			throw new IllegalStateException(MessageFormat.format(
					JGitText.get().unexpectedCompareResult, diff.name()));
		}
	}
928 
	/**
	 * Get the file mode to use for the current entry when it is to be updated
	 * in the index.
	 *
	 * @param indexIter
	 *            {@link DirCacheIterator} positioned at the same entry as this
	 *            iterator or null if no {@link DirCacheIterator} is available
	 *            at this iterator's current entry
	 * @return index file mode
	 */
	public FileMode getIndexFileMode(final DirCacheIterator indexIter) {
		final FileMode wtMode = getEntryFileMode();
		if (indexIter == null) {
			return wtMode;
		}
		final FileMode iMode = indexIter.getEntryFileMode();
		if (getOptions().isFileMode() && iMode != FileMode.GITLINK && iMode != FileMode.TREE) {
			// File modes are trustworthy: believe the working tree.
			return wtMode;
		}
		if (!getOptions().isFileMode()) {
			// core.fileMode is false: the executable bit reported by the
			// filesystem is unreliable, keep the mode recorded in the index.
			if (FileMode.REGULAR_FILE == wtMode
					&& FileMode.EXECUTABLE_FILE == iMode) {
				return iMode;
			}
			if (FileMode.EXECUTABLE_FILE == wtMode
					&& FileMode.REGULAR_FILE == iMode) {
				return iMode;
			}
		}
		// A GITLINK/TREE mismatch between index and working tree keeps the
		// index's mode — presumably so submodule directories are not
		// re-recorded as plain trees (and vice versa); confirm with callers.
		if (FileMode.GITLINK == iMode
				&& FileMode.TREE == wtMode) {
			return iMode;
		}
		if (FileMode.TREE == iMode
				&& FileMode.GITLINK == wtMode) {
			return iMode;
		}
		return wtMode;
	}
968 
969 	/**
970 	 * Compares the entries content with the content in the filesystem.
971 	 * Unsmudges the entry when it is detected that it is clean.
972 	 *
973 	 * @param entry
974 	 *            the entry to be checked
975 	 * @param reader
976 	 *            acccess to repository data if necessary
977 	 * @return <code>true</code> if the content doesn't match,
978 	 *         <code>false</code> if it matches
979 	 * @throws IOException
980 	 */
981 	private boolean contentCheck(DirCacheEntry entry, ObjectReader reader)
982 			throws IOException {
983 		if (getEntryObjectId().equals(entry.getObjectId())) {
984 			// Content has not changed
985 
986 			// We know the entry can't be racily clean because it's still clean.
987 			// Therefore we unsmudge the entry!
988 			// If by any chance we now unsmudge although we are still in the
989 			// same time-slot as the last modification to the index file the
990 			// next index write operation will smudge again.
991 			// Caution: we are unsmudging just by setting the length of the
992 			// in-memory entry object. It's the callers task to detect that we
993 			// have modified the entry and to persist the modified index.
994 			entry.setLength((int) getEntryLength());
995 
996 			return false;
997 		} else {
998 			if (mode == FileMode.SYMLINK.getBits())
999 				return !new File(readContentAsNormalizedString(current()))
1000 						.equals(new File((readContentAsNormalizedString(entry,
1001 								reader))));
1002 			// Content differs: that's a real change, perhaps
1003 			if (reader == null) // deprecated use, do no further checks
1004 				return true;
1005 
1006 			switch (getEolStreamType()) {
1007 			case DIRECT:
1008 				return true;
1009 			default:
1010 				try {
1011 					ObjectLoader loader = reader.open(entry.getObjectId());
1012 					if (loader == null)
1013 						return true;
1014 
1015 					// We need to compute the length, but only if it is not
1016 					// a binary stream.
1017 					long dcInLen;
1018 					try (InputStream dcIn = new AutoLFInputStream(
1019 							loader.openStream(), true,
1020 							true /* abort if binary */)) {
1021 						dcInLen = computeLength(dcIn);
1022 					} catch (AutoLFInputStream.IsBinaryException e) {
1023 						return true;
1024 					}
1025 
1026 					try (InputStream dcIn = new AutoLFInputStream(
1027 							loader.openStream(), true)) {
1028 						byte[] autoCrLfHash = computeHash(dcIn, dcInLen);
1029 						boolean changed = getEntryObjectId()
1030 								.compareTo(autoCrLfHash, 0) != 0;
1031 						return changed;
1032 					}
1033 				} catch (IOException e) {
1034 					return true;
1035 				}
1036 			}
1037 		}
1038 	}
1039 
1040 	private static String readContentAsNormalizedString(DirCacheEntry entry,
1041 			ObjectReader reader) throws MissingObjectException, IOException {
1042 		ObjectLoader open = reader.open(entry.getObjectId());
1043 		byte[] cachedBytes = open.getCachedBytes();
1044 		return FS.detect().normalize(RawParseUtils.decode(cachedBytes));
1045 	}
1046 
1047 	private static String readContentAsNormalizedString(Entry entry) throws IOException {
1048 		long length = entry.getLength();
1049 		byte[] content = new byte[(int) length];
1050 		InputStream is = entry.openInputStream();
1051 		IO.readFully(is, content, 0, (int) length);
1052 		return FS.detect().normalize(RawParseUtils.decode(content));
1053 	}
1054 
1055 	private static long computeLength(InputStream in) throws IOException {
1056 		// Since we only care about the length, use skip. The stream
1057 		// may be able to more efficiently wade through its data.
1058 		//
1059 		long length = 0;
1060 		for (;;) {
1061 			long n = in.skip(1 << 20);
1062 			if (n <= 0)
1063 				break;
1064 			length += n;
1065 		}
1066 		return length;
1067 	}
1068 
1069 	private byte[] computeHash(InputStream in, long length) throws IOException {
1070 		final MessageDigest contentDigest = state.contentDigest;
1071 		final byte[] contentReadBuffer = state.contentReadBuffer;
1072 
1073 		contentDigest.reset();
1074 		contentDigest.update(hblob);
1075 		contentDigest.update((byte) ' ');
1076 
1077 		long sz = length;
1078 		if (sz == 0) {
1079 			contentDigest.update((byte) '0');
1080 		} else {
1081 			final int bufn = contentReadBuffer.length;
1082 			int p = bufn;
1083 			do {
1084 				contentReadBuffer[--p] = digits[(int) (sz % 10)];
1085 				sz /= 10;
1086 			} while (sz > 0);
1087 			contentDigest.update(contentReadBuffer, p, bufn - p);
1088 		}
1089 		contentDigest.update((byte) 0);
1090 
1091 		for (;;) {
1092 			final int r = in.read(contentReadBuffer);
1093 			if (r <= 0)
1094 				break;
1095 			contentDigest.update(contentReadBuffer, 0, r);
1096 			sz += r;
1097 		}
1098 		if (sz != length)
1099 			return zeroid;
1100 		return contentDigest.digest();
1101 	}
1102 
1103 	/** A single entry within a working directory tree. */
1104 	protected static abstract class Entry {
1105 		byte[] encodedName;
1106 
1107 		int encodedNameLen;
1108 
1109 		void encodeName(final CharsetEncoder enc) {
1110 			final ByteBuffer b;
1111 			try {
1112 				b = enc.encode(CharBuffer.wrap(getName()));
1113 			} catch (CharacterCodingException e) {
1114 				// This should so never happen.
1115 				throw new RuntimeException(MessageFormat.format(
1116 						JGitText.get().unencodeableFile, getName()));
1117 			}
1118 
1119 			encodedNameLen = b.limit();
1120 			if (b.hasArray() && b.arrayOffset() == 0)
1121 				encodedName = b.array();
1122 			else
1123 				b.get(encodedName = new byte[encodedNameLen]);
1124 		}
1125 
1126 		public String toString() {
1127 			return getMode().toString() + " " + getName(); //$NON-NLS-1$
1128 		}
1129 
1130 		/**
1131 		 * Get the type of this entry.
1132 		 * <p>
1133 		 * <b>Note: Efficient implementation required.</b>
1134 		 * <p>
1135 		 * The implementation of this method must be efficient. If a subclass
1136 		 * needs to compute the value they should cache the reference within an
1137 		 * instance member instead.
1138 		 *
1139 		 * @return a file mode constant from {@link FileMode}.
1140 		 */
1141 		public abstract FileMode getMode();
1142 
1143 		/**
1144 		 * Get the byte length of this entry.
1145 		 * <p>
1146 		 * <b>Note: Efficient implementation required.</b>
1147 		 * <p>
1148 		 * The implementation of this method must be efficient. If a subclass
1149 		 * needs to compute the value they should cache the reference within an
1150 		 * instance member instead.
1151 		 *
1152 		 * @return size of this file, in bytes.
1153 		 */
1154 		public abstract long getLength();
1155 
1156 		/**
1157 		 * Get the last modified time of this entry.
1158 		 * <p>
1159 		 * <b>Note: Efficient implementation required.</b>
1160 		 * <p>
1161 		 * The implementation of this method must be efficient. If a subclass
1162 		 * needs to compute the value they should cache the reference within an
1163 		 * instance member instead.
1164 		 *
1165 		 * @return time since the epoch (in ms) of the last change.
1166 		 */
1167 		public abstract long getLastModified();
1168 
1169 		/**
1170 		 * Get the name of this entry within its directory.
1171 		 * <p>
1172 		 * Efficient implementations are not required. The caller will obtain
1173 		 * the name only once and cache it once obtained.
1174 		 *
1175 		 * @return name of the entry.
1176 		 */
1177 		public abstract String getName();
1178 
1179 		/**
1180 		 * Obtain an input stream to read the file content.
1181 		 * <p>
1182 		 * Efficient implementations are not required. The caller will usually
1183 		 * obtain the stream only once per entry, if at all.
1184 		 * <p>
1185 		 * The input stream should not use buffering if the implementation can
1186 		 * avoid it. The caller will buffer as necessary to perform efficient
1187 		 * block IO operations.
1188 		 * <p>
1189 		 * The caller will close the stream once complete.
1190 		 *
1191 		 * @return a stream to read from the file.
1192 		 * @throws IOException
1193 		 *             the file could not be opened for reading.
1194 		 */
1195 		public abstract InputStream openInputStream() throws IOException;
1196 	}
1197 
1198 	/** Magic type indicating we know rules exist, but they aren't loaded. */
1199 	private static class PerDirectoryIgnoreNode extends IgnoreNode {
1200 		final Entry entry;
1201 
1202 		PerDirectoryIgnoreNode(Entry entry) {
1203 			super(Collections.<FastIgnoreRule> emptyList());
1204 			this.entry = entry;
1205 		}
1206 
1207 		IgnoreNode load() throws IOException {
1208 			IgnoreNode r = new IgnoreNode();
1209 			InputStream in = entry.openInputStream();
1210 			try {
1211 				r.parse(in);
1212 			} finally {
1213 				in.close();
1214 			}
1215 			return r.getRules().isEmpty() ? null : r;
1216 		}
1217 	}
1218 
1219 	/** Magic type indicating there may be rules for the top level. */
1220 	private static class RootIgnoreNode extends PerDirectoryIgnoreNode {
1221 		final Repository repository;
1222 
1223 		RootIgnoreNode(Entry entry, Repository repository) {
1224 			super(entry);
1225 			this.repository = repository;
1226 		}
1227 
1228 		@Override
1229 		IgnoreNode load() throws IOException {
1230 			IgnoreNode r;
1231 			if (entry != null) {
1232 				r = super.load();
1233 				if (r == null)
1234 					r = new IgnoreNode();
1235 			} else {
1236 				r = new IgnoreNode();
1237 			}
1238 
1239 			FS fs = repository.getFS();
1240 			String path = repository.getConfig().get(CoreConfig.KEY)
1241 					.getExcludesFile();
1242 			if (path != null) {
1243 				File excludesfile;
1244 				if (path.startsWith("~/")) //$NON-NLS-1$
1245 					excludesfile = fs.resolve(fs.userHome(), path.substring(2));
1246 				else
1247 					excludesfile = fs.resolve(null, path);
1248 				loadRulesFromFile(r, excludesfile);
1249 			}
1250 
1251 			File exclude = fs.resolve(repository.getDirectory(),
1252 					Constants.INFO_EXCLUDE);
1253 			loadRulesFromFile(r, exclude);
1254 
1255 			return r.getRules().isEmpty() ? null : r;
1256 		}
1257 
1258 		private static void loadRulesFromFile(IgnoreNode r, File exclude)
1259 				throws FileNotFoundException, IOException {
1260 			if (FS.DETECTED.exists(exclude)) {
1261 				FileInputStream in = new FileInputStream(exclude);
1262 				try {
1263 					r.parse(in);
1264 				} finally {
1265 					in.close();
1266 				}
1267 			}
1268 		}
1269 	}
1270 
1271 	/** Magic type indicating we know rules exist, but they aren't loaded. */
1272 	private static class PerDirectoryAttributesNode extends AttributesNode {
1273 		final Entry entry;
1274 
1275 		PerDirectoryAttributesNode(Entry entry) {
1276 			super(Collections.<AttributesRule> emptyList());
1277 			this.entry = entry;
1278 		}
1279 
1280 		AttributesNode load() throws IOException {
1281 			AttributesNode r = new AttributesNode();
1282 			InputStream in = entry.openInputStream();
1283 			try {
1284 				r.parse(in);
1285 			} finally {
1286 				in.close();
1287 			}
1288 			return r.getRules().isEmpty() ? null : r;
1289 		}
1290 	}
1291 
1292 
1293 	private static final class IteratorState {
1294 		/** Options used to process the working tree. */
1295 		final WorkingTreeOptions options;
1296 
1297 		/** File name character encoder. */
1298 		final CharsetEncoder nameEncoder;
1299 
1300 		/** Digest computer for {@link #contentId} computations. */
1301 		MessageDigest contentDigest;
1302 
1303 		/** Buffer used to perform {@link #contentId} computations. */
1304 		byte[] contentReadBuffer;
1305 
1306 		/** TreeWalk with a (supposedly) matching DirCacheIterator. */
1307 		TreeWalk walk;
1308 
1309 		/** Position of the matching {@link DirCacheIterator}. */
1310 		int dirCacheTree;
1311 
1312 		IteratorState(WorkingTreeOptions options) {
1313 			this.options = options;
1314 			this.nameEncoder = Constants.CHARSET.newEncoder();
1315 		}
1316 
1317 		void initializeDigestAndReadBuffer() {
1318 			if (contentDigest == null) {
1319 				contentDigest = Constants.newMessageDigest();
1320 				contentReadBuffer = new byte[BUFFER_SIZE];
1321 			}
1322 		}
1323 	}
1324 
1325 	/**
1326 	 * @return the clean filter command for the current entry or
1327 	 *         <code>null</code> if no such command is defined
1328 	 * @throws IOException
1329 	 * @since 4.2
1330 	 */
1331 	public String getCleanFilterCommand() throws IOException {
1332 		if (cleanFilterCommandHolder == null) {
1333 			String cmd = null;
1334 			if (state.walk != null) {
1335 				cmd = state.walk
1336 						.getFilterCommand(Constants.ATTR_FILTER_TYPE_CLEAN);
1337 			}
1338 			cleanFilterCommandHolder = new Holder<String>(cmd);
1339 		}
1340 		return cleanFilterCommandHolder.get();
1341 	}
1342 
1343 	/**
1344 	 * @return the eol stream type for the current entry or <code>null</code> if
1345 	 *         it cannot be determined. When state or state.walk is null or the
1346 	 *         {@link TreeWalk} is not based on a {@link Repository} then null
1347 	 *         is returned.
1348 	 * @throws IOException
1349 	 * @since 4.3
1350 	 */
1351 	public EolStreamType getEolStreamType() throws IOException {
1352 		return getEolStreamType(null);
1353 	}
1354 
1355 	/**
1356 	 * @param opType
1357 	 *            The operationtype (checkin/checkout) which should be used
1358 	 * @return the eol stream type for the current entry or <code>null</code> if
1359 	 *         it cannot be determined. When state or state.walk is null or the
1360 	 *         {@link TreeWalk} is not based on a {@link Repository} then null
1361 	 *         is returned.
1362 	 * @throws IOException
1363 	 */
1364 	private EolStreamType getEolStreamType(OperationType opType)
1365 			throws IOException {
1366 		if (eolStreamTypeHolder == null) {
1367 			EolStreamType type=null;
1368 			if (state.walk != null) {
1369 				if (opType != null) {
1370 					type = state.walk.getEolStreamType(opType);
1371 				} else {
1372 					type=state.walk.getEolStreamType();
1373 				}
1374 			} else {
1375 				switch (getOptions().getAutoCRLF()) {
1376 				case FALSE:
1377 					type = EolStreamType.DIRECT;
1378 					break;
1379 				case TRUE:
1380 				case INPUT:
1381 					type = EolStreamType.AUTO_LF;
1382 					break;
1383 				}
1384 			}
1385 			eolStreamTypeHolder = new Holder<EolStreamType>(type);
1386 		}
1387 		return eolStreamTypeHolder.get();
1388 	}
1389 }