diff --git a/Libraries/NGit/ICSharpCode.SharpZipLib.dll b/Libraries/NGit/ICSharpCode.SharpZipLib.dll new file mode 100644 index 0000000..6c6a5d4 Binary files /dev/null and b/Libraries/NGit/ICSharpCode.SharpZipLib.dll differ diff --git a/Libraries/NGit/Mono.Security.dll b/Libraries/NGit/Mono.Security.dll new file mode 100644 index 0000000..df88d59 Binary files /dev/null and b/Libraries/NGit/Mono.Security.dll differ diff --git a/Libraries/NGit/NGit.dll b/Libraries/NGit/NGit.dll new file mode 100644 index 0000000..8ea8b3d Binary files /dev/null and b/Libraries/NGit/NGit.dll differ diff --git a/Libraries/NGit/NGit.xml b/Libraries/NGit/NGit.xml new file mode 100644 index 0000000..a587235 --- /dev/null +++ b/Libraries/NGit/NGit.xml @@ -0,0 +1,45097 @@ + + + + NGit + + + + + A prefix abbreviation of an + ObjectId + . +

+ Sometimes Git produces abbreviated SHA-1 strings, using sufficient leading + digits from the ObjectId name to still be unique within the repository the + string was generated from. These ids are likely to be unique for a useful + period of time, especially if they contain at least 6-10 hex digits. +

+ This class converts the hex string into a binary form, to make it more + efficient for matching against an object. +

+
+ + Test a string of characters to verify it is a hex format. + + Test a string of characters to verify it is a hex format. +

+ If true the string can be parsed with + FromString(string) + . + + the string to test. + true if the string can converted into an AbbreviatedObjectId. + + +

Convert an AbbreviatedObjectId from hex characters (US-ASCII). + Convert an AbbreviatedObjectId from hex characters (US-ASCII). + the US-ASCII buffer to read from. + position to read the first character from. + + one past the last position to read (end-offset is + the length of the string). + + the converted object id. +
+ + + Convert an AbbreviatedObjectId from an + AnyObjectId + . +

+ This method copies over all bits of the Id, and is therefore complete + (see + IsComplete() + ). +

+ + the + ObjectId + to convert from. + + the converted object id. +
+ + Convert an AbbreviatedObjectId from hex characters. + Convert an AbbreviatedObjectId from hex characters. + the string to read from. Must be <= 40 characters. + the converted object id. + + + Number of half-bytes used by this id. + Number of half-bytes used by this id. + + + + a complete ObjectId; null if + IsComplete() + is false + + + + Compares this abbreviation to a full object id. + Compares this abbreviation to a full object id. + the other object id. + + <0 if this abbreviation names an object that is less than + other; 0 if this abbreviation exactly matches the + first + Length() + digits of other.name(); + >0 if this abbreviation names an object that is after + other. + + + + Compare this abbreviation to a network-byte-order ObjectId. + Compare this abbreviation to a network-byte-order ObjectId. + array containing the other ObjectId in network byte order. + + position within + bs + to start the compare at. At least + 20 bytes, starting at this position are required. + + + <0 if this abbreviation names an object that is less than + other; 0 if this abbreviation exactly matches the + first + Length() + digits of other.name(); + >0 if this abbreviation names an object that is after + other. + + + + Compare this abbreviation to a network-byte-order ObjectId. + Compare this abbreviation to a network-byte-order ObjectId. + array containing the other ObjectId in network byte order. + + position within + bs + to start the compare at. At least 5 + ints, starting at this position are required. + + + <0 if this abbreviation names an object that is less than + other; 0 if this abbreviation exactly matches the + first + Length() + digits of other.name(); + >0 if this abbreviation names an object that is after + other. + + + + number of hex digits appearing in this id + + + true if this ObjectId is actually a complete id. + + + value for a fan-out style map, only valid of length >= 2. + + + string form of the abbreviation, in lower case hexadecimal. 
+ + + A (possibly mutable) SHA-1 abstraction. + + A (possibly mutable) SHA-1 abstraction. +

+ If this is an instance of + MutableObjectId + the concept of equality + with this instance can alter at any time, if this instance is modified to + represent a different object name. + + + +

Compare to object identifier byte sequences for equality. + Compare to object identifier byte sequences for equality. + the first identifier to compare. Must not be null. + the second identifier to compare. Must not be null. + true if the two identifiers are the same. +
+ + Get any byte from the ObjectId. + + Get any byte from the ObjectId. + Callers hard-coding + getByte(0) + should instead use the much faster + special case variant + FirstByte() + . + + + index of the byte to obtain from the raw form of the ObjectId. + Must be in range [0, + Constants.OBJECT_ID_LENGTH + ). + + + the value of the requested byte at + index + . Returned values + are unsigned and thus are in the range [0,255] rather than the + signed byte range of [-128, 127]. + + + index + is less than 0, equal to + Constants.OBJECT_ID_LENGTH + , or greater than + Constants.OBJECT_ID_LENGTH + . + + + + Compare this ObjectId to another and obtain a sort ordering. + Compare this ObjectId to another and obtain a sort ordering. + the other id to compare to. Must not be null. + + < 0 if this id comes before other; 0 if this id is equal to + other; > 0 if this id comes after other. + + + + Compare this ObjectId to a network-byte-order ObjectId. + Compare this ObjectId to a network-byte-order ObjectId. + array containing the other ObjectId in network byte order. + + position within + bs + to start the compare at. At least + 20 bytes, starting at this position are required. + + + a negative integer, zero, or a positive integer as this object is + less than, equal to, or greater than the specified object. + + + + Compare this ObjectId to a network-byte-order ObjectId. + Compare this ObjectId to a network-byte-order ObjectId. + array containing the other ObjectId in network byte order. + + position within + bs + to start the compare at. At least 5 + integers, starting at this position are required. + + + a negative integer, zero, or a positive integer as this object is + less than, equal to, or greater than the specified object. + + + + Tests if this ObjectId starts with the given abbreviation. + Tests if this ObjectId starts with the given abbreviation. + the abbreviation. + true if this ObjectId begins with the abbreviation; else false. 
+ + + Determine if this ObjectId has exactly the same value as another. + Determine if this ObjectId has exactly the same value as another. + the other id to compare to. May be null. + true only if both ObjectIds have identical bits. + + + Copy this ObjectId to an output writer in raw binary. + Copy this ObjectId to an output writer in raw binary. + the buffer to copy to. Must be in big endian order. + + + Copy this ObjectId to a byte array. + Copy this ObjectId to a byte array. + the buffer to copy to. + the offset within b to write at. + + + Copy this ObjectId to an int array. + Copy this ObjectId to an int array. + the buffer to copy to. + the offset within b to write at. + + + Copy this ObjectId to an output writer in raw binary. + Copy this ObjectId to an output writer in raw binary. + the stream to write to. + the stream writing failed. + + + + + + Copy this ObjectId to an output writer in hex format. + Copy this ObjectId to an output writer in hex format. + the stream to copy to. + the stream writing failed. + + + Copy this ObjectId to a byte array in hex format. + Copy this ObjectId to a byte array in hex format. + the buffer to copy to. + the offset within b to write at. + + + Copy this ObjectId to a ByteBuffer in hex format. + Copy this ObjectId to a ByteBuffer in hex format. + the buffer to copy to. + + + Copy this ObjectId to an output writer in hex format. + Copy this ObjectId to an output writer in hex format. + the stream to copy to. + the stream writing failed. + + + Copy this ObjectId to an output writer in hex format. + Copy this ObjectId to an output writer in hex format. + + temporary char array to buffer construct into before writing. + Must be at least large enough to hold 2 digits for each byte + of object id (40 characters or larger). + + the stream to copy to. + the stream writing failed. + + + Copy this ObjectId to a StringBuilder in hex format. + Copy this ObjectId to a StringBuilder in hex format. 
+ + temporary char array to buffer construct into before writing. + Must be at least large enough to hold 2 digits for each byte + of object id (40 characters or larger). + + the string to append onto. + + + string form of the SHA-1, in lower case hexadecimal. + + + Return an abbreviation (prefix) of this object SHA-1. + + Return an abbreviation (prefix) of this object SHA-1. +

+ This implementation does not guarantee uniqueness. Callers should + instead use + ObjectReader.Abbreviate(AnyObjectId, int) + + to obtain a + unique abbreviation within the scope of a particular object database. + + length of the abbreviated string. + SHA-1 abbreviation. + + +

Obtain an immutable copy of this current object name value. + + Obtain an immutable copy of this current object name value. +

+ Only returns this if this instance is an unsubclassed + instance of + ObjectId + ; otherwise a new instance is returned + holding the same value. +

+ This method is useful to shed any additional memory that may be tied to + the subclass, yet retain the unique identity of the object id for future + lookups within maps and repositories. + + an immutable copy, using the smallest memory footprint possible. + + +

Obtain an immutable copy of this current object name value. + + Obtain an immutable copy of this current object name value. +

+ See + Copy() + if this is a possibly subclassed (but + immutable) identity and the application needs a lightweight identity + only reference. + + + an immutable copy. May be this if this is already + an immutable instance. + + + +

Get the first 8 bits of the ObjectId. + + Get the first 8 bits of the ObjectId. + This is a faster version of + getByte(0) + . + + + a discriminator usable for a fan-out style map. Returned values + are unsigned and thus are in the range [0,255] rather than the + signed byte range of [-128, 127]. + +
+ + string form of the SHA-1, in lower case hexadecimal. + + + Queue to open objects asynchronously. + + Queue to open objects asynchronously. + A queue may perform background decompression of objects and supply them + (possibly out-of-order) to the application. + + + + Asynchronous operation handle. + + Asynchronous operation handle. + Callers that start an asynchronous operation are supplied with a handle that + may be used to attempt cancellation of the operation if the caller does not + wish to continue. + + + + Cancels the running task. + + Cancels the running task. + Attempts to cancel execution of this task. This attempt will fail if the + task has already completed, already been cancelled, or could not be + cancelled for some other reason. If successful, and this task has not + started when cancel is called, this task should never run. If the task + has already started, then the mayInterruptIfRunning parameter determines + whether the thread executing this task should be interrupted in an + attempt to stop the task. + + + true if the thread executing this task should be interrupted; + otherwise, in-progress tasks are allowed to complete + + + false if the task could not be cancelled, typically because it + has already completed normally; true otherwise + + + + Release resources used by the operation, including cancellation. + Release resources used by the operation, including cancellation. + + + Position this queue onto the next available result. + + Position this queue onto the next available result. + Even if this method returns true, + AsyncObjectLoaderQueue<T>.Open() + + may still throw + NGit.Errors.MissingObjectException + + if the underlying object database was + concurrently modified and the current object is no longer available. + + + true if there is a result available; false if the queue has + finished its input iteration. + + + the object does not exist. 
If the implementation is retaining + the application's objects + AsyncObjectLoaderQueue<T>.GetCurrent() + + will be the + current object that is missing. There may be more results + still available, so the caller should continue invoking next + to examine another result. + + the object store cannot be accessed. + + + + the current object, null if the implementation lost track. + Implementations may for performance reasons discard the caller's + ObjectId and provider their own through + AsyncObjectLoaderQueue<T>.GetObjectId() + + . + + + + the ObjectId of the current object. Never null. + + + Obtain a loader to read the object. + + Obtain a loader to read the object. + This method can only be invoked once per result + Due to race conditions with a concurrent modification of the underlying + object database, an object may be unavailable when this method is + invoked, even though next returned successfully. + + the ObjectLoader to read this object. Never null. + + the object does not exist. If the implementation is retaining + the application's objects + AsyncObjectLoaderQueue<T>.GetCurrent() + + will be the + current object that is missing. There may be more results + still available, so the caller should continue invoking next + to examine another result. + + the object store cannot be accessed. + + + Queue to examine object sizes asynchronously. + + Queue to examine object sizes asynchronously. + A queue may perform background lookup of object sizes and supply them + (possibly out-of-order) to the application. + + + + Position this queue onto the next available result. + Position this queue onto the next available result. + + true if there is a result available; false if the queue has + finished its input iteration. + + + the object does not exist. If the implementation is retaining + the application's objects + AsyncObjectSizeQueue<T>.GetCurrent() + + will be the + current object that is missing. 
There may be more results + still available, so the caller should continue invoking next + to examine another result. + + the object store cannot be accessed. + + + + the current object, null if the implementation lost track. + Implementations may for performance reasons discard the caller's + ObjectId and provider their own through + AsyncObjectSizeQueue<T>.GetObjectId() + + . + + + + the ObjectId of the current object. Never null. + + + the size of the current object. + + + Base builder to customize repository construction. + + Base builder to customize repository construction. +

+ Repository implementations may subclass this builder in order to add custom + repository detection methods. + + RepositoryBuilder + NGit.Storage.File.FileRepositoryBuilder + + + + + + +

Directories limiting the search for a Git repository. + Directories limiting the search for a Git repository. +
+ + True only if the caller wants to force bare behavior. + True only if the caller wants to force bare behavior. + + + True if the caller requires the repository to exist. + True if the caller requires the repository to exist. + + + Configuration file of target repository, lazily loaded if required. + Configuration file of target repository, lazily loaded if required. + + + Set the file system abstraction needed by this repository. + Set the file system abstraction needed by this repository. + the abstraction. + + + this + (for chaining calls). + + + + the file system abstraction, or null if not set. + + + Set the Git directory storing the repository metadata. + + Set the Git directory storing the repository metadata. +

+ The meta directory stores the objects, references, and meta files like + MERGE_HEAD + , or the index file. If + null + the path is + assumed to be + workTree/.git + . + + + GIT_DIR + , the repository meta directory. + + + + this + (for chaining calls). + + + + the meta data directory; null if not set. + + +

Set the directory storing the repository's objects. + Set the directory storing the repository's objects. + + GIT_OBJECT_DIRECTORY + , the directory where the + repository's object files are stored. + + + + this + (for chaining calls). + +
+ + the object directory; null if not set. + + + Add an alternate object directory to the search list. + + Add an alternate object directory to the search list. +

+ This setting handles one alternate directory at a time, and is provided + to support + GIT_ALTERNATE_OBJECT_DIRECTORIES + . + + another objects directory to search after the standard one. + + + this + (for chaining calls). + + + +

Add alternate object directories to the search list. + + Add alternate object directories to the search list. +

+ This setting handles several alternate directories at once, and is + provided to support + GIT_ALTERNATE_OBJECT_DIRECTORIES + . + + + other object directories to search after the standard one. The + collection's contents is copied to an internal list. + + + + this + (for chaining calls). + + + +

Add alternate object directories to the search list. + + Add alternate object directories to the search list. +

+ This setting handles several alternate directories at once, and is + provided to support + GIT_ALTERNATE_OBJECT_DIRECTORIES + . + + + other object directories to search after the standard one. The + array's contents is copied to an internal list. + + + + this + (for chaining calls). + + + + ordered array of alternate directories; null if non were set. + + +

Force the repository to be treated as bare (have no working directory). + + Force the repository to be treated as bare (have no working directory). +

+ If bare the working directory aspects of the repository won't be + configured, and will not be accessible. + + + + this + (for chaining calls). + + + + + true if this repository was forced bare by + BaseRepositoryBuilder<B, R>.SetBare() + + . + + + +

Require the repository to exist before it can be opened. + Require the repository to exist before it can be opened. + + true if it must exist; false if it can be missing and created + after being built. + + + + this + (for chaining calls). + +
+ + true if the repository must exist before being opened. + + + Set the top level directory of the working files. + Set the top level directory of the working files. + + GIT_WORK_TREE + , the working directory of the checkout. + + + + this + (for chaining calls). + + + + the work tree directory, or null if not set. + + + Set the local index file that is caching checked out file status. + + Set the local index file that is caching checked out file status. +

+ The location of the index file tracking the status information for each + checked out file in + workTree + . This may be null to assume the + default + gitDiir/index + . + + + GIT_INDEX_FILE + , the index file location. + + + + this + (for chaining calls). + + + + the index file location, or null if not set. + + +

Read standard Git environment variables and configure from those. + + Read standard Git environment variables and configure from those. +

+ This method tries to read the standard Git environment variables, such as + GIT_DIR + and + GIT_WORK_TREE + to configure this builder + instance. If an environment variable is set, it overrides the value + already set in this builder. + + + + this + (for chaining calls). + + + +

Read standard Git environment variables and configure from those. + + Read standard Git environment variables and configure from those. +

+ This method tries to read the standard Git environment variables, such as + GIT_DIR + and + GIT_WORK_TREE + to configure this builder + instance. If a property is already set in the builder, the environment + variable is not used. + + the SystemReader abstraction to access the environment. + + + this + (for chaining calls). + + + +

Add a ceiling directory to the search limit list. + + Add a ceiling directory to the search limit list. +

+ This setting handles one ceiling directory at a time, and is provided to + support + GIT_CEILING_DIRECTORIES + . + + a path to stop searching at; its parent will not be searched. + + + this + (for chaining calls). + + + +

Add ceiling directories to the search list. + + Add ceiling directories to the search list. +

+ This setting handles several ceiling directories at once, and is provided + to support + GIT_CEILING_DIRECTORIES + . + + + directory paths to stop searching at. The collection's + contents is copied to an internal list. + + + + this + (for chaining calls). + + + +

Add ceiling directories to the search list. + + Add ceiling directories to the search list. +

+ This setting handles several ceiling directories at once, and is provided + to support + GIT_CEILING_DIRECTORIES + . + + + directory paths to stop searching at. The array's contents is + copied to an internal list. + + + + this + (for chaining calls). + + + +

+ Configure + GIT_DIR + by searching up the file system. +

+ Starts from the current working directory of the JVM and scans up through + the directory tree until a Git repository is found. Success can be + determined by checking for + getGitDir() != null + . +

+ The search can be limited to specific spaces of the local filesystem by + BaseRepositoryBuilder<B, R>.AddCeilingDirectory(Sharpen.FilePath) + + , or inheriting the list through a + prior call to + BaseRepositoryBuilder<B, R>.ReadEnvironment() + + . +

+ + + this + (for chaining calls). + +
+ + + Configure + GIT_DIR + by searching up the file system. +

+ Starts from the supplied directory path and scans up through the parent + directory tree until a Git repository is found. Success can be determined + by checking for + getGitDir() != null + . +

+ The search can be limited to specific spaces of the local filesystem by + BaseRepositoryBuilder<B, R>.AddCeilingDirectory(Sharpen.FilePath) + + , or inheriting the list through a + prior call to + BaseRepositoryBuilder<B, R>.ReadEnvironment() + + . +

+ directory to begin searching in. + + + this + (for chaining calls). + +
+ + Guess and populate all parameters not already defined. + + Guess and populate all parameters not already defined. +

+ If an option was not set, the setup method will try to default the option + based on other options. If insufficient information is available, an + exception is thrown to the caller. + + + + this + + + insufficient parameters were set, or some parameters are + incompatible with one another. + + + the repository could not be accessed to configure the rest of + the builder's parameters. + + + +

Create a repository matching the configuration in this builder. + + Create a repository matching the configuration in this builder. +

+ If an option was not set, the build method will try to default the option + based on other options. If insufficient information is available, an + exception is thrown to the caller. + + a repository matching this configuration. + insufficient parameters were set. + + the repository could not be accessed to configure the rest of + the builder's parameters. + + + +

+ Require either + gitDir + or + workTree + to be set. + +
+ + Perform standard gitDir initialization. + Perform standard gitDir initialization. + the repository could not be accessed + + + Perform standard work-tree initialization. + + Perform standard work-tree initialization. +

+ This is a method typically invoked inside of + BaseRepositoryBuilder<B, R>.Setup() + + , near the + end after the repository has been identified and its configuration is + available for inspection. + + the repository configuration could not be read. + + + +

Configure the internal implementation details of the repository. + Configure the internal implementation details of the repository. + the repository could not be accessed +
+ + Get the cached repository configuration, loading if not yet available. + Get the cached repository configuration, loading if not yet available. + the configuration of the repository. + the configuration is not available, or is badly formed. + + + + Parse and load the repository specific configuration. + + Parse and load the repository specific configuration. +

+ The default implementation reads + gitDir/config + , or returns an + empty configuration if gitDir was not set. + + the repository's configuration. + the configuration is not available. + + + + + + + the configured FS, or + NGit.Util.FS.DETECTED + . + + + + + + this + + + + +

Configuration file based on the blobs stored in the repository. + + Configuration file based on the blobs stored in the repository. + This implementation currently only provides reading support, and is primarily + useful for supporting the + .gitmodules + file. + +
+ + + Git style + .config + , + .gitconfig + , + .gitmodules + file. + + + + the change listeners + + + Immutable current state of the configuration data. + + Immutable current state of the configuration data. +

+ This state is copy-on-write. It should always contain an immutable list + of the configuration keys/values. + + + +

Magic value indicating a missing entry. + + Magic value indicating a missing entry. +

+ This value is tested for reference equality in some contexts, so we + must ensure it is a special copy of the empty string. It also must + be treated like the empty string. + + + +

Create a configuration with no default fallback. + Create a configuration with no default fallback. +
+ + Create an empty configuration with a fallback for missing keys. + Create an empty configuration with a fallback for missing keys. + + the base configuration to be consulted when a key is missing + from this configuration instance. + + + + Escape the value before saving + the value to escape + the escaped value + + + Obtain an integer value from the configuration. + Obtain an integer value from the configuration. + section the key is grouped within. + name of the key to get. + default value to return if no value was present. + an integer value from the configuration, or defaultValue. + + + Obtain an integer value from the configuration. + Obtain an integer value from the configuration. + section the key is grouped within. + subsection name, such a remote or branch name. + name of the key to get. + default value to return if no value was present. + an integer value from the configuration, or defaultValue. + + + Obtain an integer value from the configuration. + Obtain an integer value from the configuration. + section the key is grouped within. + name of the key to get. + default value to return if no value was present. + an integer value from the configuration, or defaultValue. + + + Obtain an integer value from the configuration. + Obtain an integer value from the configuration. + section the key is grouped within. + subsection name, such a remote or branch name. + name of the key to get. + default value to return if no value was present. + an integer value from the configuration, or defaultValue. + + + Get a boolean value from the git config + section the key is grouped within. + name of the key to get. + default value to return if no value was present. + + true if any value or defaultValue is true, false for missing or + explicit false + + + + Get a boolean value from the git config + section the key is grouped within. + subsection name, such a remote or branch name. + name of the key to get. + default value to return if no value was present. 
+ + true if any value or defaultValue is true, false for missing or + explicit false + + + + Parse an enumeration from the configuration. + Parse an enumeration from the configuration. + section the key is grouped within. + subsection name, such a remote or branch name. + name of the key to get. + default value to return if no value was present. + + the selected enumeration value, or + defaultValue + . + + + + Parse an enumeration from the configuration. + Parse an enumeration from the configuration. + + all possible values in the enumeration which should be + recognized. Typically + EnumType.values() + . + + section the key is grouped within. + subsection name, such a remote or branch name. + name of the key to get. + default value to return if no value was present. + + the selected enumeration value, or + defaultValue + . + + + + Get string value + the section + the subsection for the value + the key name + a String value from git config. + + + + Get a list of string values +

+ If this instance was created with a base, the base's values are returned + first (if any). +

+ + Get a list of string values +

+ If this instance was created with a base, the base's values are returned + first (if any). + + the section + the subsection for the value + the key name + array of zero or more values from the configuration. + + + section to search for. + + set of all subsections of specified section within this + configuration and its base configuration; may be empty if no + subsection exists. The set's iterator returns sections in the + order they are declared by the configuration starting from this + instance and progressing through the base. + + + + + the sections defined in this + Config + . The set's iterator + returns sections in the order they are declared by the + configuration starting from this instance and progressing through + the base. + + + + the section + the list of names defined for this section + + + the section + the subsection + the list of names defined for this subsection + + +

Obtain a handle to a parsed set of configuration values. + Obtain a handle to a parsed set of configuration values. + + parser which can create the model if it is not already + available in this configuration file. The parser is also used + as the key into a cache and must obey the hashCode and equals + contract in order to reuse a parsed model. + + the parsed object instance, which is cached inside this config. +
+ + Remove a cached configuration object. + + Remove a cached configuration object. +

+ If the associated configuration object has not yet been cached, this + method has no effect. + + parser used to obtain the configuration object. + Get<T>(SectionParser<T>) + + +

Adds a listener to be notified about changes. + + Adds a listener to be notified about changes. +

+ Clients are supposed to remove the listeners after they are done with + them using the + NGit.Events.ListenerHandle.Remove() + + method + + the listener + the handle to the registered listener + + +

Determine whether to issue change events for transient changes. + + Determine whether to issue change events for transient changes. +

+ If true is returned (which is the default behavior), + FireConfigChangedEvent() + will be called upon each change. +

+ Subclasses that override this to return false are + responsible for issuing + FireConfigChangedEvent() + calls + themselves. + + + + +

Notifies the listeners +
+ + Add or modify a configuration value. + + Add or modify a configuration value. The parameters will result in a + configuration entry like this. +
+            [section "subsection"]
+            name = value
+            
+
+ section name, e.g "branch" + optional subsection value, e.g. a branch name + parameter name, e.g. "filemode" + parameter value +
+ + Add or modify a configuration value. + + Add or modify a configuration value. The parameters will result in a + configuration entry like this. +
+            [section "subsection"]
+            name = value
+            
+
+ section name, e.g "branch" + optional subsection value, e.g. a branch name + parameter name, e.g. "filemode" + parameter value +
+ + Add or modify a configuration value. + + Add or modify a configuration value. The parameters will result in a + configuration entry like this. +
+            [section "subsection"]
+            name = value
+            
+
+ section name, e.g "branch" + optional subsection value, e.g. a branch name + parameter name, e.g. "filemode" + parameter value +
+ + Add or modify a configuration value. + + Add or modify a configuration value. The parameters will result in a + configuration entry like this. +
+            [section "subsection"]
+            name = value
+            
+
+ section name, e.g "branch" + optional subsection value, e.g. a branch name + parameter name, e.g. "filemode" + parameter value +
+ + Add or modify a configuration value. + + Add or modify a configuration value. The parameters will result in a + configuration entry like this. +
+            [section "subsection"]
+            name = value
+            
+
+ section name, e.g "branch" + optional subsection value, e.g. a branch name + parameter name, e.g. "filemode" + parameter value, e.g. "true" +
+ + Remove a configuration value. + Remove a configuration value. + section name, e.g "branch" + optional subsection value, e.g. a branch name + parameter name, e.g. "filemode" + + + Remove all configuration values under a single section. + Remove all configuration values under a single section. + section name, e.g "branch" + optional subsection value, e.g. a branch name + + + Set a configuration value. + + Set a configuration value. +
+            [section "subsection"]
+            name = value
+            
+
+ section name, e.g "branch" + optional subsection value, e.g. a branch name + parameter name, e.g. "filemode" + list of zero or more values for this key. +
+ + this configuration, formatted as a Git style text file. + + + Clear this configuration and reset to the contents of the parsed string. + + Clear this configuration and reset to the contents of the parsed string. + + Git style text file listing configuration properties. + + the text supplied is not formatted correctly. No changes were + made to + this + . + + + + Clear the configuration file + + + + + + + + + + + + Parses a section of the configuration into an application model object. + + Parses a section of the configuration into an application model object. +

+ Instances must implement hashCode and equals such that model objects can + be cached by using the + SectionParser + as a key of a HashMap. +

+ As the + SectionParser + itself is used as the key of the internal + HashMap applications should be careful to ensure the SectionParser key + does not retain unnecessary application state which may cause memory to + be held longer than expected. + + + +

Create a model object from a configuration. + Create a model object from a configuration. + the configuration to read values from. + the application model instance. +
+ + Parse a configuration from a byte array. + Parse a configuration from a byte array. + the base configuration file + the byte array, should be UTF-8 encoded text. + the byte array is not a valid configuration format. + + + + Load a configuration file from a blob. + Load a configuration file from a blob. + the base configuration file + the repository + the object identifier + the blob cannot be read from the repository. + + the blob is not a valid configuration format. + + + + + + + + + + + + + + Load a configuration file from a blob stored in a specific commit. + Load a configuration file from a blob stored in a specific commit. + the base configuration file + the repository containing the objects. + the tree (or commit) that contains the object + the path within the tree + the path does not exist in the commit's tree. + + the tree and/or blob cannot be accessed. + the blob is not a valid configuration format. + + + + + + + + + + + + + + Mutable builder to construct a commit recording the state of a project. + + Mutable builder to construct a commit recording the state of a project. + Applications should use this object when they need to manually construct a + commit and want precise control over its fields. For a higher level interface + see + NGit.Api.CommitCommand + . + To read a commit object, construct a + NGit.Revwalk.RevWalk + and obtain a + NGit.Revwalk.RevCommit + instance by calling + NGit.Revwalk.RevWalk.ParseCommit(AnyObjectId) + + . + + + + Initialize an empty commit. + Initialize an empty commit. + + + Set the parent of this commit. + Set the parent of this commit. + the single parent for the commit. + + + Set the parents of this commit. + Set the parents of this commit. + + the first parent of this commit. Typically this is the current + value of the + HEAD + reference and is thus the current + branch's position in history. + + + the second parent of this merge commit. Usually this is the + branch being merged into the current branch. 
+ + + + Set the parents of this commit. + Set the parents of this commit. + the entire list of parents for this commit. + + + Set the parents of this commit. + Set the parents of this commit. + the entire list of parents for this commit. + + + Add a parent onto the end of the parent list. + Add a parent onto the end of the parent list. + new parent to add onto the end of the current parent list. + + + + Set the encoding for the commit information + + the encoding name. See + Sharpen.Extensions.GetEncoding(string) + + . + + + + Set the encoding for the commit information + the encoding to use. + + + Format this builder's state as a commit object. + Format this builder's state as a commit object. + + this object in the canonical commit format, suitable for storage + in a repository. + + + the encoding specified by + Encoding() + is not + supported by this Java runtime. + + + + Format this builder's state as a commit object. + Format this builder's state as a commit object. + + this object in the canonical commit format, suitable for storage + in a repository. + + + the encoding specified by + Encoding() + is not + supported by this Java runtime. + + + + id of the root tree listing this commit's snapshot. + Set the tree id for this commit object + the tree identity. + + + the author of this commit (who wrote it). + Set the author (name, email address, and date) of who wrote the commit. + Set the author (name, email address, and date) of who wrote the commit. + the new author. Should not be null. + + + the committer and commit time for this object. + Set the committer and commit time for this object + the committer information. Should not be null. + + + the ancestors of this commit. Never null. + + + the complete commit message. + Set the commit message. + Set the commit message. + the commit message. Should not be null. + + + the encoding that should be used for the commit message text. 
+ + + + Constants for use with the Configuration classes: section names, + configuration keys + + + + The "core" section + + + The "branch" section + + + The "remote" section + + + The "diff" section + + + The "dfs" section + + + The "user" section + + + The "gerrit" section + + + The "workflow" section + + + The "submodule" section + + + The "gc" section + + + The "pack" section + + + The "algorithm" key + + + The "autocrlf" key + + + The "bare" key + + + The "excludesfile" key + + + The "filemode" key + + + The "logallrefupdates" key + + + The "repositoryformatversion" key + + + The "worktree" key + + + The "blockLimit" key + + + The "blockSize" key + + + The "readAheadLimit" key + + + The "readAheadThreads" key + + + The "deltaBaseCacheLimit" key + + + The "streamFileThreshold" key + + + The "remote" key + + + The "merge" key + + + The "rebase" key + + + The "url" key + + + The "autosetupmerge" key + + + The "autosetuprebase" key + + + The "name" key + + + The "email" key + + + + The "false" key (used to configure + CONFIG_KEY_AUTOSETUPMERGE + + + + + + The "true" key (used to configure + CONFIG_KEY_AUTOSETUPMERGE + + + + + + The "always" key (used to configure + CONFIG_KEY_AUTOSETUPREBASE + and + CONFIG_KEY_AUTOSETUPMERGE + + + + + The "never" key (used to configure + CONFIG_KEY_AUTOSETUPREBASE + + + + + + The "local" key (used to configure + CONFIG_KEY_AUTOSETUPREBASE + + + + + The "createchangeid" key + + + The "defaultsourceref" key + + + The "path" key + + + The "update" key + + + The "compression" key + + + The "indexversion" key + + + The "precomposeunicode" key + + + The "pruneexpire" key + + + Misc. + Misc. constants used throughout JGit. + + + A Git object hash is 160 bits, i.e. + + A Git object hash is 160 bits, i.e. 20 bytes. +

+ Changing this assumption is not going to be as easy as changing this + declaration. + + + +

+ A Git object can be expressed as a 40 character string of hexadecimal + digits. + + + A Git object can be expressed as a 40 character string of hexadecimal + digits. + + OBJECT_ID_LENGTH +
+ + An unknown or invalid object type code. + An unknown or invalid object type code. + + + In-pack object type: extended types. + + In-pack object type: extended types. +

+ This header code is reserved for future expansion. It is currently + undefined/unsupported. + + + +

In-pack object type: commit. + + In-pack object type: commit. +

+ Indicates the associated object is a commit. +

+ This constant is fixed and is defined by the Git packfile format. + + TYPE_COMMIT + + +

In-pack object type: tree. + + In-pack object type: tree. +

+ Indicates the associated object is a tree. +

+ This constant is fixed and is defined by the Git packfile format. + + TYPE_BLOB + + +

In-pack object type: blob. + + In-pack object type: blob. +

+ Indicates the associated object is a blob. +

+ This constant is fixed and is defined by the Git packfile format. + + TYPE_BLOB + + +

In-pack object type: annotated tag. + + In-pack object type: annotated tag. +

+ Indicates the associated object is an annotated tag. +

+ This constant is fixed and is defined by the Git packfile format. + + TYPE_TAG + + +

In-pack object type: reserved for future use. + In-pack object type: reserved for future use. +
+ + + In-pack object type: offset delta +

+ Objects stored with this type actually have a different type which must + be obtained from their delta base object. +

+ + In-pack object type: offset delta +

+ Objects stored with this type actually have a different type which must + be obtained from their delta base object. Delta objects store only the + changes needed to apply to the base object in order to recover the + original object. +

+ An offset delta uses a negative offset from the start of this object to + refer to its delta base. The base object must exist in this packfile + (even in the case of a thin pack). +

+ This constant is fixed and is defined by the Git packfile format. + + + +

+ In-pack object type: reference delta +

+ Objects stored with this type actually have a different type which must + be obtained from their delta base object. +

+ + In-pack object type: reference delta +

+ Objects stored with this type actually have a different type which must + be obtained from their delta base object. Delta objects store only the + changes needed to apply to the base object in order to recover the + original object. +

+ A reference delta uses a full object id (hash) to reference the delta + base. The base object is allowed to be omitted from the packfile, but + only in the case of a thin pack being transferred over the network. +

+ This constant is fixed and is defined by the Git packfile format. + + + +

Hash function used natively by Git for all objects. + Hash function used natively by Git for all objects. +
+ + Special name for the "HEAD" symbolic-ref. + Special name for the "HEAD" symbolic-ref. + + + Special name for the "FETCH_HEAD" symbolic-ref. + Special name for the "FETCH_HEAD" symbolic-ref. + + + Text string that identifies an object as a commit. + + Text string that identifies an object as a commit. +

+ Commits connect trees into a string of project histories, where each + commit is an assertion that the best way to continue is to use this other + tree (set of files). + + + +

Text string that identifies an object as a blob. + + Text string that identifies an object as a blob. +

+ Blobs store whole file revisions. They are used for any user file, as + well as for symlinks. Blobs form the bulk of any project's storage space. + + + +

Text string that identifies an object as a tree. + + Text string that identifies an object as a tree. +

+ Trees attach object ids (hashes) to names and file modes. The normal use + for a tree is to store a version of a directory and its contents. + + + +

Text string that identifies an object as an annotated tag. + + Text string that identifies an object as an annotated tag. +

+ Annotated tags store a pointer to any other object, and an additional + message. It is most commonly used to record a stable release of the + project. + + + +

+ Pack file signature that occurs at file header - identifies file as Git + packfile formatted. + + + Pack file signature that occurs at file header - identifies file as Git + packfile formatted. +

+ This constant is fixed and is defined by the Git packfile format. + + + +

Native character encoding for commit messages, file names... + Native character encoding for commit messages, file names... +
+ + Native character encoding for commit messages, file names... + Native character encoding for commit messages, file names... + + + Default main branch name + + + Default stash branch name + + + Prefix for branch refs + + + Prefix for remotes refs + + + Prefix for tag refs + + + Prefix for notes refs + + + Standard notes ref + + + Prefix for any ref + + + Standard stash ref + + + Logs folder name + + + Info refs folder + + + Packed refs file + + + The environment variable that contains the system user name + + + The environment variable that contains the author's name + + + The environment variable that contains the author's email + + + The environment variable that contains the commiter's name + + + The environment variable that contains the commiter's email + + + + The environment variable that limits how close to the root of the file + systems JGit will traverse when looking for a repository root. + + + The environment variable that limits how close to the root of the file + systems JGit will traverse when looking for a repository root. + + + + + The environment variable that tells us which directory is the ".git" + directory + + + + + The environment variable that tells us which directory is the working + directory. + + + The environment variable that tells us which directory is the working + directory. + + + + The environment variable that tells us which file holds the Git index. + The environment variable that tells us which file holds the Git index. + + + The environment variable that tells us where objects are stored + + + + The environment variable that tells us where to look for objects, besides + the default objects directory. + + + The environment variable that tells us where to look for objects, besides + the default objects directory. 
+ + + + Default value for the user name if no other information is available + + + Beginning of the common "Signed-off-by: " commit message line + + + A gitignore file name + + + Default remote name used by clone, push and fetch operations + + + Default name for the Git repository directory + + + Default name for the Git repository configuration + + + A bare repository typically ends with this string + + + Name of the ignore file + + + Name of the submodules file + + + Create a new digest function for objects. + Create a new digest function for objects. + a new digest object. + + this Java virtual machine does not support the required hash + function. Very unlikely given that JGit uses a hash function + that is in the Java reference specification. + + + + Convert an OBJ_* type constant to a TYPE_* type constant. + Convert an OBJ_* type constant to a TYPE_* type constant. + the type code, from a pack representation. + the canonical string name of this type. + + + Convert an OBJ_* type constant to an ASCII encoded string constant. + + Convert an OBJ_* type constant to an ASCII encoded string constant. +

+ The ASCII encoded string is often the canonical representation of + the type within a loose object header, or within a tag header. + + the type code, from a pack representation. + the canonical ASCII encoded name of this type. + + +

Parse an encoded type string into a type constant. + Parse an encoded type string into a type constant. + + object id this type string came from; may be null if that is + not known at the time the parse is occurring. + + string version of the type code. + + character immediately following the type string. Usually ' ' + (space) or '\n' (line feed). + + + position within typeString where the parse + should start. Updated with the new position (just past + endMark when the parse is successful. + + + a type code constant (one of + OBJ_BLOB + , + OBJ_COMMIT + , + OBJ_TAG + , + OBJ_TREE + . + + there is no valid type identified by typeString. + +
+ + Convert an integer into its decimal representation. + Convert an integer into its decimal representation. + the integer to convert. + + a decimal representation of the input integer. The returned array + is the smallest array that will hold the value. + + + + Convert a string to US-ASCII encoding. + Convert a string to US-ASCII encoding. + + the string to convert. Must not contain any characters over + 127 (outside of 7-bit ASCII). + + + a byte array of the same length as the input string, holding the + same characters, in the same order. + + + the input string contains one or more characters outside of + the 7-bit ASCII character space. + + + + Convert a string to a byte array in the standard character encoding. + Convert a string to a byte array in the standard character encoding. + the string to convert. May contain any Unicode characters. + + a byte array representing the requested string, encoded using the + default character encoding (UTF-8). + + CHARACTER_ENCODING + + + name of the file containing the commit msg for a merge commit + + + name of the file containing the IDs of the parents of a merge commit + + + name of the file containing the ID of a cherry pick commit in case of conflicts + + + + name of the file containing the commit msg for a squash commit + + + + name of the ref ORIG_HEAD used by certain commands to store the original + value of HEAD + + + + objectid for the empty blob + + + This class keeps git repository core parameters. + This class keeps git repository core parameters. + + + + Key for + Config.Get<T>(SectionParser<T>) + + . + + + + The compression level to use when storing loose objects + + + the preferred pack index file format; 0 for oldest possible. + + + whether to log all refUpdates + + + path of excludesfile + + + + Permissible values for + core.autocrlf + . + + + + Constants describing various file modes recognized by GIT. + + Constants describing various file modes recognized by GIT. +

+ GIT uses a subset of the available UNIX file permission bits. The + FileMode class provides access to constants defining the modes + actually used by GIT. + + + +

Mask to apply to a file mode to obtain its type bits. + Mask to apply to a file mode to obtain its type bits. + TYPE_TREE + TYPE_SYMLINK + TYPE_FILE + TYPE_GITLINK + TYPE_MISSING +
+ + + Bit pattern for + TYPE_MASK + matching + TREE + . + + + + + Bit pattern for + TYPE_MASK + matching + SYMLINK + . + + + + + Bit pattern for + TYPE_MASK + matching + REGULAR_FILE + . + + + + + Bit pattern for + TYPE_MASK + matching + GITLINK + . + + + + + Bit pattern for + TYPE_MASK + matching + MISSING + . + + + + Mode indicating an entry is a tree (aka directory). + Mode indicating an entry is a tree (aka directory). + + + Mode indicating an entry is a symbolic link. + Mode indicating an entry is a symbolic link. + + + Mode indicating an entry is a non-executable file. + Mode indicating an entry is a non-executable file. + + + Mode indicating an entry is an executable file. + Mode indicating an entry is an executable file. + + + Mode indicating an entry is a submodule commit in another repository. + Mode indicating an entry is a submodule commit in another repository. + + + Mode indicating an entry is missing during parallel walks. + Mode indicating an entry is missing during parallel walks. + + + Convert a set of mode bits into a FileMode enumerated value. + Convert a set of mode bits into a FileMode enumerated value. + the mode bits the caller has somehow obtained. + the FileMode instance that represents the given bits. + + + + Test a file mode for equality with this + FileMode + object. + + + true if the mode bits represent the same mode as this object + + + Copy this mode as a sequence of octal US-ASCII bytes. + + Copy this mode as a sequence of octal US-ASCII bytes. +

+ The mode is copied as a sequence of octal digits using the US-ASCII + character encoding. The sequence does not use a leading '0' prefix to + indicate octal notation. This method is suitable for generation of a mode + string within a GIT tree object. + + stream to copy the mode to. + the stream encountered an error during the copy. + + + +

Copy this mode as a sequence of octal US-ASCII bytes. + + Copy this mode as a sequence of octal US-ASCII bytes. + The mode is copied as a sequence of octal digits using the US-ASCII + character encoding. The sequence does not use a leading '0' prefix to + indicate octal notation. This method is suitable for generation of a mode + string within a GIT tree object. + + buffer to copy the mode to. + + position within + buf + for first digit. + +
+ + + the number of bytes written by + CopyTo(Sharpen.OutputStream) + . + + + + Get the object type that should appear for this type of mode. + + Get the object type that should appear for this type of mode. +

+ See the object type constants in + Constants + . + + one of the well known object type constants. + + +

Format this mode as an octal string (for debugging only). + Format this mode as an octal string (for debugging only). +
+ + The mode bits as an integer. + + + + A representation of a file (blob) object in a + Tree + . + + + + This class represents an entry in a tree, like a blob or another tree. + This class represents an entry in a tree, like a blob or another tree. + + + Construct a named tree entry. + Construct a named tree entry. + + + + + + parent of this tree. + + + Delete this entry. + Delete this entry. + + + Detach this entry from it's parent. + Detach this entry from it's parent. + + + the repository owning this entry. + + + the raw byte name of this entry. + + + the name of this entry. + + + Rename this entry. + Rename this entry. + The new name + System.IO.IOException + + + Rename this entry. + Rename this entry. + The new name + System.IO.IOException + + + true if this entry is new or modified since being loaded. + + + Mark this entry as modified. + Mark this entry as modified. + + + SHA-1 of this tree entry (null for new unhashed entries) + + + Set (update) the SHA-1 of this entry. + + Set (update) the SHA-1 of this entry. Invalidates the id's of all + entries above this entry as they will have to be recomputed. + + SHA-1 for this entry. + + + repository relative name of this entry + + + + repository relative name of the entry + FIXME better encoding + + + + Helper for accessing tree/blob methods. + Helper for accessing tree/blob methods. + + '/' for Tree entries and NUL for non-treeish objects. + + + mode (type of object) + + + Constructor for a File (blob) object. + Constructor for a File (blob) object. + + The + Tree + holding this object (or null) + + the SHA-1 of the blob (or null for a yet unhashed file) + raw object name in the parent tree + true if the executable flag is set + + + true if this file is executable + + + set/reset the executable flag + + + + an + ObjectLoader + that will return the data + + System.IO.IOException + + + A representation of the Git index. + + A representation of the Git index. 
+ The index points to the objects currently checked out or in the process of + being prepared for committing or objects involved in an unfinished merge. + The abstract format is:
path stage flags statdata SHA-1 +
    +
  • Path is the relative path in the workdir
  • +
  • stage is 0 (normally), but when + merging 1 is the common ancestor version, 2 is 'our' version and 3 is 'their' + version. A fully resolved merge only contains stage 0.
  • +
  • flags is the object type and information of validity
  • +
  • statdata is the size of this object and some other file system specifics, + some of it ignored by JGit
  • +
  • SHA-1 represents the content of the references object
  • +
+ An index can also contain a tree cache which we ignore for now. We drop the + tree cache when writing the index. +
+
+ + Stage 0 represents merged entries. + Stage 0 represents merged entries. + + + Construct a Git index representation. + Construct a Git index representation. + + + + true if we have modified the index in memory since reading it from disk + + + Reread index data from disk if the index file has been changed + System.IO.IOException + + + Add the content of a file to the index. + Add the content of a file to the index. + workdir + the file + a new or updated index entry for the path represented by f + System.IO.IOException + + + Add the content of a file to the index. + Add the content of a file to the index. + workdir + the file + content of the file + a new or updated index entry for the path represented by f + System.IO.IOException + + + Remove a path from the index. + Remove a path from the index. + workdir + the file whose path shall be removed. + true if such a path was found (and thus removed) + System.IO.IOException + + + Read the cache file into memory. + Read the cache file into memory. + System.IO.IOException + + + Write content of index to disk. + Write content of index to disk. + System.IO.IOException + + + + + + Read a Tree recursively into the index + The tree to read + System.IO.IOException + + + + + + Add tree entry to index + tree entry + new or modified index entry + System.IO.IOException + + + Check out content of the content represented by the index + workdir + System.IO.IOException + + + Check out content of the specified index entry + workdir + index entry + System.IO.IOException + + + Construct and write tree out of index. + Construct and write tree out of index. + SHA-1 of the constructed tree + System.IO.IOException + + + + Return the members of the index sorted by the unsigned byte + values of the path names. + + + Return the members of the index sorted by the unsigned byte + values of the path names. + Small beware: Unaccounted for are unmerged entries. 
You may want + to abort if members with stage != 0 are found if you are doing + any updating operations. All stages will be found after one another + here later. Currently only one stage per name is returned. + + The index entries sorted + + + Look up an entry with the specified path. + Look up an entry with the specified path. + + index entry for the path or null if not in index. + Sharpen.UnsupportedEncodingException + + + + The repository holding this index. + + + An index entry + + + + + + + + + + Update this index entry with stat and SHA-1 information if it looks + like the file has been modified in the workdir. + + + Update this index entry with stat and SHA-1 information if it looks + like the file has been modified in the workdir. + + file in work dir + true if a change occurred + System.IO.IOException + + + + Update this index entry with stat and SHA-1 information if it looks + like the file has been modified in the workdir. + + + Update this index entry with stat and SHA-1 information if it looks + like the file has been modified in the workdir. + + file in work dir + the new content of the file + true if a change occurred + System.IO.IOException + + + + Check if an entry's content is different from the cache, + File status information is used and status is same we + consider the file identical to the state in the working + directory. + + + Check if an entry's content is different from the cache, + File status information is used and status is same we + consider the file identical to the state in the working + directory. Native git uses more stat fields than we + have accessible in Java. + + working directory to compare content with + true if content is most likely different. + + + + Check if an entry's content is different from the cache, + File status information is used and status is same we + consider the file identical to the state in the working + directory. 
+ + + Check if an entry's content is different from the cache, + File status information is used and status is same we + consider the file identical to the state in the working + directory. Native git uses more stat fields than we + have accessible in Java. + + working directory to compare content with + + True if the actual file content + should be checked if modification time differs. + + true if content is most likely different. + + + Returns the stages in which the entry's file is recorded in the index. + + Returns the stages in which the entry's file is recorded in the index. + The stages are bit-encoded: bit N is set if the file is present + in stage N. In particular, the N-th bit will be set if this entry + itself is in stage N (see getStage()). + + flags denoting stages + GetStage() + + + path name for this entry + + + path name for this entry as byte array, hopefully UTF-8 encoded + + + SHA-1 of the entry managed by this index + + + the stage this entry is in + + + size of disk object + + + true if this entry shall be assumed valid + + + true if this entry should be checked for changes + + + Set whether to always assume this entry valid + true to ignore changes + + + Set whether this entry must be checked + + + + Return raw file mode bits. + + Return raw file mode bits. See + FileMode + + file mode bits + + + + + + + + + A tree entry representing a gitlink entry used for submodules. + + A tree entry representing a gitlink entry used for submodules. + Note. Java cannot really handle these as file system objects. + + + + + Construct a + GitlinkTreeEntry + with the specified name and SHA-1 in + the specified parent + + + + + + + + Compares the index, a tree, and the working directory Ignored files are not + taken into account. + + + Compares the index, a tree, and the working directory Ignored files are not + taken into account. The following information is retrieved: +
    +
  • added files
  • +
  • changed files
  • +
  • removed files
  • +
  • missing files
  • +
  • modified files
  • +
  • conflicting files
  • +
  • untracked files
  • +
  • files with assume-unchanged flag
  • +
+
+
+ + Construct an IndexDiff + + + symbolic name e.g. HEAD + An EmptyTreeIterator is used if revstr cannot be resolved. + + iterator for working directory + System.IO.IOException + + + Construct an Indexdiff + + tree id. If null, an EmptyTreeIterator is used. + iterator for working directory + System.IO.IOException + + + Sets a filter. + + Sets a filter. Can be used e.g. for restricting the tree walk to a set of + files. + + + + + Run the diff operation. + + Run the diff operation. Until this is called, all lists will be empty. + Use + Diff(ProgressMonitor, int, int, string) + + if a progress + monitor is required. + + if anything is different between index, tree, and workdir + System.IO.IOException + + + Run the diff operation. + + Run the diff operation. Until this is called, all lists will be empty. +

+ The operation may be aborted by the progress monitor. In that event it + will report what was found before the cancel operation was detected. + Callers should ignore the result if monitor.isCancelled() is true. If a + progress monitor is not needed, callers should use + Diff() + instead. Progress reporting is crude and approximate and only intended + for informing the user. + + for reporting progress, may be null + number or estimated files in the working tree + number of estimated entries in the cache + + if anything is different between index, tree, and workdir + System.IO.IOException + + + list of files added to the index, not in the tree + + + list of files changed from tree to index + + + list of files removed from index, but in tree + + + list of files in index, but not filesystem + + + list of files modified on disk relative to the index + + + list of files that are not ignored, and not in the index. + + + list of files that are in conflict + + +

The method returns the list of ignored files and folders. + + The method returns the list of ignored files and folders. Only the root + folder of an ignored folder hierarchy is reported. If a/b/c is listed in + the .gitignore then you should not expect a/b/c/d/e/f to be reported + here. Only a/b/c will be reported. Furthermore only ignored files / + folders are returned that are NOT in the index. + + list of files / folders that are ignored +
+ + list of files with the flag assume-unchanged + + + list of folders containing only untracked files/folders + + + Get the file mode of the given path in the index + + file mode + + + Selects interesting tree entries during walking. + + Selects interesting tree entries during walking. +

+ This is an abstract interface. Applications may implement a subclass, or use + one of the predefined implementations already available within this package. +

+ Unless specifically noted otherwise a TreeFilter implementation is not thread + safe and may not be shared by different TreeWalk instances at the same time. + This restriction allows TreeFilter implementations to cache state within + their instances during + Include(NGit.Treewalk.TreeWalk) + if it is beneficial to + their implementation. Deep clones created by + Clone() + may be used to + construct a thread-safe copy of an existing filter. +

+ Path filters: +

    +
  • Matching pathname: + PathFilter +
  • +
+

+ Difference filters: +

    +
  • Only select differences: + ANY_DIFF + .
  • +
+

+ Boolean modifiers: +

    +
  • AND: + AndTreeFilter +
  • +
  • OR: + OrTreeFilter +
  • +
  • NOT: + NotTreeFilter +
  • +
+
+
+ + Selects all tree entries. + Selects all tree entries. + + + Selects only tree entries which differ between at least 2 trees. + + Selects only tree entries which differ between at least 2 trees. +

+ This filter also prevents a TreeWalk from recursing into a subtree if all + parent trees have the identical subtree at the same path. This + dramatically improves walk performance as only the changed subtrees are + entered into. +

+ If this filter is applied to a walker with only one tree it behaves like + ALL + , or as though the walker was matching a virtual empty tree + against the single tree it was actually given. Applications may wish to + treat such a difference as "all names added". +

+ When comparing + NGit.Treewalk.WorkingTreeIterator + and + NGit.Dircache.DirCacheIterator + applications should use + IndexDiffFilter + . + + + +

Create a new filter that does the opposite of this filter. + Create a new filter that does the opposite of this filter. + a new filter that includes tree entries this filter rejects. +
+ + Determine if the current entry is interesting to report. + + Determine if the current entry is interesting to report. +

+ This method is consulted for subtree entries even if + NGit.Treewalk.TreeWalk.Recursive() + + is enabled. The consultation allows the + filter to bypass subtree recursion on a case-by-case basis, even when + recursion is enabled at the application level. + + the walker the filter needs to examine. + + true if the current entry should be seen by the application; + false to hide the entry. + + + an object the filter needs to consult to determine its answer + does not exist in the Git repository the walker is operating + on. Filtering this current walker entry is impossible without + the object. + + + an object the filter needed to consult was not of the + expected object type. This usually indicates a corrupt + repository, as an object link is referencing the wrong type. + + + a loose object or pack file could not be read to obtain data + necessary for the filter to make its decision. + + + +

+ Does this tree filter require a recursive walk to match everything? +

+ If this tree filter is matching on full entry path names and its pattern + is looking for a '/' then the filter would require a recursive TreeWalk + to accurately make its decisions. +

+ + Does this tree filter require a recursive walk to match everything? +

+ If this tree filter is matching on full entry path names and its pattern + is looking for a '/' then the filter would require a recursive TreeWalk + to accurately make its decisions. The walker is not required to enable + recursive behavior for any particular filter, this is only a hint. + + + true if the filter would like to have the walker recurse into + subtrees to make sure it matches everything correctly; false if + the filter does not require entering subtrees. + + + +

Clone this tree filter, including its parameters. + + Clone this tree filter, including its parameters. +

+ This is a deep clone. If this filter embeds objects or other filters it + must also clone those, to ensure the instances do not share mutable data. + + another copy of this filter, suitable for another thread. + + + + + + + +

Creates zlib based inflaters as necessary for object decompression. + Creates zlib based inflaters as necessary for object decompression. +
+ + Obtain an Inflater for decompression. + + Obtain an Inflater for decompression. +

+ Inflaters obtained through this cache should be returned (if possible) by + Release(ICSharpCode.SharpZipLib.Zip.Compression.Inflater) + + to avoid garbage collection and reallocation. + + an available inflater. Never null. + + +

Release an inflater previously obtained from this cache. + Release an inflater previously obtained from this cache. + + the inflater to return. May be null, in which case this method + does nothing. + +
+ + A mutable SHA-1 abstraction. + A mutable SHA-1 abstraction. + + + Empty constructor. + Empty constructor. Initialize object with default (zeros) value. + + + Copying constructor. + Copying constructor. + original entry, to copy id from + + + Set any byte in the id. + Set any byte in the id. + + index of the byte to set in the raw form of the ObjectId. Must + be in range [0, + Constants.OBJECT_ID_LENGTH + ). + + + the value of the specified byte at + index + . Values are + unsigned and thus are in the range [0,255] rather than the + signed byte range of [-128, 127]. + + + index + is less than 0, equal to + Constants.OBJECT_ID_LENGTH + , or greater than + Constants.OBJECT_ID_LENGTH + . + + + + + Make this id match + ObjectId.ZeroId() + . + + + + Copy an ObjectId into this mutable buffer. + Copy an ObjectId into this mutable buffer. + the source id to copy from. + + + Convert an ObjectId from raw binary representation. + Convert an ObjectId from raw binary representation. + + the raw byte buffer to read from. At least 20 bytes must be + available within this byte array. + + + + Convert an ObjectId from raw binary representation. + Convert an ObjectId from raw binary representation. + + the raw byte buffer to read from. At least 20 bytes after p + must be available within this byte array. + + position to read the first byte of data from. + + + Convert an ObjectId from binary representation expressed in integers. + Convert an ObjectId from binary representation expressed in integers. + + the raw int buffer to read from. At least 5 integers must be + available within this integers array. + + + + Convert an ObjectId from binary representation expressed in integers. + Convert an ObjectId from binary representation expressed in integers. + + the raw int buffer to read from. At least 5 integers after p + must be available within this integers array. + + position to read the first integer of data from. + + + Convert an ObjectId from hex characters (US-ASCII). 
+ Convert an ObjectId from hex characters (US-ASCII). + + the US-ASCII buffer to read from. At least 40 bytes after + offset must be available within this byte array. + + position to read the first character from. + + + Convert an ObjectId from hex characters. + Convert an ObjectId from hex characters. + the string to read from. Must be 40 characters long. + + + A NullProgressMonitor does not report progress anywhere. + A NullProgressMonitor does not report progress anywhere. + + + A progress reporting interface. + A progress reporting interface. + + + Constant indicating the total work units cannot be predicted. + Constant indicating the total work units cannot be predicted. + + + Advise the monitor of the total number of subtasks. + + Advise the monitor of the total number of subtasks. +

+ This should be invoked at most once per progress monitor interface. + + + the total number of tasks the caller will need to complete + their processing. + + + +

Begin processing a single task. + Begin processing a single task. + + title to describe the task. Callers should publish these as + stable string constants that implementations could match + against for translation support. + + + total number of work units the application will perform; + UNKNOWN + if it cannot be predicted in advance. + +
+ + Denote that some work units have been completed. + + Denote that some work units have been completed. +

+ This is an incremental update; if invoked once per work unit the correct + value for our argument is 1, to indicate a single unit of + work has been finished by the caller. + + the number of work units completed since the last call. + + +

Finish the current task, so the next can begin. + Finish the current task, so the next can begin. +
+ + Check for user task cancellation. + Check for user task cancellation. + true if the user asked the process to stop working. + + + Immutable instance of a null progress monitor. + Immutable instance of a null progress monitor. + + + Verifies that an object is formatted correctly. + + Verifies that an object is formatted correctly. +

+ Verifications made by this class only check that the fields of an object are + formatted correctly. The ObjectId checksum of the object is not verified, and + connectivity links between objects are also not verified. Its assumed that + the caller can provide both of these validations on its own. +

+ Instances of this class are not thread safe, but they may be reused to + perform multiple object validations. + + + +

Header "tree " +
+ + Header "parent " + + + Header "author " + + + Header "committer " + + + Header "encoding " + + + Header "object " + + + Header "type " + + + Header "tag " + + + Header "tagger " + + + Check an object for parsing errors. + Check an object for parsing errors. + + type of the object. Must be a valid object type code in + Constants + . + + + the raw data which comprises the object. This should be in the + canonical format (that is the format used to generate the + ObjectId of the object). The array is never modified. + + if an error is identified. + + + Check a commit for errors. + Check a commit for errors. + the commit data. The array is never modified. + if any error was detected. + + + Check an annotated tag for errors. + Check an annotated tag for errors. + the tag data. The array is never modified. + if any error was detected. + + + Check a canonical formatted tree for errors. + Check a canonical formatted tree for errors. + the raw tree data. The array is never modified. + if any error was detected. + + + Check a blob for errors. + Check a blob for errors. + the blob data. The array is never modified. + if any error was detected. + + + Abstraction of arbitrary object storage. + + Abstraction of arbitrary object storage. +

+ An object database stores one or more Git objects, indexed by their unique + ObjectId + . + + + +

Initialize a new database instance for access. + Initialize a new database instance for access. +
+ + Does this database exist yet? + + true if this database is already created; false if the caller + should invoke + Create() + to create this database location. + + + + Initialize a new object database at this location. + Initialize a new object database at this location. + the database could not be created. + + + + Create a new + ObjectInserter + to insert new objects. +

+ The returned inserter is not itself thread-safe, but multiple concurrent + inserter instances created from the same + ObjectDatabase + must be + thread-safe. +

+ writer the caller can use to create objects in this database. +
+ + + Create a new + ObjectReader + to read existing objects. +

+ The returned reader is not itself thread-safe, but multiple concurrent + reader instances created from the same + ObjectDatabase + must be + thread-safe. +

+ reader the caller can use to load objects from this database. +
+ + Close any resources held by this database. + Close any resources held by this database. + + + + Does the requested object exist in this database? +

+ This is a one-shot call interface which may be faster than allocating a + NewReader() + to perform the lookup. +

+ identity of the object to test for existence of. + true if the specified object is stored in this database. + the object store cannot be accessed. +
+ + Open an object from this database. + + Open an object from this database. +

+ This is a one-shot call interface which may be faster than allocating a + NewReader() + to perform the lookup. + + identity of the object to open. + + a + ObjectLoader + for accessing the object. + + the object does not exist. + the object store cannot be accessed. + + +

Open an object from this database. + + Open an object from this database. +

+ This is a one-shot call interface which may be faster than allocating a + NewReader() + to perform the lookup. + + identity of the object to open. + + hint about the type of object being requested; + ObjectReader.OBJ_ANY + if the object type is not known, + or does not matter to the caller. + + + a + ObjectLoader + for accessing the object. + + the object does not exist. + + typeHint was not OBJ_ANY, and the object's actual type does + not match typeHint. + + the object store cannot be accessed. + + +

Create a new cached database instance over this database. + + Create a new cached database instance over this database. This instance might + optimize queries by caching some information about database. So some modifications + done after instance creation might fail to be noticed. + + new cached database instance +
+ + A SHA-1 abstraction. + A SHA-1 abstraction. + + + Test a string of characters to verify it is a hex format. + + Test a string of characters to verify it is a hex format. +

+ If true the string can be parsed with + FromString(string) + . + + the string to test. + true if the string can converted into an ObjectId. + + +

Convert an ObjectId into a hex string representation. + Convert an ObjectId into a hex string representation. + the id to convert. May be null. + the hex string conversion of this id's content. +
+ + Compare to object identifier byte sequences for equality. + Compare to object identifier byte sequences for equality. + + the first buffer to compare against. Must have at least 20 + bytes from position ai through the end of the buffer. + + first offset within firstBuffer to begin testing. + + the second buffer to compare against. Must have at least 2 + bytes from position bi through the end of the buffer. + + first offset within secondBuffer to begin testing. + true if the two identifiers are the same. + + + Convert an ObjectId from raw binary representation. + Convert an ObjectId from raw binary representation. + + the raw byte buffer to read from. At least 20 bytes must be + available within this byte array. + + the converted object id. + + + Convert an ObjectId from raw binary representation. + Convert an ObjectId from raw binary representation. + + the raw byte buffer to read from. At least 20 bytes after p + must be available within this byte array. + + position to read the first byte of data from. + the converted object id. + + + Convert an ObjectId from raw binary representation. + Convert an ObjectId from raw binary representation. + + the raw integers buffer to read from. At least 5 integers must + be available within this int array. + + the converted object id. + + + Convert an ObjectId from raw binary representation. + Convert an ObjectId from raw binary representation. + + the raw integers buffer to read from. At least 5 integers + after p must be available within this int array. + + position to read the first integer of data from. + the converted object id. + + + Convert an ObjectId from hex characters (US-ASCII). + Convert an ObjectId from hex characters (US-ASCII). + + the US-ASCII buffer to read from. At least 40 bytes after + offset must be available within this byte array. + + position to read the first character from. + the converted object id. + + + Convert an ObjectId from hex characters. + Convert an ObjectId from hex characters. 
+ the string to read from. Must be 40 characters long. + the converted object id. + + + Initialize this instance by copying another existing ObjectId. + + Initialize this instance by copying another existing ObjectId. +

+ This constructor is mostly useful for subclasses who want to extend an + ObjectId with more properties, but initialize from an existing ObjectId + instance acquired by other means. + + another already parsed ObjectId to copy the value out of. + + + + + + + + +

Get the special all-null ObjectId. + Get the special all-null ObjectId. + the all-null ObjectId, often used to stand-in for no object. +
+ + + A + Ref + that points directly at an + ObjectId + . + + + + + Pairing of a name and the + ObjectId + it currently has. +

+ A ref in Git is (more or less) a variable that holds a single object + identifier. The object identifier can be any valid Git object (blob, tree, + commit, annotated tag, ...). +

+ The ref name has the attributes of the ref that was asked for as well as the + ref it was resolved to for symbolic refs plus the object id it points to and + (for tags) the peeled target object id, i.e. the tag resolved recursively + until a non-tag object is referenced. +

+
+ + What this ref is called within the repository. + What this ref is called within the repository. + name of this ref. + + + Test if this reference is a symbolic reference. + + Test if this reference is a symbolic reference. +

+ A symbolic reference does not have its own + ObjectId + value, but + instead points to another + Ref + in the same database and always + uses that other reference's value as its own. + + + true if this is a symbolic reference; false if this reference + contains its own ObjectId. + + + +

+ Traverse target references until + IsSymbolic() + is false. +

+ If + IsSymbolic() + is false, returns + this + . +

+ If + IsSymbolic() + is true, this method recursively traverses + GetTarget() + until + IsSymbolic() + returns false. +

+ This method is effectively +

+            return isSymbolic() ? getTarget().getLeaf() : this;
+            
+
+ the reference that actually stores the ObjectId value. +
+ + + Get the reference this reference points to, or + this + . +

+ If + IsSymbolic() + is true this method returns the reference it + directly names, which might not be the leaf reference, but could be + another symbolic reference. +

+ If this is a leaf level reference that contains its own ObjectId,this + method returns + this + . +

+ + the target reference, or + this + . + +
+ + Cached value of this ref. + Cached value of this ref. + the value of this ref at the last time we read it. + + + Cached value of ref^{} (the ref peeled to commit). + Cached value of ref^{} (the ref peeled to commit). + + if this ref is an annotated tag the id of the commit (or tree or + blob) that the annotated tag refers to; null if this ref does not + refer to an annotated tag. + + + + whether the Ref represents a peeled tag + + + + How was this ref obtained? +

+ The current storage model of a Ref may influence how the ref must be + updated or deleted from the repository. +

+ + How was this ref obtained? +

+ The current storage model of a Ref may influence how the ref must be + updated or deleted from the repository. + + type of ref. + + +

Create a new ref pairing. + Create a new ref pairing. + method used to store this ref. + name of this ref. + + current value of the ref. May be null to indicate a ref that + does not exist yet. + +
+ + Any reference whose peeled value is not yet known. + Any reference whose peeled value is not yet known. + + + Create a new ref pairing. + Create a new ref pairing. + method used to store this ref. + name of this ref. + + current value of the ref. May be null to indicate a ref + that does not exist yet. + + + + An annotated tag whose peeled object has been cached. + An annotated tag whose peeled object has been cached. + + + Create a new ref pairing. + Create a new ref pairing. + method used to store this ref. + name of this ref. + current value of the ref. + + the first non-tag object that tag + id + points to. + + + + A reference to a non-tag object coming from a cached source. + A reference to a non-tag object coming from a cached source. + + + Create a new ref pairing. + Create a new ref pairing. + method used to store this ref. + name of this ref. + + current value of the ref. May be null to indicate a ref + that does not exist yet. + + + + + Fast, efficient map specifically for + ObjectId + subclasses. +

+ This map provides an efficient translation from any ObjectId instance to a + cached subclass of ObjectId that has the same value. +

+ If object instances are stored in only one map, + ObjectIdOwnerMap<V> + is a + more efficient implementation. +

+
+ + Create an empty map. + Create an empty map. + + + Remove all entries from this map. + Remove all entries from this map. + + + Lookup an existing mapping. + Lookup an existing mapping. + the object identifier to find. + the instance mapped to toFind, or null if no mapping exists. + + + Returns true if this map contains the specified object. + Returns true if this map contains the specified object. + object to find. + true if the mapping exists for this object; false otherwise. + + + Store an object for future lookup. + + Store an object for future lookup. +

+ An existing mapping for must not be in this map. Callers must + first call + ObjectIdSubclassMap<V>.Get(AnyObjectId) + + to verify there is no current + mapping prior to adding a new mapping, or use + ObjectIdSubclassMap<V>.AddIfAbsent<Q>(ObjectId) + + . + + the object to store. + + +

Store an object for future lookup. + + Store an object for future lookup. +

+ Stores + newValue + , but only if there is not already an object for + the same object name. Callers can tell if the value is new by checking + the return value with reference equality: +

+            V obj = ...;
+            boolean wasNew = map.addIfAbsent(obj) == obj;
+            
+
+ the object to store. + + + newValue + if stored, or the prior value already stored and + that would have been returned had the caller used + get(newValue) + first. + +
+ + number of objects in map + + + + true if + ObjectIdSubclassMap<V>.Size() + is 0. + + + + + Inserts objects into an existing + ObjectDatabase + . +

+ An inserter is not thread-safe. Individual threads should each obtain their + own unique inserter instance, or must arrange for locking at a higher level + to ensure the inserter is in use by no more than one thread at a time. +

+ Objects written by an inserter may not be immediately visible for reading + after the insert method completes. Callers must invoke either + Release() + or + Flush() + prior to updating references or + otherwise making the returned ObjectIds visible to other code. +

+
+ + Digest to compute the name of an object. + Digest to compute the name of an object. + + + Temporary working buffer for streaming data through. + Temporary working buffer for streaming data through. + + + Create a new inserter for a database. + Create a new inserter for a database. + + + Obtain a temporary buffer for use by the ObjectInserter or its subclass. + + + Obtain a temporary buffer for use by the ObjectInserter or its subclass. +

+ This buffer is supplied by the ObjectInserter base class to itself and + its subclasses for the purposes of pulling data from a supplied + InputStream, passing it through a Deflater, or formatting the canonical + format of a small object like a small tree or commit. +

+ This buffer IS NOT for translation such as auto-CRLF or content + filtering and must not be used for such purposes. +

+ The returned buffer is small, around a few KiBs, and the size may change + between versions of JGit. Callers using this buffer must always check the + length of the returned array to ascertain how much space was provided. +

+ There is a single buffer for each ObjectInserter, repeated calls to this + method will (usually) always return the same buffer. If the caller needs + more than one buffer, or needs a buffer of a larger size, it must manage + that buffer on its own. +

+ The buffer is usually on first demand for a buffer. + + a temporary byte array for use by the caller. + + + digest to help compute an ObjectId + + +

Compute the name of an object, without inserting it. + Compute the name of an object, without inserting it. + type code of the object to store. + complete content of the object. + the name of the object. +
+ + Compute the name of an object, without inserting it. + Compute the name of an object, without inserting it. + type code of the object to store. + complete content of the object. + + first position within + data + . + + + number of bytes to copy from + data + . + + the name of the object. + + + Compute the name of an object, without inserting it. + Compute the name of an object, without inserting it. + type code of the object to store. + + number of bytes to scan from + in + . + + + stream providing the object content. The caller is responsible + for closing the stream. + + the name of the object. + the source stream could not be read. + + + Compute the ObjectId for the given tree without inserting it. + Compute the ObjectId for the given tree without inserting it. + + the computed ObjectId + + + Insert a single tree into the store, returning its unique name. + Insert a single tree into the store, returning its unique name. + the formatter containing the proposed tree's data. + the name of the tree object. + the object could not be stored. + + + Insert a single commit into the store, returning its unique name. + Insert a single commit into the store, returning its unique name. + the builder containing the proposed commit's data. + the name of the commit object. + the object could not be stored. + + + Insert a single annotated tag into the store, returning its unique name. + + Insert a single annotated tag into the store, returning its unique name. + + the builder containing the proposed tag's data. + the name of the tag object. + the object could not be stored. + + + Insert a single object into the store, returning its unique name. + Insert a single object into the store, returning its unique name. + type code of the object to store. + complete content of the object. + the name of the object. + the object could not be stored. + + + Insert a single object into the store, returning its unique name. 
+ Insert a single object into the store, returning its unique name. + type code of the object to store. + complete content of the object. + + first position within + data + . + + + number of bytes to copy from + data + . + + the name of the object. + the object could not be stored. + + + Insert a single object into the store, returning its unique name. + Insert a single object into the store, returning its unique name. + type code of the object to store. + + number of bytes to copy from + in + . + + + stream providing the object content. The caller is responsible + for closing the stream. + + the name of the object. + + the object could not be stored, or the source stream could + not be read. + + + + Initialize a parser to read from a pack formatted stream. + Initialize a parser to read from a pack formatted stream. + + the input stream. The stream is not closed by the parser, and + must instead be closed by the caller once parsing is complete. + + the pack parser. + + the parser instance, which can be configured and then used to + parse objects into the ObjectDatabase. + + + + Make all inserted objects visible. + + Make all inserted objects visible. +

+ The flush may take some period of time to make the objects available to + other threads. + + + the flush could not be completed; objects inserted thus far + are in an indeterminate state. + + + +

Release any resources used by this inserter. + + Release any resources used by this inserter. +

+ An inserter that has been released can be used again, but may need to be + released after the subsequent usage. + + + +

An inserter that can be used for formatting and id generation only. + An inserter that can be used for formatting and id generation only. +
+ + + + + + + + + + + Wraps a delegate ObjectInserter. + Wraps a delegate ObjectInserter. + + + delegate ObjectInserter to handle all processing. + + + + + + + + + + + + + + + + + + + + + Base class for a set of loaders for different representations of Git objects. + + + Base class for a set of loaders for different representations of Git objects. + New loaders are constructed for every object. + + + + + Git in pack object type, see + Constants + . + + + + size of object in bytes + + + + true if this object is too large to obtain as a byte array. + Objects over a certain threshold should be accessed only by their + OpenStream() + to prevent overflowing the JVM heap. + + + + Obtain a copy of the bytes of this object. + + Obtain a copy of the bytes of this object. +

+ Unlike + GetCachedBytes() + this method returns an array that might + be modified by the caller. + + the bytes of this object. + + if the object won't fit into a byte array, because + IsLarge() + returns true. Callers should use + OpenStream() + instead to access the contents. + + + +

Obtain a copy of the bytes of this object. + + Obtain a copy of the bytes of this object. + If the object size is less than or equal to + sizeLimit + this method + will provide it as a byte array, even if + IsLarge() + is true. This + utility is useful for application code that absolutely must have the + object as a single contiguous byte array in memory. + Unlike + GetCachedBytes(int) + this method returns an array that + might be modified by the caller. + + + maximum number of bytes to return. If the object is larger + than this limit, + NGit.Errors.LargeObjectException + will be thrown. + + the bytes of this object. + + if the object is bigger than + sizeLimit + , or if + System.OutOfMemoryException + occurs during allocation of the + result array. Callers should use + OpenStream() + instead to access the contents. + + the object is large, and it no longer exists. + + the object store cannot be accessed. +
+ + Obtain a reference to the (possibly cached) bytes of this object. + + Obtain a reference to the (possibly cached) bytes of this object. +

+ This method offers direct access to the internal caches, potentially + saving on data copies between the internal cache and higher level code. + Callers who receive this reference must not modify its contents. + Changes (if made) will affect the cache but not the repository itself. + + the cached bytes of this object. Do not modify it. + + if the object won't fit into a byte array, because + IsLarge() + returns true. Callers should use + OpenStream() + instead to access the contents. + + + +

Obtain a reference to the (possibly cached) bytes of this object. + + Obtain a reference to the (possibly cached) bytes of this object. + If the object size is less than or equal to + sizeLimit + this method + will provide it as a byte array, even if + IsLarge() + is true. This + utility is useful for application code that absolutely must have the + object as a single contiguous byte array in memory. + This method offers direct access to the internal caches, potentially + saving on data copies between the internal cache and higher level code. + Callers who receive this reference must not modify its contents. + Changes (if made) will affect the cache but not the repository itself. + + + maximum number of bytes to return. If the object size is + larger than this limit and + IsLarge() + is true, + NGit.Errors.LargeObjectException + will be thrown. + + the cached bytes of this object. Do not modify it. + + if the object is bigger than + sizeLimit + , or if + System.OutOfMemoryException + occurs during allocation of the + result array. Callers should use + OpenStream() + instead to access the contents. + + the object is large, and it no longer exists. + + the object store cannot be accessed. +
+ + Obtain an input stream to read this object's data. + Obtain an input stream to read this object's data. + + a stream of this object's data. Caller must close the stream when + through with it. The returned stream is buffered with a + reasonable buffer size. + + the object no longer exists. + + the object store cannot be accessed. + + + Copy this object to the output stream. + + Copy this object to the output stream. +

+ For some object store implementations, this method may be more efficient + than reading from + OpenStream() + into a temporary byte array, then + writing to the destination stream. +

+ The default implementation of this method is to copy with a temporary + byte array for large objects, or to pass through the cached byte array + for small objects. + + + stream to receive the complete copy of this object's data. + Caller is responsible for flushing or closing this stream + after this method returns. + + the object no longer exists. + + + the object store cannot be accessed, or the stream cannot be + written to. + + + +

Simple loader around the cached byte array. + + Simple loader around the cached byte array. +

+ ObjectReader implementations can use this stream type when the object's + content is small enough to be accessed as a single byte array. + + + +

Construct a small object loader. + Construct a small object loader. + type of the object. + + the object's data array. This array will be returned as-is + for the + GetCachedBytes() + method. + +
+ + + Reads an + ObjectDatabase + for a single thread. +

+ Readers that can support efficient reuse of pack encoded objects should also + implement the companion interface + NGit.Storage.Pack.ObjectReuseAsIs + . +

+
+ + Type hint indicating the caller doesn't know the type. + Type hint indicating the caller doesn't know the type. + + + Construct a new reader from the same data. + + Construct a new reader from the same data. +

+ Applications can use this method to build a new reader from the same data
+ source, but for a different thread.
+ a brand new reader, using the same data source.
+ 
+ 

Obtain a unique abbreviation (prefix) of an object SHA-1. + + Obtain a unique abbreviation (prefix) of an object SHA-1. + This method uses a reasonable default for the minimum length. Callers who + don't care about the minimum length should prefer this method. + The returned abbreviation would expand back to the argument ObjectId when + passed to + Resolve(AbbreviatedObjectId) + , assuming no new objects + are added to this repository between calls. + + object identity that needs to be abbreviated. + SHA-1 abbreviation. + the object store cannot be read. +
+ + Obtain a unique abbreviation (prefix) of an object SHA-1. + + Obtain a unique abbreviation (prefix) of an object SHA-1. + The returned abbreviation would expand back to the argument ObjectId when + passed to + Resolve(AbbreviatedObjectId) + , assuming no new objects + are added to this repository between calls. + The default implementation of this method abbreviates the id to the + minimum length, then resolves it to see if there are multiple results. + When multiple results are found, the length is extended by 1 and resolve + is tried again. + + object identity that needs to be abbreviated. + + minimum length of the abbreviated string. Must be in the range + [2, + Constants#OBJECT_ID_STRING_LENGTH + ]. + + + SHA-1 abbreviation. If no matching objects exist in the + repository, the abbreviation will match the minimum length. + + the object store cannot be read. + + + Resolve an abbreviated ObjectId to its full form. + + Resolve an abbreviated ObjectId to its full form. + This method searches for an ObjectId that begins with the abbreviation, + and returns at least some matching candidates. + If the returned collection is empty, no objects start with this + abbreviation. The abbreviation doesn't belong to this repository, or the + repository lacks the necessary objects to complete it. + If the collection contains exactly one member, the abbreviation is + (currently) unique within this database. There is a reasonably high + probability that the returned id is what was previously abbreviated. + If the collection contains 2 or more members, the abbreviation is not + unique. In this case the implementation is only required to return at + least 2 candidates to signal the abbreviation has conflicts. User + friendly implementations should return as many candidates as reasonably + possible, as the caller may be able to disambiguate further based on + context. However since databases can be very large (e.g. 
10 million + objects) returning 625,000 candidates for the abbreviation "0" is simply + unreasonable, so implementors should draw the line at around 256 matches. + + + abbreviated id to resolve to a complete identity. The + abbreviation must have a length of at least 2. + + candidates that begin with the abbreviated identity. + the object store cannot be read. + + + Does the requested object exist in this database? + identity of the object to test for existence of. + true if the specified object is stored in this database. + the object store cannot be accessed. + + + Does the requested object exist in this database? + identity of the object to test for existence of. + + hint about the type of object being requested; + OBJ_ANY + if the object type is not known, or does not + matter to the caller. + + true if the specified object is stored in this database. + + typeHint was not OBJ_ANY, and the object's actual type does + not match typeHint. + + the object store cannot be accessed. + + + Open an object from this database. + Open an object from this database. + identity of the object to open. + + a + ObjectLoader + for accessing the object. + + the object does not exist. + the object store cannot be accessed. + + + Open an object from this database. + Open an object from this database. + identity of the object to open. + + hint about the type of object being requested; + OBJ_ANY + if the object type is not known, or does not + matter to the caller. + + + a + ObjectLoader + for accessing the object. + + the object does not exist. + + typeHint was not OBJ_ANY, and the object's actual type does + not match typeHint. + + the object store cannot be accessed. + + + Asynchronous object opening. + Asynchronous object opening. + + objects to open from the object store. The supplied collection + must not be modified until the queue has finished. + + + if true missing objects are reported by calling failure with a + MissingObjectException. 
This may be more expensive for the + implementation to guarantee. If false the implementation may + choose to report MissingObjectException, or silently skip over + the object with no warning. + + queue to read the objects from. + + + Get only the size of an object. + + Get only the size of an object. +

+ The default implementation of this method opens an ObjectLoader. + Databases are encouraged to override this if a faster access method is + available to them. + + identity of the object to open. + + hint about the type of object being requested; + OBJ_ANY + if the object type is not known, or does not + matter to the caller. + + size of object in bytes. + the object does not exist. + + typeHint was not OBJ_ANY, and the object's actual type does + not match typeHint. + + the object store cannot be accessed. + + +

Asynchronous object size lookup. + Asynchronous object size lookup. + + objects to get the size of from the object store. The supplied + collection must not be modified until the queue has finished. + + + if true missing objects are reported by calling failure with a + MissingObjectException. This may be more expensive for the + implementation to guarantee. If false the implementation may + choose to report MissingObjectException, or silently skip over + the object with no warning. + + queue to read object sizes from. +
+ + + Advice from a + NGit.Revwalk.RevWalk + that a walk is starting from these roots. + + the revision pool that is using this reader. + + starting points of the revision walk. The starting points have + their headers parsed, but might be missing bodies. + + the reader cannot initialize itself to support the walk. + + + + + Advice from an + NGit.Revwalk.ObjectWalk + that trees will be traversed. + + the object pool that is using this reader. + the first commit whose root tree will be read. + the last commit whose root tree will be read. + the reader cannot initialize itself to support the walk. + + + + Advice from that a walk is over. + Advice from that a walk is over. + + + Release any resources used by this reader. + + Release any resources used by this reader. +

+ A reader that has been released can be used again, but may need to be + released after the subsequent usage. + + + + + + + + + + + + + + +

+ Stream of data coming from an object loaded by + ObjectLoader + . + +
+ + + Git object type, see + Constants + . + + + + total size of object in bytes + + + Simple stream around the cached byte array created by a loader. + + Simple stream around the cached byte array created by a loader. +

+ ObjectLoader implementations can use this stream type when the object's + content is small enough to be accessed as a single byte array, but the + application has still requested it in stream format. + + + +

Create the stream from an existing loader's cached bytes. + Create the stream from an existing loader's cached bytes. + the loader. +
+ + Create the stream from an existing byte array and type. + Create the stream from an existing byte array and type. + the type constant for the object. + the fully inflated content of the object. + + + Simple filter stream around another stream. + + Simple filter stream around another stream. +

+ ObjectLoader implementations can use this stream type when the object's + content is available from a standard InputStream. + + + +

Create a filter stream for an object. + Create a filter stream for an object. + the type of the object. + total size of the object, in bytes. + + stream the object's raw data is available from. This + stream should be buffered with some reasonable amount of + buffering. + +
+ + + + + + + + + + + + + + + + + + + + A combination of a person identity and time in Git. + + A combination of a person identity and time in Git. + Git combines Name + email + time + time zone to specify who wrote or + committed something. + + + + Creates new PersonIdent from config info in repository, with current time. + + + Creates new PersonIdent from config info in repository, with current time. + This new PersonIdent gets the info from the default committer as available + from the configuration. + + + + + + Copy a + PersonIdent + . + + + Original + PersonIdent + + + + + Construct a new + PersonIdent + with current time. + + + + + + Copy a PersonIdent, but alter the clone's time stamp + + original + PersonIdent + + local time + time zone + + + + Copy a + PersonIdent + , but alter the clone's time stamp + + + original + PersonIdent + + local time + + + Construct a PersonIdent from simple data + + + local time stamp + time zone + + + Copy a PersonIdent, but alter the clone's time stamp + + original + PersonIdent + + local time stamp + time zone + + + + Construct a + PersonIdent + + + + local time stamp + time zone + + + Name of person + + + email address of person + + + timestamp + + + this person's declared time zone; null if time zone is unknown. + + + + this person's declared time zone as minutes east of UTC. If the + timezone is to the west of UTC it is negative. + + + + Format for Git storage. + Format for Git storage. + a string in the git author format + + + + Location where a + Ref + is stored. + + + + The ref does not exist yet, updating it may create it. + + The ref does not exist yet, updating it may create it. +

+ Creation is likely to choose + LOOSE + storage. + + + +

The ref is stored in a file by itself. + + The ref is stored in a file by itself. +

+ Updating this ref affects only this ref. + + + +

The ref is stored in the packed-refs file, with others. + + The ref is stored in the packed-refs file, with others. +

+ Updating this ref requires rewriting the file, with perhaps many + other refs being included at the same time. + + + +

+ The ref is both + LOOSE + and + PACKED + . +

+ Updating this ref requires only updating the loose file, but deletion + requires updating both the loose file and the packed refs file. +

+
+ + The ref came from a network advertisement and storage is unknown. + + The ref came from a network advertisement and storage is unknown. +

+ This ref cannot be updated without Git-aware support on the remote + side, as Git-aware code consolidate the remote refs and reported them + to this process. + + + + true if this storage has a loose file. + + + true if this storage is inside the packed file. + + +

Util for sorting (or comparing) Ref instances by name. + + Util for sorting (or comparing) Ref instances by name. +

+ Useful for command line tools or writing out refs to file. + + + +

Singleton instance of RefComparator +
+ + Sorts the collection of refs, returning a new collection. + Sorts the collection of refs, returning a new collection. + collection to be sorted + sorted collection of refs + + + Compare a reference to a name. + Compare a reference to a name. + the reference instance. + the name to compare to. + standard Comparator result of < 0, 0, > 0. + + + Compare two references by name. + Compare two references by name. + the reference instance. + the other reference instance. + standard Comparator result of < 0, 0, > 0. + + + + Abstraction of name to + ObjectId + mapping. +

+ A reference database stores a mapping of reference names to + ObjectId + . + Every + Repository + has a single reference database, mapping names to + the tips of the object graph contained by the + ObjectDatabase + . +

+
+ + + Maximum number of times a + SymbolicRef + can be traversed. +

+ If the reference is nested deeper than this depth, the implementation + should either fail, or at least claim the reference does not exist. +

+
+ + Order of prefixes to search when using non-absolute references. + + Order of prefixes to search when using non-absolute references. +

+ The implementation's + GetRef(string) + method must take this search + space into consideration when locating a reference by name. The first + entry in the path is always + "" + , ensuring that absolute references + are resolved without further mangling. + + + +

+ Magic value for + GetRefs(string) + to return all references. + +
+ + Initialize a new reference database at this location. + Initialize a new reference database at this location. + the database could not be created. + + + Close any resources held by this database. + Close any resources held by this database. + + + Determine if a proposed reference name overlaps with an existing one. + + Determine if a proposed reference name overlaps with an existing one. +

+ Reference names use '/' as a component separator, and may be stored in a + hierarchical storage such as a directory on the local filesystem. +

+ If the reference "refs/heads/foo" exists then "refs/heads/foo/bar" must + not exist, as a reference cannot have a value and also be a container for + other references at the same time. +

+ If the reference "refs/heads/foo/bar" exists then the reference
+ "refs/heads/foo" cannot exist, for the same reason.
+ proposed name.
+ 
+ true if the name overlaps with an existing reference; false if
+ using this name right now would be safe.
+ 
+ the database could not be read to check for conflicts.
+ 
+ 
+ 

Create a new update command to create, modify or delete a reference. + Create a new update command to create, modify or delete a reference. + the name of the reference. + + if + true + and + name + is currently a + SymbolicRef + , the update will replace it with an + ObjectIdRef + . Otherwise, the update will recursively + traverse + SymbolicRef + s and operate on the leaf + ObjectIdRef + . + + a new update for the requested name; never null. + the reference space cannot be accessed. +
+ + Create a new update command to rename a reference. + Create a new update command to rename a reference. + name of reference to rename from + name of reference to rename to + an update command that knows how to rename a branch to another. + the reference space cannot be accessed. + + + Create a new batch update to attempt on this database. + + Create a new batch update to attempt on this database. +

+ The default implementation performs a sequential update of each command. + + a new batch update object. + + +

Read a single reference. + + Read a single reference. +

+ Aside from taking advantage of + SEARCH_PATH + , this method may be + able to more quickly resolve a single reference name than obtaining the + complete namespace by + getRefs(ALL).get(name) + . + + + the name of the reference. May be a short name which must be + searched for using the standard + SEARCH_PATH + . + + + the reference (if it exists); else + null + . + + the reference space cannot be accessed. + + +

Get a section of the reference namespace. + Get a section of the reference namespace. + + prefix to search the namespace with; must end with + / + . + If the empty string ( + ALL + ), obtain a complete snapshot + of all references. + + + modifiable map that is a complete snapshot of the current + reference namespace, with + prefix + removed from the start + of each key. The map can be an unsorted map. + + the reference space cannot be accessed. +
+ + Get the additional reference-like entities from the repository. + + Get the additional reference-like entities from the repository. +

+ The result list includes non-ref items such as MERGE_HEAD and + FETCH_RESULT cast to be refs. The names of these refs are not returned by + getRefs(ALL) but are accepted by + GetRef(string) + + a list of additional refs + the reference space cannot be accessed. + + +

Peel a possibly unpeeled reference by traversing the annotated tags. + + Peel a possibly unpeeled reference by traversing the annotated tags. +

+ If the reference cannot be peeled (as it does not refer to an annotated + tag) the peeled id stays null, but + Ref.IsPeeled() + will be true. +

+ Implementors should check + Ref.IsPeeled() + before performing any + additional work effort. + + The reference to peel + + + ref + if + ref.isPeeled() + is true; otherwise a new + Ref object representing the same data as Ref, but isPeeled() will + be true and getPeeledObjectId() will contain the peeled object + (or null). + + the reference space or object space cannot be accessed. + + + +

Triggers a refresh of all internal data structures. + + Triggers a refresh of all internal data structures. +

+ In case the RefDatabase implementation has internal caches this method + will trigger that all these caches are cleared. +

+ Implementors should override this method if they use any kind of caches.
+ 
+ 
+ 

A RefUpdate combination for renaming a reference. + + A RefUpdate combination for renaming a reference. +

+ If the source reference is currently pointed to by + HEAD + , then the + HEAD symbolic reference is updated to point to the new destination. + + + +

Update operation to read and delete the source reference. + Update operation to read and delete the source reference. +
+ + Update operation to create/overwrite the destination reference. + Update operation to create/overwrite the destination reference. + + + Initialize a new rename operation. + Initialize a new rename operation. + operation to read and delete the source. + operation to create (or overwrite) the destination. + + + identity of the user making the change in the reflog. + + + Set the identity of the user appearing in the reflog. + + Set the identity of the user appearing in the reflog. +

+ The timestamp portion of the identity is ignored. A new identity with the + current timestamp will be created automatically when the rename occurs + and the log record is written. + + + identity of the user. If null the identity will be + automatically determined based on the repository + configuration. + + + +

Get the message to include in the reflog. + Get the message to include in the reflog. + + message the caller wants to include in the reflog; null if the + rename should not be logged. + +
+ + Set the message to include in the reflog. + Set the message to include in the reflog. + the message to describe this change. + + + Don't record this rename in the ref's associated reflog. + Don't record this rename in the ref's associated reflog. + + + result of rename operation + + + the result of the new ref update + System.IO.IOException + + + the result of the rename operation. + System.IO.IOException + + + + true if the + Constants#HEAD + reference needs to be linked + to the new destination name. + + + the current value of + HEAD + cannot be read. + + + + Creates, updates or deletes any reference. + Creates, updates or deletes any reference. + + + New value the caller wants this ref to have. + New value the caller wants this ref to have. + + + Does this specification ask for forced updated (rewind/reset)? + + + Identity to record action as within the reflog. + Identity to record action as within the reflog. + + + Message the caller wants included in the reflog. + Message the caller wants included in the reflog. + + + + Should the Result value be appended to + refLogMessage + . + + + + Old value of the ref, obtained after we lock it. + Old value of the ref, obtained after we lock it. + + + + If non-null, the value + oldValue + must have to continue. + + + + Result of the update operation. + Result of the update operation. + + + + Is this RefUpdate detaching a symbolic ref? + We need this info since this.ref will normally be peeled of in case of + detaching a symbolic ref (HEAD for example). + + + Is this RefUpdate detaching a symbolic ref? + We need this info since this.ref will normally be peeled of in case of + detaching a symbolic ref (HEAD for example). + Without this flag we cannot decide whether the ref has to be updated or + not in case when it was a symbolic ref and the newValue == oldValue. + + + + Construct a new update operation for the reference. + + Construct a new update operation for the reference. +

+ ref.getObjectId() + will be used to seed + GetOldObjectId() + , + which callers can use as part of their own update logic. + + the reference that will be updated by this operation. + + + the reference database this update modifies. + + + the repository storing the database's objects. + + +

Try to acquire the lock on the reference. + + Try to acquire the lock on the reference. +

+ If the locking was successful the implementor must set the current + identity value by calling + SetOldObjectId(ObjectId) + . + + + true if the lock should be taken against the leaf level + reference; false if it should be taken exactly against the + current reference. + + + true if the lock was acquired and the reference is likely + protected from concurrent modification; false if it failed. + + + the lock couldn't be taken due to an unexpected storage + failure, and not because of a concurrent update. + + + +

+ Releases the lock taken by + TryLock(bool) + if it succeeded. + +
+ + + + + result + + System.IO.IOException + + + + + + result + + System.IO.IOException + + + + + + Result.NEW + on success. + + System.IO.IOException + + + Get the name of the ref this update will operate on. + Get the name of the ref this update will operate on. + name of underlying ref. + + + the reference this update will create or modify. + + + Get the new value the ref will be (or was) updated to. + Get the new value the ref will be (or was) updated to. + new value. Null if the caller has not configured it. + + + Tells this RefUpdate that it is actually detaching a symbolic ref. + Tells this RefUpdate that it is actually detaching a symbolic ref. + + + Set the new value the ref will update to. + Set the new value the ref will update to. + the new value. + + + + the expected value of the ref after the lock is taken, but before + update occurs. Null to avoid the compare and swap test. Use + ObjectId.ZeroId() + to indicate expectation of a + non-existant ref. + + + + + the expected value of the ref after the lock is taken, but + before update occurs. Null to avoid the compare and swap test. + Use + ObjectId.ZeroId() + to indicate expectation of a + non-existant ref. + + + + Check if this update wants to forcefully change the ref. + Check if this update wants to forcefully change the ref. + true if this update should ignore merge tests. + + + Set if this update wants to forcefully change the ref. + Set if this update wants to forcefully change the ref. + true if this update should ignore merge tests. + + + identity of the user making the change in the reflog. + + + Set the identity of the user appearing in the reflog. + + Set the identity of the user appearing in the reflog. +

+ The timestamp portion of the identity is ignored. A new identity with the + current timestamp will be created automatically when the update occurs + and the log record is written. + + + identity of the user. If null the identity will be + automatically determined based on the repository + configuration. + + + +

Get the message to include in the reflog. + Get the message to include in the reflog. + + message the caller wants to include in the reflog; null if the + update should not be logged. + +
+ + + + true + if the ref log message should show the result. + + + + Set the message to include in the reflog. + Set the message to include in the reflog. + + the message to describe this change. It may be null if + appendStatus is null in order not to append to the reflog + + + true if the status of the ref change (fast-forward or + forced-update) should be appended to the user supplied + message. + + + + Don't record this update in the ref's associated reflog. + Don't record this update in the ref's associated reflog. + + + The old value of the ref, prior to the update being attempted. + + The old value of the ref, prior to the update being attempted. +

+ This value may differ before and after the update method. Initially it is + populated with the value of the ref before the lock is taken, but the old + value may change if someone else modified the ref between the time we + last read it and when the ref was locked for update. + + + the value of the ref prior to the update being attempted; null if + the updated has not been attempted yet. + + + +

Set the old value of the ref. + Set the old value of the ref. + the old value. +
+ + Get the status of this update. + + Get the status of this update. +

+ The same value that was previously returned from an update method. + + the status of the update. + + +

Force the ref to take the new value. + + Force the ref to take the new value. +

+ This is just a convenient helper for setting the force flag, and as such + the merge test is performed. + + the result status of the update. + an unexpected IO error occurred while writing changes. + + + +

Gracefully update the ref to the new value. + + Gracefully update the ref to the new value. +

+ Merge test will be performed according to + IsForceUpdate() + . +

+ This is the same as: +

+            return update(new RevWalk(getRepository()));
+            
+
+ the result status of the update. + an unexpected IO error occurred while writing changes. + +
+ + Gracefully update the ref to the new value. + + Gracefully update the ref to the new value. +

+ Merge test will be performed according to + IsForceUpdate() + . + + + a RevWalk instance this update command can borrow to perform + the merge test. The walk will be reset to perform the test. + + the result status of the update. + an unexpected IO error occurred while writing changes. + + + +

Delete the ref. + + Delete the ref. +

+ This is the same as: +

+            return delete(new RevWalk(getRepository()));
+            
+
+ the result status of the delete. + System.IO.IOException +
+ + Delete the ref. + Delete the ref. + + a RevWalk instance this delete command can borrow to perform + the merge test. The walk will be reset to perform the test. + + the result status of the delete. + System.IO.IOException + + + Replace this reference with a symbolic reference to another reference. + + Replace this reference with a symbolic reference to another reference. +

+ This exact reference (not its traversed leaf) is replaced with a symbolic + reference to the requested name. + + + name of the new target for this reference. The new target name + must be absolute, so it must begin with + refs/ + . + + + + Result.NEW + or + Result.FORCED + on success. + + System.IO.IOException + + + + + + + + +

Status of an update request. + Status of an update request. +
+ + Handle the abstraction of storing a ref update. + + Handle the abstraction of storing a ref update. This is because both + updating and deleting of a ref have merge testing in common. + + + + + + + + + + + + + + Writes out refs to the + Constants.INFO_REFS + and + Constants.PACKED_REFS + files. + This class is abstract as the writing of the files must be handled by the + caller. This is because it is used by transport classes as well. + + + + + the complete set of references. This should have been computed + by applying updates to the advertised refs already discovered. + + + + + the complete set of references. This should have been computed + by applying updates to the advertised refs already discovered. + + + + + the complete set of references. This should have been computed + by applying updates to the advertised refs already discovered. + + + + + Rebuild the + Constants.INFO_REFS + . +

+ This method rebuilds the contents of the + Constants.INFO_REFS + file + to match the passed list of references. +

+ + writing is not supported, or attempting to write the file + failed, possibly due to permissions or remote disk full, etc. + +
+ + + Rebuild the + Constants.PACKED_REFS + file. +

+ This method rebuilds the contents of the + Constants.PACKED_REFS + file to match the passed list of references, including only those refs + that have a storage type of + RefStorage.PACKED + . +

+ + writing is not supported, or attempting to write the file + failed, possibly due to permissions or remote disk full, etc. + +
+ + + Handles actual writing of ref files to the git repository, which may + differ slightly depending on the destination and transport. + + + Handles actual writing of ref files to the git repository, which may + differ slightly depending on the destination and transport. + + path to ref file. + byte content of file to be written. + System.IO.IOException + + + Represents a Git repository. + + Represents a Git repository. +

+ A repository holds all objects and refs used for managing source code (could + be any type of file, but source code is what SCM's are typically used for). +

+ This class is thread-safe. + + + + the global listener list observing all events in this JVM. + + +

Metadata directory holding the repository's critical files. + Metadata directory holding the repository's critical files. +
+ + File abstraction used to resolve paths. + File abstraction used to resolve paths. + + + If not bare, the top level directory of the working files. + If not bare, the top level directory of the working files. + + + If not bare, the index file caching the working file states. + If not bare, the index file caching the working file states. + + + Initialize a new repository instance. + Initialize a new repository instance. + options to configure the repository. + + + Fire an event to all registered listeners. + + Fire an event to all registered listeners. +

+ The source repository of the event is automatically set to this + repository, before the event is delivered to any listeners. + + the event to deliver. + + +

Create a new Git repository. + + Create a new Git repository. +

+ Repository with working tree is created using this method. This method is + the same as + create(false) + . + + System.IO.IOException + Create(bool) + + +

+ Create a new Git repository initializing the necessary files and + directories. + + + Create a new Git repository initializing the necessary files and + directories. + + + if true, a bare repository (a repository without a working + directory) is created. + + in case of IO problem +
+ + + a new inserter to create objects in + ObjectDatabase() + + + + + + a new reader to read objects from + ObjectDatabase() + + + + + the configuration of this repository + + + + + true if the specified object is stored in this repo or any of the + known shared repositories. + + + + Open an object from this repository. + + Open an object from this repository. +

+ This is a one-shot call interface which may be faster than allocating a + NewObjectReader() + to perform the lookup. + + identity of the object to open. + + a + ObjectLoader + for accessing the object. + + the object does not exist. + the object store cannot be accessed. + + +

Open an object from this repository. + + Open an object from this repository. +

+ This is a one-shot call interface which may be faster than allocating a + NewObjectReader() + to perform the lookup. + + identity of the object to open. + + hint about the type of object being requested; + ObjectReader.OBJ_ANY + if the object type is not known, + or does not matter to the caller. + + + a + ObjectLoader + for accessing the object. + + the object does not exist. + + typeHint was not OBJ_ANY, and the object's actual type does + not match typeHint. + + the object store cannot be accessed. + + +

Create a command to update, create or delete a ref in this repository. + Create a command to update, create or delete a ref in this repository. + name of the ref the caller wants to modify. + + an update command. The caller must finish populating this command + and then invoke one of the update methods to actually make a + change. + + + a symbolic ref was passed in and could not be resolved back + to the base ref, as the symbolic ref could not be read. + +
+ + Create a command to update, create or delete a ref in this repository. + Create a command to update, create or delete a ref in this repository. + name of the ref the caller wants to modify. + true to create a detached head + + an update command. The caller must finish populating this command + and then invoke one of the update methods to actually make a + change. + + + a symbolic ref was passed in and could not be resolved back + to the base ref, as the symbolic ref could not be read. + + + + Create a command to rename a ref in this repository + name of ref to rename from + name of ref to rename to + an update command that knows how to rename a branch to another. + the rename could not be performed. + + + Parse a git revision string and return an object id. + + Parse a git revision string and return an object id. + Combinations of these operators are supported: +
    +
  • HEAD, MERGE_HEAD, FETCH_HEAD
  • +
  • SHA-1: a complete or abbreviated SHA-1
  • +
  • refs/...: a complete reference name
  • +
  • short-name: a short reference name under + refs/heads + , + refs/tags + , or + refs/remotes + namespace
  • +
  • tag-NN-gABBREV: output from describe, parsed by treating + ABBREV + as an abbreviated SHA-1.
  • +
  • id^: first parent of commit id, this is the same + as + id^1 +
  • +
  • id^0: ensure id is a commit
  • +
  • id^n: n-th parent of commit id
  • +
  • id~n: n-th historical ancestor of id, by first + parent. + id~3 + is equivalent to + id^1^1^1 + or + id^^^ + .
  • +
  • id:path: Lookup path under tree named by id
  • +
  • id^{commit}: ensure id is a commit
  • +
  • id^{tree}: ensure id is a tree
  • +
  • id^{tag}: ensure id is a tag
  • +
  • id^{blob}: ensure id is a blob
  • +
+

+ The following operators are specified by Git conventions, but are not + supported by this method: +

    +
  • ref@{n}: n-th version of ref as given by its reflog
  • +
  • ref@{time}: value of ref at the designated time
  • +
+
+ A git object references expression + an ObjectId or null if revstr can't be resolved to any ObjectId + + revstr + contains an abbreviated ObjectId and this + repository contains more than one object which match to the + input abbreviation. + + + the id parsed does not meet the type required to finish + applying the operators in the expression. + + + the expression is not supported by this implementation, or + does not meet the standard syntax. + + on serious errors +
+ + + Simplify an expression, but unlike + Resolve(string) + it will not + resolve a branch passed or resulting from the expression, such as @{-}. + Thus this method can be used to process an expression to a method that + expects a branch or revision id. + + + object id or ref name from resolved expression + NGit.Errors.AmbiguousObjectException + + System.IO.IOException + + + + + + + + + + + + + + + + + + + + + + + Increment the use counter by one, requiring a matched + Close() + . + + + + Decrement the use count, and maybe close resources. + Decrement the use count, and maybe close resources. + + + + Invoked when the use count drops to zero during + Close() + . +

+ The default implementation closes the object and ref databases. +

+
+ + + Get the name of the reference that + HEAD + points to. +

+ This is essentially the same as doing: +

+            return getRef(Constants.HEAD).getTarget().getName()
+            
+ Except when HEAD is detached, in which case this method returns the + current ObjectId in hexadecimal string format. +
+ + name of current branch (for example + refs/heads/master + ) or + an ObjectId in hex format if the current branch is detached. + + System.IO.IOException +
+ + + Get the short name of the current branch that + HEAD + points to. +

+ This is essentially the same as + GetFullBranch() + , except the + leading prefix + refs/heads/ + is removed from the reference before + it is returned to the caller. +

+ + name of current branch (for example + master + ), or an + ObjectId in hex format if the current branch is detached. + + System.IO.IOException +
+ + + Objects known to exist but not expressed by + GetAllRefs() + . +

+ When a repository borrows objects from another repository, it can + advertise that it safely has that other repository's references, without + exposing any other details about the other repository. This may help + a client trying to push changes avoid pushing more than it needs to. +

+ unmodifiable collection of other known objects. +
+ + Get a ref by name. + Get a ref by name. + + the name of the ref to lookup. May be a short-hand form, e.g. + "master" which is is automatically expanded to + "refs/heads/master" if "refs/heads/master" already exists. + + the Ref with the given name, or null if it does not exist + System.IO.IOException + + + mutable map of all known refs (heads, tags, remotes). + + + + mutable map of all tags; key is short tag name ("v1.0") and value + of the entry contains the ref with the full tag name + ("refs/tags/v1.0"). + + + + Peel a possibly unpeeled reference to an annotated tag. + + Peel a possibly unpeeled reference to an annotated tag. +

+ If the ref cannot be peeled (as it does not refer to an annotated tag) + the peeled id stays null, but + Ref.IsPeeled() + will be true. + + The ref to peel + + ref if ref.isPeeled() is true; else a + new Ref object representing the same data as Ref, but isPeeled() + will be true and getPeeledObjectId will contain the peeled object + (or null). + + + + a map with all objects referenced by a peeled ref. + + + the index file location + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + + +

Create a new in-core index representation and read an index from disk. + + Create a new in-core index representation and read an index from disk. +

+ The new index will be read before it is returned to the caller. Read + failures are reported as exceptions and therefore prevent the method from + returning a partially populated index. + + + a cache representing the contents of the specified index file (if + it exists) or an empty cache if the file does not exist. + + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + the index file is present but could not be read. + + + the index file is using a format or extension that this + library does not support. + + + +

Create a new in-core index representation, lock it, and read from disk. + + Create a new in-core index representation, lock it, and read from disk. +

+ The new index will be locked and then read before it is returned to the + caller. Read failures are reported as exceptions and therefore prevent + the method from returning a partially populated index. + + + a cache representing the contents of the specified index file (if + it exists) or an empty cache if the file does not exist. + + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + + the index file is present but could not be read, or the lock + could not be obtained. + + + the index file is using a format or extension that this + library does not support. + + + + an important state + + +

Check validity of a ref name. + + Check validity of a ref name. It must not contain character that has + a special meaning in a Git object reference expression. Some other + dangerous characters are also excluded. + For portability reasons '\' is excluded + + + true if refName is a valid ref name +
+ + Strip work dir and return normalized repository path. + Strip work dir and return normalized repository path. + Work dir + File whose path shall be stripped of its workdir + + normalized repository relative path or the empty + string if the file is not relative to the work directory. + + + + Force a scan for changed refs. + Force a scan for changed refs. + System.IO.IOException + + + Notify that the index changed + + + + a more user friendly ref name + + + + + a + NGit.Storage.File.ReflogReader + for the supplied refname, or null if the + named ref does not exist. + + the ref could not be accessed. + + + Return the information stored in the file $GIT_DIR/MERGE_MSG. + + Return the information stored in the file $GIT_DIR/MERGE_MSG. In this + file operations triggering a merge will store a template for the commit + message of the merge commit. + + + a String containing the content of the MERGE_MSG file or + null + if this file doesn't exist + + System.IO.IOException + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + + + Write new content to the file $GIT_DIR/MERGE_MSG. + + Write new content to the file $GIT_DIR/MERGE_MSG. In this file operations + triggering a merge will store a template for the commit message of the + merge commit. If null is specified as message the file will + be deleted. + + + the message which should be written or null to + delete the file + + System.IO.IOException + + + Return the information stored in the file $GIT_DIR/MERGE_HEAD. + + Return the information stored in the file $GIT_DIR/MERGE_HEAD. In this + file operations triggering a merge will store the IDs of all heads which + should be merged together with HEAD. + + + a list of commits which IDs are listed in the MERGE_HEAD file or + null + if this file doesn't exist. Also if the file exists + but is empty + null + will be returned + + System.IO.IOException + + if this is bare, which implies it has no working directory. + See + IsBare() + . 
+ + + + Write new merge-heads into $GIT_DIR/MERGE_HEAD. + + Write new merge-heads into $GIT_DIR/MERGE_HEAD. In this file operations + triggering a merge will store the IDs of all heads which should be merged + together with HEAD. If null is specified as list of commits + the file will be deleted + + + a list of commits which IDs should be written to + $GIT_DIR/MERGE_HEAD or null to delete the file + + System.IO.IOException + + + Return the information stored in the file $GIT_DIR/CHERRY_PICK_HEAD. + Return the information stored in the file $GIT_DIR/CHERRY_PICK_HEAD. + + object id from CHERRY_PICK_HEAD file or + null + if this file + doesn't exist. Also if the file exists but is empty + null + will be returned + + System.IO.IOException + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + + + Write cherry pick commit into $GIT_DIR/CHERRY_PICK_HEAD. + + Write cherry pick commit into $GIT_DIR/CHERRY_PICK_HEAD. This is used in + case of conflicts to store the cherry which was tried to be picked. + + + an object id of the cherry commit or null to + delete the file + + System.IO.IOException + + + Write original HEAD commit into $GIT_DIR/ORIG_HEAD. + Write original HEAD commit into $GIT_DIR/ORIG_HEAD. + + an object id of the original HEAD commit or null + to delete the file + + System.IO.IOException + + + Return the information stored in the file $GIT_DIR/ORIG_HEAD. + Return the information stored in the file $GIT_DIR/ORIG_HEAD. + + object id from ORIG_HEAD file or + null + if this file + doesn't exist. Also if the file exists but is empty + null + will be returned + + System.IO.IOException + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + + + Return the information stored in the file $GIT_DIR/SQUASH_MSG. + + Return the information stored in the file $GIT_DIR/SQUASH_MSG. In this + file operations triggering a squashed merge will store a template for the + commit message of the squash commit. 
+ + + a String containing the content of the SQUASH_MSG file or + null + if this file doesn't exist + + System.IO.IOException + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + + + Write new content to the file $GIT_DIR/SQUASH_MSG. + + Write new content to the file $GIT_DIR/SQUASH_MSG. In this file + operations triggering a squashed merge will store a template for the + commit message of the squash commit. If null is specified as + message the file will be deleted. + + + the message which should be written or null to + delete the file + + System.IO.IOException + + + + + + + + + Read a file from the git directory. + Read a file from the git directory. + + the raw contents or null if the file doesn't exist or is empty + System.IO.IOException + + + Write the given heads to a file in the git directory. + Write the given heads to a file in the git directory. + + a list of object ids to write or null if the file should be + deleted. + + + System.IO.FileNotFoundException + + System.IO.IOException + + + listeners observing only events on this repository. + + + local metadata directory; null if repository isn't local. + + + the object database which stores this repository's data. + + + the reference database which stores the reference namespace. + + + the used file system abstraction + + + true if this is bare, which implies it has no working directory. + + + + the root directory of the working tree, where files are checked + out for viewing and editing. + + + if this is bare, which implies it has no working directory. + See + IsBare() + . + + + + + Receives + IndexChangedEvent + s. + + + + A listener can register for event delivery. + A listener can register for event delivery. + + + Invoked when any change is made to the index. + Invoked when any change is made to the index. + information about the changes. + + + + Base class to support constructing a + Repository + . +

+ Applications must set one of + BaseRepositoryBuilder<B, R>.SetGitDir(Sharpen.FilePath) + + or + BaseRepositoryBuilder<B, R>.SetWorkTree(Sharpen.FilePath) + + , or use + BaseRepositoryBuilder<B, R>.ReadEnvironment() + + or + BaseRepositoryBuilder<B, R>.FindGitDir() + + in order to configure the minimum property set + necessary to open a repository. +

+ Single repository applications trying to be compatible with other Git + implementations are encouraged to use a model such as: +

+            new RepositoryBuilder() //
+            .setGitDir(gitDirArgument) // --git-dir if supplied, no-op if null
+            .readEnviroment() // scan environment GIT_* variables
+            .findGitDir() // scan up the file system tree
+            .build()
+            
+
+ NGit.Storage.File.FileRepositoryBuilder + +
+ + + Cache of active + Repository + instances. + + + + Open an existing repository, reusing a cached instance if possible. + + Open an existing repository, reusing a cached instance if possible. +

+ When done with the repository, the caller must call + Repository.Close() + to decrement the repository's usage counter. + + + where the local repository is. Typically a + FileKey + . + + the repository instance requested; caller must close when done. + + the repository could not be read (likely its core.version + property is not supported). + + there is no repository at the given location. + + + +

Open a repository, reusing a cached instance if possible. + + Open a repository, reusing a cached instance if possible. +

+ When done with the repository, the caller must call + Repository.Close() + to decrement the repository's usage counter. + + + where the local repository is. Typically a + FileKey + . + + + If true, and the repository is not found, throws + RepositoryNotFoundException + . If false, a repository instance + is created and registered anyway. + + the repository instance requested; caller must close when done. + + the repository could not be read (likely its core.version + property is not supported). + + + There is no repository at the given location, only thrown if + mustExist + is true. + + + +

Register one repository into the cache. + + Register one repository into the cache. +

+ During registration the cache automatically increments the usage counter, + permitting it to retain the reference. A + FileKey + for the + repository's + Repository.Directory() + is used to index the + repository in the cache. +

+ If another repository already is registered in the cache at this + location, the other instance is closed. + + repository to register. + + +

Remove a repository from the cache. + + Remove a repository from the cache. +

+ Removes a repository from the cache, if it is still registered here, + permitting it to close. + + repository to unregister. + + +

Unregister all repositories from the cache. + Unregister all repositories from the cache. +
+ + + + + + Abstract hash key for + RepositoryCache + entries. +

+ A Key instance should be lightweight, and implement hashCode() and + equals() such that two Key instances are equal if they represent the same + Repository location. +

+
+ + + Called by + RepositoryCache.Open(Key) + if it doesn't exist yet. +

+ If a repository does not exist yet in the cache, the cache will call + this method to acquire a handle to it. +

+ + true if the repository must exist in order to be opened; + false if a new non-existent repository is permitted to be + created (the caller is responsible for calling create). + + the new repository instance. + + the repository could not be read (likely its core.version + property is not supported). + + + There is no repository at the given location, only thrown + if + mustExist + is true. + +
+ + Location of a Repository, using the standard java.io.File API. + Location of a Repository, using the standard java.io.File API. + + + Obtain a pointer to an exact location on disk. + + Obtain a pointer to an exact location on disk. +

+ No guessing is performed, the given location is exactly the GIT_DIR + directory of the repository. + + location where the repository database is. + + the file system abstraction which will be necessary to + perform certain file system operations. + + a key for the given directory. + Lenient(Sharpen.FilePath, NGit.Util.FS) + + + +

Obtain a pointer to a location on disk. + + Obtain a pointer to a location on disk. +

+ The method performs some basic guessing to locate the repository. + Searched paths are: +

    +
  1. + directory + // assume exact match
  2. +
  3. + directory + + "/.git" // assume working directory
  4. +
  5. + directory + + ".git" // assume bare
  6. +
+
+ location where the repository database might be. + + the file system abstraction which will be necessary to + perform certain file system operations. + + a key for the given directory. + Exact(Sharpen.FilePath, NGit.Util.FS) + +
+ + exact location of the repository. + + the file system abstraction which will be necessary to + perform certain file system operations. + + + + location supplied to the constructor. + + + + + + Guess if a directory contains a Git repository. + + Guess if a directory contains a Git repository. +

+ This method guesses by looking for the existence of some key files + and directories. + + the location of the directory to examine. + + the file system abstraction which will be necessary to + perform certain file system operations. + + + true if the directory "looks like" a Git repository; false if + it doesn't look enough like a Git directory to really be a + Git directory. + + + +

Guess the proper path for a Git repository. + + Guess the proper path for a Git repository. +

+ The method performs some basic guessing to locate the repository. + Searched paths are: +

    +
  1. + directory + // assume exact match
  2. +
  3. + directory + + "/.git" // assume working directory
  4. +
  5. + directory + + ".git" // assume bare
  6. +
+
+ location to guess from. Several permutations are tried. + + the file system abstraction which will be necessary to + perform certain file system operations. + + + the actual directory location if a better match is found; + null if there is no suitable match. + +
+ + + Important state of the repository that affects what can and cannot bed + done. + + + Important state of the repository that affects what can and cannot bed + done. This is things like unhandled conflicted merges and unfinished rebase. + The granularity and set of states are somewhat arbitrary. The methods + on the state are the only supported means of deciding what to do. + + + + Has no work tree and cannot be used for normal editing. + Has no work tree and cannot be used for normal editing. + + + A safe state for working normally + + + An unfinished merge. + An unfinished merge. Must resolve or reset before continuing normally + + + An merge where all conflicts have been resolved. + + An merge where all conflicts have been resolved. The index does not + contain any unmerged paths. + + + + An unfinished cherry-pick. + An unfinished cherry-pick. Must resolve or reset before continuing normally + + + + A cherry-pick where all conflicts have been resolved. + + A cherry-pick where all conflicts have been resolved. The index does not + contain any unmerged paths. + + + + An unfinished rebase or am. + An unfinished rebase or am. Must resolve, skip or abort before normal work can take place + + + + An unfinished rebase. + An unfinished rebase. Must resolve, skip or abort before normal work can take place + + + + An unfinished apply. + An unfinished apply. Must resolve, skip or abort before normal work can take place + + + + An unfinished rebase with merge. + An unfinished rebase with merge. Must resolve, skip or abort before normal work can take place + + + + An unfinished interactive rebase. + An unfinished interactive rebase. Must resolve, skip or abort before normal work can take place + + + + Bisecting being done. + Bisecting being done. Normal work may continue but is discouraged + + + true if changing HEAD is sane. 
+ + + true if we can commit + + + true if reset to another HEAD is considered SAFE + + + true if amending is considered SAFE + + + a human readable description of the state. + + + Persistent configuration that can be stored and loaded from a location. + Persistent configuration that can be stored and loaded from a location. + + + Create a configuration with no default fallback. + Create a configuration with no default fallback. + + + Create an empty configuration with a fallback for missing keys. + Create an empty configuration with a fallback for missing keys. + + the base configuration to be consulted when a key is missing + from this configuration instance. + + + + Load the configuration from the persistent store. + + Load the configuration from the persistent store. +

+ If the configuration does not exist, this configuration is cleared, and + thus behaves the same as though the backing store exists, but is empty. + + the configuration could not be read (but does exist). + + the configuration is not properly formatted. + + + +

Save the configuration to the persistent store. + Save the configuration to the persistent store. + the configuration could not be written. +
+ + + A reference that indirectly points at another + Ref + . +

+ A symbolic reference always derives its current value from the target + reference. +

+
+ + Create a new ref pairing. + Create a new ref pairing. + name of this ref. + the ref we reference and derive our value from. + + + A tree entry representing a symbolic link. + + A tree entry representing a symbolic link. + Note. Java cannot really handle these as file system objects. + + + + + Construct a + SymlinkTreeEntry + with the specified name and SHA-1 in + the specified parent + + + + + + + Mutable builder to construct an annotated tag recording a project state. + + + Mutable builder to construct an annotated tag recording a project state. + Applications should use this object when they need to manually construct a + tag and want precise control over its fields. + To read a tag object, construct a + NGit.Revwalk.RevWalk + and obtain a + NGit.Revwalk.RevTag + instance by calling + NGit.Revwalk.RevWalk.ParseTag(AnyObjectId) + + . + + + + the type of object this tag refers to. + + + the object this tag refers to. + + + Set the object this tag refers to, and its type. + Set the object this tag refers to, and its type. + the object. + + the type of + obj + . Must be a valid type code. + + + + Set the object this tag refers to, and infer its type. + Set the object this tag refers to, and infer its type. + the object the tag will refer to. + + + + short name of the tag (no + refs/tags/ + prefix). + + + + Set the name of this tag. + Set the name of this tag. + + new short name of the tag. This short name should not start + with + refs/ + as typically a tag is stored under the + reference derived from + "refs/tags/" + getTag() + . + + + + creator of this tag. May be null. + + + Set the creator of this tag. + Set the creator of this tag. + the creator. May be null. + + + the complete commit message. + + + Set the tag's message. + Set the tag's message. + the tag's message. + + + Format this builder's state as an annotated tag object. + Format this builder's state as an annotated tag object. 
+ + this object in the canonical annotated tag format, suitable for + storage in a repository. + + + + Format this builder's state as an annotated tag object. + Format this builder's state as an annotated tag object. + + this object in the canonical annotated tag format, suitable for + storage in a repository. + + + + A simple progress reporter printing on a stream. + A simple progress reporter printing on a stream. + + + ProgressMonitor that batches update events. + ProgressMonitor that batches update events. + + + Set an optional delay before the first output. + Set an optional delay before the first output. + + how long to wait before output. If 0 output begins on the + first + Update(int) + call. + + + time unit of + time + . + + + + Update the progress monitor if the total work isn't known, + name of the task. + number of units already completed. + + + Finish the progress monitor when the total wasn't known in advance. + Finish the progress monitor when the total wasn't known in advance. + name of the task. + total number of units processed. + + + Update the progress monitor when the total is known in advance. + Update the progress monitor when the total is known in advance. + name of the task. + number of units already completed. + estimated number of units to process. + + workCurr * 100 / workTotal + . + + + + Finish the progress monitor when the total is known in advance. + Finish the progress monitor when the total is known in advance. + name of the task. + total number of units processed. + estimated number of units to process. + + workCurr * 100 / workTotal + . + + + + Title of the current task. + Title of the current task. + + + + Number of work units, or + ProgressMonitor.UNKNOWN + . + + + + True when timer expires and output should occur on next update. + True when timer expires and output should occur on next update. + + + Scheduled timer, supporting cancellation if task ends early. + Scheduled timer, supporting cancellation if task ends early. 
+ + + True if the task has displayed anything. + True if the task has displayed anything. + + + Number of work units already completed. + Number of work units already completed. + + + + Percentage of + totalWork + that is done. + + + + Initialize a new progress monitor. + Initialize a new progress monitor. + + + Initialize a new progress monitor. + Initialize a new progress monitor. + the stream to receive messages on. + + + + Wrapper around the general + ProgressMonitor + to make it thread safe. + Updates to the underlying ProgressMonitor are made only from the thread that + allocated this wrapper. Callers are responsible for ensuring the allocating + thread uses + PollForUpdates() + or + WaitForCompletion() + to + update the underlying ProgressMonitor. + Only + Update(int) + , + IsCancelled() + , and + EndWorker() + may be invoked from a worker thread. All other methods of the ProgressMonitor + interface can only be called from the thread that allocates this wrapper. + + + + Wrap a ProgressMonitor to be thread safe. + Wrap a ProgressMonitor to be thread safe. + the underlying monitor to receive events. + + + Notify the monitor a worker is starting. + Notify the monitor a worker is starting. + + + Notify the monitor of workers starting. + Notify the monitor of workers starting. + the number of worker threads that are starting. + + + Notify the monitor a worker is finished. + Notify the monitor a worker is finished. + + + Non-blocking poll for pending updates. + + Non-blocking poll for pending updates. + This method can only be invoked by the same thread that allocated this + ThreadSafeProgressMonior. + + + + Process pending updates and wait for workers to finish. + + Process pending updates and wait for workers to finish. + This method can only be invoked by the same thread that allocated this + ThreadSafeProgressMonior. + + + if the main thread is interrupted while waiting for + completion of workers. 
+ + + + The standard "transfer", "fetch" and "receive" configuration parameters. + + The standard "transfer", "fetch" and "receive" configuration parameters. + + + + + Key for + Config.Get<T>(SectionParser<T>) + + . + + + + strictly verify received objects? + + + A representation of a Git tree entry. + A representation of a Git tree entry. A Tree is a directory in Git. + + + Compare two names represented as bytes. + + Compare two names represented as bytes. Since git treats names of trees and + blobs differently we have one parameter that represents a '/' for trees. For + other objects the value should be NUL. The names are compare by their positive + byte value (0..255). + A blob and a tree with the same name will not compare equal. + + name + name + '/' if a is a tree, else NUL + '/' if b is a tree, else NUL + < 0 if a is sorted before b, 0 if they are the same, else b + + + Constructor for a new Tree + The repository that owns the Tree. + + + Construct a Tree object with known content and hash value + + + + System.IO.IOException + + + Construct a new Tree under another Tree + + + + + Construct a Tree with a known SHA-1 under another tree. + + Construct a Tree with a known SHA-1 under another tree. Data is not yet + specified and will have to be loaded on demand. + + + + + + + true if this Tree is the top level Tree. + + + true of the data of this Tree is loaded + + + Forget the in-memory data for this tree. + Forget the in-memory data for this tree. + + + Adds a new or existing file with the specified name to this tree. + + Adds a new or existing file with the specified name to this tree. + Trees are added if necessary as the name may contain '/':s. + + Name + + a + FileTreeEntry + for the added file. + + System.IO.IOException + + + Adds a new or existing file with the specified name to this tree. + + Adds a new or existing file with the specified name to this tree. + Trees are added if necessary as the name may contain '/':s. 
+ + an array containing the name + when the name starts in the tree. + + a + FileTreeEntry + for the added file. + + System.IO.IOException + + + Adds a new or existing Tree with the specified name to this tree. + + Adds a new or existing Tree with the specified name to this tree. + Trees are added if necessary as the name may contain '/':s. + + Name + + a + FileTreeEntry + for the added tree. + + System.IO.IOException + + + Adds a new or existing Tree with the specified name to this tree. + + Adds a new or existing Tree with the specified name to this tree. + Trees are added if necessary as the name may contain '/':s. + + an array containing the name + when the name starts in the tree. + + a + FileTreeEntry + for the added tree. + + System.IO.IOException + + + Add the specified tree entry to this tree. + Add the specified tree entry to this tree. + + System.IO.IOException + + + number of members in this tree + System.IO.IOException + + + Return all members of the tree sorted in Git order. + + Return all members of the tree sorted in Git order. + Entries are sorted by the numerical unsigned byte + values with (sub)trees having an implicit '/'. An + example of a tree with three entries. a:b is an + actual file name here. +

+ 100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 a.b + 040000 tree 4277b6e69d25e5efa77c455340557b384a4c018a a + 100644 blob e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 a:b + + all entries in this Tree, sorted. + System.IO.IOException + + + + + + to the tree. + + true if a tree with the specified path can be found under this + tree. + + System.IO.IOException + + + of the non-tree entry. + + true if a blob, symlink, or gitlink with the specified name + can be found under this tree. + + System.IO.IOException + + + + + + + + + blob name + + a + TreeEntry + representing an object with the specified + relative path. + + System.IO.IOException + + + Tree Name + a Tree with the name s or null + System.IO.IOException + + + + + + + + + +

Format this Tree in canonical format. + Format this Tree in canonical format. + canonical encoding of the tree object. + the tree cannot be loaded, or its not in a writable state. + +
+ + The standard "user" configuration parameters. + The standard "user" configuration parameters. + + + + Key for + Config.Get<T>(SectionParser<T>) + + . + + + + + the author name as defined in the git variables and + configurations. If no name could be found, try to use the system + user name instead. + + + + + the committer name as defined in the git variables and + configurations. If no name could be found, try to use the system + user name instead. + + + + + the author email as defined in git variables and + configurations. If no email could be found, try to + propose one default with the user name and the + host name. + + + + + the committer email as defined in git variables and + configurations. If no email could be found, try to + propose one default with the user name and the + host name. + + + + + true if the author name was not explicitly configured but + constructed from information the system has about the logged on + user + + + + + true if the author email was not explicitly configured but + constructed from information the system has about the logged on + user + + + + + true if the committer name was not explicitly configured but + constructed from information the system has about the logged on + user + + + + + true if the author email was not explicitly configured but + constructed from information the system has about the logged on + user + + + + + try to get user name of the logged on user from the operating + system + + + + + try to construct email for logged on user using system + information + + + + + A class used to execute a + Add + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) + + Git documentation about Add + + + + Common superclass of all commands in the package + org.eclipse.jgit.api +

+ This class ensures that all commands fulfill the + Sharpen.Callable<V> + interface. + It also has a property + GitCommand<T>.repo + holding a reference to the git + NGit.Repository + this command should work with. +

+ Finally this class stores a state telling whether it is allowed to call + GitCommand<T>.Call() + on this instance. Instances of + GitCommand<T> + can only be + used for one single successful call to + GitCommand<T>.Call() + . Afterwards this + instance may not be used anymore to set/modify any properties or to call + GitCommand<T>.Call() + again. This is achieved by setting the + GitCommand<T>.callable + property to false after the successful execution of + GitCommand<T>.Call() + and to + check the state (by calling + GitCommand<T>.CheckCallable() + ) before setting of + properties and inside + GitCommand<T>.Call() + . +

+
+ + The repository this command is working with + + + + a state which tells whether it is allowed to call + GitCommand<T>.Call() + on this + instance. + + + + Creates a new command which interacts with a single repository + + the + NGit.Repository + this command should interact with + + + + + the + NGit.Repository + this command is interacting with + + + + + Set's the state which tells whether it is allowed to call + GitCommand<T>.Call() + on this instance. + GitCommand<T>.CheckCallable() + will throw an exception when + called and this property is set to + false + + + if true it is allowed to call + GitCommand<T>.Call() + on + this instance. + + + + + Checks that the property + GitCommand<T>.callable + is + true + . If not then + an + System.InvalidOperationException + is thrown + + + when this method is called and the property + GitCommand<T>.callable + is + false + + + + Executes the command + T a result. Each command has its own return type + or subclass thereof when an error occurs + + + + + + + + File to add content from. Also a leading directory name (e.g. + dir to add dir/file1 and dir/file2) can be given to add all + files in the directory, recursively. Fileglobs (e.g. *.c) are + not yet supported. + + + + this + + + + Allow clients to provide their own implementation of a FileTreeIterator + + + + this + + + + + Executes the + Add + command. Each instance of this class should only + be used for one invocation of the command. Don't call this method twice + on an instance. + + the DirCache after Add + + + + + + If set to true, the command only matches + filepattern + against already tracked files in the index rather than the + working tree. That means that it will never stage new files, + but that it will stage modified new contents of tracked files + and that it will remove files from the index if the + corresponding files in the working tree have been removed. 
+ In contrast to the git command line a + filepattern + must + exist also if update is set to true as there is no + concept of a working directory here. + + + + this + + + + is the parameter update is set + + + + A class used to execute a + Commit + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. + + Git documentation about Commit + + + parents this commit should have. + + parents this commit should have. The current HEAD will be in this list + and also all commits mentioned in .git/MERGE_HEAD + + + + + + + + Executes the + commit + command with all the options and parameters + collected by the setter methods of this class. Each instance of this + class should only be used for one invocation of the command (means: one + call to + Call() + ) + + + a + NGit.Revwalk.RevCommit + object representing the successful commit. + + when called on a git repo without a HEAD reference + + when called without specifying a commit message + + when the current index contained unmerged paths (conflicts) + + + when HEAD or branch ref is updated concurrently by someone + else + + when repository is not in the right state for committing + + + + + + + + + + + + Look an entry's path up in the list of paths specified by the --only/ -o + option + In case the complete (file) path (e.g. + + + Look an entry's path up in the list of paths specified by the --only/ -o + option + In case the complete (file) path (e.g. "d1/d2/f1") cannot be found in + only, lookup is also tried with (parent) directory paths + (e.g. "d1/d2" and "d1"). + + entry's path + the item's index in only; -1 if no item matches + + + Sets default values for not explicitly specified options. + + Sets default values for not explicitly specified options. Then validates + that all required data has been provided. 
+ + the state of the repository we are working on + if the commit message has not been specified + + + + + the commit message used for the + commit + + + + this + + + + the commit message used for the commit + + + + Sets the committer for this + commit + . If no committer is explicitly + specified because this method is never called or called with + null + value then the committer will be deduced from config info in repository, + with current time. + + + the committer used for the + commit + + + + this + + + + + Sets the committer for this + commit + . If no committer is explicitly + specified because this method is never called then the committer will be + deduced from config info in repository, with current time. + + + the name of the committer used for the + commit + + + the email of the committer used for the + commit + + + + this + + + + + the committer used for the + commit + . If no committer was + specified + null + is returned and the default + NGit.PersonIdent + of this repo is used during execution of the + command + + + + + Sets the author for this + commit + . If no author is explicitly + specified because this method is never called or called with + null + value then the author will be set to the committer or to the original + author when amending. + + + the author used for the + commit + + + + this + + + + + Sets the author for this + commit + . If no author is explicitly + specified because this method is never called then the author will be set + to the committer or to the original author when amending. + + + the name of the author used for the + commit + + + the email of the author used for the + commit + + + + this + + + + + the author used for the + commit + . 
If no author was + specified + null + is returned and the default + NGit.PersonIdent + of this repo is used during execution of the + command + + + + + If set to true the Commit command automatically stages files that have + been modified and deleted, but new files not known by the repository are + not affected. + + + If set to true the Commit command automatically stages files that have + been modified and deleted, but new files not known by the repository are + not affected. This corresponds to the parameter -a on the command line. + + + + + this + + in case of an illegal combination of arguments/ options + + + + Used to amend the tip of the current branch. + + Used to amend the tip of the current branch. If set to true, the previous + commit will be amended. This is equivalent to --amend on the command + line. + + + + + this + + + + + Commit dedicated path only + This method can be called several times to add multiple paths. + + + Commit dedicated path only + This method can be called several times to add multiple paths. Full file + paths are supported as well as directory paths; in the latter case this + commits all files/ directories below the specified path. + + path to commit + + + this + + + + + If set to true a change id will be inserted into the commit message + An existing change id is not replaced. + + + If set to true a change id will be inserted into the commit message + An existing change id is not replaced. An initial change id (I000...) + will be replaced by the change id. + + + + + this + + + + Override the message written to the reflog + + + + this + + + + Any index record update. + + Any index record update. +

+ Applications should subclass and provide their own implementation for the + Apply(DirCacheEntry) + method. The editor will invoke apply once + for each record in the index which matches the path name. If there are + multiple records (for example in stages 1, 2 and 3), the edit instance + will be called multiple times, once for each stage. + + + +

+ Updates a + DirCache + by supplying discrete edit commands. +

+ An editor updates a DirCache by taking a list of + PathEdit + commands + and executing them against the entries of the destination cache to produce a + new cache. This edit style allows applications to insert a few commands and + then have the editor compute the proper entry indexes necessary to perform an + efficient in-order update of the index records. This can be easier to use + than + DirCacheBuilder + . +

+

+ DirCacheBuilder +
+ + + Generic update/editing support for + DirCache + . +

+ The different update strategies extend this class to provide their own unique + services to applications. +

+
+ + + The cache instance this editor updates during + Finish() + . + + + + + Entry table this builder will eventually replace into + cache + . +

+ Use + FastAdd(DirCacheEntry) + or + FastKeep(int, int) + to + make additions to this table. The table is automatically expanded if it + is too small for a new addition. +

+ Typically the entries in here are sorted by their path names, just like + they are in the DirCache instance. +

+
+ + + Total number of valid entries in + entries + . + + + + Construct a new editor. + Construct a new editor. + the cache this editor will eventually update. + + estimated number of entries the editor will have upon + completion. This sizes the initial entry table. + + + + + the cache we will update on + Finish() + . + + + + Append one entry into the resulting entry list. + + Append one entry into the resulting entry list. +

+ The entry is placed at the end of the entry list. The caller is + responsible for making sure the final table is correctly sorted. +

+ The + entries + table is automatically expanded if there is + insufficient space for the new addition. + + the new entry to add. + + +

Add a range of existing entries from the destination cache. + + Add a range of existing entries from the destination cache. +

+ The entries are placed at the end of the entry list, preserving their + current order. The caller is responsible for making sure the final table + is correctly sorted. +

+ This method copies from the destination cache, which has not yet been + updated with this editor's new table. So all offsets into the destination + cache are not affected by any updates that may be currently taking place + in this editor. +

+ The + entries + table is automatically expanded if there is + insufficient space for the new additions. + + first entry to copy from the destination cache. + number of entries to copy. + + +

+ Finish this builder and update the destination + DirCache + . +

+ When this method completes this builder instance is no longer usable by + the calling application. A new builder must be created to make additional + changes to the index entries. +

+ After completion the DirCache returned by + GetDirCache() + will + contain all modifications. +

+ Note to implementors: Make sure + entries + is fully sorted + then invoke + Replace() + to update the DirCache with the new table. +

+
+ + + Update the DirCache with the contents of + entries + . +

+ This method should be invoked only during an implementation of + Finish() + , and only after + entries + is sorted. +

+
+ + Finish, write, commit this change, and release the index lock. + + Finish, write, commit this change, and release the index lock. +

+ If this method fails (returns false) the lock is still released. +

+ This is a utility method for applications as the finish-write-commit + pattern is very common after using a builder to update entries. + + + true if the commit was successful and the file contains the new + data; false if the commit failed and the file remains with the + old data. + + the lock is not held. + + the output file could not be created. The caller no longer + holds the lock. + + + +

Construct a new editor. + Construct a new editor. + the cache this editor will eventually update. + + estimated number of entries the editor will have upon + completion. This sizes the initial entry table. + +
+ + Append one edit command to the list of commands to be applied. + + Append one edit command to the list of commands to be applied. +

+ Edit commands may be added in any order chosen by the application. They + are automatically rearranged by the builder to provide the most efficient + update possible. + + another edit command. + + + + + +

Any index record update. + + Any index record update. +

+ Applications should subclass and provide their own implementation for the + Apply(DirCacheEntry) + method. The editor will invoke apply once + for each record in the index which matches the path name. If there are + multiple records (for example in stages 1, 2 and 3), the edit instance + will be called multiple times, once for each stage. + + + +

Create a new update command by path name. + Create a new update command by path name. + path of the file within the repository. +
+ + Create a new update command for an existing entry instance. + Create a new update command for an existing entry instance. + + entry instance to match path of. Only the path of this + entry is actually considered during command evaluation. + + + + Apply the update to a single cache entry matching the path. + + Apply the update to a single cache entry matching the path. +

+ After apply is invoked the entry is added to the output table, and + will be included in the new index. + + + the entry being processed. All fields are zeroed out if + the path is a new path in the index. + + + +

Deletes a single file entry from the index. + + Deletes a single file entry from the index. +

+ This deletion command removes only a single file at the given location, + but removes multiple stages (if present) for that path. To remove a + complete subtree use + DeleteTree + instead. + + DeleteTree + + +

Create a new deletion command by path name. + Create a new deletion command by path name. + path of the file within the repository. +
+ + Create a new deletion command for an existing entry instance. + Create a new deletion command for an existing entry instance. + + entry instance to remove. Only the path of this entry is + actually considered during command evaluation. + + + + Recursively deletes all paths under a subtree. + + Recursively deletes all paths under a subtree. +

+ This deletion command is more generic than + DeletePath + as it can + remove all records which appear recursively under the same subtree. + Multiple stages are removed (if present) for any deleted entry. +

+ This command will not remove a single file entry. To remove a single file + use + DeletePath + . + + DeletePath + + +

Create a new tree deletion command by path name. + Create a new tree deletion command by path name. + + path of the subtree within the repository. If the path + does not end with "/" a "/" is implicitly added to ensure + only the subtree's contents are matched by the command. + The special case "" (not "/"!) deletes all entries. + +
+ + Offers a "GitPorcelain"-like API to interact with a git repository. + + Offers a "GitPorcelain"-like API to interact with a git repository. +

+ The GitPorcelain commands are described in the <a href= + "http://www.kernel.org/pub/software/scm/git/docs/git.html#_high_level_commands_porcelain" + >Git Documentation</a>. +

+ This class only offers methods to construct so-called command classes. Each + GitPorcelain command is represented by one command class.
+ Example: this class offers a + commit() + method returning an instance of + the + CommitCommand + class. The + CommitCommand + class has setters + for all the arguments and options. The + CommitCommand + class also has a + call + method to actually execute the commit. The following code show's + how to do a simple commit: +

+            Git git = new Git(myRepo);
+            git.commit().setMessage("Fix393").setAuthor(developerIdent).call();
+            
+ All mandatory parameters for commands have to be specified in the methods of + this class, the optional parameters have to be specified by the + setter-methods of the Command class. +

+ This class is intended to be used internally (e.g. by JGit tests) or by + external components (EGit, third-party tools) when they need exactly the + functionality of a GitPorcelain command. There are use-cases where this class + is not optimal and where you should use the more low-level JGit classes. The + methods in this class may for example offer too much functionality or they + offer the functionality with the wrong arguments. + + + +

The git repository this class is interacting with +
+ + + the repository to open. May be either the GIT_DIR, or the + working tree directory that contains + .git + . + + + a + Git + object for the existing git repository + + System.IO.IOException + + + + the repository to open. May be either the GIT_DIR, or the + working tree directory that contains + .git + . + + filesystem abstraction to use when accessing the repository. + + a + Git + object for the existing git repository + + System.IO.IOException + + + + the git repository this class is interacting with. + null + is not allowed + + + a + Git + object for the existing git repository + + + + + Returns a command object to execute a + clone + command + + Git documentation about clone + + a + CloneCommand + used to collect all optional parameters + and to finally execute the + clone + command + + + + + Returns a command object to execute a + init + command + + Git documentation about init + + a + InitCommand + used to collect all optional parameters and + to finally execute the + init + command + + + + + Constructs a new + Git + object which can interact with the specified + git repository. All command classes returned by methods of this class + will always interact with this git repository. + + + the git repository this class is interacting with. 
+ null + is not allowed + + + + + Returns a command object to execute a + Commit + command + + Git documentation about Commit + + a + CommitCommand + used to collect all optional parameters + and to finally execute the + Commit + command + + + + + Returns a command object to execute a + Log + command + + Git documentation about Log + + a + LogCommand + used to collect all optional parameters and + to finally execute the + Log + command + + + + + Returns a command object to execute a + Merge + command + + Git documentation about Merge + + a + MergeCommand + used to collect all optional parameters + and to finally execute the + Merge + command + + + + + Returns a command object to execute a + Pull + command + + + a + PullCommand + + + + Returns a command object used to create branches + + a + CreateBranchCommand + + + + Returns a command object used to delete branches + + a + DeleteBranchCommand + + + + Returns a command object used to list branches + + a + ListBranchCommand + + + + Returns a command object used to list tags + + a + ListTagCommand + + + + Returns a command object used to rename branches + + a + RenameBranchCommand + + + + + Returns a command object to execute a + Add + command + + Git documentation about Add + + a + AddCommand + used to collect all optional parameters and + to finally execute the + Add + command + + + + + Returns a command object to execute a + Tag + command + + Git documentation about Tag + + a + TagCommand + used to collect all optional parameters and + to finally execute the + Tag + command + + + + + Returns a command object to execute a + Fetch + command + + Git documentation about Fetch + + a + FetchCommand + used to collect all optional parameters + and to finally execute the + Fetch + command + + + + + Returns a command object to execute a + Push + command + + Git documentation about Push + + a + PushCommand + used to collect all optional parameters and + to finally execute the + Push + command + + + + + Returns a command 
object to execute a + cherry-pick + command + + Git documentation about cherry-pick + + a + CherryPickCommand + used to collect all optional + parameters and to finally execute the + cherry-pick + command + + + + + Returns a command object to execute a + revert + command + + Git documentation about reverting changes + + a + RevertCommand + used to collect all optional parameters + and to finally execute the + cherry-pick + command + + + + + Returns a command object to execute a + Rebase + command + + Git documentation about rebase + + a + RebaseCommand + used to collect all optional parameters + and to finally execute the + rebase + command + + + + + Returns a command object to execute a + rm + command + + Git documentation about rm + + a + RmCommand + used to collect all optional parameters and + to finally execute the + rm + command + + + + + Returns a command object to execute a + checkout + command + + Git documentation about checkout + + a + CheckoutCommand + used to collect all optional parameters + and to finally execute the + checkout + command + + + + + Returns a command object to execute a + reset + command + + Git documentation about reset + + a + ResetCommand + used to collect all optional parameters + and to finally execute the + reset + command + + + + + Returns a command object to execute a + status + command + + Git documentation about status + + a + StatusCommand + used to collect all optional parameters + and to finally execute the + status + command + + + + Returns a command to add notes to an object + + a + AddNoteCommand + + + + Returns a command to remove notes on an object + + a + RemoveNoteCommand + + + + Returns a command to list all notes + + a + ListNotesCommand + + + + Returns a command to show notes on an object + + a + ShowNoteCommand + + + + + Returns a command object to execute a + ls-remote + command + + Git documentation about ls-remote + + a + LsRemoteCommand + used to collect all optional parameters + and to finally execute the + 
status + command + + + + + Returns a command object to execute a + clean + command + + Git documentation about Clean + + a + CleanCommand + used to collect all optional parameters + and to finally execute the + clean + command + + + + + Returns a command object to execute a + blame + command + + Git documentation about Blame + + a + BlameCommand + used to collect all optional parameters + and to finally execute the + blame + command + + + + + Returns a command object to execute a + reflog + command + + Git documentation about reflog + + a + ReflogCommand + used to collect all optional parameters + and to finally execute the + reflog + command + + + + + Returns a command object to execute a + diff + command + + Git documentation about diff + + a + DiffCommand + used to collect all optional parameters and + to finally execute the + diff + command + + + + Returns a command object used to delete tags + + a + DeleteTagCommand + + + + + Returns a command object to execute a + submodule add + command + + + a + SubmoduleAddCommand + used to add a new submodule to a + parent repository + + + + + Returns a command object to execute a + submodule init + command + + + a + SubmoduleInitCommand + used to initialize the + repository's config with settings from the .gitmodules file in + the working tree + + + + + Returns a command object to execute a + submodule status + command + + + a + SubmoduleStatusCommand + used to report the status of a + repository's configured submodules + + + + + Returns a command object to execute a + submodule sync + command + + + a + SubmoduleSyncCommand + used to update the URL of a + submodule from the parent repository's .gitmodules file + + + + + Returns a command object to execute a + submodule update + command + + + a + SubmoduleUpdateCommand + used to update the submodules in + a repository to the configured revision + + + + Returns a command object used to list stashed commits + + a + StashListCommand + + + + Returns a command object used to 
create a stashed commit + + a + StashCreateCommand + + 2.0 + + + Returns a command object used to apply a stashed commit + + a + StashApplyCommand + + 2.0 + + + Returns a command object used to drop a stashed commit + + a + StashDropCommand + + 2.0 + + + + Returns a command object to execute a + apply + command + + Git documentation about apply + + a + ApplyCommand + used to collect all optional parameters + and to finally execute the + apply + command + + 2.0 + + + the git repository this class is interacting with + + + + A class used to execute a + Log + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) +

+ This is currently a very basic implementation which takes only one starting + revision as option. + TODO: add more options (revision ranges, sorting, ...) +

+ Git documentation about Log +
+ + + + + + Executes the + Log + command with all the options and parameters + collected by the setter methods (e.g. + Add(NGit.AnyObjectId) + , + Not(NGit.AnyObjectId) + , ..) of this class. Each instance of this class + should only be used for one invocation of the command. Don't call this + method twice on an instance. + + an iteration over RevCommits + of the references ref cannot be resolved + + + + + Mark a commit to start graph traversal from. + Mark a commit to start graph traversal from. + NGit.Revwalk.RevWalk.MarkStart(NGit.Revwalk.RevCommit) + + + + + this + + + the commit supplied is not available from the object + database. This usually indicates the supplied commit is + invalid, but the reference was constructed during an earlier + invocation to + NGit.Revwalk.RevWalk.LookupCommit(NGit.AnyObjectId) + + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually a commit. This usually + indicates the caller supplied a non-commit SHA-1 to + NGit.Revwalk.RevWalk.LookupCommit(NGit.AnyObjectId) + + . + + + a low-level exception of JGit has occurred. The original + exception can be retrieved by calling + System.Exception.InnerException() + . Expect only + IOException's + to be wrapped. Subclasses of + System.IO.IOException + (e.g. + NGit.Errors.MissingObjectException + + ) are + typically not wrapped here but thrown as original exception + + + + + Same as + --not start + , or + ^start + + + + + this + + + the commit supplied is not available from the object + database. This usually indicates the supplied commit is + invalid, but the reference was constructed during an earlier + invocation to + NGit.Revwalk.RevWalk.LookupCommit(NGit.AnyObjectId) + + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually a commit. This usually + indicates the caller supplied a non-commit SHA-1 to + NGit.Revwalk.RevWalk.LookupCommit(NGit.AnyObjectId) + + . 
+ + + a low-level exception of JGit has occurred. The original + exception can be retrieved by calling + System.Exception.InnerException() + . Expect only + IOException's + to be wrapped. Subclasses of + System.IO.IOException + (e.g. + NGit.Errors.MissingObjectException + + ) are + typically not wrapped here but thrown as original exception + + + + + Adds the range + since..until + + + + + + this + + + the commit supplied is not available from the object + database. This usually indicates the supplied commit is + invalid, but the reference was constructed during an earlier + invocation to + NGit.Revwalk.RevWalk.LookupCommit(NGit.AnyObjectId) + + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually a commit. This usually + indicates the caller supplied a non-commit SHA-1 to + NGit.Revwalk.RevWalk.LookupCommit(NGit.AnyObjectId) + + . + + + a low-level exception of JGit has occurred. The original + exception can be retrieved by calling + System.Exception.InnerException() + . Expect only + IOException's + to be wrapped. Subclasses of + System.IO.IOException + (e.g. + NGit.Errors.MissingObjectException + + ) are + typically not wrapped here but thrown as original exception + + + + Add all refs as commits to start the graph traversal from. + Add all refs as commits to start the graph traversal from. + Add(NGit.AnyObjectId) + + + this + + the references could not be accessed + + + Show only commits that affect any of the specified paths. + + Show only commits that affect any of the specified paths. The path must + either name a file or a directory exactly. Note that regex expressions or + wildcards are not supported. + + a path is relative to the top level of the repository + + + this + + + + Skip the number of commits before starting to show the commit output. + Skip the number of commits before starting to show the commit output. + the number of commits to skip + + + this + + + + Limit the number of commits to output. 
+ Limit the number of commits to output. + the limit + + + this + + + + + + + + + + A class used to execute a + Merge + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) + + Git documentation about Merge + + + + + + + Executes the + Merge + command with all the options and parameters + collected by the setter methods (e.g. + Include(NGit.Ref) + ) of this + class. Each instance of this class should only be used for one invocation + of the command. Don't call this method twice on an instance. + + the result of the merge + + + + + + + + + + + + + + + the + NGit.Merge.MergeStrategy + to be used + + + + this + + + + a reference to a commit which is merged with the current head + + + + this + + + + the Id of a commit which is merged with the current head + + + this + + + + a name given to the commit + the Id of a commit which is merged with the current head + + + this + + + + + If true, will prepare the next commit in working tree and + index as if a real merge happened, but do not make the commit or move the + HEAD. + + + If true, will prepare the next commit in working tree and + index as if a real merge happened, but do not make the commit or move the + HEAD. Otherwise, perform the merge and commit the result. +

+ In case the merge was successful but this flag was set to + true a + MergeCommandResult + with status + MergeStatus.MERGED_SQUASHED + or + MergeStatus.FAST_FORWARD_SQUASHED + is returned. + + whether to squash commits or not + + + this + + 2.0 + + +

+ A class used to execute a + Tag + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. + + Git documentation about Tag +
+ + + + + + Executes the + tag + command with all the options and parameters + collected by the setter methods of this class. Each instance of this + class should only be used for one invocation of the command (means: one + call to + Call() + ) + + + a + NGit.Ref + a ref pointing to a tag + + when called on a git repo without a HEAD reference + + 2.0 + + + + + + Sets default values for not explicitly specified options. + + Sets default values for not explicitly specified options. Then validates + that all required data has been provided. + + the state of the repository we are working on + if the tag name is null or invalid + + if the tag is signed (not supported yet) + + + + + the tag name used for the + tag + + + + this + + + + the tag name used for the tag + + + the tag message used for the tag + + + + the tag message used for the + tag + + + + this + + + + whether the tag is signed + + + If set to true the Tag command creates a signed tag object. + + If set to true the Tag command creates a signed tag object. This + corresponds to the parameter -s on the command line. + + + + + this + + + + Sets the tagger of the tag. + + Sets the tagger of the tag. If the tagger is null, a PersonIdent will be + created from the info in the repository. + + + + + this + + + + the tagger of the tag + + + the object id of the tag + + + Sets the object id of the tag. + + Sets the object id of the tag. If the object id is null, the commit + pointed to from HEAD will be used. + + + + + this + + + + is this a force update + + + If set to true the Tag command may replace an existing tag object. + + If set to true the Tag command may replace an existing tag object. This + corresponds to the parameter -f on the command line. + + + + + this + + + + + Exception thrown when a command can't succeed because of unresolved + conflicts. + + + Exception thrown when a command can't succeed because of unresolved + conflicts. 
+ + + + + Superclass of all exceptions thrown by the API classes in + org.eclipse.jgit.api + + + + Translate internal exception to API exception + list of conflicting paths + + a + NGit.Errors.CheckoutConflictException + + exception + + + + all the paths where unresolved conflicts have been detected + + + Adds a new conflicting path + + + + this + + + + + Exception thrown when a command wants to update a ref but failed because + another process is accessing (or even also updating) the ref. + + + Exception thrown when a command wants to update a ref but failed because + another process is accessing (or even also updating) the ref. + + NGit.RefUpdate.Result.LOCK_FAILURE + + + + + + + + + + + + + + + + the + NGit.Ref + which was tried to by updated + + + + + the result which was returned by + NGit.RefUpdate.Update() + and + which caused this error + + + + + Exception thrown when a merge command was called without specifying the + proper amount/type of merge heads. + + + Exception thrown when a merge command was called without specifying the + proper amount/type of merge heads. E.g. a non-octopus merge strategy was + confronted with more than one head to be merged into HEAD. Another + case would be if a merge was called without including any head. + + + + + + + + Exception thrown when a tag command was called with an invalid tag name (or + null), such as bad~tag. + + + Exception thrown when a tag command was called with an invalid tag name (or + null), such as bad~tag. + + + + + + + + Exception thrown when during command execution a low-level exception from the + JGit library is thrown. + + + Exception thrown when during command execution a low-level exception from the + JGit library is thrown. Also when certain low-level error situations are + reported by JGit through return codes this Exception will be thrown. +

+ During command execution a lot of exceptions may be thrown. Some of them + represent error situations which can be handled specifically by the caller of + the command. But a lot of exceptions are so low-level that is is unlikely + that the caller of the command can handle them effectively. The huge number + of these low-level exceptions which are thrown by the commands lead to a + complicated and wide interface of the commands. Callers of the API have to + deal with a lot of exceptions they don't understand. +

+ To overcome this situation this class was introduced. Commands will wrap all + exceptions they declare as low-level in their context into an instance of + this class. Callers of the commands have to deal with one type of low-level + exceptions. Callers will always get access to the original exception (if + available) by calling + #getCause() + . + + + +

Construct an exception for low-level internal exceptions + + +
+ + Construct an exception for low-level internal exceptions + + + + + Exception thrown when the options given to a command don't include a + file pattern which is mandatory for processing. + + + Exception thrown when the options given to a command don't include a + file pattern which is mandatory for processing. + + + + + + + + + + + + Exception thrown when a command expected the + HEAD + reference to exist + but couldn't find such a reference + + + + + + + + + + + + Exception thrown when the options given to a command don't include a + specification of a message text (e.g. + + + Exception thrown when the options given to a command don't include a + specification of a message text (e.g. a commit was called without explicitly + specifying a commit message (or other options telling where to take the + message from. + + + + + + + + + + + + Exception thrown when the state of the repository doesn't allow the execution + of a certain command. + + + Exception thrown when the state of the repository doesn't allow the execution + of a certain command. E.g. when a CommitCommand should be executed on a + repository with unresolved conflicts this exception will be thrown. + + + + + + + + + + + Keeps track of diff related configuration options. + Keeps track of diff related configuration options. + + + + Key for + NGit.Config.Get<T>(NGit.Config.SectionParser<T>) + + . + + + + true if the prefix "a/" and "b/" should be suppressed. + + + true if rename detection is enabled by default. + + + type of rename detection to perform. + + + limit on number of paths to perform inexact rename detection. + + + + Permissible values for + diff.renames + . + + + + A value class representing a change to a file + + + Magical SHA1 used for file adds or deletes + + + Magical file name used for file adds or deletes. + Magical file name used for file adds or deletes. + + + Create an empty DiffEntry + + + Convert the TreeWalk into DiffEntry headers. + Convert the TreeWalk into DiffEntry headers. 
+ the TreeWalk to walk through. Must have exactly two trees. + headers describing the changed files. + the repository cannot be accessed. + When given TreeWalk doesn't have exactly two trees. + + + + + Convert the TreeWalk into DiffEntry headers, depending on + includeTrees + it will add tree objects into result or not. + + + the TreeWalk to walk through. Must have exactly two trees and + when + includeTrees + parameter is + true + it can't + be recursive. + + include tree object's. + headers describing the changed files. + the repository cannot be accessed. + + when + includeTrees + is true and given TreeWalk is + recursive. Or when given TreeWalk doesn't have exactly two + trees + + + + Breaks apart a DiffEntry into two entries, one DELETE and one ADD. + Breaks apart a DiffEntry into two entries, one DELETE and one ADD. + the DiffEntry to break apart. + + a list containing two entries. Calling + GetChangeType() + on the first entry will return ChangeType.DELETE. Calling it on + the second entry will return ChangeType.ADD. + + + + File name of the old (pre-image). + File name of the old (pre-image). + + + File name of the new (post-image). + File name of the new (post-image). + + + Old mode of the file, if described by the patch, else null. + Old mode of the file, if described by the patch, else null. + + + New mode of the file, if described by the patch, else null. + New mode of the file, if described by the patch, else null. + + + General type of change indicated by the patch. + General type of change indicated by the patch. + + + + Similarity score if + changeType + is a copy or rename. + + + + ObjectId listed on the index line for the old (pre-image) + + + ObjectId listed on the index line for the new (post-image) + + + Get the old name associated with this file. + + Get the old name associated with this file. +

+ The meaning of the old name can differ depending on the semantic meaning + of this patch: +

    +
  • file add: always /dev/null
  • +
  • file modify: always + GetNewPath() +
  • +
  • file delete: always the file being deleted
  • +
  • file copy: source file the copy originates from
  • +
  • file rename: source file the rename originates from
  • +
+
+ old name for this file. +
+ + Get the new name associated with this file. + + Get the new name associated with this file. +

+ The meaning of the new name can differ depending on the semantic meaning + of this patch: +

    +
  • file add: always the file being created
  • +
  • file modify: always + GetOldPath() +
  • +
  • file delete: always /dev/null
  • +
  • file copy: destination file the copy ends up at
  • +
  • file rename: destination file the rename ends up at
  • +
+
+ new name for this file. +
+ + Get the path associated with this file. + Get the path associated with this file. + which path to obtain. + name for this file. + + + the old file mode, if described in the patch + + + the new file mode, if described in the patch + + + Get the mode associated with this file. + Get the mode associated with this file. + which mode to obtain. + the mode. + + + + the type of change this patch makes on + GetNewPath() + + + + + + similarity score between + GetOldPath() + and + GetNewPath() + if + GetChangeType() + is + ChangeType.COPY + or + ChangeType.RENAME + . + + + + Get the old object id from the index. + Get the old object id from the index. + the object id; null if there is no index line + + + Get the new object id from the index. + Get the new object id from the index. + the object id; null if there is no index line + + + Get the object id. + Get the object id. + the side of the id to get. + the object id; null if there is no index line + + + General type of change a single file-level patch describes. + General type of change a single file-level patch describes. + + + Specify the old or new side for more generalized access. + Specify the old or new side for more generalized access. + + + Format a Git style patch script. + Format a Git style patch script. + + + Magic return content indicating it is empty or no content present. + Magic return content indicating it is empty or no content present. + + + Magic return indicating the content is binary. + Magic return indicating the content is binary. + + + Create a new formatter with a default level of context. + Create a new formatter with a default level of context. + + the stream the formatter will write line data to. This stream + should have buffering arranged by the caller, as many small + writes are performed to it. + + + + the stream we are outputting data to. + + + Set the repository the formatter can load object contents from. + + Set the repository the formatter can load object contents from. 
+ Once a repository has been set, the formatter must be released to ensure + the internal ObjectReader is able to release its resources. + + source repository holding referenced objects. + + + Change the number of lines of context to display. + Change the number of lines of context to display. + + number of lines of context to see before the first + modification and after the last modification within a hunk of + the modified file. + + + + Change the number of digits to show in an ObjectId. + Change the number of digits to show in an ObjectId. + number of digits to show in an ObjectId. + + + Set the algorithm that constructs difference output. + Set the algorithm that constructs difference output. + the algorithm to produce text file differences. + HistogramDiff + + + Set the line equivalence function for text file differences. + Set the line equivalence function for text file differences. + + The equivalence function used to determine if two lines of + text are identical. The function can be changed to ignore + various types of whitespace. + + RawTextComparator.DEFAULT + RawTextComparator.WS_IGNORE_ALL + RawTextComparator.WS_IGNORE_CHANGE + + RawTextComparator.WS_IGNORE_LEADING + + RawTextComparator.WS_IGNORE_TRAILING + + + + Set maximum file size for text files. + + Set maximum file size for text files. + Files larger than this size will be treated as though they are binary and + not text. Default is + #DEFAULT_BINARY_FILE_THRESHOLD + . + + + the limit, in bytes. Files larger than this size will be + assumed to be binary, even if they aren't. + + + + Set the prefix applied in front of old file paths. + Set the prefix applied in front of old file paths. + + the prefix in front of old paths. Typically this is the + standard string + "a/" + , but may be any prefix desired by + the caller. Must not be null. Use the empty string to have no + prefix at all. + + + + Get the prefix applied in front of old file paths. + Get the prefix applied in front of old file paths. 
+ the prefix + 2.0 + + + Set the prefix applied in front of new file paths. + Set the prefix applied in front of new file paths. + + the prefix in front of new paths. Typically this is the + standard string + "b/" + , but may be any prefix desired by + the caller. Must not be null. Use the empty string to have no + prefix at all. + + + + Get the prefix applied in front of new file paths. + Get the prefix applied in front of new file paths. + the prefix + 2.0 + + + true if rename detection is enabled. + + + Enable or disable rename detection. + + Enable or disable rename detection. + Before enabling rename detection the repository must be set with + SetRepository(NGit.Repository) + . Once enabled the detector can be + configured away from its defaults by obtaining the instance directly from + GetRenameDetector() + and invoking configuration. + + if rename detection should be enabled. + + + the rename detector if rename detection is enabled. + + + Set the progress monitor for long running rename detection. + Set the progress monitor for long running rename detection. + progress monitor to receive rename detection status through. + + + Set the filter to produce only specific paths. + + Set the filter to produce only specific paths. + If the filter is an instance of + NGit.Revwalk.FollowFilter + , the filter path + will be updated during successive scan or format invocations. The updated + path can be obtained from + GetPathFilter() + . + + the tree filter to apply. + + + the current path filter. + + + Flush the underlying output stream of this formatter. + Flush the underlying output stream of this formatter. + the stream's own flush method threw an exception. + + + + Release the internal ObjectReader state. + Release the internal ObjectReader state. + + + Determine the differences between two trees. + + Determine the differences between two trees. + No output is created, instead only the file paths that are different are + returned. 
Callers may choose to format these paths themselves, or convert + them into + NGit.Patch.FileHeader + instances with a complete edit list by + calling + ToFileHeader(DiffEntry) + . + + the old (or previous) side. + the new (or updated) side. + the paths that are different. + trees cannot be read or file contents cannot be read. + + + + Determine the differences between two trees. + + Determine the differences between two trees. + No output is created, instead only the file paths that are different are + returned. Callers may choose to format these paths themselves, or convert + them into + NGit.Patch.FileHeader + instances with a complete edit list by + calling + ToFileHeader(DiffEntry) + . + + the old (or previous) side. + the new (or updated) side. + the paths that are different. + trees cannot be read or file contents cannot be read. + + + + Determine the differences between two trees. + + Determine the differences between two trees. + No output is created, instead only the file paths that are different are + returned. Callers may choose to format these paths themselves, or convert + them into + NGit.Patch.FileHeader + instances with a complete edit list by + calling + ToFileHeader(DiffEntry) + . + + the old (or previous) side. + the new (or updated) side. + the paths that are different. + trees cannot be read or file contents cannot be read. + + + + + + + Format the differences between two trees. + + Format the differences between two trees. + The patch is expressed as instructions to modify + a + to make it + b + . + + the old (or previous) side. + the new (or updated) side. + + trees cannot be read, file contents cannot be read, or the + patch cannot be output. + + + + Format the differences between two trees. + + Format the differences between two trees. + The patch is expressed as instructions to modify + a + to make it + b + . + + the old (or previous) side. + the new (or updated) side. 
+ + trees cannot be read, file contents cannot be read, or the + patch cannot be output. + + + + Format the differences between two trees. + + Format the differences between two trees. + The patch is expressed as instructions to modify + a + to make it + b + . + + the old (or previous) side. + the new (or updated) side. + + trees cannot be read, file contents cannot be read, or the + patch cannot be output. + + + + Format a patch script from a list of difference entries. + + Format a patch script from a list of difference entries. Requires + Scan(NGit.Treewalk.AbstractTreeIterator, NGit.Treewalk.AbstractTreeIterator) + to have been + called first. + + entries describing the affected files. + + a file's content cannot be read, or the output stream cannot + be written to. + + + + Format a patch script for one file entry. + Format a patch script for one file entry. + the entry to be formatted. + + a file's content cannot be read, or the output stream cannot + be written to. + + + + + + + Format a patch script, reusing a previously parsed FileHeader. + + Format a patch script, reusing a previously parsed FileHeader. +

+ This formatter is primarily useful for editing an existing patch script + to increase or reduce the number of lines of context within the script. + All header lines are reused as-is from the supplied FileHeader. + + existing file header containing the header lines to copy. + + text source for the pre-image version of the content. This + must match the content of + DiffEntry.GetOldId() + . + + + text source for the post-image version of the content. This + must match the content of + DiffEntry.GetNewId() + . + + writing to the supplied stream failed. + + +

Formats a list of edits in unified diff format + some differences which have been calculated between A and B + the text A which was compared + the text B which was compared + System.IO.IOException +
+ + Output a line of context (unmodified line). + Output a line of context (unmodified line). + RawText for accessing raw data + the line number within text + System.IO.IOException + + + Output an added line. + Output an added line. + RawText for accessing raw data + the line number within text + System.IO.IOException + + + Output a removed line + RawText for accessing raw data + the line number within text + System.IO.IOException + + + Output a hunk header + within first source + within first source + within second source + within second source + System.IO.IOException + + + + + + Write a standard patch script line. + Write a standard patch script line. + prefix before the line, typically '-', '+', ' '. + the text object to obtain the line from. + line number to output. + the stream threw an exception while writing to it. + + + + + Creates a + NGit.Patch.FileHeader + representing the given + DiffEntry +

+ This method does not use the OutputStream associated with this + DiffFormatter instance. It is therefore safe to instantiate this + DiffFormatter instance with a + NGit.Util.IO.DisabledOutputStream + if this method + is the only one that will be used. +

+ the DiffEntry to create the FileHeader for + + a FileHeader representing the DiffEntry. The FileHeader's buffer + will contain only the header of the diff output. It will also + contain one + NGit.Patch.HunkHeader + . + + + the stream threw an exception while writing to it, or one of + the blobs referenced by the DiffEntry could not be read. + + one of the blobs referenced by the DiffEntry is corrupt. + + one of the blobs referenced by the DiffEntry is missing. + +
+ + + + + + + + + + + + + the stream the formatter will write line data to + the DiffEntry to create the FileHeader for + writing to the supplied stream failed. + + + + + + A modified region detected between two versions of roughly the same content. + + + A modified region detected between two versions of roughly the same content. +

+ An edit covers the modified region only. It does not cover a common region. +

+ Regions should be specified using 0 based notation, so add 1 to the start and + end marks for line numbers in a file. +

+ An edit where beginA == endA && beginB < endB is an insert edit, + that is sequence B inserted the elements in region + [beginB, endB) at beginA. +

+ An edit where beginA < endA && beginB == endB is a delete edit, + that is sequence B has removed the elements between + [beginA, endA). +

+ An edit where beginA < endA && beginB < endB is a replace edit, + that is sequence B has replaced the range of elements between + [beginA, endA) with those found in [beginB, endB). + + + +

Create a new empty edit. + Create a new empty edit. + beginA: start and end of region in sequence A; 0 based. + beginB: start and end of region in sequence B; 0 based. +
+ + Create a new edit. + Create a new edit. + beginA: start of region in sequence A; 0 based. + endA: end of region in sequence A; must be >= as. + beginB: start of region in sequence B; 0 based. + endB: end of region in sequence B; must be >= bs. + + + the type of this region + + + true if the edit is empty (lengths of both a and b is zero). + + + start point in sequence A. + + + end point in sequence A. + + + start point in sequence B. + + + end point in sequence B. + + + length of the region in A. + + + length of the region in B. + + + Construct a new edit representing the region before cut. + Construct a new edit representing the region before cut. + + the cut point. The beginning A and B points are used as the + end points of the returned edit. + + + an edit representing the slice of + this + edit that occurs + before + cut + starts. + + + + Construct a new edit representing the region after cut. + Construct a new edit representing the region after cut. + + the cut point. The ending A and B points are used as the + starting points of the returned edit. + + + an edit representing the slice of + this + edit that occurs + after + cut + ends. + + + + + Increase + GetEndA() + by 1. + + + + + Increase + GetEndB() + by 1. + + + + Swap A and B, so the edit goes the other direction. + Swap A and B, so the edit goes the other direction. + + + Type of edit + + + + Specialized list of + Edit + s in a document. + + + + Construct an edit list containing a single edit. + Construct an edit list containing a single edit. + the edit to return in the list. + + list containing only + edit + . + + + + Create a new, empty edit list. + Create a new, empty edit list. + + + Create an empty edit list with the specified capacity. + Create an empty edit list with the specified capacity. + + the initial capacity of the edit list. If additional edits are + added to the list, it will be grown to support them. 
+ + + + + Diff algorithm, based on "An O(ND) Difference Algorithm and its Variations", + by Eugene Myers. + + + Diff algorithm, based on "An O(ND) Difference Algorithm and its Variations", + by Eugene Myers. +

+ The basic idea is to put the line numbers of text A as columns ("x") and the + lines of text B as rows ("y"). Now you try to find the shortest "edit path" + from the upper left corner to the lower right corner, where you can always go + horizontally or vertically, but diagonally from (x,y) to (x+1,y+1) only if + line x in text A is identical to line y in text B. +

+ Myers' fundamental concept is the "furthest reaching D-path on diagonal k": a + D-path is an edit path starting at the upper left corner and containing + exactly D non-diagonal elements ("differences"). The furthest reaching D-path + on diagonal k is the one that contains the most (diagonal) elements which + ends on diagonal k (where k = y - x). +

+ Example: +

+            H E L L O   W O R L D
+            ____
+            L     \___
+            O         \___
+            W             \________
+            
+

+ Since every D-path has exactly D horizontal or vertical elements, it can only + end on the diagonals -D, -D+2, ..., D-2, D. +

+ Since every furthest reaching D-path contains at least one furthest reaching + (D-1)-path (except for D=0), we can construct them recursively. +

+ Since we are really interested in the shortest edit path, we can start + looking for a 0-path, then a 1-path, and so on, until we find a path that + ends in the lower right corner. +

+ To save space, we do not need to store all paths (which has quadratic space + requirements), but generate the D-paths simultaneously from both sides. When + the ends meet, we will have found "the middle" of the path. From the end + points of that diagonal part, we can generate the rest recursively. +

+ This only requires linear space. +

+ The overall (runtime) complexity is: +

+            O(N * D^2 + 2 * N/2 * (D/2)^2 + 4 * N/4 * (D/4)^2 + ...)
+            = O(N * D^2 * 5 / 4) = O(N * D^2),
+            
+

+ (With each step, we have to find the middle parts of twice as many regions as + before, but the regions (as well as the D) are halved.) +

+ So the overall runtime complexity stays the same with linear space, albeit + with a larger constant factor. + + + +

Singleton instance of MyersDiff. + Singleton instance of MyersDiff. +
+ + + The list of edits found during the last call to + MyersDiff<S>.CalculateEdits(Edit) + + + + + Comparison function for sequences. + Comparison function for sequences. + + + The first text to be compared. + The first text to be compared. Referred to as "Text A" in the comments + + + The second text to be compared. + The second text to be compared. Referred to as "Text B" in the comments + + + Entrypoint into the algorithm this class is all about. + + Entrypoint into the algorithm this class is all about. This method triggers that the + differences between A and B are calculated in form of a list of edits. + + portion of the sequences to examine. + + + Calculates the differences between a given part of A against another given part of B + + start of the part of A which should be compared (0<=beginA<sizeof(A)) + + end of the part of A which should be compared (beginA<=endA<sizeof(A)) + + start of the part of B which should be compared (0<=beginB<sizeof(B)) + + end of the part of B which should be compared (beginB<=endB<sizeof(B)) + + + + two filenames specifying the contents to be diffed + + + Compares two sequences primarily based upon hash codes. + Compares two sequences primarily based upon hash codes. + + + + Compares two + Sequence + s to create an + EditList + of changes. +

+ An algorithm's + diff + method must be callable from concurrent threads + without data collisions. This permits some algorithms to use a singleton + pattern, with concurrent invocations using the same singleton. Other + algorithms may support parameterization, in which case the caller can create + a unique instance per thread. +

+
+ + + the diff algorithm for which an implementation should be + returned + + an implementation of the specified diff algorithm + + + Compare two sequences and identify a list of edits between them. + Compare two sequences and identify a list of edits between them. + the comparator supplying the element equivalence function. + + the first (also known as old or pre-image) sequence. Edits + returned by this algorithm will reference indexes using the + 'A' side: + Edit.GetBeginA() + , + Edit.GetEndA() + . + + + the second (also known as new or post-image) sequence. Edits + returned by this algorithm will reference indexes using the + 'B' side: + Edit.GetBeginB() + , + Edit.GetEndB() + . + + + a modifiable edit list comparing the two sequences. If empty, the + sequences are identical according to + cmp + 's rules. The + result list is never null. + + + + Compare two sequences and identify a list of edits between them. + + Compare two sequences and identify a list of edits between them. + This method should be invoked only after the two sequences have been + proven to have no common starting or ending elements. The expected + elimination of common starting and ending elements is automatically + performed by the + Diff<S>(SequenceComparator<S>, Sequence, Sequence) + + method, which invokes this method using + Subsequence<S> + s. + + the comparator supplying the element equivalence function. + + the first (also known as old or pre-image) sequence. Edits + returned by this algorithm will reference indexes using the + 'A' side: + Edit.GetBeginA() + , + Edit.GetEndA() + . + + + the second (also known as new or post-image) sequence. Edits + returned by this algorithm will reference indexes using the + 'B' side: + Edit.GetBeginB() + , + Edit.GetEndB() + . + + a modifiable edit list comparing the two sequences. + + + Supported diff algorithm + + + Compare two sequences and identify a list of edits between them. 
+ + Compare two sequences and identify a list of edits between them. + This method should be invoked only after the two sequences have been + proven to have no common starting or ending elements. The expected + elimination of common starting and ending elements is automatically + performed by the + DiffAlgorithm.Diff<S>(SequenceComparator<S>, Sequence, Sequence) + + method, which invokes this method using + Subsequence<S> + s. + + result list to append the region's edits onto. + the comparator supplying the element equivalence function. + + the first (also known as old or pre-image) sequence. Edits + returned by this algorithm will reference indexes using the + 'A' side: + Edit.GetBeginA() + , + Edit.GetEndA() + . + + + the second (also known as new or post-image) sequence. Edits + returned by this algorithm will reference indexes using the + 'B' side: + Edit.GetBeginB() + , + Edit.GetEndB() + . + + the region being compared within the two sequences. + + + + A class to help bisecting the sequences a and b to find minimal + edit paths. + + + A class to help bisecting the sequences a and b to find minimal + edit paths. + As the arrays are reused for space efficiency, you will need one + instance per thread. + The entry function is the calculate() method. + + + + A Sequence supporting UNIX formatted text in byte[] format. + + A Sequence supporting UNIX formatted text in byte[] format. +

+ Elements of the sequence are the lines of the file, as delimited by the UNIX + newline character ('\n'). The file content is treated as 8 bit binary text, + with no assumptions or requirements on character encoding. +

+ Note that the first line of the file is element 0, as defined by the Sequence + interface API. Traditionally in a text editor a patch file the first line is + line number 1. Callers may need to subtract 1 prior to invoking methods if + they are converting from "line number" to "element index". + + + +

Arbitrary sequence of elements. + + Arbitrary sequence of elements. +

+ A sequence of elements is defined to contain elements in the index range + [0, + Size() + ), like a standard Java List implementation. + Unlike a List, the members of the sequence are not directly obtainable. +

+ Implementations of Sequence are primarily intended for use in content + difference detection algorithms, to produce an + EditList + of + Edit + instances describing how two Sequence instances differ. +

+ To be compared against another Sequence of the same type, a supporting + SequenceComparator<S> + must also be supplied. + + + + total number of items in the sequence. + + +

+ Number of bytes to check for heuristics in + IsBinary(byte[]) + + +
+ + A Rawtext of length 0 + + + The file content for this sequence. + The file content for this sequence. + + + + Map of line number to starting position within + content + . + + + + Create a new sequence from an existing content byte array. + + Create a new sequence from an existing content byte array. +

+ The entire array (indexes 0 through length-1) is used as the content. + + + the content array. The array is never modified, so passing + through cached arrays is safe. + + + +

Create a new sequence from a file. + + Create a new sequence from a file. +

+ The entire file contents are used. + + the text file. + if Exceptions occur while reading the file + + + + total number of items in the sequence. + + +

Write a specific line to the output stream, without its trailing LF. + + Write a specific line to the output stream, without its trailing LF. +

+ The specified line is copied as-is, with no character encoding + translation performed. +

+ If the specified line ends with an LF ('\n'), the LF is not + copied. It is up to the caller to write the LF, if desired, between + output lines. + + stream to copy the line data onto. + + index of the line to extract. Note this is 0-based, so line + number 1 is actually index 0. + + the stream write operation failed. + + +

Determine if the file ends with a LF ('\n'). + Determine if the file ends with a LF ('\n'). + true if the last line has an LF; false otherwise. +
+ + Get the text for a single line. + Get the text for a single line. + + index of the line to extract. Note this is 0-based, so line + number 1 is actually index 0. + + the text for the line, without a trailing LF. + + + Get the text for a region of lines. + Get the text for a region of lines. + + index of the first line to extract. Note this is 0-based, so + line number 1 is actually index 0. + + index of one past the last line to extract. + + if true the trailing LF ('\n') of the last returned line is + dropped, if present. + + + the text for lines + [begin, end) + . + + + + Decode a region of the text into a String. + + Decode a region of the text into a String. + The default implementation of this method tries to guess the character + set by considering UTF-8, the platform default, and falling back on + ISO-8859-1 if neither of those can correctly decode the region given. + + first byte of the content to decode. + one past the last byte of the content to decode. + + the region + [start, end) + decoded as a String. + + + + + Determine heuristically whether a byte array represents binary (as + opposed to text) content. + + + Determine heuristically whether a byte array represents binary (as + opposed to text) content. + + the raw file content. + true if raw is likely to be a binary file, false otherwise + + + + Determine heuristically whether the bytes contained in a stream + represents binary (as opposed to text) content. + + + Determine heuristically whether the bytes contained in a stream + represents binary (as opposed to text) content. + Note: Do not further use this stream after having called this method! The + stream may not be fully read and will be left at an unknown position + after consuming an unknown number of bytes. The caller is responsible for + closing the stream. + + input stream containing the raw file content. 
+ true if raw is likely to be a binary file, false otherwise + if input stream could not be read + + + + Determine heuristically whether a byte array represents binary (as + opposed to text) content. + + + Determine heuristically whether a byte array represents binary (as + opposed to text) content. + + the raw file content. + + number of bytes in + raw + to evaluate. This should be + raw.length + unless + raw + was over-allocated by + the caller. + + true if raw is likely to be a binary file, false otherwise + + + Get the line delimiter for the first line. + Get the line delimiter for the first line. + 2.0 + the line delimiter or null + + + Detect and resolve object renames. + Detect and resolve object renames. + + + Similarity score required to pair an add/delete as a rename. + Similarity score required to pair an add/delete as a rename. + + + Similarity score required to keep modified file pairs together. + + Similarity score required to keep modified file pairs together. Any + modified file pairs with a similarity score below this will be broken + apart. + + + + Limit in the number of files to consider for renames. + Limit in the number of files to consider for renames. + + + Set if the number of adds or deletes was over the limit. + Set if the number of adds or deletes was over the limit. + + + Create a new rename detector for the given repository + the repository to use for rename detection + + + + minimum score required to pair an add/delete as a rename. The + score ranges are within the bounds of (0, 100). + + + + Set the minimum score required to pair an add/delete as a rename. + + Set the minimum score required to pair an add/delete as a rename. +

+ When comparing two files together their score must be greater than or + equal to the rename score for them to be considered a rename match. The + score is computed based on content similarity, so a score of 60 implies + that approximately 60% of the bytes in the files are identical. + + new rename score, must be within [0, 100]. + the score was not within [0, 100]. + + + + the similarity score required to keep modified file pairs + together. Any modify pairs that score below this will be broken + apart into separate add/deletes. Values less than or equal to + zero indicate that no modifies will be broken apart. Values over + 100 cause all modify pairs to be broken. + + + + + the similarity score required to keep modified file pairs + together. Any modify pairs that score below this will be + broken apart into separate add/deletes. Values less than or + equal to zero indicate that no modifies will be broken apart. + Values over 100 cause all modify pairs to be broken. + + + + limit on number of paths to perform inexact rename detection. + + +

Set the limit on the number of files to perform inexact rename detection. + + + Set the limit on the number of files to perform inexact rename detection. +

+ The rename detector has to build a square matrix of the rename limit on + each side, then perform that many file compares to determine similarity. + If 1000 files are added, and 1000 files are deleted, a 1000*1000 matrix + must be allocated, and 1,000,000 file compares may need to be performed. + + new file limit. + + +

Check if the detector is over the rename limit. + + Check if the detector is over the rename limit. +

+ This method can be invoked either before or after + getEntries + has + been used to perform rename detection. + + + true if the detector has more file additions or removals than the + rename limit is currently set to. In such configurations the + detector will skip expensive computation. + + + +

Add entries to be considered for rename detection. + Add entries to be considered for rename detection. + one or more entries to add. + + if + getEntries + was already invoked. + +
+ + Add an entry to be considered for rename detection. + Add an entry to be considered for rename detection. + to add. + + if + getEntries + was already invoked. + + + + Detect renames in the current file set. + + Detect renames in the current file set. +

+ This convenience function runs without a progress monitor. + + + an unmodifiable list of + DiffEntry + s representing all files + that have been changed. + + file contents cannot be read from the repository. + + + +

Detect renames in the current file set. + Detect renames in the current file set. + report progress during the detection phases. + + an unmodifiable list of + DiffEntry + s representing all files + that have been changed. + + file contents cannot be read from the repository. + +
+ + Detect renames in the current file set. + Detect renames in the current file set. + reader to obtain objects from the repository with. + report progress during the detection phases. + + an unmodifiable list of + DiffEntry + s representing all files + that have been changed. + + file contents cannot be read from the repository. + + + + Detect renames in the current file set. + Detect renames in the current file set. + reader to obtain objects from the repository with. + report progress during the detection phases. + + an unmodifiable list of + DiffEntry + s representing all files + that have been changed. + + file contents cannot be read from the repository. + + + + Reset this rename detector for another rename detection pass. + Reset this rename detector for another rename detection pass. + + + + + + + + + + + + + Find the best match by file path for a given DiffEntry from a list of + DiffEntrys. + + + Find the best match by file path for a given DiffEntry from a list of + DiffEntrys. The returned DiffEntry will be of the same type as <src>. If + no DiffEntry can be found that has the same type, this method will return + null. + + the DiffEntry to try to find a match for + a list of DiffEntrys to search through + the DiffEntry from <list> who's file path best matches <src> + + + Index structure of lines/blocks in one file. + + Index structure of lines/blocks in one file. +

+ This structure can be used to compute an approximation of the similarity + between two files. The index is used by + SimilarityRenameDetector + to + compute scores between files. +

+ To save space in memory, this index uses a space efficient encoding which + will not exceed 1 MiB per instance. The index starts out at a smaller size + (closer to 2 KiB), but may grow as more distinct blocks within the scanned + file are discovered. + + + +

Shift to apply before storing a key. + + Shift to apply before storing a key. +

+ Within the 64 bit table record space, we leave the highest bit unset so + all values are positive. The lower 32 bits to count bytes. + + + +

Maximum value of the count field, also mask to extract the count. + Maximum value of the count field, also mask to extract the count. +
+ + + A special + TableFullException + used in place of OutOfMemoryError. + + + + Total size of the file we hashed into the structure. + Total size of the file we hashed into the structure. + + + + Number of non-zero entries in + idHash + . + + + + + idSize + that triggers + idHash + to double in size. + + + + Pairings of content keys and counters. + + Pairings of content keys and counters. +

+ Slots in the table are actually two ints wedged into a single long. The + upper 32 bits stores the content key, and the remaining lower bits stores + the number of bytes associated with that key. Empty slots are denoted by + 0, which cannot occur because the count cannot be 0. Values can only be + positive, which we enforce during key addition. + + + +

+ idHash.length == 1 << idHashBits + . + +
+ + + + + + + + + + + + + + Sort the internal table so it can be used for efficient scoring. + + Sort the internal table so it can be used for efficient scoring. +

+ Once sorted, additional lines/blocks cannot be added to the index. + + + + + + + + + + + + +

Number of bits we need to express an index into src or dst list. + + Number of bits we need to express an index into src or dst list. +

+ This must be 28, giving us a limit of 2^28 entries in either list, which + is an insane limit of 536,870,912 file names being considered in a single + rename pass. The other 8 bits are used to store the score, while staying + under 127 so the long doesn't go negative. + + + +

All sources to consider for copies or renames. + + All sources to consider for copies or renames. +

+ A source is typically a + ChangeType.DELETE + change, but could be + another type when trying to perform copy detection concurrently with + rename detection. + + + +

All destinations to consider looking for a rename. + + All destinations to consider looking for a rename. +

+ A destination is typically an + ChangeType.ADD + , as the name has + just come into existence, and we want to discover where its initial + content came from. + + + +

Matrix of all examined file pairs, and their scores. + + Matrix of all examined file pairs, and their scores. +

+ The upper 8 bits of each long stores the score, but the score is bounded + to be in the range (0, 128] so that the highest bit is never set, and all + entries are therefore positive. +

+ List indexes to an element of + srcs + and + dsts + are encoded + as the lower two groups of 28 bits, respectively, but the encoding is + inverted, so that 0 is expressed as + (1 << 28) - 1 + . This sorts + lower list indices later in the matrix, giving precedence to files whose + names sort earlier in the tree. + + + +

Score a pair must exceed to be considered a rename. + Score a pair must exceed to be considered a rename. +
+ + + Set if any + TableFullException + occurs. + + + + + + + + + + + + + + + + + Support for the Git dircache (aka index file). + + Support for the Git dircache (aka index file). +

+ The index file keeps track of which objects are currently checked out in the + working directory, and the last modified time of those working files. Changes + in the working directory can be detected by comparing the modification times + to the cached modification time within the index file. +

+ Index files are also used during merges, where the merge happens within the + index file first, and the working directory is updated as a post-merge step. + Conflicts are stored in the index file to allow tool (and human) based + resolutions to be easily performed. + + + +

Create a new empty index which is never stored on disk. + Create a new empty index which is never stored on disk. + + an empty cache which has no backing store file. The cache may not + be read or written, but it may be queried and updated (in + memory). + +
+ + Create a new in-core index representation and read an index from disk. + + Create a new in-core index representation and read an index from disk. +

+ The new index will be read before it is returned to the caller. Read + failures are reported as exceptions and therefore prevent the method from + returning a partially populated index. + + repository containing the index to read + + a cache representing the contents of the specified index file (if + it exists) or an empty cache if the file does not exist. + + the index file is present but could not be read. + + + the index file is using a format or extension that this + library does not support. + + + +

Create a new in-core index representation and read an index from disk. + + Create a new in-core index representation and read an index from disk. +

+ The new index will be read before it is returned to the caller. Read + failures are reported as exceptions and therefore prevent the method from + returning a partially populated index. + + location of the index file on disk. + + the file system abstraction which will be necessary to perform + certain file system operations. + + + a cache representing the contents of the specified index file (if + it exists) or an empty cache if the file does not exist. + + the index file is present but could not be read. + + + the index file is using a format or extension that this + library does not support. + + + +

Create a new in-core index representation, lock it, and read from disk. + + Create a new in-core index representation, lock it, and read from disk. +

+ The new index will be locked and then read before it is returned to the + caller. Read failures are reported as exceptions and therefore prevent + the method from returning a partially populated index. On read failure, + the lock is released. + + location of the index file on disk. + + the file system abstraction which will be necessary to perform + certain file system operations. + + + a cache representing the contents of the specified index file (if + it exists) or an empty cache if the file does not exist. + + + the index file is present but could not be read, or the lock + could not be obtained. + + + the index file is using a format or extension that this + library does not support. + + + +

Create a new in-core index representation, lock it, and read from disk. + + Create a new in-core index representation, lock it, and read from disk. +

+ The new index will be locked and then read before it is returned to the + caller. Read failures are reported as exceptions and therefore prevent + the method from returning a partially populated index. On read failure, + the lock is released. + + repository containing the index to lock and read + listener to be informed when DirCache is committed + + + a cache representing the contents of the specified index file (if + it exists) or an empty cache if the file does not exist. + + + the index file is present but could not be read, or the lock + could not be obtained. + + + the index file is using a format or extension that this + library does not support. + + 2.0 + + +

Create a new in-core index representation, lock it, and read from disk. + + Create a new in-core index representation, lock it, and read from disk. +

+ The new index will be locked and then read before it is returned to the + caller. Read failures are reported as exceptions and therefore prevent + the method from returning a partially populated index. On read failure, + the lock is released. + + location of the index file on disk. + + the file system abstraction which will be necessary to perform + certain file system operations. + + listener to be informed when DirCache is committed + + + a cache representing the contents of the specified index file (if + it exists) or an empty cache if the file does not exist. + + + the index file is present but could not be read, or the lock + could not be obtained. + + + the index file is using a format or extension that this + library does not support. + + + +

Location of the current version of the index file. + Location of the current version of the index file. +
+ + Individual file index entries, sorted by path name. + Individual file index entries, sorted by path name. + + + + Number of positions within + sortedEntries + that are valid. + + + + Cache tree for this index; null if the cache tree is not available. + Cache tree for this index; null if the cache tree is not available. + + + Our active lock (if we hold it); null if we don't have it locked. + Our active lock (if we hold it); null if we don't have it locked. + + + file system abstraction + + + Keep track of whether the index has changed or not + + + index checksum when index was read from disk + + + index checksum when index was written to disk + + + listener to be informed on commit + + + Repository containing this index + + + Create a new in-core index representation. + + Create a new in-core index representation. +

+ The new index will be empty. Callers may wish to read from the on disk + file first with + Read() + . + + location of the index file on disk. + + the file system abstraction which will be necessary to perform + certain file system operations. + + + +

Create a new builder to update this cache. + + Create a new builder to update this cache. +

+ Callers should add all entries to the builder, then use + DirCacheBuilder.Finish() + to update this instance. + + a new builder instance for this cache. + + +

Create a new editor to recreate this cache. + + Create a new editor to recreate this cache. +

+ Callers should add commands to the editor, then use + DirCacheEditor.Finish() + to update this instance. + + a new builder instance for this cache. + + +

Read the index from disk, if it has changed on disk. + + Read the index from disk, if it has changed on disk. +

+ This method tries to avoid loading the index if it has not changed since + the last time we consulted it. A missing index file will be treated as + though it were present but had no file entries in it. + + + the index file is present but could not be read. This + DirCache instance may not be populated correctly. + + + the index file is using a format or extension that this + library does not support. + + + + true if the memory state differs from the index file + System.IO.IOException + + +

Empty this index, removing all entries. + Empty this index, removing all entries. +
+ + + + + + + + + + + + Try to establish an update lock on the cache file. + Try to establish an update lock on the cache file. + + true if the lock is now held by the caller; false if it is held + by someone else. + + + the output file could not be created. The caller does not + hold the lock. + + + + Write the entry records from memory to disk. + + Write the entry records from memory to disk. +

+ The cache must be locked first by calling + Lock() + and receiving + true as the return value. Applications are encouraged to lock the index, + then invoke + Read() + to ensure the in-memory data is current, + prior to updating the in-memory entries. +

+ Once written the lock is closed and must be either committed with + Commit() + or rolled back with + Unlock() + . + + + the output file could not be created. The caller no longer + holds the lock. + + + + + + +

Commit this change and release the lock. + + Commit this change and release the lock. +

+ If this method fails (returns false) the lock is still released. + + + true if the commit was successful and the file contains the new + data; false if the commit failed and the file remains with the + old data. + + the lock is not held. + + +

Unlock this file and abort this change. + + Unlock this file and abort this change. +

+ The temporary file (if created) is deleted before returning. + + + +

Locate the position a path's entry is at in the index. + + Locate the position a path's entry is at in the index. +

+ If there is at least one entry in the index for this path the position of + the lowest stage is returned. Subsequent stages can be identified by + testing consecutive entries until the path differs. +

+ If no path matches the entry -(position+1) is returned, where position is + the location it would have gone within the index. + + the path to search for. + + if >= 0 then the return value is the position of the entry in the + index; pass to + GetEntry(int) + to obtain the entry + information. If < 0 the entry does not exist in the index. + + + +

Determine the next index position past all entries with the same name. + + Determine the next index position past all entries with the same name. +

+ As index entries are sorted by path name, then stage number, this method + advances the supplied position to the first position in the index whose + path name does not match the path name of the supplied position's entry. + + entry position of the path that should be skipped. + position of the next entry whose path is after the input. + + +

Total number of file entries stored in the index. + + Total number of file entries stored in the index. +

+ This count includes unmerged stages for a file entry if the file is + currently conflicted in a merge. This means the total number of entries + in the index may be up to 3 times larger than the number of files in the + working directory. +

+ Note that this value counts only files. + + number of entries available. + GetEntry(int) + + +

Get a specific entry. + Get a specific entry. + position of the entry to get. + the entry at position i. +
+ + Get a specific entry. + Get a specific entry. + the path to search for. + the entry for the given path. + + + Recursively get all entries within a subtree. + Recursively get all entries within a subtree. + the subtree path to get all entries within. + all entries recursively contained within the subtree. + + + Obtain (or build) the current cache tree structure. + + Obtain (or build) the current cache tree structure. +

+ This method can optionally recreate the cache tree, without flushing the + tree objects themselves to disk. + + + if true and the cache tree is not present in the index it will + be generated and returned to the caller. + + + the cache tree; null if there is no current cache tree available + and build was false. + + + +

Write all index trees to the object store, returning the root tree. + Write all index trees to the object store, returning the root tree. + + the writer to use when serializing to the store. The caller is + responsible for flushing the inserter before trying to use the + returned tree identity. + + identity for the root tree. + + one or more paths contain higher-order stages (stage > 0), + which cannot be stored in a tree object. + + + one or more paths contain an invalid mode which should never + appear in a tree object. + + an unexpected error occurred writing to the object store. + +
+ + Tells whether this index contains unmerged paths. + Tells whether this index contains unmerged paths. + + + true + if this index contains unmerged paths. Means: at + least one entry is of a stage different from 0. + false + will be returned if all entries are of stage 0. + + + + Update any smudged entries with information from the working tree. + Update any smudged entries with information from the working tree. + System.IO.IOException + + + + Iterate and update a + DirCache + as part of a TreeWalk. +

+ Like + DirCacheIterator + this iterator allows a DirCache to be used in + parallel with other sorts of iterators in a TreeWalk. However any entry which + appears in the source DirCache and which is skipped by the TreeFilter is + automatically copied into + DirCacheBuilder + , thus retaining it in the + newly updated index. +

+ This iterator is suitable for update processes, or even a simple delete + algorithm. For example deleting a path: +

+            DirCache dirc = db.LockDirCache();
+            DirCacheBuilder edit = dirc.Builder();
+            TreeWalk walk = new TreeWalk(db);
+            walk.Reset();
+            walk.Recursive = true;
+            walk.Filter = PathFilter.Create("name/to/remove");
+            walk.AddTree(new DirCacheBuildIterator(edit));
+            while (walk.Next())
+            ; // do nothing on a match as we want to remove matches
+            edit.Commit();
+            
+
+
+ + + Iterate a + DirCache + as part of a TreeWalk. +

+ This is an iterator to adapt a loaded DirCache instance (such as + read from an existing .git/index file) to the tree structure + used by a TreeWalk, making it possible for applications to walk + over any combination of tree objects already in the object database, index + files, or working directories. +

+ NGit.Treewalk.TreeWalk +
+ + Walks a Git tree (directory) in Git sort order. + + Walks a Git tree (directory) in Git sort order. +

+ A new iterator instance should be positioned on the first entry, or at eof. + Data for the first entry (if not at eof) should be available immediately. +

+ Implementors must walk a tree in the Git sort order, which has the following + odd sorting: +

    +
  1. A.c
  2. +
  3. A/c
  4. +
  5. A0c
  6. +
+

+ In the second item, A is the name of a subtree and + c is a file within that subtree. The other two items are files + in the root level tree. + + CanonicalTreeParser + + +

+ Default size for the + path + buffer. + +
+ + A dummy object id buffer that matches the zero ObjectId. + A dummy object id buffer that matches the zero ObjectId. + + + Iterator for the parent tree; null if we are the root iterator. + Iterator for the parent tree; null if we are the root iterator. + + + The iterator this current entry is path equal to. + The iterator this current entry is path equal to. + + + Number of entries we moved forward to force a D/F conflict match. + Number of entries we moved forward to force a D/F conflict match. + NameConflictTreeWalk + + + Mode bits for the current entry. + + Mode bits for the current entry. +

+ A numerical value from FileMode is usually faster for an iterator to + obtain from its data source so this is the preferred representation. + + NGit.FileMode + + +

Path buffer for the current entry. + + Path buffer for the current entry. +

+ This buffer is pre-allocated at the start of walking and is shared from + parent iterators down into their subtree iterators. The sharing allows + the current entry to always be a full path from the root, while each + subtree only needs to populate the part that is under their control. + + + +

+ Position within + path + this iterator starts writing at. +

+ This is the first offset in + path + that this iterator must + populate during + Next(int) + . At the root level (when + parent + is null) this is 0. For a subtree iterator the index before this position + should have the value '/'. +

+
+ + Total length of the current entry's complete path from the root. + + Total length of the current entry's complete path from the root. +

+ This is the number of bytes within + path + that pertain to the + current entry. Values at this index through the end of the array are + garbage and may be randomly populated from prior entries. + + + +

Create a new iterator with no parent. + Create a new iterator with no parent. +
+ + Create a new iterator with no parent and a prefix. + + Create a new iterator with no parent and a prefix. +

+ The prefix path supplied is inserted in front of all paths generated by + this iterator. It is intended to be used when an iterator is being + created for a subsection of an overall repository and needs to be + combined with other iterators that are created to run over the entire + repository namespace. + + + position of this iterator in the repository tree. The value + may be null or the empty string to indicate the prefix is the + root of the repository. A trailing slash ('/') is + automatically appended if the prefix does not end in '/'. + + + +

Create a new iterator with no parent and a prefix. + + Create a new iterator with no parent and a prefix. +

+ The prefix path supplied is inserted in front of all paths generated by + this iterator. It is intended to be used when an iterator is being + created for a subsection of an overall repository and needs to be + combined with other iterators that are created to run over the entire + repository namespace. + + + position of this iterator in the repository tree. The value + may be null or the empty array to indicate the prefix is the + root of the repository. A trailing slash ('/') is + automatically appended if the prefix does not end in '/'. + + + +

Create an iterator for a subtree of an existing iterator. + Create an iterator for a subtree of an existing iterator. + parent tree iterator. +
+ + Create an iterator for a subtree of an existing iterator. + + Create an iterator for a subtree of an existing iterator. +

+ The caller is responsible for setting up the path of the child iterator. + + parent tree iterator. + + path array to be used by the child iterator. This path must + contain the path from the top of the walk to the first child + and must end with a '/'. + + + position within childPath where the child can + insert its data. The value at + childPath[childPathOffset-1] must be '/'. + + + +

Grow the path buffer larger. + Grow the path buffer larger. + + number of live bytes in the path buffer. This many bytes will + be moved into the larger buffer. + +
+ + + Ensure that path is capable to hold at least + capacity + bytes + + the amount of bytes to hold + the amount of live bytes in path buffer + + + Set path buffer capacity to the specified size + the new size + the amount of bytes to copy + + + Compare the path of this current entry to another iterator's entry. + Compare the path of this current entry to another iterator's entry. + the other iterator to compare the path against. + + -1 if this entry sorts first; 0 if the entries are equal; 1 if + p's entry sorts first. + + + + Compare the path of this current entry to a raw buffer. + Compare the path of this current entry to a raw buffer. + the raw path buffer. + position to start reading the raw buffer. + one past the end of the raw buffer (length is end - pos). + the mode of the path. + + -1 if this entry sorts first; 0 if the entries are equal; 1 if + p's entry sorts first. + + + + Check if the current entry of both iterators has the same id. + + Check if the current entry of both iterators has the same id. +

+ This method is faster than + EntryObjectId() + as it does not + require copying the bytes out of the buffers. A direct + IdBuffer() + compare operation is performed. + + the other iterator to test against. + true if both iterators have the same object id; false otherwise. + + +

Obtain the ObjectId for the current entry. + Obtain the ObjectId for the current entry. + buffer to copy the object id into. +
+ + the internal buffer holding the current path. + + + + length of the path in + GetEntryPathBuffer() + . + + + + Get the current entry's path hash code. + + Get the current entry's path hash code. +

+ This method computes a hash code on the fly for this path, the hash is + suitable to cluster objects that may have similar paths together. + + path hash code; any integer may be returned. + + +

Create a new iterator for the current entry's subtree. + + Create a new iterator for the current entry's subtree. +

+ The parent reference of the iterator must be this, + otherwise the caller would not be able to exit out of the subtree + iterator correctly and return to continue walking this. + + reader to load the tree data from. + a new parser that walks over the current subtree. + + the current entry is not actually a tree and cannot be parsed + as though it were a tree. + + a loose object or pack file could not be read. + + + +

Create a new iterator as though the current entry were a subtree. + Create a new iterator as though the current entry were a subtree. + a new empty tree iterator. +
+ + Create a new iterator for the current entry's subtree. + + Create a new iterator for the current entry's subtree. +

+ The parent reference of the iterator must be this, otherwise + the caller would not be able to exit out of the subtree iterator + correctly and return to continue walking this. + + reader to load the tree data from. + temporary ObjectId buffer for use by this method. + a new parser that walks over the current subtree. + + the current entry is not actually a tree and cannot be parsed + as though it were a tree. + + a loose object or pack file could not be read. + + + +

Position this iterator on the first entry. + + Position this iterator on the first entry. + The default implementation of this method uses + back(1) + until + first() + is true. This is most likely not the most efficient + method of repositioning the iterator to its first entry, so subclasses + are strongly encouraged to override the method. + + the tree is invalid. +
+ + Move to next entry, populating this iterator with the entry data. + + Move to next entry, populating this iterator with the entry data. +

+ The delta indicates how many moves forward should occur. The most common + delta is 1 to move to the next entry. +

+ Implementations must populate the following members: +

    +
  • + mode +
  • +
  • + path + (from + pathOffset + to + pathLen + )
  • +
  • + pathLen +
  • +
+ as well as any implementation dependent information necessary to + accurately return data from + IdBuffer() + and + IdOffset() + when demanded. +
+ + number of entries to move the iterator by. Must be a positive, + non-zero integer. + + the tree is invalid. +
+ + Move to prior entry, populating this iterator with the entry data. + + Move to prior entry, populating this iterator with the entry data. +

+ The delta indicates how many moves backward should occur. The most common + delta is 1 to move to the prior entry. +

+ Implementations must populate the following members: +

    +
  • + mode +
  • +
  • + path + (from + pathOffset + to + pathLen + )
  • +
  • + pathLen +
  • +
+ as well as any implementation dependent information necessary to + accurately return data from + IdBuffer() + and + IdOffset() + when demanded. +
+ + number of entries to move the iterator by. Must be a positive, + non-zero integer. + + the tree is invalid. +
+ + Advance to the next tree entry, populating this iterator with its data. + + Advance to the next tree entry, populating this iterator with its data. +

+ This method behaves like seek(1) but is called by + TreeWalk + only if a + NGit.Treewalk.Filter.TreeFilter + was used and ruled out the + current entry from the results. In such cases this tree iterator may + perform special behavior. + + the tree is invalid. + + +

Indicates to the iterator that no more entries will be read. + + Indicates to the iterator that no more entries will be read. +

+ This is only invoked by TreeWalk when the iteration is aborted early due + to a + NGit.Errors.StopWalkException + being thrown from + within a TreeFilter. + + + +

+ JGit internal API for use by + NGit.Dircache.DirCacheCheckout + + + start of name component part within + GetEntryPathBuffer() + + 2.0 +
+ + + Get the name component of the current entry path into the provided + buffer. + + + Get the name component of the current entry path into the provided + buffer. + + + the buffer to get the name into, it is assumed that buffer can + hold the name + + the offset of the name in the buffer + NameLength() + + + true if the entry has a valid ObjectId. + + + Get the object id of the current entry. + Get the object id of the current entry. + an object id for the current entry. + + + the file mode of the current entry. + + + the file mode of the current entry as bits + + + path of the current entry, as a string. + + + Get the byte array buffer object IDs must be copied out of. + + Get the byte array buffer object IDs must be copied out of. +

+ The id buffer contains the bytes necessary to construct an ObjectId for + the current entry of this iterator. The buffer can be the same buffer for + all entries, or it can be a unique buffer per-entry. Implementations are + encouraged to expose their private buffer whenever possible to reduce + garbage generation and copying costs. + + byte array the implementation stores object IDs within. + EntryObjectId() + + +

+ Get the position within + IdBuffer() + of this entry's ObjectId. + + + offset into the array returned by + IdBuffer() + where the + ObjectId must be copied out of. + +
+ + + Is this tree iterator positioned on its first entry? +

+ An iterator is positioned on the first entry if back(1) + would be an invalid request as there is no entry before the current one. +

+ + Is this tree iterator positioned on its first entry? +

+ An iterator is positioned on the first entry if back(1) + would be an invalid request as there is no entry before the current one. +

+ An empty iterator (one with no entries) will be + first() && eof(). + + true if the iterator is positioned on the first entry. + + +

+ Is this tree iterator at its EOF point (no more entries)? +

+ An iterator is at EOF if there is no current entry. +

+ + Is this tree iterator at its EOF point (no more entries)? +

+ An iterator is at EOF if there is no current entry. + + true if we have walked all entries and have none left. + + + the length of the name component of the path for the current entry + + +

The cache this iterator was created to walk. + The cache this iterator was created to walk. +
+ + The tree this iterator is walking. + The tree this iterator is walking. + + + First position in this tree. + First position in this tree. + + + Last position in this tree. + Last position in this tree. + + + + Special buffer to hold the ObjectId of + currentSubtree + . + + + + + Index of entry within + cache + . + + + + + Next subtree to consider within + tree + . + + + + + The current file entry from + cache + . + + + + + The subtree containing + currentEntry + if this is first entry. + + + + Create a new iterator for an already loaded DirCache instance. + + Create a new iterator for an already loaded DirCache instance. +

+ The iterator implementation may copy part of the cache's data during + construction, so the cache must be read in prior to creating the + iterator. + + the cache to walk. It must be already loaded into memory. + + + + + + +

Get the DirCacheEntry for the current file. + Get the DirCacheEntry for the current file. + + the current cache entry, if this iterator is positioned on a + non-tree. + +
+ + Create a new iterator for an already loaded DirCache instance. + + Create a new iterator for an already loaded DirCache instance. +

+ The iterator implementation may copy part of the cache's data during + construction, so the cache must be read in prior to creating the + iterator. + + + the cache builder for the cache to walk. The cache must be + already loaded into memory. + + + + + + + + + + +

+ Updates a + DirCache + by adding individual + DirCacheEntry + s. +

+ A builder always starts from a clean slate and appends in every single + DirCacheEntry which the final updated index must have to reflect + its new content. +

+ For maximum performance applications should add entries in path name order. + Adding entries out of order is permitted, however a final sorting pass will + be implicitly performed during + Finish() + to correct any out-of-order + entries. Duplicate detection is also delayed until the sorting is complete. +

+ DirCacheEditor +
+ + Construct a new builder. + Construct a new builder. + the cache this builder will eventually update. + + estimated number of entries the builder will have upon + completion. This sizes the initial entry table. + + + + Append one entry into the resulting entry list. + + Append one entry into the resulting entry list. +

+ The entry is placed at the end of the entry list. If the entry causes the + list to now be incorrectly sorted a final sorting phase will be + automatically enabled within + Finish() + . +

+ The internal entry table is automatically expanded if there is + insufficient space for the new addition. + + the new entry to add. + If the FileMode of the entry was not set by the caller. + + + +

Add a range of existing entries from the destination cache. + + Add a range of existing entries from the destination cache. +

+ The entries are placed at the end of the entry list. If any of the + entries causes the list to now be incorrectly sorted a final sorting + phase will be automatically enabled within + Finish() + . +

+ This method copies from the destination cache, which has not yet been + updated with this editor's new table. So all offsets into the destination + cache are not affected by any updates that may be currently taking place + in this editor. +

+ The internal entry table is automatically expanded if there is + insufficient space for the new additions. + + first entry to copy from the destination cache. + number of entries to copy. + + +

Recursively add an entire tree into this builder. + + Recursively add an entire tree into this builder. +

+ If pathPrefix is "a/b" and the tree contains file "c" then the resulting + DirCacheEntry will have the path "a/b/c". +

+ All entries are inserted at stage 0, therefore assuming that the + application will not insert any other paths with the same pathPrefix. + + + UTF-8 encoded prefix to mount the tree's entries at. If the + path does not end with '/' one will be automatically inserted + as necessary. + + stage of the entries when adding them. + + reader the tree(s) will be read from during recursive + traversal. This must be the same repository that the resulting + DirCache would be written out to (or used in) otherwise the + caller is simply asking for deferred MissingObjectExceptions. + Caller is responsible for releasing this reader when done. + + + the tree to recursively add. This tree's contents will appear + under pathPrefix. The ObjectId must be that of a + tree; the caller is responsible for dereferencing a tag or + commit (if necessary). + + a tree cannot be read to iterate through its entries. + + + +

This class handles checking out one or two trees merging with the index. + + This class handles checking out one or two trees merging with the index. + +
+ + a list of updated paths and objectIds + + + a list of conflicts created by this checkout + + + + a list of paths (relative to the start of the working tree) of + files which couldn't be deleted during last call to + Checkout() + . + Checkout() + detected that these + files should be deleted but the deletion in the filesystem failed + (e.g. because a file was locked). To have a consistent state of + the working tree these files have to be deleted by the callers of + DirCacheCheckout + . + + + + a list of all files removed by this checkout + + + + Constructs a DirCacheCeckout for merging and checking out two trees (HEAD + and mergeCommitTree) and the index. + + + Constructs a DirCacheCeckout for merging and checking out two trees (HEAD + and mergeCommitTree) and the index. + + the repository in which we do the checkout + the id of the tree of the head commit + the (already locked) Dircache for this repo + the id of the tree we want to fast-forward to + an iterator over the repositories Working Tree + System.IO.IOException + + + + Constructs a DirCacheCeckout for merging and checking out two trees (HEAD + and mergeCommitTree) and the index. + + + Constructs a DirCacheCeckout for merging and checking out two trees (HEAD + and mergeCommitTree) and the index. As iterator over the working tree + this constructor creates a standard + NGit.Treewalk.FileTreeIterator + + the repository in which we do the checkout + the id of the tree of the head commit + the (already locked) Dircache for this repo + the id of the tree we want to fast-forward to + System.IO.IOException + + + + Constructs a DirCacheCeckout for checking out one tree, merging with the + index. + + + Constructs a DirCacheCeckout for checking out one tree, merging with the + index. 
+ + the repository in which we do the checkout + the (already locked) Dircache for this repo + the id of the tree we want to fast-forward to + an iterator over the repositories Working Tree + System.IO.IOException + + + + Constructs a DirCacheCeckout for checking out one tree, merging with the + index. + + + Constructs a DirCacheCeckout for checking out one tree, merging with the + index. As iterator over the working tree this constructor creates a + standard + NGit.Treewalk.FileTreeIterator + + the repository in which we do the checkout + the (already locked) Dircache for this repo + the id of the tree of the + System.IO.IOException + + + Scan head, index and merge tree. + + Scan head, index and merge tree. Used during normal checkout or merge + operations. + + NGit.Errors.CorruptObjectException + + System.IO.IOException + + + + + + + + Scan index and merge tree (no HEAD). + + Scan index and merge tree (no HEAD). Used e.g. for initial checkout when + there is no head yet. + + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + NGit.Errors.CorruptObjectException + + System.IO.IOException + + + + Processing an entry in the context of + PrescanOneTree() + when only + one tree is given + + the tree to merge + the index + the working tree + System.IO.IOException + + + Execute this checkout + + false if this method could not delete all the files + which should be deleted (e.g. because one of the files was + locked). In this case + GetToBeDeleted() + lists the files + which should be tried to be deleted outside of this method. + Although false is returned the checkout was + successful and the working tree was updated for all other files. + true is returned when no such problem occurred + + System.IO.IOException + + + + + + + + + + + Compares whether two pairs of ObjectId and FileMode are equal. + Compares whether two pairs of ObjectId and FileMode are equal. + + + + + + true if FileModes and ObjectIds are equal. 
+ false otherwise + + + + Here the main work is done. + + Here the main work is done. This method is called for each existing path + in head, index and merge. This method decides what to do with the + corresponding index entry: keep it, update it, remove it or mark a + conflict. + + the entry for the head + the entry for the merge + the entry for the index + the file in the working tree + System.IO.IOException + + + A conflict is detected - add the three different stages to the index + the path of the conflicting entry + the previous index entry + the first tree you want to merge (the HEAD) + the second tree you want to merge + + + + If true, will scan first to see if it's possible to check + out, otherwise throw + NGit.Errors.CheckoutConflictException + + . If + false, it will silently deal with the problem. + + + + + + This method implements how to handle conflicts when + failOnConflict + is false + + NGit.Errors.CheckoutConflictException + + + + + + + + + Updates the file in the working tree with content and mode from an entry + in the index. + + + Updates the file in the working tree with content and mode from an entry + in the index. The new content is first written to a new temporary file in + the same directory as the real file. Then that new file is renamed to the + final filename. Use this method only for checkout of a single entry. + Otherwise use + checkoutEntry(Repository, File f, DirCacheEntry, ObjectReader) + instead which allows to reuse one + ObjectReader + for multiple + entries. +

+ TODO: this method works directly on File IO, we may need another + abstraction (like WorkingTreeIterator). This way we could tell e.g. + Eclipse that Files in the workspace got changed + + + + the file to be modified. The parent directory for this file + has to exist already + + the entry containing new mode and content + System.IO.IOException + + +

+ Updates the file in the working tree with content and mode from an entry + in the index. + + + Updates the file in the working tree with content and mode from an entry + in the index. The new content is first written to a new temporary file in + the same directory as the real file. Then that new file is renamed to the + final filename. +

+ TODO: this method works directly on File IO, we may need another + abstraction (like WorkingTreeIterator). This way we could tell e.g. + Eclipse that Files in the workspace got changed + + + + the file to be modified. The parent directory for this file + has to exist already + + the entry containing new mode and content + object reader to use for checkout + System.IO.IOException + + +

+ A single file (or stage of a file) in a + DirCache + . +

+ An entry represents exactly one stage of a file. If a file path is unmerged + then multiple DirCacheEntry instances may appear for the same path name. +

+
+ + The standard (fully merged) stage for an entry. + The standard (fully merged) stage for an entry. + + + The base tree revision for an entry. + The base tree revision for an entry. + + + The first tree revision (usually called "ours"). + The first tree revision (usually called "ours"). + + + The second tree revision (usually called "theirs"). + The second tree revision (usually called "theirs"). + + + + Mask applied to data in + P_FLAGS + to get the name length. + + + + In-core flag signaling that the entry should be considered as modified. + In-core flag signaling that the entry should be considered as modified. + + + (Possibly shared) header information storage. + (Possibly shared) header information storage. + + + + First location within + info + where our header starts. + + + + Our encoded path name, from the root of the repository. + Our encoded path name, from the root of the repository. + + + Flags which are never stored to disk. + Flags which are never stored to disk. + + + + + + Create an empty entry at stage 0. + Create an empty entry at stage 0. + name of the cache entry. + + If the path starts or ends with "/", or contains "//" either + "\0". These sequences are not permitted in a git tree object + or DirCache file. + + + + Create an empty entry at the specified stage. + Create an empty entry at the specified stage. + name of the cache entry. + the stage index of the new entry. + + If the path starts or ends with "/", or contains "//" either + "\0". These sequences are not permitted in a git tree object + or DirCache file. Or if + stage + is outside of the + range 0..3, inclusive. + + + + Create an empty entry at stage 0. + Create an empty entry at stage 0. + name of the cache entry, in the standard encoding. + + If the path starts or ends with "/", or contains "//" either + "\0". These sequences are not permitted in a git tree object + or DirCache file. + + + + Create an empty entry at the specified stage. 
+ Create an empty entry at the specified stage. + name of the cache entry, in the standard encoding. + the stage index of the new entry. + + If the path starts or ends with "/", or contains "//" either + "\0". These sequences are not permitted in a git tree object + or DirCache file. Or if + stage + is outside of the + range 0..3, inclusive. + + + + + + + + Is it possible for this entry to be accidentally assumed clean? +

+ The "racy git" problem happens when a work file can be updated faster + than the filesystem records file modification timestamps. +

+ + Is it possible for this entry to be accidentally assumed clean? +

+ The "racy git" problem happens when a work file can be updated faster + than the filesystem records file modification timestamps. It is possible + for an application to edit a work file, update the index, then edit it + again before the filesystem will give the work file a new modification + timestamp. This method tests to see if file was written out at the same + time as the index. + + seconds component of the index's last modified time. + nanoseconds component of the index's last modified time. + true if extra careful checks should be used. + + +

Force this entry to no longer match its working tree file. + + Force this entry to no longer match its working tree file. +

+ This avoids the "racy git" problem by making this index entry no longer + match the file in the working directory. Later git will be forced to + compare the file content to ensure the file matches the working tree. + + + +

Returns whether this entry is in the fully-merged stage (0). + Returns whether this entry is in the fully-merged stage (0). + true if this entry is merged +
+ + Get the cached creation time of this file, in milliseconds. + Get the cached creation time of this file, in milliseconds. + + cached creation time of this file, in milliseconds since the + Java epoch (midnight Jan 1, 1970 UTC). + + + + Set the cached creation time of this file, using milliseconds. + Set the cached creation time of this file, using milliseconds. + new cached creation time of the file, in milliseconds. + + + Set the cached size (in bytes) of this file. + Set the cached size (in bytes) of this file. + + new cached size of the file, as bytes. If the file is larger + than 2G, cast it to (int) before calling this method. + + + + Set the cached size (in bytes) of this file. + Set the cached size (in bytes) of this file. + new cached size of the file, as bytes. + + + Obtain the ObjectId for the entry. + + Obtain the ObjectId for the entry. +

+ Using this method to compare ObjectId values between entries is + inefficient as it causes memory allocation. + + object identifier for the entry. + + +

Set the ObjectId for the entry. + Set the ObjectId for the entry. + + new object identifier for the entry. May be + NGit.ObjectId.ZeroId() + to remove the current identifier. + +
+ + Set the ObjectId for the entry from the raw binary representation. + Set the ObjectId for the entry from the raw binary representation. + + the raw byte buffer to read from. At least 20 bytes after p + must be available within this byte array. + + position to read the first byte of data from. + + + Use for debugging only ! + + + Copy the ObjectId and other meta fields from an existing entry. + + Copy the ObjectId and other meta fields from an existing entry. +

+ This method copies everything except the path from one entry to another, + supporting renaming. + + the entry to copy ObjectId and meta fields from. + + +

Copy the ObjectId and other meta fields from an existing entry. + + Copy the ObjectId and other meta fields from an existing entry. +

+ This method copies everything except the path and possibly stage from one + entry to another, supporting renaming. + + the entry to copy ObjectId and meta fields from. + if true, the stage attribute will not be copied + + +

+ Check whether this entry has been smudged or not +

+ If a blob has length 0 we know its id see + NGit.Constants.EMPTY_BLOB_ID + . If an entry + has length 0 and an ID different from the one for empty blob we know this + entry was smudged. +

+ + true if the entry is smudged, false + otherwise + +
+ + + Is this entry always thought to be unmodified? +

+ Most entries in the index do not have this flag set. +

+ + Is this entry always thought to be unmodified? +

+ Most entries in the index do not have this flag set. Users may however + set them on if the file system stat() costs are too high on this working + directory, such as on NFS or SMB volumes. + + true if we must assume the entry is unmodified. +

Set the assume valid flag for this entry. + + true to ignore apparent modifications; false to look at last + modified to detect file modifications. +
+ + true if this entry should be checked for changes + Set whether this entry must be checked for changes + + + + Get the stage of this entry. + + Get the stage of this entry. +

+ Entries have one of 4 possible stages: 0-3. + + the stage of this entry. + + +

Returns whether this entry should be skipped from the working tree. Returns whether this entry should be skipped from the working tree. true if this entry should be skipped. +
+ + Returns whether this entry is intent to be added to the Index. + Returns whether this entry is intent to be added to the Index. + true if this entry is intent to add. + + + + Obtain the raw + NGit.FileMode + bits for this entry. + + mode bits for the entry. + NGit.FileMode.FromBits(int) + + + + Obtain the + NGit.FileMode + for this entry. + + the file mode singleton for this entry. + Set the file mode for this entry. + Set the file mode for this entry. + the new mode constant. + + If + mode + is + NGit.FileMode.MISSING + , + NGit.FileMode.TREE + , or any other type code not permitted + in a tree object. + + + + Get the cached last modification date of this file, in milliseconds. + + Get the cached last modification date of this file, in milliseconds. +

+ One of the indicators that the file has been modified by an application + changing the working tree is if the last modification time for the file + differs from the time stored in this entry. + + + last modification time of this file, in milliseconds since the + Java epoch (midnight Jan 1, 1970 UTC). + +

Set the cached last modification date of this file, using milliseconds. + Set the cached last modification date of this file, using milliseconds. + new cached modification date of the file, in milliseconds. +
+ + Get the cached size (mod 4 GB) (in bytes) of this file. + + Get the cached size (mod 4 GB) (in bytes) of this file. +

+ One of the indicators that the file has been modified by an application + changing the working tree is if the size of the file (in bytes) differs + from the size stored in this entry. +

+ Note that this is the length of the file in the working directory, which + may differ from the size of the decompressed blob if work tree filters + are being used, such as LF<->CRLF conversion. +

+ Note also that for very large files, this is the size of the on-disk file + truncated to 32 bits, i.e. modulo 4294967296. If that value is larger + than 2GB, it will appear negative. + + cached size of the working directory file, in bytes. + + +

Get the entry's complete path. + + Get the entry's complete path. +

+ This method is not very efficient and is primarily meant for debugging + and final output generation. Applications should try to avoid calling it, + and if invoked do so only once per interesting entry, where the name is + absolutely required for correct function. + + + complete path of the entry, from the root of the repository. If + the entry is in a subtree there will be at least one '/' in the + returned string. + + + + true if the entry contains extended flags. + + +

+ Single tree record from the 'TREE' + DirCache + extension. +

+ A valid cache tree record contains the object id of a tree object and the + total number of + DirCacheEntry + instances (counted recursively) from + the DirCache contained within the tree. This information facilitates faster + traversal of the index and quicker generation of tree objects prior to + creating a new commit. +

+ An invalid cache tree record indicates a known subtree whose file entries + have changed in ways that cause the tree to no longer have a known object id. + Invalid cache tree records must be revalidated prior to use. +

+
+ + Tree this tree resides in; null if we are the root. + Tree this tree resides in; null if we are the root. + + + Name of this tree within its parent. + Name of this tree within its parent. + + + + Number of + DirCacheEntry + records that belong to this tree. + + + + Unique SHA-1 of this tree; null if invalid. + Unique SHA-1 of this tree; null if invalid. + + + + Child trees, if any, sorted by + encodedName + . + + + + + Number of valid children in + children + . + + + + + + + Determine if this cache is currently valid. + + Determine if this cache is currently valid. +

+ A valid cache tree knows how many + DirCacheEntry + instances from + the parent + DirCache + reside within this tree (recursively + enumerated). It also knows the object id of the tree, as the tree should + be readily available from the repository's object database. + + + true if this tree is knows key details about itself; false if the + tree needs to be regenerated. + + + +

Get the number of entries this tree spans within the DirCache. + + Get the number of entries this tree spans within the DirCache. +

+ If this tree is not valid (see + IsValid() + ) this method's return + value is always strictly negative (less than 0) but is otherwise an + undefined result. + + total number of entries (recursively) contained within this tree. + + +

Get the number of cached subtrees contained within this tree. + Get the number of cached subtrees contained within this tree. + number of child trees available through this tree. +
+ + Get the i-th child cache tree. + Get the i-th child cache tree. + index of the child to obtain. + the child tree. + + + Get the tree's name within its parent. + + Get the tree's name within its parent. +

+ This method is not very efficient and is primarily meant for debugging + and final output generation. Applications should try to avoid calling it, + and if invoked do so only once per interesting entry, where the name is + absolutely required for correct function. + + name of the tree. This does not contain any '/' characters. + + +

Get the tree's path within the repository. + + Get the tree's path within the repository. +

+ This method is not very efficient and is primarily meant for debugging + and final output generation. Applications should try to avoid calling it, + and if invoked do so only once per interesting entry, where the name is + absolutely required for correct function. + + + path of the tree, relative to the repository root. If this is not + the root tree the path ends with '/'. The root tree's path string + is the empty string (""). + + + +

Write (if necessary) this tree to the object store. + Write (if necessary) this tree to the object store. + the complete cache from DirCache. + + first position of cache that is a member of this + tree. The path of cache[cacheIdx].path for the + range [0,pathOff-1) matches the complete path of + this tree, from the root of the repository. + + + number of bytes of cache[cacheIdx].path that + matches this tree's path. The value at array position + cache[cacheIdx].path[pathOff-1] is always '/' if + pathOff is > 0. + + the writer to use when serializing to the store. + identity of this tree. + + one or more paths contain higher-order stages (stage > 0), + which cannot be stored in a tree object. + + an unexpected error occurred writing to the object store. + +
+ + + + + + Update (if necessary) this tree's entrySpan. + Update (if necessary) this tree's entrySpan. + the complete cache from DirCache. + + number of entries in cache that are valid for + iteration. + + + first position of cache that is a member of this + tree. The path of cache[cacheIdx].path for the + range [0,pathOff-1) matches the complete path of + this tree, from the root of the repository. + + + number of bytes of cache[cacheIdx].path that + matches this tree's path. The value at array position + cache[cacheIdx].path[pathOff-1] is always '/' if + pathOff is > 0. + + + + + An + NGit.AbbreviatedObjectId + cannot be extended. + + + + Construct a MissingObjectException for the specified object id. + + Construct a MissingObjectException for the specified object id. Expected + type is reported to simplify tracking down the problem. + + SHA-1 + the candidate matches returned by the ObjectReader. + + + the AbbreviatedObjectId that has more than one result. + + + the matching candidates (or at least a subset of them). + + + Exception thrown if a conflict occurs during a merge checkout. + Exception thrown if a conflict occurs during a merge checkout. + + + Construct a CheckoutConflictException for the specified file + + + + Construct a CheckoutConflictException for the specified set of files + + + + An exception detailing multiple reasons for failure. + An exception detailing multiple reasons for failure. + + + Constructs an exception detailing many potential reasons for failure. + Constructs an exception detailing many potential reasons for failure. + Two or more exceptions that may have been the problem. + + + Get the complete list of reasons why this failure happened. + Get the complete list of reasons why this failure happened. + unmodifiable collection of all possible reasons. + + + Indicates a text string is not a valid Git style configuration. + Indicates a text string is not a valid Git style configuration. + + + Construct an invalid configuration error. 
+ Construct an invalid configuration error. + why the configuration is invalid. + + + Construct an invalid configuration error. + Construct an invalid configuration error. + why the configuration is invalid. + root cause of the error. + + + Exception thrown when an object cannot be read from Git. + Exception thrown when an object cannot be read from Git. + + + + Construct a CorruptObjectException for reporting a problem specified + object id + + + + + + + Construct a CorruptObjectException for reporting a problem specified + object id + + + + + + + Construct a CorruptObjectException for reporting a problem not associated + with a specific object id. + + + Construct a CorruptObjectException for reporting a problem not associated + with a specific object id. + + + + + Attempt to add an entry to a tree that already exists. + Attempt to add an entry to a tree that already exists. + + + + Construct an EntryExistsException when the specified name already + exists in a tree. + + + Construct an EntryExistsException when the specified name already + exists in a tree. + + workdir relative file name + + + + An exception thrown when a gitlink entry is found and cannot be + handled. + + + An exception thrown when a gitlink entry is found and cannot be + handled. + + + + Construct a GitlinksNotSupportedException for the specified link + name of link in tree or workdir + + + An inconsistency with respect to handling different object types. + + An inconsistency with respect to handling different object types. + This most likely signals a programming error rather than a corrupt + object database. + + + + Construct and IncorrectObjectTypeException for the specified object id. + + Construct and IncorrectObjectTypeException for the specified object id. + Provide the type to make it easier to track down the problem. + + SHA-1 + object type + + + Construct and IncorrectObjectTypeException for the specified object id. 
+ + Construct and IncorrectObjectTypeException for the specified object id. + Provide the type to make it easier to track down the problem. + + SHA-1 + object type + + + Cannot write a modified index. + + Cannot write a modified index. This is a serious error that users need to be + made aware of. + + + + Constructs an IndexWriteException with the default message. + Constructs an IndexWriteException with the default message. + + + Constructs an IndexWriteException with the specified detail message. + Constructs an IndexWriteException with the specified detail message. + message + + + Constructs an IndexWriteException with the specified detail message. + Constructs an IndexWriteException with the specified detail message. + message + root cause exception + + + Thrown when an invalid object id is passed in as an argument. + Thrown when an invalid object id is passed in as an argument. + + + Create exception with bytes of the invalid object id. + Create exception with bytes of the invalid object id. + containing the invalid id. + in the byte array where the error occurred. + of the sequence of invalid bytes. + + + Thrown when a pattern passed in an argument was wrong. + Thrown when a pattern passed in an argument was wrong. + + + explains what was wrong with the pattern. + the invalid pattern. + + + the invalid pattern. + + + An object is too big to load into memory as a single byte array. + An object is too big to load into memory as a single byte array. + + + Create a large object exception, where the object isn't known. + Create a large object exception, where the object isn't known. + + + Create a large object exception, naming the object that is too big. + Create a large object exception, naming the object that is too big. + + identity of the object that is too big to be loaded as a byte + array in this JVM. + + + + identity of the object that is too large; may be null. + + + either the hex encoded name of the object, or 'unknown object'. 
+ + + Set the identity of the object, if its not already set. + Set the identity of the object, if its not already set. + the id of the object that is too large to process. + + + An error caused by the JVM being out of heap space. + An error caused by the JVM being out of heap space. + + + Construct a wrapper around the original OutOfMemoryError. + Construct a wrapper around the original OutOfMemoryError. + the original root cause. + + + Object size exceeds JVM limit of 2 GiB per byte array. + Object size exceeds JVM limit of 2 GiB per byte array. + + + Object size exceeds the caller's upper limit. + Object size exceeds the caller's upper limit. + + + Construct an exception for a particular size being exceeded. + Construct an exception for a particular size being exceeded. + the limit the caller imposed on the object. + the actual size of the object. + + + An expected object is missing. + An expected object is missing. + + + Construct a MissingObjectException for the specified object id. + + Construct a MissingObjectException for the specified object id. + Expected type is reported to simplify tracking down the problem. + + SHA-1 + object type + + + Construct a MissingObjectException for the specified object id. + + Construct a MissingObjectException for the specified object id. + Expected type is reported to simplify tracking down the problem. + + SHA-1 + object type + + + Construct a MissingObjectException for the specified object id. + + Construct a MissingObjectException for the specified object id. Expected + type is reported to simplify tracking down the problem. + + SHA-1 + object type + + + the ObjectId that was not found. + + + + Thrown when a pattern contains a character group which is open to the right + side or a character class which is open to the right side. + + + Thrown when a pattern contains a character group which is open to the right + side or a character class which is open to the right side. 
+ + + + the position of the [ character which has no ] character. + + the unclosed bracket. + the missing closing bracket. + the invalid pattern. + + + + Indicates a + NGit.Repository + has no working directory, and is thus bare. + + + + Creates an exception indicating there is no work tree for a repository. + Creates an exception indicating there is no work tree for a repository. + + + Cannot store an object in the object database. + + Cannot store an object in the object database. This is a serious + error that users need to be made aware of. + + + + Constructs an ObjectWritingException with the specified detail message. + Constructs an ObjectWritingException with the specified detail message. + message + + + Constructs an ObjectWritingException with the specified detail message. + Constructs an ObjectWritingException with the specified detail message. + message + root cause exception + + + Thrown when a PackFile previously failed and is known to be unusable + + + Construct a pack invalid error. + Construct a pack invalid error. + path of the invalid pack file. + + + Construct a pack invalid error. + Construct a pack invalid error. + path of the invalid pack file. + + + Thrown when a PackFile no longer matches the PackIndex. + Thrown when a PackFile no longer matches the PackIndex. + + + Construct a pack modification error. + Construct a pack modification error. + description of the type of error. + + + + Indicates a checked exception was thrown inside of + NGit.Revwalk.RevWalk + . +

+ Usually this exception is thrown from the Iterator created around a RevWalk + instance, as the Iterator API does not allow checked exceptions to be thrown + from hasNext() or next(). The + System.Exception.InnerException() + of this exception + is the original checked exception that we really wanted to throw back to the + application for handling and recovery. +

+
+ + Create a new walk exception an original cause. + Create a new walk exception an original cause. + the checked exception that describes why the walk failed. + + + + This signals a revision or object reference was not + properly formatted. + + + This signals a revision or object reference was not + properly formatted. + + + + + Construct a RevisionSyntaxException indicating a syntax problem with a + revision (or object) string. + + + Construct a RevisionSyntaxException indicating a syntax problem with a + revision (or object) string. + + The problematic revision string + + + + Construct a RevisionSyntaxException indicating a syntax problem with a + revision (or object) string. + + + Construct a RevisionSyntaxException indicating a syntax problem with a + revision (or object) string. + + a specific reason + The problematic revision string + + + Stops the driver loop of walker and finish with current results. + Stops the driver loop of walker and finish with current results. + NGit.Revwalk.Filter.RevFilter + + + Singleton instance for throwing within a filter. + Singleton instance for throwing within a filter. + + + A previously selected representation is no longer available. + A previously selected representation is no longer available. + + + Construct an error for an object. + Construct an error for an object. + the object whose current representation is no longer present. + + + + An exception thrown when a symlink entry is found and cannot be + handled. + + + An exception thrown when a symlink entry is found and cannot be + handled. + + + + Construct a SymlinksNotSupportedException for the specified link + name of link in tree or workdir + + + Common base class for all translation bundle related exceptions. + Common base class for all translation bundle related exceptions. 
+ + + + To construct an instance of + TranslationBundleException + + exception message + bundle class for which the exception occurred + locale for which the exception occurred + + original exception that caused this exception. Usually thrown + from the + Sharpen.ResourceBundle + class. + + + + bundle class for which the exception occurred + + + locale for which the exception occurred + + + + This exception will be thrown when a translation bundle loading + fails. + + + This exception will be thrown when a translation bundle loading + fails. + + + + + Construct a + TranslationBundleLoadingException + for the specified + bundle class and locale. + + the bundle class for which the loading failed + the locale for which the loading failed + + the original exception thrown from the + Sharpen.ResourceBundle.GetBundle(string, System.Globalization.CultureInfo) + method. + + + + + This exception will be thrown when a translation string for a translation + bundle and locale is missing. + + + This exception will be thrown when a translation string for a translation + bundle and locale is missing. + + + + + Construct a + TranslationStringMissingException + for the specified + bundle class, locale and translation key + + the bundle class for which a translation string was missing + + the locale for which a translation string was missing + the key of the missing translation string + + the original exception thrown from the + Sharpen.ResourceBundle.GetString(string) + + method. + + + + the key of the missing translation string + + + Indicates one or more paths in a DirCache have non-zero stages present. + Indicates one or more paths in a DirCache have non-zero stages present. + + + Create a new unmerged path exception. + Create a new unmerged path exception. + the first non-zero stage of the unmerged path. + + + the first non-zero stage of the unmerged path + + + Describes a change to one or more keys in the configuration. 
+ Describes a change to one or more keys in the configuration. + + + Describes a modification made to a repository. + Describes a modification made to a repository. + + + Set the repository this event occurred on. + + Set the repository this event occurred on. +

+ This method should only be invoked once on each event object, and is + automatically set by + NGit.Repository.FireEvent(RepositoryEvent<T>) + + . + + the repository. + + + the repository that was changed. + + + type of listener this event dispatches to. + + +

Dispatch this event to the given listener. + Dispatch this event to the given listener. + listener that wants this event. +
+ + + Receives + ConfigChangedEvent + s. + + + + Invoked when any change is made to the configuration. + Invoked when any change is made to the configuration. + information about the changes. + + + Describes a change to one or more paths in the index file. + Describes a change to one or more paths in the index file. + + + + Tracks a previously registered + RepositoryListener + . + + + + Remove the listener and stop receiving events. + Remove the listener and stop receiving events. + + + + Manages a thread-safe list of + RepositoryListener + s. + + + + Register an IndexChangedListener. + Register an IndexChangedListener. + the listener implementation. + handle to later remove the listener. + + + Register a RefsChangedListener. + Register a RefsChangedListener. + the listener implementation. + handle to later remove the listener. + + + Register a ConfigChangedListener. + Register a ConfigChangedListener. + the listener implementation. + handle to later remove the listener. + + + Add a listener to the list. + Add a listener to the list. + type of listener being registered. + the listener instance. + a handle to later remove the registration, if desired. + + + Dispatch an event to all interested listeners. + + Dispatch an event to all interested listeners. +

+ Listeners are selected by the type of listener the event delivers to. + + the event to deliver. + + +

Describes a change to one or more references of a repository. + Describes a change to one or more references of a repository. +
+ + + Receives + RefsChangedEvent + s. + + + + Invoked when any reference changes. + Invoked when any reference changes. + information about the changes. + + + the character which decides which heads are returned. + a list of heads based on the input. + + + + a list of + Head + s which will not be modified. + + + + This class can be used to match filenames against fnmatch like patterns. + + + This class can be used to match filenames against fnmatch like patterns. It + is not thread save. +

+ Supported are the wildcard characters * and ? and groups with: +

    +
  • characters e.g. [abc]
  • +
  • ranges e.g. [a-z]
  • +
  • the following character classes +
      +
    • [:alnum:]
    • +
    • [:alpha:]
    • +
    • [:blank:]
    • +
    • [:cntrl:]
    • +
    • [:digit:]
    • +
    • [:graph:]
    • +
    • [:lower:]
    • +
    • [:print:]
    • +
    • [:punct:]
    • +
    • [:space:]
    • +
    • [:upper:]
    • +
    • [:word:]
    • +
    • [:xdigit:]
    • +
    + e. g. [[:xdigit:]]
  • +
+
+
+ + + { + ExtendStringToMatchByOneCharacter(char) + + needs a list for the + new heads, allocating a new array would be bad for the performance, as + the method gets called very often. + + + + must be a list which will never be modified. + + + must be a list which will never be modified. + + a list which will be cloned and then used as current head + list. + + + + must contain a pattern which fnmatch would accept. + + if this parameter isn't null then this character will not + match at wildcards(* and ? are wildcards). + + if the patternString contains a invalid fnmatch pattern. + + + + + A Copy Constructor which creates a new + FileNameMatcher + with the + same state and reset point like other. + + + another + FileNameMatcher + instance. + + + + + + + + + + + + + + extends the string which is matched against the patterns of + this class. + + + + Resets this matcher to it's state right after construction. + Resets this matcher to it's state right after construction. + + + + a + FileNameMatcher + instance which uses the same pattern + like this matcher, but has the current state of this matcher as + reset and start point. + + + + true, if the string currently being matched does match. + + + + false, if the string being matched will not match when the string + gets extended. + + + + + + + the character to test + returns true if the character matches a pattern. + + + + Don't call this constructor, use + INSTANCE + + + + Represents a bundle of ignore rules inherited from a base directory. + + Represents a bundle of ignore rules inherited from a base directory. + This class is not thread safe, it maintains state about the last match. + + + + The rules that have been parsed into this node. + The rules that have been parsed into this node. + + + Create an empty ignore node with no rules. + Create an empty ignore node with no rules. + + + Create an ignore node with given rules. + Create an ignore node with given rules. + list of rules. 
+ + + Parse files according to gitignore standards. + Parse files according to gitignore standards. + + input stream holding the standard ignore format. The caller is + responsible for closing the stream. + + Error thrown when reading an ignore file. + + + + list of all ignore rules held by this node. + + + Determine if an entry path matches an ignore rule. + Determine if an entry path matches an ignore rule. + + the path to test. The path must be relative to this ignore + node's own repository path, and in repository path format + (uses '/' and not '\'). + + true if the target item is a directory. + status of the path. + + + + Result from + IsIgnored(string, bool) + . + + + + + A single ignore rule corresponding to one line in a .gitignore or + ignore file. + + + A single ignore rule corresponding to one line in a .gitignore or + ignore file. Parses the ignore pattern + Inspiration from: Ferry Huberts + + + + Create a new ignore rule with the given pattern. + + Create a new ignore rule with the given pattern. Assumes that + the pattern is already trimmed. + + + Base pattern for the ignore rule. This pattern will + be parsed to generate rule parameters. + + + + Remove leading/trailing characters as needed. + + Remove leading/trailing characters as needed. Set up + rule variables for later matching. + + + + True if the pattern is just a file name and not a path + + + True if the pattern should match directories only + + + True if the pattern had a "!" in front of it + + + The blob pattern to be used as a matcher + + + Returns true if a match was made. + + Returns true if a match was made. +
+ This function does NOT return the actual ignore status of the + target! Please consult + GetResult() + for the ignore status. The actual + ignore status may be true or false depending on whether this rule is + an ignore rule or a negation rule. +
+ Name pattern of the file, relative to the base directory of this rule + + Whether the target file is a directory or not + + True if a match was made. This does not necessarily mean that + the target is ignored. Call + getResult() + for the result. + +
+ + + If a call to isMatch(String, boolean) was previously + made, this will return whether or not the target was ignored. + + + If a call to isMatch(String, boolean) was previously + made, this will return whether or not the target was ignored. Otherwise + this just indicates whether the rule is non-negation or negation. + + True if the target is to be ignored, false otherwise. + + + + Provides the merge algorithm which does a three-way merge on content provided + as RawText. + + + Provides the merge algorithm which does a three-way merge on content provided + as RawText. By default + NGit.Diff.HistogramDiff + is used as diff algorithm. + + + + + Creates a new MergeAlgorithm which uses + NGit.Diff.HistogramDiff + as diff + algorithm + + + + Creates a new MergeAlgorithm + the diff algorithm used by this merge + + + Does the three way merge between a common base and two sequences. + Does the three way merge between a common base and two sequences. + comparison method for this execution. + the common base sequence + the first sequence to be merged + the second sequence to be merged + the resulting content + + + Helper method which returns the next Edit for an Iterator over Edits. + + Helper method which returns the next Edit for an Iterator over Edits. + When there are no more edits left this method will return the constant + END_EDIT. + + the iterator for which the next edit should be returned + + the next edit from the iterator or END_EDIT if there no more + edits + + + + One chunk from a merge result. + + One chunk from a merge result. Each chunk contains a range from a + single sequence. In case of conflicts multiple chunks are reported for one + conflict. The conflictState tells when conflicts start and end. + + + + Creates a new empty MergeChunk + + determines to which sequence this chunks belongs to. 
Same as + in + MergeResult<S>.Add(int, int, int, ConflictState) + + + + the first element from the specified sequence which should be + included in the merge result. Indexes start with 0. + + + specifies the end of the range to be added. The element this + index points to is the first element which not added to the + merge result. All elements between begin (including begin) and + this element are added. + + + the state of this chunk. See + ConflictState + + + + + the index of the sequence to which sequence this chunks belongs + to. Same as in + MergeResult<S>.Add(int, int, int, ConflictState) + + + + + + the first element from the specified sequence which should be + included in the merge result. Indexes start with 0. + + + + + the end of the range of this chunk. The element this index + points to is the first element which not added to the merge + result. All elements between begin (including begin) and this + element are added. + + + + + the state of this chunk. See + ConflictState + + + + A state telling whether a MergeChunk belongs to a conflict or not. + + A state telling whether a MergeChunk belongs to a conflict or not. The + first chunk of a conflict is reported with a special state to be able to + distinguish the border between two consecutive conflicts + + + + A class to convert merge results into a Git conformant textual presentation + + + + + Formats the results of a merge of + NGit.Diff.RawText + objects in a Git + conformant way. This method also assumes that the + NGit.Diff.RawText + objects + being merged are line oriented files which use LF as delimiter. This + method will also use LF to separate chunks and conflict metadata, + therefore it fits only to texts that are LF-separated lines. + + the outputstream where to write the textual presentation + the merge result which should be presented + + When a conflict is reported each conflicting range will get a + name. This name is following the "<<<<<<< " or ">>>>>>> " + conflict markers. 
The names for the sequences are given in + this list + + + the name of the characterSet used when writing conflict + metadata + + System.IO.IOException + + + + Formats the results of a merge of exactly two + NGit.Diff.RawText + objects in + a Git conformant way. This convenience method accepts the names for the + three sequences (base and the two merged sequences) as explicit + parameters and doesn't require the caller to specify a List + + + the + Sharpen.OutputStream + where to write the textual + presentation + + the merge result which should be presented + the name ranges from the base should get + the name ranges from ours should get + the name ranges from theirs should get + + the name of the characterSet used when writing conflict + metadata + + System.IO.IOException + + + + The result of merging a number of + NGit.Diff.Sequence + objects. These sequences + have one common predecessor sequence. The result of a merge is a list of + MergeChunks. Each MergeChunk contains either a range (a subsequence) from + one of the merged sequences, a range from the common predecessor or a + conflicting range from one of the merged sequences. A conflict will be + reported as multiple chunks, one for each conflicting range. The first chunk + for a conflict is marked specially to distinguish the border between two + consecutive conflicts. +

+ This class does not know anything about how to present the merge result to + the end-user. MergeFormatters have to be used to construct something human + readable. +

+
+ + Creates a new empty MergeResult + + contains the common predecessor sequence at position 0 + followed by the merged sequences. This list should not be + modified anymore during the lifetime of this + MergeResult<S> + . + + + + + Adds a new range from one of the merged sequences or from the common + predecessor. + + + Adds a new range from one of the merged sequences or from the common + predecessor. This method can add conflicting and non-conflicting ranges + controlled by the conflictState parameter + + + determines from which sequence this range comes. An index of + x specifies the x+1 element in the list of sequences + specified to the constructor + + + the first element from the specified sequence which should be + included in the merge result. Indexes start with 0. + + + specifies the end of the range to be added. The element this + index points to is the first element which not added to the + merge result. All elements between begin (including begin) and + this element are added. + + + when set to NO_CONLICT a non-conflicting range is added. + This will end implicitly all open conflicts added before. + + + + + Returns the common predecessor sequence and the merged sequence in one + list. + + + Returns the common predecessor sequence and the merged sequence in one + list. The common predecessor is is the first element in the list + + + the common predecessor at position 0 followed by the merged + sequences. + + + + + an iterator over the MergeChunks. The iterator does not support + the remove operation + + + + true if this merge result contains conflicts + + + A method of combining two or more trees together to form an output tree. + + + A method of combining two or more trees together to form an output tree. +

+ Different strategies may employ different techniques for deciding which paths + (and ObjectIds) to carry from the input trees into the final output tree. + + + +

Simple strategy that sets the output tree to the first input tree. + Simple strategy that sets the output tree to the first input tree. +
+ + Simple strategy that sets the output tree to the second input tree. + Simple strategy that sets the output tree to the second input tree. + + + Simple strategy to merge paths, without simultaneous edits. + Simple strategy to merge paths, without simultaneous edits. + + + Simple strategy to merge paths. + Simple strategy to merge paths. It tries to merge also contents. Multiple merge bases are not supported + + + + Register a merge strategy so it can later be obtained by name. + Register a merge strategy so it can later be obtained by name. + the strategy to register. + a strategy by the same name has already been registered. + + + + Register a merge strategy so it can later be obtained by name. + Register a merge strategy so it can later be obtained by name. + name the strategy can be looked up under. + the strategy to register. + a strategy by the same name has already been registered. + + + + Locate a strategy by name. + Locate a strategy by name. + name of the strategy to locate. + the strategy instance; null if no strategy matches the name. + + + Get all registered strategies. + Get all registered strategies. + + the registered strategy instances. No inherit order is returned; + the caller may modify (and/or sort) the returned array if + necessary to obtain a reasonable ordering. + + + + default name of this strategy implementation. + + + Create a new merge instance. + Create a new merge instance. + + repository database the merger will read from, and eventually + write results back to. + + the new merge instance which implements this strategy. + + + Create a new merge instance. + Create a new merge instance. + + repository database the merger will read from, and eventually + write results back to. + + + the merge will happen in memory, working folder will not be + modified, in case of a non-trivial merge that requires manual + resolution, the merger will fail. + + the new merge instance which implements this strategy. 
+ + + + Instance of a specific + MergeStrategy + for a single + NGit.Repository + . + + + + The repository this merger operates on. + The repository this merger operates on. + + + + Reader to support + walk + and other object loading. + + + + A RevWalk for computing merge bases, or listing incoming commits. + A RevWalk for computing merge bases, or listing incoming commits. + + + The original objects supplied in the merge; this can be any tree-ish. + The original objects supplied in the merge; this can be any tree-ish. + + + + If + sourceObjects + [i] is a commit, this is the commit. + + + + + The trees matching every entry in + sourceObjects + . + + + + Create a new merge instance for a repository. + Create a new merge instance for a repository. + the repository this merger will read and write data on. + + + the repository this merger operates on. + + + + an object writer to create objects in + GetRepository() + . + If no inserter has been set on this instance, one will be created + and returned by all future calls. + + + + Set the inserter this merger will use to create objects. + + Set the inserter this merger will use to create objects. +

+ If an inserter was already set on this instance (such as by a prior set, + or a prior call to + GetObjectInserter() + ), the prior inserter will + be released first. + + + the inserter instance to use. Must be associated with the + repository instance returned by + GetRepository() + . + + + +

Merge together two or more tree-ish objects. + + Merge together two or more tree-ish objects. +

+ Any tree-ish may be supplied as inputs. Commits and/or tags pointing at + trees or commits may be passed as input objects. + + + source trees to be combined together. The merge base is not + included in this set. + + + true if the merge was completed without conflicts; false if the + merge strategy cannot handle this merge or there were conflicts + preventing it from automatically resolving all paths. + + + one of the input objects is not a commit, but the strategy + requires it to be a commit. + + + one or more sources could not be read, or outputs could not + be written to the Repository. + + + +

Create an iterator to walk the merge base of two commits. + Create an iterator to walk the merge base of two commits. + + index of the first commit in + sourceObjects + . + + + index of the second commit in + sourceObjects + . + + the new iterator + one of the input objects is not a commit. + + objects are missing or multiple merge bases were found. + +
+ + Return the merge base of two commits. + Return the merge base of two commits. + + index of the first commit in + sourceObjects + . + + + index of the second commit in + sourceObjects + . + + the merge base of two commits + one of the input objects is not a commit. + + objects are missing or multiple merge bases were found. + + + + Open an iterator over a tree. + Open an iterator over a tree. + the tree to scan; must be a tree (not a treeish). + an iterator for the tree. + the input object is not a tree. + + the tree object is not found or cannot be read. + + + + Execute the merge. + + Execute the merge. +

+ This method is called from + Merge(NGit.AnyObjectId[]) + after the + sourceObjects + , + sourceCommits + and + sourceTrees + have been populated. + + + true if the merge was completed without conflicts; false if the + merge strategy cannot handle this merge or there were conflicts + preventing it from automatically resolving all paths. + + + one of the input objects is not a commit, but the strategy + requires it to be a commit. + + + one or more sources could not be read, or outputs could not + be written to the Repository. + + + + + resulting tree, if + Merge(NGit.AnyObjectId[]) + returned true. + + + +

A three-way merger performing a content-merge if necessary +
+ + A merge of 2 trees, using a common base ancestor tree. + A merge of 2 trees, using a common base ancestor tree. + + + Create a new merge instance for a repository. + Create a new merge instance for a repository. + the repository this merger will read and write data on. + + + Create a new merge instance for a repository. + Create a new merge instance for a repository. + the repository this merger will read and write data on. + perform the merge in core with no working folder involved + + + Set the common ancestor tree. + Set the common ancestor tree. + + common base treeish; null to automatically compute the common + base from the input commits during + Merge(NGit.AnyObjectId[]) + . + + the object is not a treeish. + + the object does not exist. + the object could not be read. + + + + + + Create an iterator to walk the merge base. + Create an iterator to walk the merge base. + + an iterator over the caller-specified merge base, or the natural + merge base of the two input commits. + + System.IO.IOException + + + + + + + + + + + + + + + + + + + + Reverts the worktree after an unsuccessful merge. + + Reverts the worktree after an unsuccessful merge. We know that for all + modified files the old content was in the old index and the index + contained only stage 0. In case if inCore operation just clear + the history of modified files. + + System.IO.IOException + NGit.Errors.CorruptObjectException + + NGit.Errors.NoWorkTreeException + + + + adds a new path with the specified stage to the index builder + + + + + + the entry which was added to the index + + + + adds a entry to the index builder which is a copy of the specified + DirCacheEntry + + the entry which should be copied + the entry which was added to the index + + + Processes one path and tries to merge. + + Processes one path and tries to merge. This method will do all do all + trivial (not content) merges and will also detect if a merge will fail. + The merge will fail when one of the following is true +
    +
  • the index entry does not match the entry in ours. When merging one + branch into the current HEAD, ours will point to HEAD and theirs will + point to the other branch. It is assumed that the index matches the HEAD + because it will only not match HEAD if it was populated before the merge + operation. But the merge commit should not accidentally contain + modifications done before the merge. Check the <a href= + "http://www.kernel.org/pub/software/scm/git/docs/git-read-tree.html#_3_way_merge" + >git read-tree</a> documentation for further explanations.
  • +
  • A conflict was detected and the working-tree file is dirty. When a + conflict is detected the content-merge algorithm will try to write a + merged version into the working-tree. If the file is dirty we would + override unsaved data.
  • +
+
+ the common base for ours and theirs + + the ours side of the merge. When merging a branch into the + HEAD ours will point to HEAD + + + the theirs side of the merge. When merging a branch into the + current HEAD theirs will point to the branch which is merged + into HEAD. + + the index entry + the file in the working tree + + false if the merge will fail because the index entry + didn't match ours or the working-dir file was dirty and a + conflict occurred + + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + NGit.Errors.CorruptObjectException + + System.IO.IOException +
+ + Does the content merge. + + Does the content merge. The three texts base, ours and theirs are + specified with + NGit.Treewalk.CanonicalTreeParser + . If any of the parsers is + specified as null then an empty text will be used instead. + + + + + the result of the content merge + System.IO.IOException + + + Updates the index after a content merge has happened. + + Updates the index after a content merge has happened. If no conflict has + occurred this includes persisting the merged content to the object + database. In case of conflicts this method takes care to write the + correct stages to the index. + + + + + + + System.IO.FileNotFoundException + + System.IO.IOException + + + Writes merged file content to the working tree. + + Writes merged file content to the working tree. In case + inCore + is set and we don't have a working tree the content is written to a + temporary file + + the result of the content merge + the file to which the merged content was written + System.IO.FileNotFoundException + + System.IO.IOException + + + Try to merge filemodes. + + Try to merge filemodes. If only ours or theirs have changed the mode + (compared to base) we choose that one. If ours and theirs have equal + modes return that one. If also that is not the case the modes are not + mergeable. Return + NGit.FileMode.MISSING + int that case. + + filemode found in BASE + filemode found in OURS + filemode found in THEIRS + + the merged filemode or + NGit.FileMode.MISSING + in case of a + conflict + + + + + + + + the names of the commits as they would appear in conflict + markers + + + + + the names of the commits as they would appear in conflict + markers. + + + + + the paths with conflicts. This is a subset of the files listed + by + GetModifiedFiles() + + + + + the paths of files which have been modified by this merge. A + file will be modified if a content-merge works on this path or if + the merge algorithm decides to take the theirs-version. 
This is a + superset of the files listed by + GetUnmergedPaths() + . + + + + + a map which maps the paths of files which have to be checked out + because the merge created new fully-merged content for this file + into the index. This means: the merge wrote a new stage 0 entry + for this path. + + + + the mergeResults + + + + lists paths causing this merge to fail (not stopped because of a + conflict). null is returned if this merge didn't + fail. + + + + Returns whether this merge failed (i.e. + + Returns whether this merge failed (i.e. not stopped because of a + conflict) + + + true if a failure occurred, false + otherwise + + + + Sets the DirCache which shall be used by this merger. + + Sets the DirCache which shall be used by this merger. If the DirCache is + not set explicitly this merger will implicitly get and lock a default + DirCache. If the DirCache is explicitly set the caller is responsible to + lock it in advance. Finally the merger will call + NGit.Dircache.DirCache.Commit() + which requires that the DirCache is locked. If + the + MergeImpl() + returns without throwing an exception the lock + will be released. In case of exceptions the caller is responsible to + release the lock. + + the DirCache to set + + + Sets the WorkingTreeIterator to be used by this merger. + + Sets the WorkingTreeIterator to be used by this merger. If no + WorkingTreeIterator is set this merger will ignore the working tree and + fail if a content merge is necessary. +

+ TODO: enhance WorkingTreeIterator to support write operations. Then this + merger will be able to merge with a different working tree abstraction. + + the workingTreeIt to set + + +

+ If the merge fails (means: not stopped because of unresolved conflicts) + this enum is used to explain why it failed + +
+ + Trivial merge strategy to make the resulting tree exactly match an input. + + + Trivial merge strategy to make the resulting tree exactly match an input. +

+ This strategy can be used to cauterize an entire side branch of history, by + setting the output tree to one of the inputs, and ignoring any of the paths + of the other inputs. + + + +

Create a new merge strategy to select a specific input tree. + Create a new merge strategy to select a specific input tree. + name of this strategy. + the position of the input tree to accept as the result. +
+ + + + + A three-way merge strategy performing a content-merge if necessary + + + A merge strategy to merge 2 trees, using a common base ancestor tree. + A merge strategy to merge 2 trees, using a common base ancestor tree. + + + Merges two commits together in-memory, ignoring any working directory. + + Merges two commits together in-memory, ignoring any working directory. +

+ The strategy chooses a path from one of the two input trees if the path is + unchanged in the other relative to their common merge base tree. This is a + trivial 3-way merge (at the file path level only). +

+ Modifications of the same file path (content and/or file mode) by both input + trees will cause a merge conflict, as this strategy does not attempt to merge + file contents. + + + +

Create a new instance of the strategy. + Create a new instance of the strategy. +
+ + + + + + + + Global cache of translation bundles. + + Global cache of translation bundles. +

+ Every translation bundle will be cached here when it gets loaded for the + first time from a thread. Another lookup for the same translation bundle + (same locale and type) from the same or a different thread will return the + cached one. +

+ Note that NLS instances maintain per-thread Map of loaded translation + bundles. Once a thread accesses a translation bundle it will keep reference + to it and will not call + LookupBundle<T>(System.Globalization.CultureInfo, System.Type<T>) + + again for the + same translation bundle as long as its locale doesn't change. + + + +

Looks up for a translation bundle in the global cache. + + Looks up for a translation bundle in the global cache. If found returns + the cached bundle. If not found creates a new instance puts it into the + cache and returns it. + + the preferred locale + required bundle type + an instance of the required bundle type + + TranslationBundleLoadingException + see + TranslationBundle.Load(System.Globalization.CultureInfo) + + + + TranslationStringMissingException + see + TranslationBundle.Load(System.Globalization.CultureInfo) + + +
+ + + The purpose of this class is to provide NLS (National Language Support) + configurable per thread. + + + The purpose of this class is to provide NLS (National Language Support) + configurable per thread. +

+ The + SetLocale(System.Globalization.CultureInfo) + + method is used to configure locale for the + calling thread. The locale setting is thread inheritable. This means that a + child thread will have the same locale setting as its creator thread until it + changes it explicitly. +

+ Example of usage: +

+            NLS.setLocale(Locale.GERMAN);
+            TransportText t = NLS.getBundleFor(TransportText.class);
+            
+
+
+ + The root locale constant. + The root locale constant. It is defined here because the Locale.ROOT is not defined in Java 5 + + + + Sets the locale for the calling thread. + + Sets the locale for the calling thread. +

+ The + GetBundleFor<T>(System.Type<T>) + + method will honor this setting if if it + is supported by the provided resource bundle property files. Otherwise, + it will use a fall back locale as described in the + TranslationBundle + + the preferred locale + + +

Sets the JVM default locale as the locale for the calling thread. + + Sets the JVM default locale as the locale for the calling thread. +

+ Semantically this is equivalent to NLS.setLocale(Locale.getDefault()). + + + +

Returns an instance of the translation bundle of the required type. + + Returns an instance of the translation bundle of the required type. All + public String fields of the bundle instance will get their values + injected as described in the + TranslationBundle + . + + required bundle type + an instance of the required bundle type + + TranslationBundleLoadingException + see + NGit.Errors.TranslationBundleLoadingException + + + + TranslationStringMissingException + see + NGit.Errors.TranslationStringMissingException + + +
+ + + Base class for all translation bundles that provides injection of translated + texts into public String fields. + + + Base class for all translation bundles that provides injection of translated + texts into public String fields. +

+ The usage pattern is shown with the following example. First define a new + translation bundle: +

+            public class TransportText extends TranslationBundle {
+            public static TransportText get() {
+            return NLS.getBundleFor(TransportText.class);
+            }
+            public String repositoryNotFound;
+            public String transportError;
+            }
+            
+ Second, define one or more resource bundle property files. +
+            TransportText_en_US.properties:
+            repositoryNotFound=repository {0} not found
+            transportError=unknown error talking to {0}
+            TransportText_de.properties:
+            repositoryNotFound=repository {0} nicht gefunden
+            transportError=unbekannter Fehler w√§hrend der Kommunikation mit {0}
+            ...
+            
+ Then make use of it: +
+            NLS.setLocale(Locale.GERMAN); // or skip this call to stick to the JVM default locale
+            ...
+            throw new TransportException(uri, TransportText.get().transportError);
+            
+ The translated text is automatically injected into the public String fields + according to the locale set with + NLS.SetLocale(System.Globalization.CultureInfo) + + . However, the + NLS.SetLocale(System.Globalization.CultureInfo) + + method defines only prefered locale which will + be honored only if it is supported by the provided resource bundle property + files. Basically, this class will use + Sharpen.ResourceBundle.GetBundle(string, System.Globalization.CultureInfo) + method to load a resource + bundle. See the documentation of this method for a detailed explanation of + resource bundle loading strategy. After a bundle is created the + EffectiveLocale() + method can be used to determine whether the + bundle really corresponds to the requested locale or is a fallback. +

+ To load a String from a resource bundle property file this class uses the + Sharpen.ResourceBundle.GetString(string) + + . This method can throw the + Sharpen.MissingResourceException + and this class is not making any effort to + catch and/or translate this exception. +

+ To define a concrete translation bundle one has to: +

    +
  • extend this class
  • +
  • define a public static get() method like in the example above
  • +
  • define public static String fields for each text message
  • +
  • make sure the translation bundle class provide public no arg constructor
  • +
  • provide one or more resource bundle property files in the same package + where the translation bundle class resides
  • +
+
+
+ + + the locale locale used for loading the resource bundle from which + the field values were taken + + + + the resource bundle on which this translation bundle is based + + + Injects locale specific text in all instance fields of this instance. + + Injects locale specific text in all instance fields of this instance. + Only public instance fields of type String are considered. +

+ The name of this (sub)class plus the given locale parameter + define the resource bundle to be loaded. In other words the + this.getClass().getName() is used as the + baseName parameter in the + Sharpen.ResourceBundle.GetBundle(string, System.Globalization.CultureInfo) + parameter to load the + resource bundle. +

+ + defines the locale to be used when loading the resource bundle + + + TranslationBundleLoadingException + see + NGit.Errors.TranslationBundleLoadingException + + + + TranslationStringMissingException + see + NGit.Errors.TranslationStringMissingException + + + + + +

Part of a "GIT binary patch" to describe the pre-image or post-image +
+ + + Offset within + file + .buf to the "literal" or "delta " line. + + + + + Position 1 past the end of this hunk within + file + 's buf. + + + + Type of the data meaning. + Type of the data meaning. + + + Inflated length of the data. + Inflated length of the data. + + + header for the file this hunk applies to + + + the byte array holding this hunk's patch script. + + + + offset the start of this hunk in + GetBuffer() + . + + + + + offset one past the end of the hunk in + GetBuffer() + . + + + + type of this binary hunk + + + inflated size of this hunk's data + + + Type of information stored in a binary hunk. + Type of information stored in a binary hunk. + + + A file in the Git "diff --cc" or "diff --combined" format. + + A file in the Git "diff --cc" or "diff --combined" format. +

+ A combined diff shows an n-way comparison between two or more ancestors and + the final revision. Its primary function is to perform code reviews on a + merge which introduces changes not in any ancestor. + + + +

Patch header describing an action for a single file path. + Patch header describing an action for a single file path. +
+ + Buffer holding the patch data for this file. + Buffer holding the patch data for this file. + + + + Offset within + buf + to the "diff ..." line. + + + + + Position 1 past the end of this file within + buf + . + + + + Type of patch used to modify this file + + + The hunks of this file + + + + If + patchType + is + PatchType.GIT_BINARY + , the new image + + + + + If + patchType + is + PatchType.GIT_BINARY + , the old image + + + + Constructs a new FileHeader + buffer holding the diff header for this file + the edits for this file + the type of patch used to modify this file + + + the byte array holding this file's patch script. + + + + offset the start of this file's script in + GetBuffer() + . + + + + offset one past the end of the file script. + + + Convert the patch script for this file into a string. + + Convert the patch script for this file into a string. +

+ The default character encoding ( + NGit.Constants.CHARSET + ) is assumed for + both the old and new files. + + the patch script, as a Unicode string. + + +

Convert the patch script for this file into a string. + Convert the patch script for this file into a string. + hint character set to decode the old lines with. + hint character set to decode the new lines with. + the patch script, as a Unicode string. +
+ + style of patch used to modify this file + + + true if this patch modifies metadata about a file + + + hunks altering this file; in order of appearance in patch + + + + if a + PatchType.GIT_BINARY + , the new-image delta/literal + + + + + if a + PatchType.GIT_BINARY + , the old-image delta/literal + + + + a list describing the content edits performed on this file. + + + Parse a "diff --git" or "diff --cc" line. + Parse a "diff --git" or "diff --cc" line. + first character after the "diff --git " or "diff --cc " part. + one past the last position to parse. + first character after the LF at the end of the line; -1 on error. + + + Determine if this is a patch hunk header. + Determine if this is a patch hunk header. + the buffer to scan + first position in the buffer to evaluate + + last position to consider; usually the end of the buffer ( + buf.length) or the first position on the next + line. This is only used to avoid very long runs of '@' from + killing the scan loop. + + + the number of "ancestor revisions" in the hunk header. A + traditional two-way diff ("@@ -...") returns 1; a combined diff + for a 3 way-merge returns 3. If this is not a hunk header, 0 is + returned instead. + + + + Type of patch used by this file. + Type of patch used by this file. + + + number of ancestor revisions mentioned in this diff. + + + get the file mode of the first parent. + + + Get the file mode of the nth ancestor + the ancestor to get the mode of + the mode of the requested ancestor. + + + get the object id of the first parent. + + + Get the ObjectId of the nth ancestor + the ancestor to get the object id of + the id of the requested ancestor. + + + Convert the patch script for this file into a string. + Convert the patch script for this file into a string. + + optional array to suggest the character set to use when + decoding each file's line. 
If supplied the array must have a + length of + GetParentCount() + + 1 + representing the old revision character sets and the new + revision character set. + + the patch script, as a Unicode string. + + + Hunk header for a hunk appearing in a "diff --cc" style patch. + Hunk header for a hunk appearing in a "diff --cc" style patch. + + + Hunk header describing the layout of a single block of lines + + + + Offset within + file + .buf to the "@@ -" line. + + + + + Position 1 past the end of this hunk within + file + 's buf. + + + + First line number in the post-image file where the hunk starts + + + Total number of post-image lines this hunk covers (context + inserted) + + + Total number of lines of context appearing in this hunk + + + header for the file this hunk applies to + + + the byte array holding this hunk's patch script. + + + + offset the start of this hunk in + GetBuffer() + . + + + + + offset one past the end of the hunk in + GetBuffer() + . + + + + information about the old image mentioned in this hunk. + + + first line number in the post-image file where the hunk starts + + + Total number of post-image lines this hunk covers + + + total number of lines of context appearing in this hunk + + + a list describing the content edits performed within the hunk. + + + + + + Details about an old image of the file. + Details about an old image of the file. + + + First line number the hunk starts on in this file. + First line number the hunk starts on in this file. + + + Total number of lines this hunk covers in this file. + Total number of lines this hunk covers in this file. + + + Number of lines deleted by the post-image from this file. + Number of lines deleted by the post-image from this file. + + + Number of lines added by the post-image not in this file. + Number of lines added by the post-image not in this file. + + + first line number the hunk starts on in this file. + + + total number of lines this hunk covers in this file. 
+ + + number of lines deleted by the post-image from this file. + + + number of lines added by the post-image not in this file. + + + object id of the pre-image file. + + + Get the OldImage data related to the nth ancestor + the ancestor to get the old image data of + image data of the requested ancestor. + + + + + + An error in a patch script + + + the severity of the error. + + + a message describing the error. + + + the byte buffer holding the patch script. + + + + byte offset within + GetBuffer() + where the error is + + + + line of the patch script the error appears on. + + + Classification of an error. + Classification of an error. + + + + A parsed collection of + FileHeader + s from a unified diff patch file + + + + The files, in the order they were parsed out of the input. + The files, in the order they were parsed out of the input. + + + Formatting errors, if any were identified. + Formatting errors, if any were identified. + + + Create an empty patch. + Create an empty patch. + + + Add a single file to this patch. + + Add a single file to this patch. +

+ Typically files should be added by parsing the text through one of this + class's parse methods. + + the header of the file. + + + list of files described in the patch, in occurrence order. + + +

Add a formatting error to this patch script. + Add a formatting error to this patch script. + the error description. +
+ + collection of formatting errors, if any. + + + Parse a patch received from an InputStream. + + Parse a patch received from an InputStream. +

+ Multiple parse calls on the same instance will concatenate the patch + data, but each parse input must start with a valid file header (don't + split a single file across parse calls). + + + the stream to read the patch data from. The stream is read + until EOF is reached. + + there was an error reading from the input stream. + + + + + + +

Parse a patch stored in a byte[]. + + Parse a patch stored in a byte[]. +

+ Multiple parse calls on the same instance will concatenate the patch + data, but each parse input must start with a valid file header (don't + split a single file across parse calls). + + the buffer to parse. + starting position to parse from. + + 1 past the last position to end parsing. The total length to + be parsed is end - ptr. + + + +

Basic commit graph renderer for graphical user interfaces. + + Basic commit graph renderer for graphical user interfaces. +

+ Lanes are drawn as columns left-to-right in the graph, and the commit short + message is drawn to the right of the lane lines for this cell. It is assumed + that the commits are being drawn as rows of some sort of table. +

+ Client applications can subclass this implementation to provide the necessary + drawing primitives required to display a commit graph. Most of the graph + layout is handled by this class, allowing applications to implement only a + handful of primitive stubs. +

+ This class is suitable for us within an AWT TableCellRenderer or within a SWT + PaintListener registered on a Table instance. It is meant to rubber stamp the + graphics necessary for one row of a plotted commit list. +

+ Subclasses should call + AbstractPlotRenderer<TLane, TColor>.PaintCommit(PlotCommit<L>, int) + + after they have + otherwise configured their instance to draw one commit into the current + location. +

+ All drawing methods assume the coordinate space for the current commit's cell + starts at (upper left corner is) 0,0. If this is not true (like say in SWT) + the implementation must perform the cell offset computations within the + various draw methods. + + + +

Paint one commit using the underlying graphics library. + Paint one commit using the underlying graphics library. + the commit to render in this cell. Must not be null. + total height (in pixels) of this cell. +
+ + Draw a decoration for the Ref ref at x,y + left + top + A peeled ref + width of label in pixels + + + Obtain the color reference used to paint this lane. + + Obtain the color reference used to paint this lane. +

+ Colors returned by this method will be passed to the other drawing + primitives, so the color returned should be application specific. +

+ If a null lane is supplied the return value must still be acceptable to a + drawing method. Usually this means the implementation should return a + default color. + + the current lane. May be null. + graphics specific color reference. Must be a valid color. + + +

Draw a single line within this cell. + Draw a single line within this cell. + the color to use while drawing the line. + starting X coordinate, 0 based. + starting Y coordinate, 0 based. + ending X coordinate, 0 based. + ending Y coordinate, 0 based. + number of pixels wide for the line. Always at least 1. +
+ + Draw a single commit dot. + + Draw a single commit dot. +

+ Usually the commit dot is a filled oval in blue, then a drawn oval in + black, using the same coordinates for both operations. + + upper left of the oval's bounding box. + upper left of the oval's bounding box. + width of the oval's bounding box. + height of the oval's bounding box. + + +

Draw a single boundary commit (aka uninteresting commit) dot. + + Draw a single boundary commit (aka uninteresting commit) dot. +

+ Usually a boundary commit dot is a light gray oval with a white center. + + upper left of the oval's bounding box. + upper left of the oval's bounding box. + width of the oval's bounding box. + height of the oval's bounding box. + + +

Draw a single line of text. + + Draw a single line of text. +

+ The font and colors used to render the text are left up to the + implementation. + + the text to draw. Does not contain LFs. + + first pixel from the left that the text can be drawn at. + Character data must not appear before this position. + + + pixel coordinate of the centerline of the text. + Implementations must adjust this coordinate to account for the + way their implementation handles font rendering. + + + +

A commit reference to a commit in the DAG. + A commit reference to a commit in the DAG. + PlotCommitList<L> +
+ + A commit reference to a commit in the DAG. + A commit reference to a commit in the DAG. + + + Base object type accessed during revision walking. + Base object type accessed during revision walking. + + + + Type of entry stored in the + ObjectIdOwnerMap<V> + . + + + + + Type of entry stored in the + ObjectIdOwnerMap<V> + . + + + + Initialize this entry with a specific ObjectId. + Initialize this entry with a specific ObjectId. + the id the entry represents. + + + + + + + + + + + + + Test to see if the flag has been set on this object. + Test to see if the flag has been set on this object. + the flag to test. + true if the flag has been added to this object; false if not. + + + Test to see if any flag in the set has been set on this object. + Test to see if any flag in the set has been set on this object. + the flags to test. + + true if any flag in the set has been added to this object; false + if not. + + + + Test to see if all flags in the set have been set on this object. + Test to see if all flags in the set have been set on this object. + the flags to test. + + true if all flags of the set have been added to this object; + false if some or none have been added. + + + + Add a flag to this object. + + Add a flag to this object. +

+ If the flag is already set on this object then the method has no effect. + + the flag to mark on this object, for later testing. + + +

Add a set of flags to this object. + Add a set of flags to this object. + the set of flags to mark on this object, for later testing. +
+ + Remove a flag from this object. + + Remove a flag from this object. +

+ If the flag is not set on this object then the method has no effect. + + the flag to remove from this object. + + +

Remove a set of flags from this object. + Remove a set of flags from this object. + the flag to remove from this object. +
+ + buffer to append a debug description of core RevFlags onto. + + + Get Git object type. + + Get Git object type. See + NGit.Constants + . + + object type + + + Get the name of this object. + Get the name of this object. + unique hash of this object. + + + Parse a commit from its canonical format. + + Parse a commit from its canonical format. + This method constructs a temporary revision pool, parses the commit as + supplied, and returns it to the caller. Since the commit was built inside + of a private revision pool its parent pointers will be initialized, but + will not have their headers loaded. + Applications are discouraged from using this API. Callers usually need + more than one commit. Use + RevWalk.ParseCommit(NGit.AnyObjectId) + + to + obtain a RevCommit from an existing repository. + + the canonical formatted commit to be parsed. + + the parsed commit, in an isolated revision pool that is not + available to the caller. + + + + Parse a commit from its canonical format. + + Parse a commit from its canonical format. + This method inserts the commit directly into the caller supplied revision + pool, making it appear as though the commit exists in the repository, + even if it doesn't. The repository under the pool is not affected. + + + the revision pool to allocate the commit within. The commit's + tree and parent pointers will be obtained from this pool. + + the canonical formatted commit to be parsed. + + the parsed commit, in an isolated revision pool that is not + available to the caller. + + + + Create a new commit reference. + Create a new commit reference. + object name for the commit. + + + + + + + + + + + + + Carry a RevFlag set on this commit to its parents. + + Carry a RevFlag set on this commit to its parents. +

+ If this commit is parsed, has parents, and has the supplied flag set on + it we automatically add it to the parents, grand-parents, and so on until + an unparsed commit or a commit with no parents is discovered. This + permits applications to force a flag through the history chain when + necessary. + + the single flag value to carry back onto parents. + + +

Get the nth parent from this commit's parent list. + Get the nth parent from this commit's parent list. + + parent index to obtain. Must be in the range 0 through + ParentCount() + -1. + + the specified parent. + an invalid parent index was specified. + +
+ + Parse the author identity from the raw buffer. + + Parse the author identity from the raw buffer. +

+ This method parses and returns the content of the author line, after + taking the commit's character set into account and decoding the author + name and email address. This method is fairly expensive and produces a + new PersonIdent instance on each invocation. Callers should invoke this + method only if they are certain they will be outputting the result, and + should cache the return value for as long as necessary to use all + information from it. +

+ RevFilter implementations should try to use + NGit.Util.RawParseUtils + to scan + the + RawBuffer() + instead, as this will allow faster evaluation + of commits. + + + identity of the author (name, email) and the time the commit was + made by the author; null if no author line was found. + + + +

Parse the committer identity from the raw buffer. + + Parse the committer identity from the raw buffer. +

+ This method parses and returns the content of the committer line, after + taking the commit's character set into account and decoding the committer + name and email address. This method is fairly expensive and produces a + new PersonIdent instance on each invocation. Callers should invoke this + method only if they are certain they will be outputting the result, and + should cache the return value for as long as necessary to use all + information from it. +

+ RevFilter implementations should try to use + NGit.Util.RawParseUtils + to scan + the + RawBuffer() + instead, as this will allow faster evaluation + of commits. + + + identity of the committer (name, email) and the time the commit + was made by the committer; null if no committer line was found. + + + +

Parse the complete commit message and decode it to a string. + + Parse the complete commit message and decode it to a string. +

+ This method parses and returns the message portion of the commit buffer, + after taking the commit's character set into account and decoding the + buffer using that character set. This method is a fairly expensive + operation and produces a new string on each invocation. + + decoded commit message as a string. Never null. + + +

Parse the commit message and return the first "line" of it. + + Parse the commit message and return the first "line" of it. +

+ The first line is everything up to the first pair of LFs. This is the + "oneline" format, suitable for output in a single line display. +

+ This method parses and returns the message portion of the commit buffer, + after taking the commit's character set into account and decoding the + buffer using that character set. This method is a fairly expensive + operation and produces a new string on each invocation. + + + decoded commit message as a string. Never null. The returned + string does not contain any LFs, even if the first paragraph + spanned multiple lines. Embedded LFs are converted to spaces. + + + +

Parse the footer lines (e.g. + + Parse the footer lines (e.g. "Signed-off-by") for machine processing. +

+ This method splits all of the footer lines out of the last paragraph of + the commit message, providing each line as a key-value pair, ordered by + the order of the line's appearance in the commit message itself. +

+ A footer line's key must match the pattern + ^[A-Za-z0-9-]+: + , while + the value is free-form, but must not contain an LF. Very common keys seen + in the wild are: +

    +
  • + Signed-off-by + (agrees to Developer Certificate of Origin)
  • +
  • + Acked-by + (thinks change looks sane in context)
  • +
  • + Reported-by + (originally found the issue this change fixes)
  • +
  • + Tested-by + (validated change fixes the issue for them)
  • +
  • + CC + , + Cc + (copy on all email related to this change)
  • +
  • + Bug + (link to project's bug tracking system)
  • +
+
+ ordered list of footer lines; empty list if no footers found. +
+ + Get the values of all footer lines with the given key. + Get the values of all footer lines with the given key. + footer key to find values of, case insensitive. + + values of footers with key of + keyName + , ordered by their + order of appearance. Duplicates may be returned if the same + footer appeared more than once. Empty list if no footers appear + with the specified key, or there are no footers at all. + + GetFooterLines() + + + Get the values of all footer lines with the given key. + Get the values of all footer lines with the given key. + footer key to find values of, case insensitive. + + values of footers with key of + keyName + , ordered by their + order of appearance. Duplicates may be returned if the same + footer appeared more than once. Empty list if no footers appear + with the specified key, or there are no footers at all. + + GetFooterLines() + + + Reset this commit to allow another RevWalk with the same instances. + + Reset this commit to allow another RevWalk with the same instances. +

+ Subclasses must call super.reset() to ensure the + basic information can be correctly cleared out. + + + +

Time from the "committer " line of the buffer. + Time from the "committer " line of the buffer. + time, expressed as seconds since the epoch. +
+ + Get a reference to this commit's tree. + Get a reference to this commit's tree. + tree of this commit. + + + Get the number of parent commits listed in this commit. + Get the number of parent commits listed in this commit. + number of parents; always a positive value but can be 0. + + + Obtain an array of all parents (NOTE - THIS IS NOT A COPY). + + Obtain an array of all parents (NOTE - THIS IS NOT A COPY). +

+ This method is exposed only to provide very fast, efficient access to + this commit's parent list. Applications relying on this list should be + very careful to ensure they do not modify its contents during their use + of it. + + the array of parents. + + +

Obtain the raw unparsed commit body (NOTE - THIS IS NOT A COPY). + + Obtain the raw unparsed commit body (NOTE - THIS IS NOT A COPY). +

+ This method is exposed only to provide very fast, efficient access to + this commit's message buffer within a RevFilter. Applications relying on + this buffer should be very careful to ensure they do not modify its + contents during their use of it. + + + the raw unparsed commit body. This is NOT A COPY. + Altering the contents of this buffer may alter the walker's + knowledge of this commit, and the results it produces. + + + +

Determine the encoding of the commit message buffer. + + Determine the encoding of the commit message buffer. +

+ Locates the "encoding" header (if present) and then returns the proper + character set to apply to this buffer to evaluate its contents as + character data. +

+ If no encoding header is present, + NGit.Constants.CHARSET + is assumed. + + + the preferred encoding of + RawBuffer() + . + + + +

Create a new commit. + Create a new commit. + the identity of this commit. +
+ + Get the number of child commits listed in this commit. + Get the number of child commits listed in this commit. + number of children; always a positive value but can be 0. + + + Get the nth child from this commit's child list. + Get the nth child from this commit's child list. + + child index to obtain. Must be in the range 0 through + PlotCommit<L>.GetChildCount() + -1. + + the specified child. + an invalid child index was specified. + + + + Determine if the given commit is a child (descendant) of this commit. + Determine if the given commit is a child (descendant) of this commit. + the commit to test. + true if the given commit built on top of this commit. + + + Get the number of refs for this commit. + Get the number of refs for this commit. + number of refs; always a positive value but can be 0. + + + Get the nth Ref from this commit's ref list. + Get the nth Ref from this commit's ref list. + + ref index to obtain. Must be in the range 0 through + PlotCommit<L>.GetRefCount() + -1. + + the specified ref. + an invalid ref index was specified. + + + + Obtain the lane this commit has been plotted into. + Obtain the lane this commit has been plotted into. + the assigned lane for this commit. + + + + An ordered list of + PlotCommit<L> + subclasses. +

+ Commits are allocated into lanes as they enter the list, based upon their + connections between descendant (child) commits and ancestor (parent) commits. +

+ The source of the list must be a + PlotWalk + and + NGit.Revwalk.RevCommitList<E>.FillTo(int) + + must be used to populate the list. +

+
+ + + An ordered list of + RevCommit + subclasses. + + + + + An ordered list of + RevObject + subclasses. + + + + Items stored in this list. + + Items stored in this list. +

+ If + RevObjectListBlock.shift + = 0 this block holds the list elements; otherwise + it holds pointers to other + RevObjectListBlock + instances which use a shift that + is + RevObjectList<E>.BLOCK_SHIFT + smaller. + + + +

Current number of elements in the list. + Current number of elements in the list. +
+ + Create an empty object list. + Create an empty object list. + + + Apply a flag to all commits matching the specified filter. + + Apply a flag to all commits matching the specified filter. +

+ Same as applyFlag(matching, flag, 0, size()), but without + the incremental behavior. + + + the filter to test commits with. If the filter includes a + commit it will have the flag set; if the filter does not + include the commit the flag will be unset. + + + the flag to apply (or remove). Applications are responsible + for allocating this flag from the source RevWalk. + + + revision filter needed to read additional objects, but an + error occurred while reading the pack files or loose objects + of the repository. + + + revision filter needed to read additional objects, but an + object was not of the correct type. Repository corruption may + have occurred. + + + revision filter needed to read additional objects, but an + object that should be present was not found. Repository + corruption may have occurred. + + + +

Apply a flag to all commits matching the specified filter. + + Apply a flag to all commits matching the specified filter. +

+ This version allows incremental testing and application, such as from a + background thread that needs to periodically halt processing and send + updates to the UI. + + + the filter to test commits with. If the filter includes a + commit it will have the flag set; if the filter does not + include the commit the flag will be unset. + + + the flag to apply (or remove). Applications are responsible + for allocating this flag from the source RevWalk. + + + first commit within the list to begin testing at, inclusive. + Must not be negative, but may be beyond the end of the list. + + + last commit within the list to end testing at, exclusive. If + smaller than or equal to rangeBegin then no + commits will be tested. + + + revision filter needed to read additional objects, but an + error occurred while reading the pack files or loose objects + of the repository. + + + revision filter needed to read additional objects, but an + object was not of the correct type. Repository corruption may + have occurred. + + + revision filter needed to read additional objects, but an + object that should be present was not found. Repository + corruption may have occurred. + + + +

Remove the given flag from all commits. + + Remove the given flag from all commits. +

+ Same as clearFlag(flag, 0, size()), but without the + incremental behavior. + + + the flag to remove. Applications are responsible for + allocating this flag from the source RevWalk. + + + +

Remove the given flag from all commits. + + Remove the given flag from all commits. +

+ This method is actually implemented in terms of: + applyFlag(RevFilter.NONE, flag, rangeBegin, rangeEnd). + + + the flag to remove. Applications are responsible for + allocating this flag from the source RevWalk. + + + first commit within the list to begin testing at, inclusive. + Must not be negative, but may be beyond the end of the list. + + + last commit within the list to end testing at, exclusive. If + smaller than or equal to rangeBegin then no + commits will be tested. + + + +

Find the next commit that has the given flag set. + Find the next commit that has the given flag set. + the flag to test commits against. + + first commit index to test at. Applications may wish to begin + at 0, to test the first commit in the list. + + + index of the first commit at or after index begin + that has the specified flag set on it; -1 if no match is found. + +
+ + Find the next commit that has the given flag set. + Find the next commit that has the given flag set. + the flag to test commits against. + + first commit index to test at. Applications may wish to begin + at size()-1, to test the last commit in the + list. + + + index of the first commit at or before index begin + that has the specified flag set on it; -1 if no match is found. + + + + Set the revision walker this list populates itself from. + Set the revision walker this list populates itself from. + the walker to populate from. + RevCommitList<E>.FillTo(int) + + + Is this list still pending more items? + + true if + RevCommitList<E>.FillTo(int) + might be able to extend the list + size when called. + + + + Ensure this list contains at least a specified number of commits. + + Ensure this list contains at least a specified number of commits. +

+ The revision walker specified by + RevCommitList<E>.Source(RevWalk) + + is pumped until + the given number of commits are contained in this list. If there are + fewer total commits available from the walk then the method will return + early. Callers can test the final size of the list by + RevObjectList<E>.Count() + to + determine if the high water mark specified was met. + + + number of commits the caller wants this list to contain when + the fill operation is complete. + + + see + RevWalk.Next() + + + see + RevWalk.Next() + + + see + RevWalk.Next() + + + +

Ensures all commits until the given commit are loaded. + + Ensures all commits until the given commit are loaded. The revision + walker specified by + RevCommitList<E>.Source(RevWalk) + + is pumped until the + specified commit is loaded. Callers can test the final size of the list + by + RevObjectList<E>.Count() + to determine if the high water mark specified was met. +

+ + + commit the caller wants this list to contain when the fill + operation is complete. + + + maximum number of commits the caller wants this list to + contain when the fill operation is complete. If highMark is 0 + the walk is pumped until the specified commit or the end of + the walk is reached. + + + see + RevWalk.Next() + + + see + RevWalk.Next() + + + see + RevWalk.Next() + + + +

Optional callback invoked when commits enter the list by fillTo. + + Optional callback invoked when commits enter the list by fillTo. +

+ This method is only called during + RevCommitList<E>.FillTo(int) + . + + the list position this object will appear at. + the object being added (or set) into the list. + + +

Find the set of lanes passing through a commit's row. + + Find the set of lanes passing through a commit's row. +

+ Lanes passing through a commit are lanes that the commit is not directly + on, but that need to travel through this commit to connect a descendant + (child) commit to an ancestor (parent) commit. Typically these lanes will + be drawn as lines in the passed commit's box, and the passed commit won't + appear to be connected to those lines. +

+ This method modifies the passed collection by adding the lanes in any + order. + + the commit the caller needs to get the lanes from. + collection to add the passing lanes into. + + +

+ when connecting a plotcommit to the child make sure that you will not be + located on a lane on which a passed commit is located on. + + + when connecting a plotcommit to the child make sure that you will not be + located on a lane on which a passed commit is located on. Otherwise we + would have to draw a line through a commit. + + + + +
+ + a new Lane appropriate for this particular PlotList. + + + + Return colors and other reusable information to the plotter when a lane + is no longer needed. + + + Return colors and other reusable information to the plotter when a lane + is no longer needed. + + + + + A line space within the graph. + + A line space within the graph. +

+ Commits are strung onto a lane. For many UIs a lane represents a column. + + + +

Logical location of this lane within the graphing plane. + Logical location of this lane within the graphing plane. + location of this lane, 0 through the maximum number of lanes. +
+ + Specialized RevWalk for visualization of a commit graph. + Specialized RevWalk for visualization of a commit graph. + + + Walks a commit graph and produces the matching commits in order. + + Walks a commit graph and produces the matching commits in order. +

+ A RevWalk instance can only be used once to generate results. Running a + second time requires creating a new RevWalk instance, or invoking + Reset() + before starting again. Resetting an existing instance may be + faster for some applications as commit body parsing can be avoided on the + later invocations. +

+ RevWalk instances are not thread-safe. Applications must either restrict + usage of a RevWalk instance to a single thread, or implement their own + synchronization at a higher level. +

+ Multiple simultaneous RevWalk instances per + NGit.Repository + are permitted, + even from concurrent threads. Equality of + RevCommit + s from two + different RevWalk instances is never true, even if their + NGit.ObjectId + s + are equal (and thus they describe the same commit). +

+ The offered iterator is over the list of RevCommits described by the + configuration of this instance. Applications should restrict themselves to + using either the provided Iterator or + Next() + , but never use both on + the same RevWalk at the same time. The Iterator may buffer RevCommits, while + Next() + does not. + + + +

Set on objects whose important header data has been loaded. + + Set on objects whose important header data has been loaded. +

+ For a RevCommit this indicates we have pulled apart the tree and parent + references from the raw bytes available in the repository and translated + those to our own local RevTree and RevCommit instances. The raw buffer is + also available for message and other header filtering. +

+ For a RevTag this indicates we have pulled part the tag references to + find out who the tag refers to, and what that object's type is. + + + +

+ Set on RevCommit instances added to our + pending + queue. +

+ We use this flag to avoid adding the same commit instance twice to our + queue, especially if we reached it by more than one path. +

+
+ + Set on RevCommit instances the caller does not want output. + + Set on RevCommit instances the caller does not want output. +

+ We flag commits as uninteresting if the caller does not want commits + reachable from a commit given to + MarkUninteresting(RevCommit) + . + This flag is always carried into the commit's parents and is a key part + of the "rev-list B --not A" feature; A is marked UNINTERESTING. + + + +

Set on a RevCommit that can collapse out of the history. + + Set on a RevCommit that can collapse out of the history. +

+ If the + treeFilter + concluded that this commit matches his + parents' for all of the paths that the filter is interested in then we + mark the commit REWRITE. Later we can rewrite the parents of a REWRITE + child to remove chains of REWRITE commits before we produce the child to + the application. + + RewriteGenerator + + +

Temporary mark for use within generators or filters. + + Temporary mark for use within generators or filters. +

+ This mark is only for local use within a single scope. If someone sets + the mark they must unset it before any other code can see the mark. + + + +

+ Temporary mark for use within + TopoSortGenerator + . +

+ This mark indicates the commit could not produce when it wanted to, as at + least one child was behind it. Commits with this flag are delayed until + all children have been output first. +

+
+ + Number of flag bits we keep internal for our own use. + Number of flag bits we keep internal for our own use. See above flags. + + + Exists ONLY to support legacy Tag and Commit objects. + Exists ONLY to support legacy Tag and Commit objects. + + + Create a new revision walker for a given repository. + Create a new revision walker for a given repository. + + the repository the walker will obtain data from. An + ObjectReader will be created by the walker, and must be + released by the caller. + + + + Create a new revision walker for a given repository. + Create a new revision walker for a given repository. + + the reader the walker will obtain data from. The reader should + be released by the caller when the walker is no longer + required. + + + + the reader this walker is using to load objects. + + + Release any resources used by this walker's reader. + + Release any resources used by this walker's reader. +

+ A walker that has been released can be used again, but may need to be + released after the subsequent usage. + + + +

Mark a commit to start graph traversal from. + + Mark a commit to start graph traversal from. +

+ Callers are encouraged to use + ParseCommit(NGit.AnyObjectId) + to obtain + the commit reference, rather than + LookupCommit(NGit.AnyObjectId) + , as + this method requires the commit to be parsed before it can be added as a + root for the traversal. +

+ The method will automatically parse an unparsed commit, but error + handling may be more difficult for the application to explain why a + RevCommit is not actually a commit. The object pool of this walker would + also be 'poisoned' by the non-commit RevCommit. + + + the commit to start traversing from. The commit passed must be + from this same revision walker. + + + the commit supplied is not available from the object + database. This usually indicates the supplied commit is + invalid, but the reference was constructed during an earlier + invocation to + LookupCommit(NGit.AnyObjectId) + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually a commit. This usually + indicates the caller supplied a non-commit SHA-1 to + LookupCommit(NGit.AnyObjectId) + . + + a pack file or loose object could not be read. + + + +

Mark commits to start graph traversal from. + Mark commits to start graph traversal from. + + commits to start traversing from. The commits passed must be + from this same revision walker. + + + one of the commits supplied is not available from the object + database. This usually indicates the supplied commit is + invalid, but the reference was constructed during an earlier + invocation to + LookupCommit(NGit.AnyObjectId) + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually a commit. This usually + indicates the caller supplied a non-commit SHA-1 to + LookupCommit(NGit.AnyObjectId) + . + + a pack file or loose object could not be read. + +
+ + Mark a commit to not produce in the output. + + Mark a commit to not produce in the output. +

+ Uninteresting commits denote not just themselves but also their entire + ancestry chain, back until the merge base of an uninteresting commit and + an otherwise interesting commit. +

+ Callers are encouraged to use + ParseCommit(NGit.AnyObjectId) + to obtain + the commit reference, rather than + LookupCommit(NGit.AnyObjectId) + , as + this method requires the commit to be parsed before it can be added as a + root for the traversal. +

+ The method will automatically parse an unparsed commit, but error + handling may be more difficult for the application to explain why a + RevCommit is not actually a commit. The object pool of this walker would + also be 'poisoned' by the non-commit RevCommit. + + + the commit to start traversing from. The commit passed must be + from this same revision walker. + + + the commit supplied is not available from the object + database. This usually indicates the supplied commit is + invalid, but the reference was constructed during an earlier + invocation to + LookupCommit(NGit.AnyObjectId) + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually a commit. This usually + indicates the caller supplied a non-commit SHA-1 to + LookupCommit(NGit.AnyObjectId) + . + + a pack file or loose object could not be read. + + + +

Determine if a commit is reachable from another commit. + + Determine if a commit is reachable from another commit. +

+ A commit base is an ancestor of tip if we + can find a path of commits that leads from tip and ends at + base. +

+ This utility function resets the walker, inserts the two supplied + commits, and then executes a walk until an answer can be obtained. + Currently allocated RevFlags that have been added to RevCommit instances + will be retained through the reset. + + commit the caller thinks is reachable from tip. + + commit to start iteration from, and which is most likely a + descendant (child) of base. + + + true if there is a path directly from tip to + base (and thus base is fully merged + into tip); false otherwise. + + + one or or more of the next commit's parents are not available + from the object database, but were thought to be candidates + for traversal. This usually indicates a broken link. + + + one or or more of the next commit's parents are not actually + commit objects. + + a pack file or loose object could not be read. + + + +

Pop the next most recent commit. + Pop the next most recent commit. + next most recent commit; null if traversal is over. + + one or or more of the next commit's parents are not available + from the object database, but were thought to be candidates + for traversal. This usually indicates a broken link. + + + one or or more of the next commit's parents are not actually + commit objects. + + a pack file or loose object could not be read. + +
+ + Obtain the sort types applied to the commits returned. + Obtain the sort types applied to the commits returned. + + the sorting strategies employed. At least one strategy is always + used, but that strategy may be + RevSort.NONE + . + + + + Check whether the provided sorting strategy is enabled. + Check whether the provided sorting strategy is enabled. + a sorting strategy to look for. + true if this strategy is enabled, false otherwise + + + Select a single sorting strategy for the returned commits. + + Select a single sorting strategy for the returned commits. +

+ Disables all sorting strategies, then enables only the single strategy + supplied by the caller. + + a sorting strategy to enable. + + +

Add or remove a sorting strategy for the returned commits. + + Add or remove a sorting strategy for the returned commits. +

+ Multiple strategies can be applied at once, in which case some strategies + may take precedence over others. As an example, + RevSort.TOPO + must + take precedence over + RevSort.COMMIT_TIME_DESC + , otherwise it + cannot enforce its ordering. + + a sorting strategy to enable or disable. + + true if this strategy should be used, false if it should be + removed. + + + +

Get the currently configured commit filter. + Get the currently configured commit filter. + the current filter. Never null as a filter is always needed. +
+ + Set the commit filter for this walker. + + Set the commit filter for this walker. +

+ Multiple filters may be combined by constructing an arbitrary tree of + AndRevFilter or OrRevFilter instances to + describe the boolean expression required by the application. Custom + filter implementations may also be constructed by applications. +

+ Note that filters are not thread-safe and may not be shared by concurrent + RevWalk instances. Every RevWalk must be supplied its own unique filter, + unless the filter implementation specifically states it is (and always + will be) thread-safe. Callers may use + NGit.Revwalk.Filter.RevFilter.Clone() + + to create + a unique filter tree for this RevWalk instance. + + + the new filter. If null the special + NGit.Revwalk.Filter.RevFilter.ALL + filter will be used instead, as it matches every commit. + + NGit.Revwalk.Filter.AndRevFilter + + NGit.Revwalk.Filter.OrRevFilter + + +

Get the tree filter used to simplify commits by modified paths. + Get the tree filter used to simplify commits by modified paths. + + the current filter. Never null as a filter is always needed. If + no filter is being applied + NGit.Treewalk.Filter.TreeFilter.ALL + + is returned. + +
+ + Set the tree filter used to simplify commits by modified paths. + + Set the tree filter used to simplify commits by modified paths. +

+ If null or + NGit.Treewalk.Filter.TreeFilter.ALL + + the path limiter is removed. Commits + will not be simplified. +

+ If non-null and not + NGit.Treewalk.Filter.TreeFilter.ALL + + then the tree filter will be + installed and commits will have their ancestry simplified to hide commits + that do not contain tree entries matched by the filter. +

+ Usually callers should be inserting a filter graph including + NGit.Treewalk.Filter.TreeFilter.ANY_DIFF + + along with one or more + NGit.Treewalk.Filter.PathFilter + instances. + + + new filter. If null the special + NGit.Treewalk.Filter.TreeFilter.ALL + + filter + will be used instead, as it matches everything. + + NGit.Treewalk.Filter.PathFilter + + +

+ Should the body of a commit or tag be retained after parsing its headers? +

+ Usually the body is always retained, but some application code might not + care and would prefer to discard the body of a commit as early as + possible, to reduce memory usage. +

+ + Should the body of a commit or tag be retained after parsing its headers? +

+ Usually the body is always retained, but some application code might not + care and would prefer to discard the body of a commit as early as + possible, to reduce memory usage. + + true if the body should be retained; false it is discarded. + + +

Set whether or not the body of a commit or tag is retained. + + Set whether or not the body of a commit or tag is retained. +

+ If a body of a commit or tag is not retained, the application must + call + ParseBody(RevObject) + before the body can be safely + accessed through the type specific access methods. + + true to retain bodies; false to discard them early. + + +

Locate a reference to a blob without loading it. + + Locate a reference to a blob without loading it. +

+ The blob may or may not exist in the repository. It is impossible to tell + from this method's return value. + + name of the blob object. + reference to the blob object. Never null. + + +

Locate a reference to a tree without loading it. + + Locate a reference to a tree without loading it. +

+ The tree may or may not exist in the repository. It is impossible to tell + from this method's return value. + + name of the tree object. + reference to the tree object. Never null. + + +

Locate a reference to a commit without loading it. + + Locate a reference to a commit without loading it. +

+ The commit may or may not exist in the repository. It is impossible to + tell from this method's return value. +

+ See + ParseHeaders(RevObject) + and + ParseBody(RevObject) + for loading contents. + + name of the commit object. + reference to the commit object. Never null. + + +

Locate a reference to a tag without loading it. + + Locate a reference to a tag without loading it. +

+ The tag may or may not exist in the repository. It is impossible to tell + from this method's return value. + + name of the tag object. + reference to the tag object. Never null. + + +

Locate a reference to any object without loading it. + + Locate a reference to any object without loading it. +

+ The object may or may not exist in the repository. It is impossible to + tell from this method's return value. + + name of the object. + type of the object. Must be a valid Git object type. + reference to the object. Never null. + + +

Locate an object that was previously allocated in this walk. + Locate an object that was previously allocated in this walk. + name of the object. + + reference to the object if it has been previously located; + otherwise null. + +
+ + Locate a reference to a commit and immediately parse its content. + + Locate a reference to a commit and immediately parse its content. +

+ Unlike + LookupCommit(NGit.AnyObjectId) + this method only returns + successfully if the commit object exists, is verified to be a commit, and + was parsed without error. + + name of the commit object. + reference to the commit object. Never null. + the supplied commit does not exist. + + the supplied id is not a commit or an annotated tag. + + a pack file or loose object could not be read. + + + +

Locate a reference to a tree. + + Locate a reference to a tree. +

+ This method only returns successfully if the tree object exists, is + verified to be a tree. + + + name of the tree object, or a commit or annotated tag that may + reference a tree. + + reference to the tree object. Never null. + the supplied tree does not exist. + + the supplied id is not a tree, a commit or an annotated tag. + + a pack file or loose object could not be read. + + + +

Locate a reference to an annotated tag and immediately parse its content. + + + Locate a reference to an annotated tag and immediately parse its content. +

+ Unlike + LookupTag(NGit.AnyObjectId) + this method only returns + successfully if the tag object exists, is verified to be a tag, and was + parsed without error. + + name of the tag object. + reference to the tag object. Never null. + the supplied tag does not exist. + + the supplied id is not a tag or an annotated tag. + + a pack file or loose object could not be read. + + + +

Locate a reference to any object and immediately parse its headers. + + Locate a reference to any object and immediately parse its headers. +

+ This method only returns successfully if the object exists and was parsed + without error. Parsing an object can be expensive as the type must be + determined. For blobs this may mean the blob content was unpacked + unnecessarily, and thrown away. + + name of the object. + reference to the object. Never null. + the supplied does not exist. + + a pack file or loose object could not be read. + + + + + + + + + + + + + + + + + + + + +

Asynchronous object parsing. + Asynchronous object parsing. + + objects to open from the object store. The supplied collection + must not be modified until the queue has finished. + + + if true missing objects are reported by calling failure with a + MissingObjectException. This may be more expensive for the + implementation to guarantee. If false the implementation may + choose to report MissingObjectException, or silently skip over + the object with no warning. + + queue to read the objects from. +
+ + Ensure the object's critical headers have been parsed. + + Ensure the object's critical headers have been parsed. +

+ This method only returns successfully if the object exists and was parsed + without error. + + the object the caller needs to be parsed. + the supplied does not exist. + + a pack file or loose object could not be read. + + + +

Ensure the object's full body content is available. + + Ensure the object's full body content is available. +

+ This method only returns successfully if the object exists and was parsed + without error. + + the object the caller needs to be parsed. + the supplied does not exist. + + a pack file or loose object could not be read. + + + +

Peel back annotated tags until a non-tag object is found. + Peel back annotated tags until a non-tag object is found. + the starting object. + + If + obj + is not an annotated tag, + obj + . Otherwise + the first non-tag object that + obj + references. The + returned object's headers have been parsed. + + a referenced object cannot be found. + + a pack file or loose object could not be read. + +
+ + Create a new flag for application use during walking. + + Create a new flag for application use during walking. +

+ Applications are only assured to be able to create 24 unique flags on any + given revision walker instance. Any flags beyond 24 are offered only if + the implementation has extra free space within its internal storage. + + description of the flag, primarily useful for debugging. + newly constructed flag instance. + too many flags have been reserved on this revision walker. + + + +

Automatically carry a flag from a child commit to its parents. + + Automatically carry a flag from a child commit to its parents. +

+ A carried flag is copied from the child commit onto its parents when the + child commit is popped from the lowest level of walk's internal graph. + + the flag to carry onto parents, if set on a descendant. + + +

Automatically carry flags from a child commit to its parents. + + Automatically carry flags from a child commit to its parents. +

+ A carried flag is copied from the child commit onto its parents when the + child commit is popped from the lowest level of walk's internal graph. + + the flags to carry onto parents, if set on a descendant. + + +

Allow a flag to be recycled for a different use. + + Allow a flag to be recycled for a different use. +

+ Recycled flags always come back as a different Java object instance when + assigned again by + NewFlag(string) + . +

+ If the flag was previously being carried, the carrying request is + removed. Disposing of a carried flag while a traversal is in progress has + an undefined behavior. + + the to recycle. + + +

Resets internal state and allows this instance to be used again. + + Resets internal state and allows this instance to be used again. +

+ Unlike + Dispose() + previously acquired RevObject (and RevCommit) + instances are not invalidated. RevFlag instances are not invalidated, but + are removed from all RevObjects. + + + +

Resets internal state and allows this instance to be used again. + + Resets internal state and allows this instance to be used again. +

+ Unlike + Dispose() + previously acquired RevObject (and RevCommit) + instances are not invalidated. RevFlag instances are not invalidated, but + are removed from all RevObjects. + + + application flags that should not be cleared from + existing commit objects. + + + +

Resets internal state and allows this instance to be used again. + + Resets internal state and allows this instance to be used again. +

+ Unlike + Dispose() + previously acquired RevObject (and RevCommit) + instances are not invalidated. RevFlag instances are not invalidated, but + are removed from all RevObjects. + + + application flags that should not be cleared from + existing commit objects. + + + +

Resets internal state and allows this instance to be used again. + + Resets internal state and allows this instance to be used again. +

+ Unlike + Dispose() + previously acquired RevObject (and RevCommit) + instances are not invalidated. RevFlag instances are not invalidated, but + are removed from all RevObjects. + + + application flags that should not be cleared from + existing commit objects. + + + +

Dispose all internal state and invalidate all RevObject instances. + + Dispose all internal state and invalidate all RevObject instances. +

+ All RevObject (and thus RevCommit, etc.) instances previously acquired + from this RevWalk are invalidated by a dispose call. Applications must + not retain or use RevObject instances obtained prior to the dispose call. + All RevFlag instances are also invalidated, and must not be reused. + + + +

Returns an Iterator over the commits of this walker. + + Returns an Iterator over the commits of this walker. +

+ The returned iterator is only useful for one walk. If this RevWalk gets + reset a new iterator must be obtained to walk over the new results. +

+ Applications must not use both the Iterator and the + Next() + API + at the same time. Pick one API and use that for the entire walk. +

+ If a checked exception is thrown during the walk (see + Next() + ) + it is rethrown from the Iterator as a + NGit.Errors.RevWalkException + . + + an iterator over this walker's commits. + NGit.Errors.RevWalkException + + +

Throws an exception if we have started producing output. + Throws an exception if we have started producing output. +
+ + + Create and return an + ObjectWalk + using the same objects. +

+ Prior to using this method, the caller must reset this RevWalk to clean + any flags that were used during the last traversal. +

+ The returned ObjectWalk uses the same ObjectReader, internal object pool, + and free RevFlags. Once the ObjectWalk is created, this RevWalk should + not be used anymore. +

+ a new walk, using the exact same object pool. +
+ + Construct a new unparsed commit for the given object. + Construct a new unparsed commit for the given object. + the object this walker requires a commit reference for. + a new unparsed reference for the object. + + + Queue to lookup and parse objects asynchronously. + + Queue to lookup and parse objects asynchronously. + A queue may perform background lookup of objects and supply them (possibly + out-of-order) to the application. + + + + Obtain the next object. + Obtain the next object. + the object; null if there are no more objects remaining. + + the object does not exist. There may be more objects + remaining in the iteration, the application should call + Next() + again. + + the object store cannot be accessed. + + + + + + + + Create a new revision walker for a given repository. + Create a new revision walker for a given repository. + the repository the walker will obtain data from. + + + Add additional refs to the walk + additional refs + System.IO.IOException + + + + + + + + Produces commits for RevWalk to return to applications. + + Produces commits for RevWalk to return to applications. +

+ Implementations of this basic class provide the real work behind RevWalk. + Conceptually a Generator is an iterator or a queue, it returns commits until + there are no more relevant. Generators may be piped/stacked together to + create a more complex set of operations. + + PendingGenerator + StartGenerator + + +

Commits are sorted by commit date and time, descending. + Commits are sorted by commit date and time, descending. +
+ + + Output may have + RevWalk.REWRITE + marked on it. + + + + + Output needs + RewriteGenerator + . + + + + Topological ordering is enforced (all children before parents). + Topological ordering is enforced (all children before parents). + + + + Output may have + RevWalk.UNINTERESTING + marked on it. + + + + Connect the supplied queue to this generator's own free list (if any). + Connect the supplied queue to this generator's own free list (if any). + another FIFO queue that wants to share our queue's free list. + + + Obtain flags describing the output behavior of this generator. + Obtain flags describing the output behavior of this generator. + + one or more of the constants declared in this class, describing + how this generator produces its results. + + + + Return the next commit to the application, or the next generator. + Return the next commit to the application, or the next generator. + next available commit; null if no more are to be returned. + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException + + + Current output flags set for this generator instance. + Current output flags set for this generator instance. + + + Add a commit to the queue. + + Add a commit to the queue. +

+ This method always adds the commit, even if it is already in the queue or + previously was in the queue but has already been removed. To control + queue admission use + Add(RevCommit, RevFlag) + . + + commit to add. + + +

Add a commit if it does not have a flag set yet, then set the flag. + + Add a commit if it does not have a flag set yet, then set the flag. +

+ This method permits the application to test if the commit has the given + flag; if it does not already have the flag than the commit is added to + the queue and the flag is set. This later will prevent the commit from + being added twice. + + commit to add. + flag that controls admission to the queue. + + +

Add a commit's parents if one does not have a flag set yet. + + Add a commit's parents if one does not have a flag set yet. +

+ This method permits the application to test if the commit has the given + flag; if it does not already have the flag than the commit is added to + the queue and the flag is set. This later will prevent the commit from + being added twice. + + commit whose parents should be added. + flag that controls admission to the queue. + + +

Remove the first commit from the queue. + Remove the first commit from the queue. + the first commit of this queue. +
+ + Remove all entries from this queue. + Remove all entries from this queue. + + + Create an empty queue. + Create an empty queue. + + + Next block in our chain of blocks; null if we are the last. + Next block in our chain of blocks; null if we are the last. + + + Our table of queued objects. + Our table of queued objects. + + + + Next valid entry in + objects + . + + + + + Next free entry in + objects + for addition at. + + + + Create an empty revision queue. + Create an empty revision queue. + + + + + + + + Reconfigure this queue to share the same free list as another. + + Reconfigure this queue to share the same free list as another. +

+ Multiple revision queues can be connected to the same free list, making + it less expensive for applications to shuttle commits between them. This + method arranges for the receiver to take from / return to the same free + list as the supplied queue. +

+ Free lists are not thread-safe. Applications must ensure that all queues + sharing the same free list are doing so from only a single thread. + + the other queue we will steal entries from. + + +

Next block in our chain of blocks; null if we are the last. + Next block in our chain of blocks; null if we are the last. +
+ + Our table of queued commits. + Our table of queued commits. + + + + Next valid entry in + commits + . + + + + + Next free entry in + commits + for addition at. + + + + + + + + + + + + + + A queue of commits sorted by commit time order. + A queue of commits sorted by commit time order. + + + Create an empty date queue. + Create an empty date queue. + + + + + + + + Peek at the next commit, without removing it. + Peek at the next commit, without removing it. + the next available commit; null if there are no commits left. + + + + Delays commits to be at least + PendingGenerator.OVER_SCAN + late. +

+ This helps to "fix up" weird corner cases resulting from clock skew, by + slowing down what we produce to the caller we get a better chance to ensure + PendingGenerator reached back far enough in the graph to correctly mark + commits + RevWalk.UNINTERESTING + if necessary. +

+ This generator should appear before + FixUninterestingGenerator + if the + lower level + pending + isn't already fully buffered. +

+
+ + + + + + + A queue of commits in FIFO order. + A queue of commits in FIFO order. + + + Create an empty FIFO queue. + Create an empty FIFO queue. + + + + + + + + Insert the commit pointer at the front of the queue. + Insert the commit pointer at the front of the queue. + the commit to insert into the queue. + + + + Filters out commits marked + RevWalk.UNINTERESTING + . +

+ This generator is only in front of another generator that has fully buffered + commits, such that we are called only after the + PendingGenerator + has + exhausted its input queue and given up. It skips over any uninteresting + commits that may have leaked out of the PendingGenerator due to clock skew + being detected in the commit objects. +

+
+ + + + + + + Updates the internal path filter to follow copy/renames. + + Updates the internal path filter to follow copy/renames. +

+ This is a special filter that performs + AND(path, ANY_DIFF) + , but also + triggers rename detection so that the path node is updated to include a prior + file name as the RevWalk traverses history. + The renames found will be reported to a + RenameCallback + if one is set. +

+ Results with this filter are unpredictable if the path being followed is a + subdirectory. + + + +

Create a new tree filter for a user supplied path. + + Create a new tree filter for a user supplied path. +

+ Path strings are relative to the root of the repository. If the user's + input should be assumed relative to a subdirectory of the repository the + caller must prepend the subdirectory's path prior to creating the filter. +

+ Path strings use '/' to delimit directories on all platforms. + + + the path to filter on. Must not be the empty string. All + trailing '/' characters will be trimmed before string's length + is checked or is used as part of the constructed filter. + + a new filter for the requested path. + the path supplied was the empty string. + + + + the path this filter matches. + + + + + + + + + the callback to which renames are reported, or null + if none + + + +

Sets the callback to which renames shall be reported. + Sets the callback to which renames shall be reported. + the callback to use +
+ + + Case insensitive key for a + FooterLine + . + + + + + Standard + Signed-off-by + + + + + + Standard + Acked-by + + + + + + Standard + CC + + + + + Create a key for a specific footer line. + Create a key for a specific footer line. + name of the footer line. + + + name of this footer line. + + + Single line at the end of a message, such as a "Signed-off-by: someone". + + + Single line at the end of a message, such as a "Signed-off-by: someone". +

+ These footer lines tend to be used to represent additional information about + a commit, like the path it followed through reviewers before finally being + accepted into the project's main repository as an immutable commit. + + RevCommit.GetFooterLines() + + + key to test this line's key name against. + + true if + key.getName().equalsIgnorecase(getKey()) + . + + + + + key name of this footer; that is the text before the ":" on the + line footer's line. The text is decoded according to the commit's + specified (or assumed) character encoding. + + + + + value of this footer; that is the text after the ":" and any + leading whitespace has been skipped. May be the empty string if + the footer has no value (line ended with ":"). The text is + decoded according to the commit's specified (or assumed) + character encoding. + + + +

Extract the email address (if present) from the footer. + + Extract the email address (if present) from the footer. +

+ If there is an email address looking string inside of angle brackets + (e.g. "<a@b>"), the return value is the part extracted from inside the + brackets. If no brackets are found, then + GetValue() + is returned + if the value contains an '@' sign. Otherwise, null. + + email address appearing in the value of this footer, or null. + + +

A queue of commits in LIFO order. + A queue of commits in LIFO order. +
+ + Create an empty LIFO queue. + Create an empty LIFO queue. + + + + + + + + Computes the merge base(s) of the starting commits. + + Computes the merge base(s) of the starting commits. +

+ This generator is selected if the RevFilter is only + NGit.Revwalk.Filter.RevFilter.MERGE_BASE + + . +

+ To compute the merge base we assign a temporary flag to each of the starting + commits. The maximum number of starting commits is bounded by the number of + free flags available in the RevWalk when the generator is initialized. These + flags will be automatically released on the next reset of the RevWalk, but + not until then, as they are assigned to commits throughout the history. +

+ Several internal flags are reused here for a different purpose, but this + should not have any impact as this generator should be run alone, and without + any other generators wrapped around it. + + + + + + + + +

Specialized subclass of RevWalk to include trees, blobs and tags. + + Specialized subclass of RevWalk to include trees, blobs and tags. +

+ Unlike RevWalk this subclass is able to remember starting roots that include + annotated tags, or arbitrary trees or blobs. Once commit generation is + complete and all commits have been popped by the application, individual + annotated tag, tree and blob objects can be popped through the additional + method + NextObject() + . +

+ Tree and blob objects reachable from interesting commits are automatically + scheduled for inclusion in the results of + NextObject() + , returning + each object exactly once. Objects are sorted and returned according to the + the commits that reference them and the order they appear within a tree. + Ordering can be affected by changing the + RevSort + used to order the + commits that are returned first. + + + +

+ Indicates a non-RevCommit is in + pendingObjects + . +

+ We can safely reuse + RevWalk.REWRITE + here for the same value as it + is only set on RevCommit and + pendingObjects + never has RevCommit + instances inserted into it. +

+
+ + Create a new revision and object walker for a given repository. + Create a new revision and object walker for a given repository. + the repository the walker will obtain data from. + + + Create a new revision and object walker for a given repository. + Create a new revision and object walker for a given repository. + + the reader the walker will obtain data from. The reader should + be released by the caller when the walker is no longer + required. + + + + Mark an object or commit to start graph traversal from. + + Mark an object or commit to start graph traversal from. +

+ Callers are encouraged to use + RevWalk.ParseAny(NGit.AnyObjectId) + + instead of + RevWalk.LookupAny(NGit.AnyObjectId, int) + + , as this method + requires the object to be parsed before it can be added as a root for the + traversal. +

+ The method will automatically parse an unparsed object, but error + handling may be more difficult for the application to explain why a + RevObject is not actually valid. The object pool of this walker would + also be 'poisoned' by the invalid RevObject. +

+ This method will automatically call + RevWalk.MarkStart(RevCommit) + if passed RevCommit instance, or a RevTag that directly (or indirectly) + references a RevCommit. + + + the object to start traversing from. The object passed must be + from this same revision walker. + + + the object supplied is not available from the object + database. This usually indicates the supplied object is + invalid, but the reference was constructed during an earlier + invocation to + RevWalk.LookupAny(NGit.AnyObjectId, int) + + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually the type of the instance + passed in. This usually indicates the caller used the wrong + type in a + RevWalk.LookupAny(NGit.AnyObjectId, int) + + call. + + a pack file or loose object could not be read. + + + +

Mark an object to not produce in the output. + + Mark an object to not produce in the output. +

+ Uninteresting objects denote not just themselves but also their entire + reachable chain, back until the merge base of an uninteresting commit and + an otherwise interesting commit. +

+ Callers are encouraged to use + RevWalk.ParseAny(NGit.AnyObjectId) + + instead of + RevWalk.LookupAny(NGit.AnyObjectId, int) + + , as this method + requires the object to be parsed before it can be added as a root for the + traversal. +

+ The method will automatically parse an unparsed object, but error + handling may be more difficult for the application to explain why a + RevObject is not actually valid. The object pool of this walker would + also be 'poisoned' by the invalid RevObject. +

+ This method will automatically call + RevWalk.MarkStart(RevCommit) + if passed RevCommit instance, or a RevTag that directly (or indirectly) + references a RevCommit. + + the object to start traversing from. The object passed must be + + the object supplied is not available from the object + database. This usually indicates the supplied object is + invalid, but the reference was constructed during an earlier + invocation to + RevWalk.LookupAny(NGit.AnyObjectId, int) + + . + + + the object was not parsed yet and it was discovered during + parsing that it is not actually the type of the instance + passed in. This usually indicates the caller used the wrong + type in a + RevWalk.LookupAny(NGit.AnyObjectId, int) + + call. + + a pack file or loose object could not be read. + + + + + + + + +

Pop the next most recent object. + Pop the next most recent object. + next most recent object; null if traversal is over. + + one or or more of the next objects are not available from the + object database, but were thought to be candidates for + traversal. This usually indicates a broken link. + + + one or or more of the objects in a tree do not match the type + indicated. + + a pack file or loose object could not be read. + +
+ + + + + + + Verify all interesting objects are available, and reachable. + + Verify all interesting objects are available, and reachable. +

+ Callers should populate starting points and ending points with + MarkStart(RevObject) + and + MarkUninteresting(RevObject) + and then use this method to verify all objects between those two points + exist in the repository and are readable. +

+ This method returns successfully if everything is connected; it throws an + exception if there is a connectivity problem. The exception message + provides some detail about the connectivity failure. + + + one or or more of the next objects are not available from the + object database, but were thought to be candidates for + traversal. This usually indicates a broken link. + + + one or or more of the objects in a tree do not match the type + indicated. + + a pack file or loose object could not be read. + + + +

Get the current object's complete path. + + Get the current object's complete path. +

+ This method is not very efficient and is primarily meant for debugging + and final output generation. Applications should try to avoid calling it, + and if invoked do so only once per interesting entry, where the name is + absolutely required for correct function. + + + complete path of the current entry, from the root of the + repository. If the current entry is in a subtree there will be at + least one '/' in the returned string. Null if the current entry + has no path, such as for annotated tags or root level trees. + + + +

Get the current object's path hash code. + + Get the current object's path hash code. +

+ This method computes a hash code on the fly for this path, the hash is + suitable to cluster objects that may have similar paths together. + + path hash code; any integer may be returned. + + + the internal buffer holding the current path. + + + + length of the path in + GetPathBuffer() + . + + + + + + + + + + + + + + +

Parent tree visit that entered this tree, null if root tree. + Parent tree visit that entered this tree, null if root tree. +
+ + The RevTree currently being iterated through. + The RevTree currently being iterated through. + + + + Canonical encoding of the tree named by + obj + . + + + + + Index of next entry to parse in + buf + . + + + + + Start of the current name entry in + buf + . + + + + + One past end of name, + nameEnd - namePtr + is the length. + + + + Number of bytes in the path leading up to this tree. + Number of bytes in the path leading up to this tree. + + + Default (and first pass) RevCommit Generator implementation for RevWalk. + + + Default (and first pass) RevCommit Generator implementation for RevWalk. +

+ This generator starts from a set of one or more commits and process them in + descending (newest to oldest) commit time order. Commits automatically cause + their parents to be enqueued for further processing, allowing the entire + commit graph to be walked. A + NGit.Revwalk.Filter.RevFilter + may be used to select a subset + of the commits and return them to the caller. + + + +

Number of additional commits to scan after we think we are done. + + Number of additional commits to scan after we think we are done. +

+ This small buffer of commits is scanned to ensure we didn't miss anything + as a result of clock skew when the commits were made. We need to set our + constant to 1 additional commit due to the use of a pre-increment + operator when accessing the value. + + + +

+ A commit near the end of time, to initialize + last + with. + +
+ + + Last commit produced to the caller from + Next() + . + + + + Number of commits we have remaining in our over-scan allotment. + + Number of commits we have remaining in our over-scan allotment. +

+ Only relevant if there are + UNINTERESTING + commits in the + pending + queue. + + + + + + + + +

A binary file, or a symbolic link. + A binary file, or a symbolic link. +
+ + Create a new blob reference. + Create a new blob reference. + object name for the blob. + + + + + + + + + + + + + + Application level mark bit for + RevObject + s. +

+ To create a flag use + RevWalk.NewFlag(string) + . +

+
+ + + Uninteresting by + RevWalk.MarkUninteresting(RevCommit) + + . +

+ We flag commits as uninteresting if the caller does not want commits + reachable from a commit to + RevWalk.MarkUninteresting(RevCommit) + + . + This flag is always carried into the commit's parents and is a key part + of the "rev-list B --not A" feature; A is marked UNINTERESTING. +

+ This is a static flag. Its RevWalk is not available. +

+
+ + Get the revision walk instance this flag was created from. + Get the revision walk instance this flag was created from. + the walker this flag was allocated out of, and belongs to. + + + + Multiple application level mark bits for + RevObject + s. + + RevFlag + + + Create an empty set of flags. + Create an empty set of flags. + + + Create a set of flags, copied from an existing set. + Create a set of flags, copied from an existing set. + the set to copy flags from. + + + Create a set of flags, copied from an existing collection. + Create a set of flags, copied from an existing collection. + the collection to copy flags from. + + + One level of contents, either an intermediate level or a leaf level. + One level of contents, either an intermediate level or a leaf level. + + + An annotated tag. + An annotated tag. + + + Parse an annotated tag from its canonical format. + + Parse an annotated tag from its canonical format. + This method constructs a temporary revision pool, parses the tag as + supplied, and returns it to the caller. Since the tag was built inside of + a private revision pool its object pointer will be initialized, but will + not have its headers loaded. + Applications are discouraged from using this API. Callers usually need + more than one object. Use + RevWalk.ParseTag(NGit.AnyObjectId) + + to obtain + a RevTag from an existing repository. + + the canonical formatted tag to be parsed. + + the parsed tag, in an isolated revision pool that is not + available to the caller. + + the tag contains a malformed header that cannot be handled. + + + + Parse an annotated tag from its canonical format. + + Parse an annotated tag from its canonical format. + This method inserts the tag directly into the caller supplied revision + pool, making it appear as though the tag exists in the repository, even + if it doesn't. The repository under the pool is not affected. + + + the revision pool to allocate the tag within. 
The tag's object + pointer will be obtained from this pool. + + the canonical formatted tag to be parsed. + + the parsed tag, in an isolated revision pool that is not + available to the caller. + + the tag contains a malformed header that cannot be handled. + + + + Create a new tag reference. + Create a new tag reference. + object name for the tag. + + + + + + + + + + + + + + + + Parse the tagger identity from the raw buffer. + + Parse the tagger identity from the raw buffer. +

+ This method parses and returns the content of the tagger line, after + taking the tag's character set into account and decoding the tagger + name and email address. This method is fairly expensive and produces a + new PersonIdent instance on each invocation. Callers should invoke this + method only if they are certain they will be outputting the result, and + should cache the return value for as long as necessary to use all + information from it. + + + identity of the tagger (name, email) and the time the tag + was made by the tagger; null if no tagger line was found. + + + +

Parse the complete tag message and decode it to a string. + + Parse the complete tag message and decode it to a string. +

+ This method parses and returns the message portion of the tag buffer, + after taking the tag's character set into account and decoding the buffer + using that character set. This method is a fairly expensive operation and + produces a new string on each invocation. + + decoded tag message as a string. Never null. + + +

Parse the tag message and return the first "line" of it. + + Parse the tag message and return the first "line" of it. +

+ The first line is everything up to the first pair of LFs. This is the + "oneline" format, suitable for output in a single line display. +

+ This method parses and returns the message portion of the tag buffer, + after taking the tag's character set into account and decoding the buffer + using that character set. This method is a fairly expensive operation and + produces a new string on each invocation. + + + decoded tag message as a string. Never null. The returned string + does not contain any LFs, even if the first paragraph spanned + multiple lines. Embedded LFs are converted to spaces. + + + +

Get a reference to the object this tag was placed on. + Get a reference to the object this tag was placed on. + object this tag refers to. +
+ + Get the name of this tag, from the tag header. + Get the name of this tag, from the tag header. + name of the tag, according to the tag header. + + + A reference to a tree of subtrees/files. + A reference to a tree of subtrees/files. + + + Create a new tree reference. + Create a new tree reference. + object name for the tree. + + + + + + + + + + + + + Replaces a RevCommit's parents until not colored with REWRITE. + + Replaces a RevCommit's parents until not colored with REWRITE. +

+ Before a RevCommit is returned to the caller its parents are updated to + create a dense DAG. Instead of reporting the actual parents as recorded when + the commit was created the returned commit will reflect the next closest + commit that matched the revision walker's filters. +

+ This generator is the second phase of a path limited revision walk and + assumes it is receiving RevCommits from + RewriteTreeFilter + , + after they have been fully buffered by + AbstractRevQueue + . The full + buffering is necessary to allow the simple loop used within our own + Rewrite(RevCommit) + to pull completely through a strand of + RevWalk.REWRITE + colored commits and come up with a simplification + that makes the DAG dense. Not fully buffering the commits first would cause + this loop to abort early, due to commits not being parsed and colored + correctly. + + RewriteTreeFilter + + +

+ For + Cleanup(RevCommit[]) + to remove duplicate parents. + +
+ + + + + + + First phase of a path limited revision walk. + + First phase of a path limited revision walk. +

+ This filter is ANDed to evaluate after all other filters and ties the + configured + NGit.Treewalk.Filter.TreeFilter + into the revision walking process. +

+ Each commit is differenced concurrently against all of its parents to look + for tree entries that are interesting to the TreeFilter. If none are found + the commit is colored with + RevWalk.REWRITE + , allowing a later pass + implemented by + RewriteGenerator + to remove those colored commits from + the DAG. + + RewriteGenerator + + +

Selects interesting revisions during walking. + + Selects interesting revisions during walking. +

+ This is an abstract interface. Applications may implement a subclass, or use + one of the predefined implementations already available within this package. + Filters may be chained together using AndRevFilter and + OrRevFilter to create complex boolean expressions. +

+ Applications should install the filter on a RevWalk by + NGit.Revwalk.RevWalk.SetRevFilter(RevFilter) + + prior to starting traversal. +

+ Unless specifically noted otherwise a RevFilter implementation is not thread + safe and may not be shared by different RevWalk instances at the same time. + This restriction allows RevFilter implementations to cache state within their + instances during + Include(NGit.Revwalk.RevWalk, NGit.Revwalk.RevCommit) + + if it is beneficial to + their implementation. Deep clones created by + Clone() + may be used to + construct a thread-safe copy of an existing filter. +

+ Message filters: +

    +
  • Author name/email: + AuthorRevFilter +
  • +
  • Committer name/email: + CommitterRevFilter +
  • +
  • Message body: + MessageRevFilter +
  • +
+

+ Merge filters: +

    +
  • Skip all merges: + NO_MERGES + .
  • +
+

+ Boolean modifiers: +

    +
  • AND: + AndRevFilter +
  • +
  • OR: + OrRevFilter +
  • +
  • NOT: + NotRevFilter +
  • +
+
+
+ + Default filter that always returns true (thread safe). + Default filter that always returns true (thread safe). + + + Default filter that always returns false (thread safe). + Default filter that always returns false (thread safe). + + + Excludes commits with more than one parent (thread safe). + Excludes commits with more than one parent (thread safe). + + + Selects only merge bases of the starting points (thread safe). + + Selects only merge bases of the starting points (thread safe). +

+ This is a special case filter that cannot be combined with any other + filter. Its include method always throws an exception as context + information beyond the arguments is necessary to determine if the + supplied commit is a merge base. + + + +

Create a new filter that does the opposite of this filter. + Create a new filter that does the opposite of this filter. + a new filter that includes commits this filter rejects. +
+ + true if the filter needs the commit body to be parsed. + + + Determine if the supplied commit should be included in results. + Determine if the supplied commit should be included in results. + the active walker this filter is being invoked from within. + + the commit currently being tested. The commit has been parsed + and its body is available for inspection only if the filter + returns true from + RequiresCommitBody() + . + + + true to include this commit in the results; false to have this + commit be omitted entirely from the results. + + + the filter knows for certain that no additional commits can + ever match, and the current commit doesn't match either. The + walk is halted and no more results are provided. + + + an object the filter needs to consult to determine its answer + does not exist in the Git repository the walker is operating + on. Filtering this commit is impossible without the object. + + + an object the filter needed to consult was not of the + expected object type. This usually indicates a corrupt + repository, as an object link is referencing the wrong type. + + + a loose object or pack file could not be read to obtain data + necessary for the filter to make its decision. + + + + Clone this revision filter, including its parameters. + + Clone this revision filter, including its parameters. +

+ This is a deep clone. If this filter embeds objects or other filters it + must also clone those, to ensure the instances do not share mutable data. + + another copy of this filter, suitable for another thread. + + + + + + + + + + + + + + +

Initial RevWalk generator that bootstraps a new walk. + + Initial RevWalk generator that bootstraps a new walk. +

+ Initially RevWalk starts with this generator as its chosen implementation. + The first request for a RevCommit from the RevWalk instance calls to our + Next() + method, and we replace ourselves with the best Generator + implementation available based upon the current RevWalk configuration. + + + + + + + + +

Sorts commits in topological order. + Sorts commits in topological order. +
+ + Create a new sorter and completely spin the generator. + + Create a new sorter and completely spin the generator. +

+ When the constructor completes the supplied generator will have no + commits remaining, as all of the commits will be held inside of this + generator's internal buffer. + + generator to pull all commits out of, and into this buffer. + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException + + + + + + + +

Includes a commit only if all subfilters include the same commit. + + Includes a commit only if all subfilters include the same commit. +

+ Classic shortcut behavior is used, so evaluation of the + RevFilter.Include(NGit.Revwalk.RevWalk, NGit.Revwalk.RevCommit) + + method stops as soon as a false + result is obtained. Applications can improve filtering performance by placing + faster filters that are more likely to reject a result earlier in the list. + + + +

Create a filter with two filters, both of which must match. + Create a filter with two filters, both of which must match. + first filter to test. + second filter to test. + a filter that must match both input filters. +
+ + Create a filter around many filters, all of which must match. + Create a filter around many filters, all of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match all input filters. + + + Create a filter around many filters, all of which must match. + Create a filter around many filters, all of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match all input filters. + + + + + + + + + + + + + Matches only commits whose author name matches the pattern. + Matches only commits whose author name matches the pattern. + + + Create a new author filter. + + Create a new author filter. +

+ An optimized substring search may be automatically selected if the + pattern does not contain any regular expression meta-characters. +

+ The search is performed using a case-insensitive comparison. The + character encoding of the commit message itself is not respected. The + filter matches on raw UTF-8 byte sequences. + + regular expression pattern to match. + + a new filter that matches the given expression against the author + name and address of a commit. + + + +

Abstract filter that searches text using extended regular expressions. + Abstract filter that searches text using extended regular expressions. +
+ + Encode a string pattern for faster matching on byte arrays. + + Encode a string pattern for faster matching on byte arrays. +

+ Force the characters to our funny UTF-8 only convention that we use on + raw buffers. This avoids needing to perform character set decodes on the + individual commit buffers. + + + original pattern string supplied by the user or the + application. + + + same pattern, but re-encoded to match our funny raw UTF-8 + character sequence + NGit.Util.RawCharSequence + . + + + +

Construct a new pattern matching filter. + Construct a new pattern matching filter. + + text of the pattern. Callers may want to surround their + pattern with ".*" on either end to allow matching in the + middle of the string. + + + should .* be wrapped around the pattern of ^ and $ are + missing? Most users will want this set. + + + should + ForceToRaw(string) + be applied to the pattern + before compiling it? + + + flags from + Sharpen.Pattern + to control how matching performs. + +
+ + Get the pattern this filter uses. + Get the pattern this filter uses. + the pattern this filter is applying to candidate strings. + + + + + + + + Obtain the raw text to match against. + Obtain the raw text to match against. + current commit being evaluated. + sequence for the commit's content that we need to match on. + + + Abstract filter that searches text using only substring search. + Abstract filter that searches text using only substring search. + + + Can this string be safely handled by a substring filter? + the pattern text proposed by the user. + + true if a substring filter can perform this pattern match; false + if + PatternMatchRevFilter + must be used instead. + + + + Construct a new matching filter. + Construct a new matching filter. + + text to locate. This should be a safe string as described by + the + Safe(string) + as regular expression meta + characters are treated as literals. + + + + + + + + + Obtain the raw text to match against. + Obtain the raw text to match against. + current commit being evaluated. + sequence for the commit's content that we need to match on. + + + Selects commits based upon the commit time field. + Selects commits based upon the commit time field. + + + Create a new filter to select commits before a given date/time. + Create a new filter to select commits before a given date/time. + the point in time to cut on. + a new filter to select commits on or before ts. + + + Create a new filter to select commits before a given date/time. + Create a new filter to select commits before a given date/time. + the point in time to cut on, in milliseconds + a new filter to select commits on or before ts. + + + Create a new filter to select commits after a given date/time. + Create a new filter to select commits after a given date/time. + the point in time to cut on. + a new filter to select commits on or after ts. + + + Create a new filter to select commits after a given date/time. 
+ Create a new filter to select commits after a given date/time. + the point in time to cut on, in milliseconds. + a new filter to select commits on or after ts. + + + + Create a new filter to select commits after or equal a given date/time since + and before or equal a given date/time until. + + + Create a new filter to select commits after or equal a given date/time since + and before or equal a given date/time until. + + the point in time to cut on. + the point in time to cut off. + a new filter to select commits between the given date/times. + + + + Create a new filter to select commits after or equal a given date/time since + and before or equal a given date/time until. + + + Create a new filter to select commits after or equal a given date/time since + and before or equal a given date/time until. + + the point in time to cut on, in milliseconds. + the point in time to cut off, in millisconds. + a new filter to select commits between the given date/times. + + + + + + + + + + + + + + + + + + + + + Matches only commits whose committer name matches the pattern. + Matches only commits whose committer name matches the pattern. + + + Create a new committer filter. + + Create a new committer filter. +

+ An optimized substring search may be automatically selected if the + pattern does not contain any regular expression meta-characters. +

+ The search is performed using a case-insensitive comparison. The + character encoding of the commit message itself is not respected. The + filter matches on raw UTF-8 byte sequences. + + regular expression pattern to match. + + a new filter that matches the given expression against the author + name and address of a commit. + + + +

Matches only commits whose message matches the pattern. + Matches only commits whose message matches the pattern. +
+ + Create a message filter. + + Create a message filter. +

+ An optimized substring search may be automatically selected if the + pattern does not contain any regular expression meta-characters. +

+ The search is performed using a case-insensitive comparison. The + character encoding of the commit message itself is not respected. The + filter matches on raw UTF-8 byte sequences. + + regular expression pattern to match. + + a new filter that matches the given expression against the + message body of the commit. + + + +

Includes a commit only if the subfilter does not include the commit. + Includes a commit only if the subfilter does not include the commit. +
+ + Create a filter that negates the result of another filter. + Create a filter that negates the result of another filter. + filter to negate. + a filter that does the reverse of a. + + + + + + + + Includes a commit if any subfilters include the same commit. + + Includes a commit if any subfilters include the same commit. +

+ Classic shortcut behavior is used, so evaluation of the + RevFilter.Include(NGit.Revwalk.RevWalk, NGit.Revwalk.RevCommit) + + method stops as soon as a true + result is obtained. Applications can improve filtering performance by placing + faster filters that are more likely to accept a result earlier in the list. + + + +

Create a filter with two filters, one of which must match. + Create a filter with two filters, one of which must match. + first filter to test. + second filter to test. + a filter that must match at least one input filter. +
+ + Create a filter around many filters, one of which must match. + Create a filter around many filters, one of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match at least one input filter. + + + Create a filter around many filters, one of which must match. + Create a filter around many filters, one of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match at least one input filter. + + + + + + + + + + + + + Matches only commits with some/all RevFlags already set. + Matches only commits with some/all RevFlags already set. + + + Create a new filter that tests for a single flag. + Create a new filter that tests for a single flag. + the flag to test. + filter that selects only commits with flag a. + + + Create a new filter that tests all flags in a set. + Create a new filter that tests all flags in a set. + set of flags to test. + filter that selects only commits with all flags in a. + + + Create a new filter that tests all flags in a set. + Create a new filter that tests all flags in a set. + set of flags to test. + filter that selects only commits with all flags in a. + + + Create a new filter that tests for any flag in a set. + Create a new filter that tests for any flag in a set. + set of flags to test. + filter that selects only commits with any flag in a. + + + Create a new filter that tests for any flag in a set. + Create a new filter that tests for any flag in a set. + set of flags to test. + filter that selects only commits with any flag in a. + + + + + + + + + + + + + + A + ByteWindow + with an underlying byte array for storage. + + + + A window of data currently stored within a cache. + + A window of data currently stored within a cache. +

+ All bytes in the window can be assumed to be "immediately available", that is + they are very likely already in memory, unless the operating system's memory + is very low and has paged part of this process out to disk. Therefore copying + bytes from a window is very inexpensive. + + + +

Copy bytes from the window to a caller supplied buffer. + Copy bytes from the window to a caller supplied buffer. + offset within the file to start copying from. + destination buffer to copy into. + offset within dstbuf to start copying into. + + number of bytes to copy. This value may exceed the number of + bytes remaining in the window starting at offset + pos. + + + number of bytes actually copied; this may be less than + cnt if cnt exceeded the number of + bytes available. + +
+ + Copy bytes from the window to a caller supplied buffer. + Copy bytes from the window to a caller supplied buffer. + offset within the window to start copying from. + destination buffer to copy into. + offset within dstbuf to start copying into. + + number of bytes to copy. This value may exceed the number of + bytes remaining in the window starting at offset + pos. + + + number of bytes actually copied; this may be less than + cnt if cnt exceeded the number of + bytes available. + + + + + + + + + + + + + + + + + + + + + + + A window for accessing git packs using a + Sharpen.ByteBuffer + for storage. + + ByteWindow + + + + + + + + + + The cached instance of an + ObjectDirectory + . +

+ This class caches the list of loose objects in memory, so the file system is + not queried with stat calls. +

+
+ + + Does the requested object exist in this database? +

+ Alternates (if present) are searched automatically. +

+ + Does the requested object exist in this database? +

+ Alternates (if present) are searched automatically. + + identity of the object to test for existence of. + + true if the specified object is stored in this database, or any + of the alternate databases. + + + +

Compute the location of a loose object file. + Compute the location of a loose object file. + identity of the loose object to map to the directory. + location of the object, if it were to exist as a loose object. +
+ + + + + Open an object from this database. + + Open an object from this database. +

+ Alternates (if present) are searched automatically. + + temporary working space associated with the calling thread. + identity of the object to open. + + a + NGit.ObjectLoader + for accessing the data of the named + object, or null if the object does not exist. + + System.IO.IOException + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ The set that contains unpacked objects identifiers, it is created when + the cached instance is created. + + + The set that contains unpacked objects identifiers, it is created when + the cached instance is created. + +
+ + The constructor + the wrapped database + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + The configuration file that is stored in the file of the file system. + The configuration file that is stored in the file of the file system. + + + Create a configuration with no default fallback. + Create a configuration with no default fallback. + the location of the configuration file on the file system + + + the file system abstraction which will be necessary to perform + certain file system operations. + + + + The constructor + the base configuration file + the location of the configuration file on the file system + + + the file system abstraction which will be necessary to perform + certain file system operations. + + + + location of the configuration file on disk + + + Load the configuration as a Git text style configuration file. + + Load the configuration as a Git text style configuration file. +

+ If the file does not exist, this configuration is cleared, and thus + behaves the same as though the file exists, but is empty. + + the file could not be read (but does exist). + + the file is not a properly formatted configuration file. + + + +

Save the configuration as a Git text style configuration file. + + Save the configuration as a Git text style configuration file. +

+ Warning: Although this method uses the traditional Git file + locking approach to protect against concurrent writes of the + configuration file, it does not ensure that the file has not been + modified since the last read, which means updates performed by other + objects accessing the same backing file may be lost. + + the file could not be written. + + + + returns true if the currently loaded configuration file is older + than the file on disk + + + +

Represents a Git repository. + + Represents a Git repository. A repository holds all objects and refs used for + managing source code (could by any type of file, but source code is what + SCM's are typically used for). + In Git terms all data is stored in GIT_DIR, typically a directory called + .git. A work tree is maintained unless the repository is a bare repository. + Typically the .git directory is located at the root of the work dir. +
    +
  • GIT_DIR +
      +
    • objects/ - objects
    • +
    • refs/ - tags and heads
    • +
    • config - configuration
    • +
    • info/ - more configurations
    • +
    +
  • +
+

+ This class is thread-safe. +

+ This implementation only handles a subtly undocumented subset of git features. + + + +

Construct a representation of a Git repository. + + Construct a representation of a Git repository. +

+ The work tree, object directory, alternate object directories and index + file locations are deduced from the given git directory and the default + rules by running + FileRepositoryBuilder + . This constructor is the + same as saying: +

+            new FileRepositoryBuilder().setGitDir(gitDir).build()
+            
+
+ GIT_DIR (the location of the repository metadata). + + the repository appears to already exist but cannot be + accessed. + + FileRepositoryBuilder +
+ + + A convenience API for + FileRepository(Sharpen.FilePath) + . + + GIT_DIR (the location of the repository metadata). + + the repository appears to already exist but cannot be + accessed. + + FileRepositoryBuilder + + + Create a repository using the local file system. + Create a repository using the local file system. + description of the repository's important paths. + + the user configuration file or repository configuration file + cannot be accessed. + + + + + + + + + + + + + + Create a new Git repository initializing the necessary files and + directories. + + + Create a new Git repository initializing the necessary files and + directories. + + if true, a bare repository is created. + in case of IO problem + + + the configuration of this repository + + + + Objects known to exist but not expressed by + NGit.Repository.GetAllRefs() + . +

+ When a repository borrows objects from another repository, it can + advertise that it safely has that other repository's references, without + exposing any other details about the other repository. This may help + a client trying to push changes avoid pushing more than it needs to. +

+ unmodifiable collection of other known objects. +
+ + Add a single existing pack to the list of available pack files. + Add a single existing pack to the list of available pack files. + path of the pack file to open. + path of the corresponding index file. + + index file could not be opened, read, or is not recognized as + a Git pack file index. + + + + + + + Detect index changes. + Detect index changes. + + + + + a + ReflogReader + for the supplied refname, or null if the + named ref does not exist. + + the ref could not be accessed. + + + the directory containing the objects owned by this repository. + + + the object database which stores this repository's data. + + + the reference database which stores the reference namespace. + + + + Constructs a + FileRepository + . +

+ Applications must set one of + NGit.BaseRepositoryBuilder<B, R>.SetGitDir(Sharpen.FilePath) + + or + NGit.BaseRepositoryBuilder<B, R>.SetWorkTree(Sharpen.FilePath) + + , or use + NGit.BaseRepositoryBuilder<B, R>.ReadEnvironment() + + or + NGit.BaseRepositoryBuilder<B, R>.FindGitDir() + + in order to configure the minimum property set + necessary to open a repository. +

+ Single repository applications trying to be compatible with other Git + implementations are encouraged to use a model such as: +

+            new FileRepositoryBuilder() //
+            .setGitDir(gitDirArgument) // --git-dir if supplied, no-op if null
+            .readEnviroment() // scan environment GIT_* variables
+            .findGitDir() // scan up the file system tree
+            .build()
+            
+
+
+ + Create a repository matching the configuration in this builder. + + Create a repository matching the configuration in this builder. +

+ If an option was not set, the build method will try to default the option + based on other options. If insufficient information is available, an + exception is thrown to the caller. + + a repository matching this configuration. + insufficient parameters were set. + + the repository could not be accessed to configure the rest of + the builder's parameters. + + + + + + + + + + + + + + + + + + + + + +

Inflates a delta in an incremental way. + + Inflates a delta in an incremental way. +

+ Implementations must provide a means to access a stream for the base object. + This stream may be accessed multiple times, in order to randomly position it + to match the copy instructions. A + DeltaStream + performs an efficient + skip by only moving through the delta stream, making restarts of stacked + deltas reasonably efficient. + + + +

Stream to read from the base object. + Stream to read from the base object. +
+ + + Current position within + baseStream + . + + + + + If + curcmd == CMD_COPY + , position the base has to be at. + + + + Total number of bytes in this current command. + Total number of bytes in this current command. + + + Construct a delta application stream, reading instructions. + Construct a delta application stream, reading instructions. + the stream to read delta instructions from. + + the delta instruction stream cannot be read, or is + inconsistent with the the base object information. + + + + Open the base stream. + + Open the base stream. +

+ The + DeltaStream + may close and reopen the base stream multiple + times if copy instructions use offsets out of order. This can occur if a + large block in the file was moved from near the top, to near the bottom. + In such cases the reopened stream is skipped to the target offset, so + skip(long) + should be as efficient as possible. + + + stream to read from the base object. This stream should not be + buffered (or should be only minimally buffered), and does not + need to support mark/reset. + + the base object cannot be opened for reading. + + + + length of the base object, in bytes. + the length of the base cannot be determined. + + + + total size of this stream, in bytes. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ An object representation + PackWriter + can consider for packing. + +
+ + + Special unknown value for + GetWeight() + . + + + + Stored in pack format, as a delta to another object. + Stored in pack format, as a delta to another object. + + + Stored in pack format, without delta. + Stored in pack format, without delta. + + + Only available after inflating to canonical format. + Only available after inflating to canonical format. + + + + relative size of this object's packed form. The special value + WEIGHT_UNKNOWN + can be returned to indicate the + implementation doesn't know, or cannot supply the weight up + front. + + + + + the storage format type, which must be one of + PACK_DELTA + , + PACK_WHOLE + , or + FORMAT_OTHER + . + + + + + identity of the object this delta applies to in order to recover + the original object content. This method should only be called if + GetFormat() + returned + PACK_DELTA + . + + + + + whether the current representation of the object has had delta + compression attempted on it. + + + + + NGit.Storage.Pack.ObjectToPack + for + ObjectDirectory + . + + + + + Per-object state used by + PackWriter + . +

+ PackWriter + uses this class to track the things it needs to include in + the newly generated pack file, and how to efficiently obtain the raw data for + each object as they are written to the output stream. +

+
+ + Description of an object stored in a pack file, including offset. + + Description of an object stored in a pack file, including offset. +

+ When objects are stored in packs Git needs the ObjectId and the offset + (starting position of the object data) to perform random-access reads of + objects from the pack. This extension of ObjectId includes the offset. + + + +

Create a new structure to remember information about an object. + Create a new structure to remember information about an object. + the identity of the object the new instance tracks. +
+ + + offset in pack when object has been already written, or 0 if it + has not been written yet + + + + Set the offset in pack when object has been written to. + Set the offset in pack when object has been written to. + offset where written object starts + + + the 32 bit CRC checksum for the packed data. + + + Record the 32 bit CRC checksum for the packed data. + Record the 32 bit CRC checksum for the packed data. + + checksum of all packed data (including object type code, + inflated length and delta base reference) as computed by + Sharpen.CRC32 + . + + + + Other object being packed that this will delta against. + Other object being packed that this will delta against. + + + + Bit field, from bit 0 to bit 31: +
    +
  • 1 bit: wantWrite
  • +
  • 1 bit: canReuseAsIs
  • +
  • 1 bit: doNotDelta
  • +
  • 1 bit: edgeObject
  • +
  • 1 bit: deltaAttempted
  • +
  • 3 bits: type
  • +
  • 4 bits: subclass flags (if any)
  • +
  • --
  • +
  • 20 bits: deltaDepth
  • +
+
+
+ + Hash of the object's tree path. + Hash of the object's tree path. + + + If present, deflated delta instruction stream for this object. + If present, deflated delta instruction stream for this object. + + + Construct for the specified object id. + Construct for the specified object id. + object id of object for packing + real type code of the object, not its in-pack type. + + + Construct for the specified object. + Construct for the specified object. + + identity of the object that will be packed. The object's + parsed status is undefined here. Implementers must not rely on + the object being parsed. + + + + + delta base object id if object is going to be packed in delta + representation; null otherwise - if going to be packed as a + whole object. + + + + + delta base object to pack if object is going to be packed in + delta representation and delta is specified as object to + pack; null otherwise - if going to be packed as a whole + object or delta base is specified only as id. + + + + Set delta base for the object. + + Set delta base for the object. Delta base set by this method is used + by + PackWriter + to write object - determines its representation + in a created pack. + + + delta base object or null if object should be packed as a + whole object. + + + + + true if object is going to be written as delta; false + otherwise. + + + + Check if object is already written in a pack. + + Check if object is already written in a pack. This information is + used to achieve delta-base precedence in a pack file. + + true if object is already written; false otherwise. + + + the type of this object. + + + + true if an existing representation was selected to be reused + as-is into the pack stream. + + + + Forget the reuse information previously stored. + + Forget the reuse information previously stored. +

+ Implementations may subclass this method, but they must also invoke the + super version with + super.clearReuseAsIs() + to ensure the flag is + properly cleared for the writer. + + + + the extended flags on this object, in the range [0x0, 0xf]. + + +

Determine if a particular extended flag bit has been set. + + Determine if a particular extended flag bit has been set. + This implementation may be faster than calling + GetExtendedFlags() + and testing the result. + + the flag mask to test, must be between 0x0 and 0xf. + true if any of the bits matching the mask are non-zero. +
+ + Set an extended flag bit. + + Set an extended flag bit. + This implementation is more efficient than getting the extended flags, + adding the bit, and setting them all back. + + the bits to set, must be between 0x0 and 0xf. + + + Clear an extended flag bit. + + Clear an extended flag bit. + This implementation is more efficient than getting the extended flags, + removing the bit, and setting them all back. + + the bits to clear, must be between 0x0 and 0xf. + + + Set the extended flags used by the subclass. + + Set the extended flags used by the subclass. + Subclass implementations may store up to 4 bits of information inside of + the internal flags field already used by the base ObjectToPack instance. + + + additional flag bits to store in the flags field. Due to space + constraints only values [0x0, 0xf] are permitted. + + + + Remember a specific representation for reuse at a later time. + + Remember a specific representation for reuse at a later time. +

+ Implementers should remember the representation chosen, so it can be + reused at a later time. + PackWriter + may invoke this method + multiple times for the same object, each time saving the current best + representation found. + + the object representation. + + +

Pack to reuse compressed data from, otherwise null. + Pack to reuse compressed data from, otherwise null. +
+ + + Offset of the object's header in + pack + . + + + + Length of the data section of the object. + Length of the data section of the object. + + + Git style file locking and replacement. + + Git style file locking and replacement. +

+ To modify a ref file Git tries to use an atomic update approach: we write the + new data into a brand new file, then rename it in place over the old name. + This way we can just delete the temporary file if anything goes wrong, and + nothing has been damaged. To coordinate access from multiple processes at + once Git tries to atomically create the new temporary file under a well-known + name. + + + +

Unlock the given file. + + Unlock the given file. +

+ This method can be used for recovering from a thrown + NGit.Errors.LockFailedException + . This method does not validate that the lock + is or is not currently held before attempting to unlock it. + + + true if unlocked, false if unlocking failed + + +

Get the lock file corresponding to the given file. + Get the lock file corresponding to the given file. + + lock file +
+ + Filter to skip over active lock files when listing a directory. + Filter to skip over active lock files when listing a directory. + + + Create a new lock for any file. + Create a new lock for any file. + the file that will be locked. + + the file system abstraction which will be necessary to perform + certain file system operations. + + + + Try to establish the lock. + Try to establish the lock. + + true if the lock is now held by the caller; false if it is held + by someone else. + + + the temporary output file could not be created. The caller + does not hold the lock. + + + + Try to establish the lock for appending. + Try to establish the lock for appending. + + true if the lock is now held by the caller; false if it is held + by someone else. + + + the temporary output file could not be created. The caller + does not hold the lock. + + + + Copy the current file content into the temporary file. + + Copy the current file content into the temporary file. +

+ This method saves the current file content by inserting it into the + temporary file, so that the caller can safely append rather than replace + the primary file. +

+ This method does nothing if the current file does not exist, or exists + but is empty. + + + the temporary file could not be written, or a read error + occurred while reading from the current file. The lock is + released before throwing the underlying IO exception to the + caller. + + + the temporary file could not be written. The lock is released + before throwing the underlying exception to the caller. + + + +

Write an ObjectId and LF to the temporary file. + Write an ObjectId and LF to the temporary file. + + the id to store in the file. The id will be written in hex, + followed by a sole LF. + + + the temporary file could not be written. The lock is released + before throwing the underlying IO exception to the caller. + + + the temporary file could not be written. The lock is released + before throwing the underlying exception to the caller. + +
+ + Write arbitrary data to the temporary file. + Write arbitrary data to the temporary file. + + the bytes to store in the temporary file. No additional bytes + are added, so if the file must end with an LF it must appear + at the end of the byte array. + + + the temporary file could not be written. The lock is released + before throwing the underlying IO exception to the caller. + + + the temporary file could not be written. The lock is released + before throwing the underlying exception to the caller. + + + + Obtain the direct output stream for this lock. + + Obtain the direct output stream for this lock. +

+ The stream may only be accessed once, and only after + Lock() + has + been successfully invoked and returned true. Callers must close the + stream prior to calling + Commit() + to commit the change. + + a stream to write to the new file. The stream is unbuffered. + + +

+ Request that + Commit() + remember modification time. +

+ This is an alias for + setNeedSnapshot(true) + . +

+ true if the commit method must remember the modification time. +
+ + + Request that + Commit() + remember the + FileSnapshot + . + + true if the commit method must remember the FileSnapshot. + + + + Request that + Commit() + force dirty data to the drive. + + true if dirty data should be forced to the drive. + + + Wait until the lock file information differs from the old file. + + Wait until the lock file information differs from the old file. +

+ This method tests the last modification date. If both are the same, this + method sleeps until it can force the new lock file's modification date to + be later than the target file. + + + the thread was interrupted before the last modified date of + the lock file was different from the last modified date of + the target file. + + + +

Commit this change and release the lock. + + Commit this change and release the lock. +

+ If this method fails (returns false) the lock is still released. + + + true if the commit was successful and the file contains the new + data; false if the commit failed and the file remains with the + old data. + + the lock is not held. + + +

Get the modification time of the output file when it was committed. + Get the modification time of the output file when it was committed. + modification time of the lock file right before we committed it. +
+ + + get the + FileSnapshot + just before commit. + + + + + Update the commit snapshot + GetCommitSnapshot() + before commit. +

+ This may be necessary if you need time stamp before commit occurs, e.g + while writing the index. +

+
+ + Unlock this file and abort this change. + + Unlock this file and abort this change. +

+ The temporary file (if created) is deleted before returning. + + + + + + + + + + + + + + + +

+ Traditional file system based + NGit.ObjectDatabase + . +

+ This is the classical object database representation for a Git repository, + where objects are stored loose by hashing them into directories by their + NGit.ObjectId + , or are stored in compressed containers known as + PackFile + s. +

+ Optionally an object database can reference one or more alternates; other + ObjectDatabase instances that are searched in addition to the current + database. +

+ Databases are divided into two halves: a half that is considered to be fast + to search (the + PackFile + s), and a half that is considered to be slow + to search (loose objects). When alternates are present the fast half is fully + searched (recursively through all alternates) before the slow half is + considered. +

+
+ + Maximum number of candidates offered as resolutions of abbreviation. + Maximum number of candidates offered as resolutions of abbreviation. + + + Initialize a reference to an on-disk object directory. + Initialize a reference to an on-disk object directory. + configuration this directory consults for write settings. + the location of the objects directory. + a list of alternate object directories + + the file system abstraction which will be necessary to perform + certain file system operations. + + an alternate object cannot be opened. + + + the location of the objects directory. + + + + + + Compute the location of a loose object file. + Compute the location of a loose object file. + identity of the loose object to map to the directory. + location of the object, if it were to exist as a loose object. + + + + unmodifiable collection of all known pack files local to this + directory. Most recent packs are presented first. Packs most + likely to contain more recent objects appear before packs + containing objects referenced by commits further back in the + history of the repository. + + + + + + + + + + Add a single existing pack to the list of available pack files. + Add a single existing pack to the list of available pack files. + path of the pack file to open. + path of the corresponding index file. + the pack that was opened and added to the database. + + index file could not be opened, read, or is not recognized as + a Git pack file index. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + State just before reading the pack directory. + State just before reading the pack directory. + + + + All known packs, sorted by + PackFile.SORT + . + + + + + Creates loose objects in a + ObjectDirectory + . + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + A Git version 2 pack file representation. + + A Git version 2 pack file representation. 
A pack file contains Git objects in + delta packed format yielding high compression of lots of object where some + objects are similar. + + + + Sorts PackFiles to be most recently created to least recently created. + Sorts PackFiles to be most recently created to least recently created. + + + + Serializes reads performed against + fd + . + + + + Objects we have tried to read, and discovered to be corrupt. + + Objects we have tried to read, and discovered to be corrupt. +

+ The list is allocated after the first corruption is found, and filled in + as more entries are discovered. Typically this list is never used, as + pack files do not usually contain corrupt objects. + + + +

Construct a reader for an existing, pre-indexed packfile. + Construct a reader for an existing, pre-indexed packfile. + path of the .idx file listing the contents. + path of the .pack file holding the data. +
+ + + + + the File object which locates this pack on disk. + + + the index for this pack file. + System.IO.IOException + + + + name extracted from + pack-*.pack + pattern. + + + + Determine if an object is contained within the pack file. + + Determine if an object is contained within the pack file. +

+ For performance reasons only the index file is searched; the main pack + content is ignored entirely. + + the object to look for. Must not be null. + true if the object is in this pack; false otherwise. + the index file cannot be loaded into memory. + + + +

Determines whether a .keep file exists for this pack file. + Determines whether a .keep file exists for this pack file. + true if a .keep file exist. +
+ + Get an object from this pack. + Get an object from this pack. + temporary working space associated with the calling thread. + the object to obtain from the pack. Must not be null. + + the object loader for the requested object if it is contained in + this pack; null if the object was not found. + + the pack file or the index could not be read. + + + + + + + Close the resources utilized by this repository + + + + Provide iterator over entries in associated pack index, that should also + exist in this pack file. + + + Provide iterator over entries in associated pack index, that should also + exist in this pack file. Objects returned by such iterator are mutable + during iteration. +

+ Iterator returns objects in SHA-1 lexicographical order. + + iterator over entries of associated pack index + PackIndex.Iterator() + + +

Obtain the total number of objects available in this pack. + + Obtain the total number of objects available in this pack. This method + relies on pack index, giving number of effectively available objects. + + number of objects in index of this pack, likewise in this pack + the index file cannot be loaded into memory. + +
+ + + Search for object id with the specified start offset in associated pack + (reverse) index. + + + Search for object id with the specified start offset in associated pack + (reverse) index. + + start offset of object to find + object id for this offset, or null if no object was found + the index file cannot be loaded into memory. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Child that applies onto this object. + Child that applies onto this object. + + + Offset of the delta object. + Offset of the delta object. + + + Size of the inflated delta stream. + Size of the inflated delta stream. + + + Total size of the delta's pack entry header (including base). + Total size of the delta's pack entry header (including base). + + + Offset of the base object this delta applies onto. + Offset of the base object this delta applies onto. + + + + Access path to locate objects by + NGit.ObjectId + in a + PackFile + . +

+ Indexes are strictly redundant information in that we can rebuild all of the + data held in the index file from the on disk representation of the pack file + itself, but it is faster to access for random requests because data is stored + by ObjectId. +

+
+ + Open an existing pack .idx file for reading. + + Open an existing pack .idx file for reading. +

+ The format of the file will be automatically detected and a proper access + implementation for that format will be constructed and returned to the + caller. The file may or may not be held open by the returned instance. + + existing pack .idx to read. + access implementation for the requested file. + the file does not exist. + + the file exists but could not be read due to security errors, + unrecognized data version, or unexpected data corruption. + + + +

Read an existing pack index file from a buffered stream. + + Read an existing pack index file from a buffered stream. +

+ The format of the file will be automatically detected and a proper access + implementation for that format will be constructed and returned to the + caller. The file may or may not be held open by the returned instance. + + + stream to read the index file from. The stream must be + buffered as some small IOs are performed against the stream. + The caller is responsible for closing the stream. + + a copy of the index in-memory. + the stream cannot be read. + the stream does not contain a valid pack index. + + + +

Footer checksum applied on the bottom of the pack file. + Footer checksum applied on the bottom of the pack file. +
+ + Determine if an object is contained within the pack file. + Determine if an object is contained within the pack file. + the object to look for. Must not be null. + true if the object is listed in this index; false otherwise. + + + Provide iterator that gives access to index entries. + + Provide iterator that gives access to index entries. Note, that iterator + returns reference to mutable object, the same reference in each call - + for performance reason. If client needs immutable objects, it must copy + returned object on its own. +

+ Iterator returns objects in SHA-1 lexicographical order. + + iterator over pack index entries + + +

Obtain the total number of objects described by this index. + Obtain the total number of objects described by this index. + + number of objects in this index, and likewise in the associated + pack that this index was generated from. + +
+ + Obtain the total number of objects needing 64 bit offsets. + Obtain the total number of objects needing 64 bit offsets. + + number of objects in this index using a 64 bit offset; that is an + object positioned after the 2 GB position within the file. + + + + + Get ObjectId for the n-th object entry returned by + Iterator() + . +

+ This method is a constant-time replacement for the following loop: +

+            Iterator<MutableEntry> eItr = index.iterator();
+            int curPosition = 0;
+            while (eItr.hasNext() && curPosition++ < nthPosition)
+            eItr.next();
+            ObjectId result = eItr.next().toObjectId();
+            
+
+ + position within the traversal of + Iterator() + that the + caller needs the object for. The first returned + MutableEntry + is 0, the second is 1, etc. + + the ObjectId for the corresponding entry. +
+ + + Get ObjectId for the n-th object entry returned by + Iterator() + . +

+ This method is a constant-time replacement for the following loop: +

+            Iterator<MutableEntry> eItr = index.iterator();
+            int curPosition = 0;
+            while (eItr.hasNext() && curPosition++ < nthPosition)
+            eItr.next();
+            ObjectId result = eItr.next().toObjectId();
+            
+
+ + unsigned 32 bit position within the traversal of + Iterator() + that the caller needs the object for. The + first returned + MutableEntry + is 0, the second is 1, + etc. Positions past 2**31-1 are negative, but still valid. + + the ObjectId for the corresponding entry. +
+ + Locate the file offset position for the requested object. + Locate the file offset position for the requested object. + name of the object to locate within the pack. + + offset of the object's header and compressed content; -1 if the + object does not exist in this index and is thus not stored in the + associated pack. + + + + + Retrieve stored CRC32 checksum of the requested object raw-data + (including header). + + + Retrieve stored CRC32 checksum of the requested object raw-data + (including header). + + id of object to look for + CRC32 checksum of specified object (at 32 less significant bits) + when requested ObjectId was not found in this index + + when this index doesn't support CRC32 checksum + + + + Check whether this index supports (has) CRC32 checksums for objects. + Check whether this index supports (has) CRC32 checksums for objects. + true if CRC32 is stored, false otherwise + + + Find objects matching the prefix abbreviation. + Find objects matching the prefix abbreviation. + + set to add any located ObjectIds to. This is an output + parameter. + + prefix to search for. + + maximum number of results to return. At most this many + ObjectIds should be added to matches before returning. + + the index cannot be read. + + + + Represent mutable entry of pack index consisting of object id and offset + in pack (both mutable). + + + Represent mutable entry of pack index consisting of object id and offset + in pack (both mutable). + + + + Returns offset for this index object entry + offset of this object in a pack file + + + hex string describing the object id of this entry. + + + a copy of the object id. + + + a complete copy of this entry, that won't modify + + + + Implementation must update + returnedNumber + before returning + element. + + + + + + + + + + + Support for the pack index v2 format. + Support for the pack index v2 format. + + + 256 arrays of contiguous object names. + 256 arrays of contiguous object names. 
+ + + + 256 arrays of the 32 bit offset data, matching + names + . + + + + + 256 arrays of the CRC-32 of objects, matching + names + . + + + + 64 bit offset table. + 64 bit offset table. + + + + + + + + + + + + + Creates a table of contents to support random access by + PackFile + . +

+ Pack index files (the .idx suffix in a pack file pair) + provides random access to any object in the pack by associating an ObjectId + to the byte offset within the pack where the object's data can be read. +

+
+ + Magic constant indicating post-version 1 format. + Magic constant indicating post-version 1 format. + + + Create a new writer for the oldest (most widely understood) format. + + Create a new writer for the oldest (most widely understood) format. +

+ This method selects an index format that can accurate describe the + supplied objects and that will be the most compatible format with older + Git implementations. +

+ Index version 1 is widely recognized by all Git implementations, but + index version 2 (and later) is not as well recognized as it was + introduced more than a year later. Index version 1 can only be used if + the resulting pack file is under 4 gigabytes in size; packs larger than + that limit must use index version 2. + + + the stream the index data will be written to. If not already + buffered it will be automatically wrapped in a buffered + stream. Callers are always responsible for closing the stream. + + + the objects the caller needs to store in the index. Entries + will be examined until a format can be conclusively selected. + + + a new writer to output an index file of the requested format to + the supplied stream. + + + no recognized pack index version can support the supplied + objects. This is likely a bug in the implementation. + + + +

Create a new writer instance for a specific index format version. + Create a new writer instance for a specific index format version. + + the stream the index data will be written to. If not already + buffered it will be automatically wrapped in a buffered + stream. Callers are always responsible for closing the stream. + + + index format version number required by the caller. Exactly + this formatted version will be written. + + + a new writer to output an index file of the requested format to + the supplied stream. + + + the version requested is not supported by this + implementation. + +
+ + The index data stream we are responsible for creating. + The index data stream we are responsible for creating. + + + A temporary buffer for use during IO to {link #out}. + A temporary buffer for use during IO to {link #out}. + + + The entries this writer must pack. + The entries this writer must pack. + + + SHA-1 checksum for the entire pack data. + SHA-1 checksum for the entire pack data. + + + Create a new writer instance. + Create a new writer instance. + + the stream this instance outputs to. If not already buffered + it will be automatically wrapped in a buffered stream. + + + + Write all object entries to the index stream. + + Write all object entries to the index stream. +

+ After writing the stream passed to the factory is flushed but remains + open. Callers are always responsible for closing the output stream. + + + sorted list of objects to store in the index. The caller must + have previously sorted the list using + NGit.Transport.PackedObjectInfo + 's + native + System.IComparable<T> + implementation. + + + checksum signature of the entire pack data content. This is + traditionally the last 20 bytes of the pack file's own stream. + + + an error occurred while writing to the output stream, or this + index format cannot store the object data supplied. + + + +

+ Writes the index file to + @out + . +

+ Implementations should go something like: +

+            writeFanOutTable();
+            for (final PackedObjectInfo po : entries)
+            writeOneEntry(po);
+            writeChecksumFooter();
+            
+

+ Where the logic for writeOneEntry is specific to the index + format in use. Additional headers/footers may be used if necessary and + the + entries + collection may be iterated over more than once if + necessary. Implementors therefore have complete control over the data. +

+ + an error occurred while writing to the output stream, or this + index format cannot store the object data supplied. + +
+ + Output the version 2 (and later) TOC header, with version number. + + Output the version 2 (and later) TOC header, with version number. +

+ Post version 1 all index files start with a TOC header that makes the + file an invalid version 1 file, and then includes the version number. + This header is necessary to recognize a version 1 from a version 2 + formatted index. + + version number of this index format being written. + an error occurred while writing to the output stream. + + + +

Output the standard 256 entry first-level fan-out table. + + Output the standard 256 entry first-level fan-out table. +

+ The fan-out table is 4 KB in size, holding 256 32-bit unsigned integer + counts. Each count represents the number of objects within this index + whose + NGit.AnyObjectId.FirstByte() + matches the count's position in the + fan-out table. + + an error occurred while writing to the output stream. + + + +

Output the standard two-checksum index footer. + + Output the standard two-checksum index footer. +

+ The standard footer contains two checksums (20 byte SHA-1 values): +

    +
  1. Pack data checksum - taken from the last 20 bytes of the pack file.
  2. +
  3. Index data checksum - checksum of all index bytes written, including + the pack data checksum above.
  4. +
+
+ an error occurred while writing to the output stream. + +
+ + Creates the version 1 (old style) pack table of contents files. + Creates the version 1 (old style) pack table of contents files. + PackIndexWriter + PackIndexV1 + + + + + + Creates the version 2 pack table of contents files. + Creates the version 2 pack table of contents files. + PackIndexWriter + PackIndexV2 + + + + + + + + + + + + + + + + + + + + + + + + + + + + Keeps track of a + PackFile + 's associated .keep file. + + + + Create a new lock for a pack file. + Create a new lock for a pack file. + location of the pack-*.pack file. + the filesystem abstraction used by the repository. + + + Create the pack-*.keep file, with the given message. + Create the pack-*.keep file, with the given message. + message to store in the file. + true if the keep file was successfully written; false otherwise. + the keep file could not be written. + + + Remove the .keep file that holds this pack in place. + Remove the .keep file that holds this pack in place. + if deletion of .keep file failed + + + +

+ Reverse index for forward pack index. +

+ +

+ Reverse index for forward pack index. Provides operations based on offset + instead of object id. Such offset-based reverse lookups are performed in + O(log n) time. + + PackIndex + PackFile + + +

Index we were created from, and that has our ObjectId data. + Index we were created from, and that has our ObjectId data. +
+ + (offset31, truly) Offsets accommodating in 31 bits. + (offset31, truly) Offsets accommodating in 31 bits. + + + Offsets not accommodating in 31 bits. + Offsets not accommodating in 31 bits. + + + + Position of the corresponding + offsets32 + in + index + . + + + + + Position of the corresponding + offsets64 + in + index + . + + + + + Create reverse index from straight/forward pack index, by indexing all + its entries. + + + Create reverse index from straight/forward pack index, by indexing all + its entries. + + forward index - entries to (reverse) index. + + + + Search for object id with the specified start offset in this pack + (reverse) index. + + + Search for object id with the specified start offset in this pack + (reverse) index. + + start offset of object to find. + object id for this offset, or null if no object was found. + + + + Search for the next offset to the specified offset in this pack (reverse) + index. + + + Search for the next offset to the specified offset in this pack (reverse) + index. + + + start offset of previous object (must be valid-existing + offset). + + + maximum offset in a pack (returned when there is no next + offset). + + + offset of the next object in a pack or maxOffset if provided + offset was the last one. + + when there is no object with the provided offset. + + + + + Traditional file system based + NGit.RefDatabase + . +

+ This is the classical reference database representation for a Git repository. + References are stored in two formats: loose, and packed. +

+ Loose references are stored as individual files within the + refs/ + directory. The file name matches the reference name and the file contents is + the current + NGit.ObjectId + in string form. +

+ Packed references are stored in a single text file named + packed-refs + . + In the packed format, each reference is stored on its own line. This file + reduces the number of files needed for large reference spaces, reducing the + overall size of a Git repository on disk. +

+
+ + Magic string denoting the start of a symbolic reference file. + Magic string denoting the start of a symbolic reference file. + + + Magic string denoting the header of a packed-refs file. + Magic string denoting the header of a packed-refs file. + + + If in the header, denotes the file has peeled data. + If in the header, denotes the file has peeled data. + + + The names of the additional refs supported by this class + + + Immutable sorted list of loose references. + + Immutable sorted list of loose references. +

+ Symbolic references in this collection are stored unresolved, that is + their target appears to be a new reference with no ObjectId. These are + converted into resolved references during a get operation, ensuring the + live value is always returned. + + + +

Immutable sorted list of packed references. + Immutable sorted list of packed references. +
+ + Number of modifications made to this database. + + Number of modifications made to this database. +

+ This counter is incremented when a change is made, or detected from the + filesystem during a read operation. + + + +

+ Last + modCnt + that we sent to listeners. +

+ This value is compared to + modCnt + , and a notification is sent to + the listeners only when it differs. +

+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Adds a set of refs to the set of packed-refs. + + Adds a set of refs to the set of packed-refs. Only non-symbolic refs are + added. If a ref with the given name already existed in packed-refs it is + updated with the new value. Each loose ref which was added to the + packed-ref file is deleted. If a given ref can't be locked it will not be + added to the pack file. + + the refs to be added. Must be fully qualified. + System.IO.IOException + + + Make sure a ref is peeled and has the Storage PACKED. + + Make sure a ref is peeled and has the Storage PACKED. If the given ref + has this attributes simply return it. Otherwise create a new peeled + NGit.ObjectIdRef + where Storage is set to PACKED. + + + a ref for Storage PACKED having the same name, id, peeledId as f + NGit.Errors.MissingObjectException + + System.IO.IOException + + + + + + + + + + + + + + + + + + + + + + + + + + + If the parent should fire listeners, fires them. + If the parent should fire listeners, fires them. + + + Create a reference update to write a temporary reference. + Create a reference update to write a temporary reference. + an update for a new temporary reference. + a temporary name cannot be allocated. + + + Locate the file on disk for a single reference name. + Locate the file on disk for a single reference name. + + name of the ref, relative to the Git repository top level + directory (so typically starts with refs/). + + the loose file location. + + + + + + + + + + Specialized variant of an ArrayList to support a + RefDatabase + . +

+ This list is a hybrid of a Map<String,Ref> and of a List<Ref>. It + tracks reference instances by name by keeping them sorted and performing + binary search to locate an entry. Lookup time is O(log N), but addition and + removal is O(N + log N) due to the list expansion or contraction costs. +

+ This list type is copy-on-write. Mutation methods return a new copy of the + list, leaving + this + unmodified. As a result we cannot easily implement + the + System.Collections.IList<E> + interface contract. +

+
+ + an empty unmodifiable reference list. + + + Initialize this list to use the same backing array as another list. + Initialize this list to use the same backing array as another list. + the source list. + + + + this cast as an immutable, standard + System.Collections.IList<E> + . + + + + number of items in this list. + + + true if the size of this list is 0. + + + Locate an entry by name. + Locate an entry by name. + the name of the reference to find. + + the index the reference is at. If the entry is not present + returns a negative value. The insertion position for the given + name can be computed from + -(index + 1) + . + + + + Determine if a reference is present. + Determine if a reference is present. + name of the reference to find. + true if the reference is present; false if it is not. + + + Get a reference object by name. + Get a reference object by name. + the name of the reference. + the reference object; null if it does not exist in this list. + + + Get the reference at a particular index. + Get the reference at a particular index. + + the index to obtain. Must be + 0 <= idx < size() + . + + the reference value, never null. + + + + Obtain a builder initialized with the first + n + elements. +

+ Copies the first + n + elements from this list into a new builder, + which can be used by the caller to add additional elements. +

+ the number of elements to copy. + + a new builder with the first + n + elements already added. + +
+ + Obtain a new copy of the list after changing one element. + + Obtain a new copy of the list after changing one element. +

+ This list instance is not affected by the replacement. Because this + method copies the entire list, it runs in O(N) time. + + index of the element to change. + the new value, must not be null. + + copy of this list, after replacing + idx + with + ref + . + + + +

Add an item at a specific index. + + Add an item at a specific index. +

+ This list instance is not affected by the addition. Because this method + copies the entire list, it runs in O(N) time. + + + position to add the item at. If negative the method assumes it + was a direct return value from + RefList<T>.Find(string) + and will + adjust it to the correct position. + + the new reference to insert. + + copy of this list, after making space for and adding + ref + . + + + +

Remove an item at a specific index. + + Remove an item at a specific index. +

+ This list instance is not affected by the removal. Because this method + copies the entire list, it runs in O(N) time. + position to remove the item from. + + copy of this list, after removing the item at + idx + . + + + +

Store a reference, adding or replacing as necessary. + + Store a reference, adding or replacing as necessary. +

+ This list instance is not affected by the store. The correct position is + determined, and the item is added if missing, or replaced if existing. + Because this method copies the entire list, it runs in O(N + log N) time. + + the reference to store. + copy of this list, after performing the addition or replacement. + + +

+ Rename any reference stored by + RefDirectory + . +

+ This class works by first renaming the source reference to a temporary name, + then renaming the temporary name to the final destination reference. +

+ This strategy permits switching a reference like + refs/heads/foo + , + which is a file, to + refs/heads/foo/bar + , which is stored inside a + directory that happens to match the source name. +

+
+ + The value of the source reference at the start of the rename. + + The value of the source reference at the start of the rename. +

+ At the end of the rename the destination reference must have this same + value, otherwise we have a concurrent update and the rename must fail + without making any changes. + + + +

True if HEAD must be moved to the destination reference. + True if HEAD must be moved to the destination reference. +
+ + + A reference we backup + objId + into during the rename. + + + + + + + + Updates any reference stored by + RefDirectory + . + + + + + + + + + + + + + + + + Utility for reading reflog entries + + + + + + + Get the last entry in the reflog + the latest reflog entry, or null if no log + System.IO.IOException + + + all reflog entries in reverse order + System.IO.IOException + + + + Get specific entry in the reflog relative to the last entry which is + considered entry zero. + + + Get specific entry in the reflog relative to the last entry which is + considered entry zero. + + + reflog entry or null if not found + System.IO.IOException + + + max number of entries to read + all reflog entries in reverse order + System.IO.IOException + + + Loose object loader. + Loose object loader. This class loads an object not stored in a pack. + + + Parse an object from the unpacked object format. + Parse an object from the unpacked object format. + complete contents of the compressed object. + + expected ObjectId of the object, used only for error reporting + in exceptions. + + loader to read the inflated contents. + the object cannot be parsed. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Remembers objects that are currently unpacked. + Remembers objects that are currently unpacked. + + + + Caches slices of a + PackFile + in memory for faster read access. +

+ The WindowCache serves as a Java based "buffer cache", loading segments of a + PackFile into the JVM heap prior to use. As JGit often wants to do reads of + only tiny slices of a file, the WindowCache tries to smooth out these tiny + reads into larger block-sized IO operations. +

+ Whenever a cache miss occurs, + Load(PackFile, long) + is invoked by + exactly one thread for the given (PackFile,position) key tuple. + This is ensured by an array of locks, with the tuple hashed to a lock + instance. +

+ During a miss, older entries are evicted from the cache so long as + IsFull() + returns true. +

+ Its too expensive during object access to be 100% accurate with a least + recently used (LRU) algorithm. Strictly ordering every read is a lot of + overhead that typically doesn't yield a corresponding benefit to the + application. +

+ This cache implements a loose LRU policy by randomly picking a window + comprised of roughly 10% of the cache, and evicting the oldest accessed entry + within that window. +

+ Entities created by the cache are held under SoftReferences, permitting the + Java runtime's garbage collector to evict entries when heap memory gets low. + Most JREs implement a loose least recently used algorithm for this eviction. +

+ The internal hash table does not expand at runtime, instead it is fixed in + size at cache creation time. The internal lock table used to gate load + invocations is also fixed in size. +

+ The key tuple is passed through to methods as a pair of parameters rather + than as a single Object, thus reducing the transient memory allocations of + callers. It is more efficient to avoid the allocation, as we can't be 100% + sure that a JIT would be able to stack-allocate a key tuple. +

+ This cache has an implementation rule such that: +

    +
  • + Load(PackFile, long) + is invoked by at most one thread at a time + for a given (PackFile,position) tuple.
  • For every load() invocation there is exactly one + CreateRef(PackFile, long, ByteWindow) + + invocation to wrap a + SoftReference around the cached entity.
  • For every Reference created by createRef() there will be + exactly one call to + Clear(Ref) + to clean up any resources associated + with the (now expired) cached entity.
+

+ Therefore, it is safe to perform resource accounting increments during the + Load(PackFile, long) + or + CreateRef(PackFile, long, ByteWindow) + + methods, and matching + decrements during + Clear(Ref) + . Implementors may need to override + CreateRef(PackFile, long, ByteWindow) + + in order to embed additional + accounting information into an implementation specific + Ref + subclass, + as the cached entity may have already been evicted by the JRE's garbage + collector. +

+ To maintain higher concurrency workloads, during eviction only one thread + performs the eviction work, while other threads can continue to insert new + objects in parallel. This means that the cache can be temporarily over limit, + especially if the nominated eviction thread is being starved relative to the + other threads. +

+
+ + Modify the configuration of the window cache. + + Modify the configuration of the window cache. +

+ The new configuration is applied immediately. If the new limits are + smaller than what is currently cached, older entries will be purged + as soon as possible to allow the cache to meet the new limit. + maximum number of bytes to hold within this instance. + + number of bytes per window within the cache. + true to enable use of mmap when creating windows. + number of bytes to hold in the delta base cache. + + + +

Modify the configuration of the window cache. + + Modify the configuration of the window cache. +

+ The new configuration is applied immediately. If the new limits are + smaller than what is currently cached, older entries will be purged + as soon as possible to allow the cache to meet the new limit. + the new window cache configuration. + + the cache configuration contains one or more invalid + settings, usually a limit that is set too low. + + + + + + +

ReferenceQueue to clean up released and garbage collected windows. ReferenceQueue to clean up released and garbage collected windows. +
+ + + Number of entries in + table + . + + + + Access clock for loose LRU. + Access clock for loose LRU. + + + Hash bucket directory; entries are chained below. + Hash bucket directory; entries are chained below. + + + Locks to prevent concurrent loads for same (PackFile,position). + Locks to prevent concurrent loads for same (PackFile,position). + + + Lock to elect the eviction thread after a load occurs. + Lock to elect the eviction thread after a load occurs. + + + + Number of + table + buckets to scan for an eviction window. + + + + + + + Lookup a cached object, creating and loading it if it doesn't exist. + Lookup a cached object, creating and loading it if it doesn't exist. + the pack that "contains" the cached object. + offset within pack of the object. + the object reference. + + the object reference was not in the cache and could not be + obtained by + Load(PackFile, long) + . + + + + Clear every entry from the cache. + + Clear every entry from the cache. +

+ This is a last-ditch effort to clear out the cache, such as before it + gets replaced by another cache that is configured differently. This + method tries to force every cached entry through + Clear(Ref) + to + ensure that resources are correctly accounted for and cleaned up by the + subclass. A concurrent reader loading entries while this method is + running may cause resource accounting failures. + + + +

Clear all entries related to a single file. + + Clear all entries related to a single file. +

+ Typically this method is invoked during + PackFile.Close() + , when we + know the pack is never going to be useful to us again (for example, it no + longer exists on disk). A concurrent reader loading an entry from this + same pack may cause the pack to become stuck in the cache anyway. + + the file to purge all entries of. + + +

Next entry in the hash table's chain list. + Next entry in the hash table's chain list. +
+ + The referenced object. + The referenced object. + + + Marked true when ref.get() returns null and the ref is dead. + + Marked true when ref.get() returns null and the ref is dead. +

+ A true here indicates that the ref is no longer accessible, and that + we therefore need to eventually purge this Entry object out of the + bucket's chain. + + + +

A soft reference wrapped around a cached object. + A soft reference wrapped around a cached object. +
+ + + Configuration parameters for + WindowCache + . + + + + 1024 (number of bytes in one kibibyte/kilobyte) + + + + 1024 + KB + (number of bytes in one mebibyte/megabyte) + + + + Create a default configuration. + Create a default configuration. + + + + maximum number of streams to open at a time. Open packs count + against the process limits. Default is 128. + + + + + maximum number of streams to open at a time. Open packs count + against the process limits + + + + + maximum number bytes of heap memory to dedicate to caching pack + file data. Default is 10 MB. + + + + + maximum number bytes of heap memory to dedicate to caching + pack file data. + + + + + size in bytes of a single window mapped or read in from the pack + file. Default is 8 KB. + + + + size in bytes of a single window read in from the pack file. + + + + + true enables use of Java NIO virtual memory mapping for windows; + false reads entire window into a byte[] with standard read calls. + Default false. + + + + + true enables use of Java NIO virtual memory mapping for + windows; false reads entire window into a byte[] with standard + read calls. + + + + + maximum number of bytes to cache in + DeltaBaseCache + for inflated, recently accessed objects, without delta chains. + Default 10 MB. + + + + + maximum number of bytes to cache in + DeltaBaseCache + for inflated, recently accessed + objects, without delta chains. + + + + the size threshold beyond which objects must be streamed. + + + + new byte limit for objects that must be streamed. Objects + smaller than this size can be obtained as a contiguous byte + array, while objects bigger than this size require using an + NGit.ObjectStream + . + + + + Update properties by setting fields from the configuration. + + Update properties by setting fields from the configuration. +

+ If a property is not defined in the configuration, then it is left + unmodified. + + configuration to read properties from. + + +

Active handle to a ByteWindow. + Active handle to a ByteWindow. +
+ + + Extension of + NGit.ObjectReader + that supports reusing objects in packs. +

+ ObjectReader + implementations may also optionally implement this + interface to support + PackWriter + with a means of copying an object + that is already in pack encoding format directly into the output stream, + without incurring decompression and recompression overheads. +

+
+ + + Allocate a new + PackWriter + state structure for an object. +

+ PackWriter + allocates these objects to keep track of the + per-object state, and how to load the objects efficiently into the + generated stream. Implementers may subclass this type with additional + object state, such as to remember what file and offset contains the + object's pack encoded data. +

+ + identity of the object that will be packed. The object's + parsed status is undefined here. Implementers must not rely on + the object being parsed. + + a new instance for this object. +
+ + Select the best object representation for a packer. + + Select the best object representation for a packer. +

+ Implementations should iterate through all available representations of + an object, and pass them in turn to the PackWriter though + PackWriter.Select(ObjectToPack, StoredObjectRepresentation) + + so + the writer can select the most suitable representation to reuse into the + output stream. +

+ If the implementation returns CachedPack from + GetCachedPacks() + , + it must consider the representation of any object that is stored in any + of the offered CachedPacks. PackWriter relies on this behavior to prune + duplicate objects out of the pack stream when it selects a CachedPack and + the object was also reached through the thin-pack enumeration. +

+ The implementation may choose to consider multiple objects at once on + concurrent threads, but must evaluate all representations of an object + within the same thread. + + the packer that will write the object in the near future. + + progress monitor, implementation should update the monitor + once for each item in the iteration when selection is done. + + the objects that are being packed. + + there is no representation available for the object, as it is + no longer in the repository. Packing will abort. + + the repository cannot be accessed. Packing will abort. + + + +

Write objects to the pack stream in roughly the order given. + + Write objects to the pack stream in roughly the order given. + PackWriter + invokes this method to write out one or more objects, + in approximately the order specified by the iteration over the list. A + simple implementation of this method would just iterate the list and + output each object: +
+            for (ObjectToPack obj : list)
+            out.writeObject(obj)
+            
+ However more sophisticated implementors may try to perform some (small) + reordering to access objects that are stored close to each other at + roughly the same time. Implementations may choose to write objects out of + order, but this may increase pack file size due to using a larger header + format to reach a delta base that is later in the stream. It may also + reduce data locality for the reader, slowing down data access. + Invoking + PackOutputStream.WriteObject(ObjectToPack) + + will cause + CopyObjectAsIs(PackOutputStream, ObjectToPack, bool) + + to be + invoked recursively on + this + if the current object is scheduled + for reuse. +
+ the stream to write each object to. + + the list of objects to write. Objects should be written in + approximately this order. Implementors may resort the list + elements in-place during writing if desired. + + + the stream cannot be written to, or one or more required + objects cannot be accessed from the object database. + +
+ + Output a previously selected representation. + + Output a previously selected representation. +

+ PackWriter + invokes this method only if a representation + previously given to it by + selectObjectRepresentation + was chosen + for reuse into the output stream. The + otp + argument is an instance + created by this reader's own + newObjectToPack + , and the + representation data saved within it also originated from this reader. +

+ Implementors must write the object header before copying the raw data to + the output stream. The typical implementation is like: +

+            MyToPack mtp = (MyToPack) otp;
+            byte[] raw;
+            if (validate)
+            raw = validate(mtp); // throw SORNAE here, if at all
+            else
+            raw = readFast(mtp);
+            out.writeHeader(mtp, mtp.inflatedSize);
+            out.write(raw);
+            
+
+ stream the object should be written to. + the object's saved representation information. + + if true the representation must be validated and not be + corrupt before being reused. If false, validation may be + skipped as it will be performed elsewhere in the processing + pipeline. + + + the previously selected representation is no longer + available. If thrown before + out.writeHeader + the pack + writer will try to find another representation, and write + that one instead. If throw after + out.writeHeader + , + packing will abort. + + + the stream's write method threw an exception. Packing will + abort. + +
+ + Obtain the available cached packs. + + Obtain the available cached packs. +

+ A cached pack has known starting points and may be sent entirely as-is, + with almost no effort on the sender's part. + + the available cached packs. + + the cached packs cannot be listed from the repository. + Callers may choose to ignore this and continue as-if there + were no cached packs. + + + +

Append an entire pack's contents onto the output stream. + + Append an entire pack's contents onto the output stream. +

+ The entire pack, excluding its header and trailing footer is sent. + + stream to append the pack onto. + the cached pack to send. + + if true the representation must be validated and not be + corrupt before being reused. If false, validation may be + skipped as it will be performed elsewhere in the processing + pipeline. + + the pack cannot be read, or stream did not accept a write. + + + +

Temporary buffer large enough for at least one raw object id. + Temporary buffer large enough for at least one raw object id. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Copy bytes from the window to a caller supplied buffer. + Copy bytes from the window to a caller supplied buffer. + the file the desired window is stored within. + position within the file to read from. + destination buffer to copy into. + offset within dstbuf to start copying into. + + number of bytes to copy. This value may exceed the number of + bytes remaining in the window starting at offset + pos. + + + number of bytes actually copied; this may be less than + cnt if cnt exceeded the number of bytes + available. + + + this cursor does not match the provider or id and the proper + window could not be acquired through the provider's cache. + + + + + + + + + + + Inflate a region of the pack starting at + position + . + + the file the desired window is stored within. + position within the file to read from. + + destination buffer the inflater should output decompressed + data to. + + current offset within dstbuf to inflate into. + + updated dstoff based on the number of bytes + successfully inflated into dstbuf. + + + this cursor does not match the provider or id and the proper + window could not be acquired through the provider's cache. + + + the inflater encountered an invalid chunk of data. Data + stream corruption is likely. + + + + + + + + + + Release the current window cursor. + Release the current window cursor. + + + Recreate a stream from a base stream and a GIT pack delta. + + Recreate a stream from a base stream and a GIT pack delta. +

+ This entire class is heavily cribbed from patch-delta.c in the + GIT project. The original delta patching code was written by Nicolas Pitre + (<nico@cam.org>). + + + +

Length of the base object in the delta stream. + Length of the base object in the delta stream. + the delta stream, or at least the header of it. + the base object's size. +
+ + Length of the resulting object in the delta stream. + Length of the resulting object in the delta stream. + the delta stream, or at least the header of it. + the resulting object's size. + + + + Apply the changes defined by delta to the data in base, yielding a new + array of bytes. + + + Apply the changes defined by delta to the data in base, yielding a new + array of bytes. + + some byte representing an object of some kind. + + a git pack delta defining the transform from one version to + another. + + patched base + + + + Apply the changes defined by delta to the data in base, yielding a new + array of bytes. + + + Apply the changes defined by delta to the data in base, yielding a new + array of bytes. + + some byte representing an object of some kind. + + a git pack delta defining the transform from one version to + another. + + + array to store the result into. If null the result will be + allocated and returned. + + + either + result + , or the result array allocated. + + + + Format this delta as a human readable string. + Format this delta as a human readable string. + the delta instruction sequence to format. + the formatted delta. + + + Format this delta as a human readable string. + Format this delta as a human readable string. + the delta instruction sequence to format. + + true if the header (base size and result size) should be + included in the formatting. + + the formatted delta. + + + + Encodes an instruction stream for + BinaryDelta + . + + + + Maximum number of bytes to be copied in pack v2 format. + + Maximum number of bytes to be copied in pack v2 format. +

+ Historical limitations have this at 64k, even though current delta + decoders recognize larger copy instructions. + + + +

Maximum number of bytes used by a copy instruction. + Maximum number of bytes used by a copy instruction. +
+ + Maximum length that an an insert command can encode at once. + Maximum length that an an insert command can encode at once. + + + Create an encoder with no upper bound on the instruction stream size. + Create an encoder with no upper bound on the instruction stream size. + buffer to store the instructions written. + size of the base object, in bytes. + + size of the resulting object, after applying this instruction + stream to the base object, in bytes. + + + the output buffer cannot store the instruction stream's + header with the size fields. + + + + Create an encoder with an upper limit on the instruction size. + Create an encoder with an upper limit on the instruction size. + buffer to store the instructions written. + size of the base object, in bytes. + + size of the resulting object, after applying this instruction + stream to the base object, in bytes. + + + maximum number of bytes to write to the out buffer declaring + the stream is over limit and should be discarded. May be 0 to + specify an infinite limit. + + + the output buffer cannot store the instruction stream's + header with the size fields. + + + + + + + current size of the delta stream, in bytes. + + + Insert a literal string of text, in UTF-8 encoding. + Insert a literal string of text, in UTF-8 encoding. + the string to insert. + + true if the insert fits within the limit; false if the insert + would cause the instruction stream to exceed the limit. + + the instruction buffer can't store the instructions. + + + + Insert a literal binary sequence. + Insert a literal binary sequence. + the binary to insert. + + true if the insert fits within the limit; false if the insert + would cause the instruction stream to exceed the limit. + + the instruction buffer can't store the instructions. + + + + Insert a literal binary sequence. + Insert a literal binary sequence. + the binary to insert. + + offset within + text + to start copying from. + + number of bytes to insert. 
+ + true if the insert fits within the limit; false if the insert + would cause the instruction stream to exceed the limit. + + the instruction buffer can't store the instructions. + + + + Create a copy instruction to copy from the base object. + Create a copy instruction to copy from the base object. + + position in the base object to copy from. This is absolute, + from the beginning of the base. + + number of bytes to copy. + + true if the copy fits within the limit; false if the copy + would cause the instruction stream to exceed the limit. + + the instruction buffer cannot store the instructions. + + + + Index of blocks in a source file. + + Index of blocks in a source file. +

+ The index can be passed a result buffer, and output an instruction sequence + that transforms the source buffer used by the index into the result buffer. + The instruction sequence can be executed by + BinaryDelta + or + DeltaStream + to recreate the result buffer. +

+ An index stores the entire contents of the source buffer, but also a table of + block identities mapped to locations where the block appears in the source + buffer. The mapping table uses 12 bytes for every 16 bytes of source buffer, + and is therefore ~75% of the source buffer size. The overall index is ~1.75x + the size of the source buffer. This relationship holds for any JVM, as only a + constant number of objects are allocated per index. Callers can use the + method + GetIndexSize() + to obtain a reasonably accurate estimate of + the complete heap space used by this index. +

+ A + DeltaIndex + is thread-safe. Concurrent threads can use the same + index to encode delta instructions for different result buffers. + + + +

Number of bytes in a block. + Number of bytes in a block. +
+ + Maximum number of positions to consider for a given content hash. + + Maximum number of positions to consider for a given content hash. +

+ All positions with the same content hash are stored into a single chain. + The chain size is capped to ensure delta encoding stays linear time at + O(len_src + len_dst) rather than quadratic at O(len_src * len_dst). + + + +

Estimate the size of an index for a given source. + + Estimate the size of an index for a given source. +

+ This is roughly a worst-case estimate. The actual index may be smaller. + + length of the source, in bytes. + + estimated size. Approximately + 1.75 * sourceLength + . + + + +

Original source file that we indexed. + Original source file that we indexed. +
+ + + Pointers into the + entries + table, indexed by block hash. +

+ A block hash is masked with + tableMask + to become the array index + of this table. The value stored here is the first index within + entries + that starts the consecutive list of blocks with that + same masked hash. If there are no matching blocks, 0 is stored instead. +

+ Note that this table is always a power of 2 in size, to support fast + normalization of a block hash into an array index. +

+
+ + + Pairs of block hash value and + src + offsets. +

+ The very first entry in this table at index 0 is always empty, this is to + allow fast evaluation when + table + has no values under any given + slot. Remaining entries are pairs of integers, with the upper 32 bits + holding the block hash and the lower 32 bits holding the source offset. +

+
+ + + Mask to make block hashes into an array index for + table + . + + + + Construct an index from the source file. + Construct an index from the source file. + + the source file's raw contents. The buffer will be held by the + index instance to facilitate matching, and therefore must not + be modified by the caller. + + + + size of the source buffer this index has scanned. + + + Get an estimate of the memory required by this index. + Get an estimate of the memory required by this index. + + an approximation of the number of bytes used by this index in + memory. The size includes the cached source buffer size from + GetSourceSize() + , as well as a rough approximation of JVM + object overheads. + + + + Generate a delta sequence to recreate the result buffer. + + Generate a delta sequence to recreate the result buffer. +

+ There is no limit on the size of the delta sequence created. This is the + same as + encode(out, res, 0) + . + + + stream to receive the delta instructions that can transform + this index's source buffer into + res + . This stream + should be buffered, as instructions are written directly to it + in small bursts. + + + the desired result buffer. The generated instructions will + recreate this buffer when applied to the source buffer stored + within this index. + + the output stream refused to write the instructions. + + + +

Generate a delta sequence to recreate the result buffer. + Generate a delta sequence to recreate the result buffer. + + stream to receive the delta instructions that can transform + this index's source buffer into + res + . This stream + should be buffered, as instructions are written directly to it + in small bursts. If the caller might need to discard the + instructions (such as when deltaSizeLimit would be exceeded) + the caller is responsible for discarding or rewinding the + stream when this method returns false. + + + the desired result buffer. The generated instructions will + recreate this buffer when applied to the source buffer stored + within this index. + + + maximum number of bytes that the delta instructions can + occupy. If the generated instructions would be longer than + this amount, this method returns false. If 0, there is no + limit on the length of delta created. + + + true if the delta is smaller than deltaSizeLimit; false if the + encoder aborted because the encoded delta instructions would be + longer than deltaSizeLimit bytes. + + the output stream refused to write the instructions. + +
+ + + + + + Supports + DeltaIndex + by performing a partial scan of the content. + + + + + + + Maximum number of bytes to admit to the window at once. + Maximum number of bytes to admit to the window at once. + + + Maximum depth we should create for any delta chain. + Maximum depth we should create for any delta chain. + + + Amount of memory we have loaded right now. + Amount of memory we have loaded right now. + + + + Position of + res + within + window + array. + + + + Maximum delta chain depth the current object can have. + + Maximum delta chain depth the current object can have. +

+ This can be smaller than + maxDepth + . + + + +

Window entry of the object we are currently considering. + Window entry of the object we are currently considering. +
+ + + If we have a delta for + res + , this is the shortest found yet. + + + + + If we have + bestDelta + , the window position it was created by. + + + + Used to compress cached deltas. + Used to compress cached deltas. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Complete contents of this object. + Complete contents of this object. Lazily loaded. + + + Index of this object's content, to encode other deltas. + Index of this object's content, to encode other deltas. Lazily loaded. + + + current delta chain depth of this object. + + + type of the object in this window entry. + + + estimated unpacked size of the object, in bytes . + + + true if there is no object stored in this entry. + + + + Configuration used by a + PackWriter + when constructing the stream. + A configuration may be modified once created, but should not be modified + while it is being used by a PackWriter. If a configuration is not modified it + is safe to share the same configuration instance between multiple concurrent + threads executing different PackWriters. + + + + + Default value of deltas reuse option: + + + SetReuseDeltas(bool) + + + + Default value of objects reuse option: + + + SetReuseObjects(bool) + + + + Default value of delta compress option: + + + SetDeltaCompress(bool) + + + + Default value of delta base as offset option: + + + SetDeltaBaseAsOffset(bool) + + + + Default value of maximum delta chain depth: + + + SetMaxDeltaDepth(int) + + + + Default window size during packing: + + + SetDeltaSearchWindowSize(int) + + + + Default big file threshold: + + + SetBigFileThreshold(int) + + + + Default delta cache size: + + + SetDeltaCacheSize(long) + + + + Default delta cache limit: + + + SetDeltaCacheLimit(int) + + + + Default index version: + + + SetIndexVersion(int) + + + Create a default configuration. + Create a default configuration. + + + Create a configuration honoring the repository's settings. + Create a configuration honoring the repository's settings. 
+ + the repository to read settings from. The repository is not + retained by the new configuration, instead its settings are + copied during the constructor. + + + + + Create a configuration honoring settings in a + NGit.Config + . + + + the source to read settings from. The source is not retained + by the new configuration, instead its settings are copied + during the constructor. + + + + Copy an existing configuration to a new instance. + Copy an existing configuration to a new instance. + the source configuration to copy from. + + + Check whether to reuse deltas existing in repository. + + Check whether to reuse deltas existing in repository. + Default setting: + #DEFAULT_REUSE_DELTAS + + true if object is configured to reuse deltas; false otherwise. + + + Set reuse deltas configuration option for the writer. + + Set reuse deltas configuration option for the writer. + When enabled, writer will search for delta representation of object in + repository and use it if possible. Normally, only deltas with base to + another object existing in set of objects to pack will be used. The + exception however is thin-packs where the base object may exist on the + other side. + When raw delta data is directly copied from a pack file, its checksum is + computed to verify the data is not corrupt. + Default setting: + #DEFAULT_REUSE_DELTAS + + boolean indicating whether or not try to reuse deltas. + + + Checks whether to reuse existing objects representation in repository. + + Checks whether to reuse existing objects representation in repository. + Default setting: + #DEFAULT_REUSE_OBJECTS + + + true if writer is configured to reuse objects representation from + pack; false otherwise. + + + + Set reuse objects configuration option for the writer. + + Set reuse objects configuration option for the writer. + If enabled, writer searches for compressed representation in a pack file. + If possible, compressed data is directly copied from such a pack file. 
+ Data checksum is verified. + Default setting: + #DEFAULT_REUSE_OBJECTS + + + boolean indicating whether or not writer should reuse existing + objects representation. + + + + True if writer can use offsets to point to a delta base. + + True if writer can use offsets to point to a delta base. + If true the writer may choose to use an offset to point to a delta base + in the same pack, this is a newer style of reference that saves space. + False if the writer has to use the older (and more compatible style) of + storing the full ObjectId of the delta base. + Default setting: + #DEFAULT_DELTA_BASE_AS_OFFSET + + + true if delta base is stored as an offset; false if it is stored + as an ObjectId. + + + + Set writer delta base format. + + Set writer delta base format. + Delta base can be written as an offset in a pack file (new approach + reducing file size) or as an object id (legacy approach, compatible with + old readers). + Default setting: + #DEFAULT_DELTA_BASE_AS_OFFSET + + + boolean indicating whether delta base can be stored as an + offset. + + + + Check whether the writer will create new deltas on the fly. + + Check whether the writer will create new deltas on the fly. + Default setting: + #DEFAULT_DELTA_COMPRESS + + + true if the writer will create a new delta when either + IsReuseDeltas() + is false, or no suitable delta is + available for reuse. + + + + Set whether or not the writer will create new deltas on the fly. + + Set whether or not the writer will create new deltas on the fly. + Default setting: + #DEFAULT_DELTA_COMPRESS + + + true to create deltas when + IsReuseDeltas() + is false, + or when a suitable delta isn't available for reuse. Set to + false to write whole objects instead. + + + + Get maximum depth of delta chain set up for the writer. + + Get maximum depth of delta chain set up for the writer. + Generated chains are not longer than this value. + Default setting: + #DEFAULT_MAX_DELTA_DEPTH + + maximum delta chain depth. 
+ + + Set up maximum depth of delta chain for the writer. + + Set up maximum depth of delta chain for the writer. + Generated chains are not longer than this value. Too low value causes low + compression level, while too big makes unpacking (reading) longer. + Default setting: + #DEFAULT_MAX_DELTA_DEPTH + + maximum delta chain depth. + + + Get the number of objects to try when looking for a delta base. + + Get the number of objects to try when looking for a delta base. + This limit is per thread, if 4 threads are used the actual memory used + will be 4 times this value. + Default setting: + #DEFAULT_DELTA_SEARCH_WINDOW_SIZE + + the object count to be searched. + + + Set the number of objects considered when searching for a delta base. + + Set the number of objects considered when searching for a delta base. + Default setting: + #DEFAULT_DELTA_SEARCH_WINDOW_SIZE + + number of objects to search at once. Must be at least 2. + + + + Get maximum number of bytes to put into the delta search window. + + Get maximum number of bytes to put into the delta search window. + Default setting is 0, for an unlimited amount of memory usage. Actual + memory used is the lower limit of either this setting, or the sum of + space used by at most + GetDeltaSearchWindowSize() + objects. + This limit is per thread, if 4 threads are used the actual memory limit + will be 4 times this value. + + the memory limit. + + + Set the maximum number of bytes to put into the delta search window. + + Set the maximum number of bytes to put into the delta search window. + Default setting is 0, for an unlimited amount of memory usage. If the + memory limit is reached before + GetDeltaSearchWindowSize() + the + window size is temporarily lowered. + + Maximum number of bytes to load at once, 0 for unlimited. + + + + Get the size of the in-memory delta cache. + + Get the size of the in-memory delta cache. + This limit is for the entire writer, even if multiple threads are used. 
+ Default setting: + #DEFAULT_DELTA_CACHE_SIZE + + + maximum number of bytes worth of delta data to cache in memory. + If 0 the cache is infinite in size (up to the JVM heap limit + anyway). A very tiny size such as 1 indicates the cache is + effectively disabled. + + + + Set the maximum number of bytes of delta data to cache. + + Set the maximum number of bytes of delta data to cache. + During delta search, up to this many bytes worth of small or hard to + compute deltas will be stored in memory. This cache speeds up writing by + allowing the cached entry to simply be dumped to the output stream. + Default setting: + #DEFAULT_DELTA_CACHE_SIZE + + + number of bytes to cache. Set to 0 to enable an infinite + cache, set to 1 (an impossible size for any delta) to disable + the cache. + + + + Maximum size in bytes of a delta to cache. + + Maximum size in bytes of a delta to cache. + Default setting: + #DEFAULT_DELTA_CACHE_LIMIT + + maximum size (in bytes) of a delta that should be cached. + + + Set the maximum size of a delta that should be cached. + + Set the maximum size of a delta that should be cached. + During delta search, any delta smaller than this size will be cached, up + to the + GetDeltaCacheSize() + maximum limit. This speeds up writing + by allowing these cached deltas to be output as-is. + Default setting: + #DEFAULT_DELTA_CACHE_LIMIT + + maximum size (in bytes) of a delta to be cached. + + + Get the maximum file size that will be delta compressed. + + Get the maximum file size that will be delta compressed. + Files bigger than this setting will not be delta compressed, as they are + more than likely already highly compressed binary data files that do not + delta compress well, such as MPEG videos. + Default setting: + #DEFAULT_BIG_FILE_THRESHOLD + + the configured big file threshold. + + + Set the maximum file size that should be considered for deltas. + + Set the maximum file size that should be considered for deltas. 
+ Default setting: + #DEFAULT_BIG_FILE_THRESHOLD + + the limit, in bytes. + + + Get the compression level applied to objects in the pack. + + Get the compression level applied to objects in the pack. + Default setting: + java.util.zip.Deflater#DEFAULT_COMPRESSION + + + current compression level, see + ICSharpCode.SharpZipLib.Zip.Compression.Deflater + + . + + + + Set the compression level applied to objects in the pack. + + Set the compression level applied to objects in the pack. + Default setting: + java.util.zip.Deflater#DEFAULT_COMPRESSION + + + compression level, must be a valid level recognized by the + ICSharpCode.SharpZipLib.Zip.Compression.Deflater + + class. + + + + Get the number of threads used during delta compression. + + Get the number of threads used during delta compression. + Default setting: 0 (auto-detect processors) + + + number of threads used for delta compression. 0 will auto-detect + the threads to the number of available processors. + + + + Set the number of threads to use for delta compression. + + Set the number of threads to use for delta compression. + During delta compression, if there are enough objects to be considered + the writer will start up concurrent threads and allow them to compress + different sections of the repository concurrently. + An application thread pool can be set by + SetExecutor(Sharpen.Executor) + . + If not set a temporary pool will be created by the writer, and torn down + automatically when compression is over. + Default setting: 0 (auto-detect processors) + + + number of threads to use. If <= 0 the number of available + processors for this JVM is used. + + + + the preferred thread pool to execute delta search on. + + + Set the executor to use when using threads. + + Set the executor to use when using threads. + During delta compression if the executor is non-null jobs will be queued + up on it to perform delta compression in parallel. 
Aside from setting the + executor, the caller must set + SetThreads(int) + to enable threaded + delta search. + + + executor to use for threads. Set to null to create a temporary + executor just for the writer. + + + + Get the pack index file format version this instance creates. + + Get the pack index file format version this instance creates. + Default setting: + #DEFAULT_INDEX_VERSION + + + the index version, the special version 0 designates the oldest + (most compatible) format available for the objects. + + NGit.Storage.File.PackIndexWriter + + + + Set the pack index file format version this instance will create. + + Set the pack index file format version this instance will create. + Default setting: + #DEFAULT_INDEX_VERSION + + + the version to write. The special version 0 designates the + oldest (most compatible) format available for the objects. + + NGit.Storage.File.PackIndexWriter + + + + Update properties by setting fields from the configuration. + + Update properties by setting fields from the configuration. + If a property's corresponding variable is not defined in the supplied + configuration, then it is left unmodified. + + configuration to read properties from. + + + + Custom output stream to support + PackWriter + . + + + + Initialize a pack output stream. + + Initialize a pack output stream. +

+ This constructor is exposed to support debugging the JGit library only. + Application or storage level code should not create a PackOutputStream, + instead use + PackWriter + , and let the writer create the stream. + + monitor to update on object output progress. + target stream to receive all object contents. + packer that is going to perform the output. + + + + + + + + + + + + + + +

Write one object. + + Write one object. + If the object was already written, this method does nothing and returns + quickly. This case occurs whenever an object was written out of order in + order to ensure the delta base occurred before the object that needs it. + + the object to write. + + the object cannot be read from the object reader, or the + output stream is no longer accepting output. Caller must + examine the type of exception and possibly its message to + distinguish between these cases. + +
+ + Commits the object header onto the stream. + + Commits the object header onto the stream. +

+ Once the header has been written, the object representation must be fully + output, or packing must abort abnormally. + + the object to pack. Header information is obtained. + + number of bytes of the inflated content. For an object that is + in whole object format, this is the same as the object size. + For an object that is in a delta format, this is the size of + the inflated delta instruction stream. + + the underlying stream refused to accept the header. + + + + a temporary buffer writers can use to copy data with. + + + total number of bytes written since stream start. + + + obtain the current CRC32 register. + + +

Reinitialize the CRC32 register for a new region. + Reinitialize the CRC32 register for a new region. +
+ + obtain the current SHA-1 digest. + + + +

+ PackWriter class is responsible for generating pack files from specified set + of objects from repository. +

+ +

+ PackWriter class is responsible for generating pack files from specified set + of objects from repository. This implementation produce pack files in format + version 2. +

+ Source of objects may be specified in two ways: +

    +
  • (usually) by providing sets of interesting and uninteresting objects in + repository - all interesting objects and their ancestors except uninteresting + objects and their ancestors will be included in pack, or
  • +
  • by providing iterator of + NGit.Revwalk.RevObject + specifying exact list and + order of objects in pack
  • +
+ Typical usage consists of creating instance intended for some pack, + configuring options, preparing the list of objects by calling + PreparePack(Sharpen.Iterator<E>) + + or + PreparePack(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + , and finally + producing the stream with + WritePack(NGit.ProgressMonitor, NGit.ProgressMonitor, Sharpen.OutputStream) + . +

+ Class provide set of configurable options and + NGit.ProgressMonitor + support, as operations may take a long time for big repositories. Deltas + searching algorithm is NOT IMPLEMENTED yet - this implementation + relies only on deltas and objects reuse. +

+ This class is not thread safe, it is intended to be used in one thread, with + one instance per created pack. Subsequent calls to writePack result in + undefined behavior. + + + + all allocated, non-released PackWriters instances. + + +

+ reader + recast to the reuse interface, if it supports it. + +
+ + Create writer for specified repository. + + Create writer for specified repository. +

+ Objects for packing are specified in + PreparePack(Sharpen.Iterator<E>) + + or + PreparePack(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + . + + repository where objects are stored. + + +

Create a writer to load objects from the specified reader. + + Create a writer to load objects from the specified reader. +

+ Objects for packing are specified in + PreparePack(Sharpen.Iterator<E>) + + or + PreparePack(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + . + + reader to read from the repository with. + + +

Create writer for specified repository. + + Create writer for specified repository. +

+ Objects for packing are specified in + PreparePack(Sharpen.Iterator<E>) + + or + PreparePack(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + . + + repository where objects are stored. + reader to read from the repository with. + + +

Create writer with a specified configuration. + + Create writer with a specified configuration. +

+ Objects for packing are specified in + PreparePack(Sharpen.Iterator<E>) + + or + PreparePack(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + . + + configuration for the pack writer. + reader to read from the repository with. + + +

+ Check whether writer can store delta base as an offset (new style + reducing pack size) or should store it as an object id (legacy style, + compatible with old readers). + + + Check whether writer can store delta base as an offset (new style + reducing pack size) or should store it as an object id (legacy style, + compatible with old readers). + Default setting: + PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET + + + true if delta base is stored as an offset; false if it is stored + as an object id. + +
+ + Set writer delta base format. + + Set writer delta base format. Delta base can be written as an offset in a + pack file (new approach reducing file size) or as an object id (legacy + approach, compatible with old readers). + Default setting: + PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET + + + boolean indicating whether delta base can be stored as an + offset. + + + + Check if the writer will reuse commits that are already stored as deltas. + + Check if the writer will reuse commits that are already stored as deltas. + + + true if the writer would reuse commits stored as deltas, assuming + delta reuse is already enabled. + + + + Set the writer to reuse existing delta versions of commits. + Set the writer to reuse existing delta versions of commits. + + if true, the writer will reuse any commits stored as deltas. + By default the writer does not reuse delta commits. + + + + Check if the writer validates objects before copying them. + Check if the writer validates objects before copying them. + + true if validation is enabled; false if the reader will handle + object validation as a side-effect of it consuming the output. + + + + Enable (or disable) object validation during packing. + Enable (or disable) object validation during packing. + + if true the pack writer will validate an object before it is + put into the output. This additional validation work may be + necessary to avoid propagating corruption from one local pack + file to another local pack file. + + + + true if this writer is producing a thin pack. + + + + a boolean indicating whether writer may pack objects with + delta base object not within set of objects to pack, but + belonging to party repository (uninteresting/boundary) as + determined by set; this kind of pack is used only for + transport; true - to produce thin pack, false - otherwise. + + + + true to reuse cached packs. If true index creation isn't available. 
+ + + + if set to true and a cached pack is present, it will be + appended onto the end of a thin-pack, reducing the amount of + working set space and CPU used by PackWriter. Enabling this + feature prevents PackWriter from creating an index for the + newly created pack, so its only suitable for writing to a + network client, where the client will make the index. + + + + + true to ignore objects that are uninteresting and also not found + on local disk; false to throw a + NGit.Errors.MissingObjectException + + out of + PreparePack(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + if an + uninteresting object is not in the source repository. By default, + true, permitting gracefully ignoring of uninteresting objects. + + + + + true if writer should ignore non existing uninteresting + objects during construction set of objects to pack; false + otherwise - non existing uninteresting objects may cause + NGit.Errors.MissingObjectException + + + + + Set the tag targets that should be hoisted earlier during packing. + + Set the tag targets that should be hoisted earlier during packing. +

+ Callers may put objects into this set before invoking any of the + preparePack methods to influence where an annotated tag's target is + stored within the resulting pack. Typically these will be clustered + together, and hoisted earlier in the file even if they are ancient + revisions, allowing readers to find tag targets with better locality. + + objects that annotated tags point at. + + +

Configure this pack for a shallow clone. + Configure this pack for a shallow clone. + maximum depth to traverse the commit graph + + objects which used to be shallow on the client, but are being + extended as part of this fetch + +
+ + Returns objects number in a pack file that was created by this writer. + Returns objects number in a pack file that was created by this writer. + number of objects in pack. + a cached pack cannot supply its object count. + + + + Add a pack index whose contents should be excluded from the result. + Add a pack index whose contents should be excluded from the result. + objects in this index will not be in the output pack. + + + Prepare the list of objects to be written to the pack stream. + + Prepare the list of objects to be written to the pack stream. +

+ Iterator exactly determines which objects are included in a pack + and order they appear in pack (except that objects order by type is not + needed at input). This order should conform general rules of ordering + objects in git - by recency and path (type and delta-base first is + internally secured) and responsibility for guaranteeing this order is on + a caller side. Iterator must return each id of object to write exactly + once. + + + iterator of object to store in a pack; order of objects within + each type is important, ordering by type is not needed; + allowed types for objects are + NGit.Constants.OBJ_COMMIT + , + NGit.Constants.OBJ_TREE + , + NGit.Constants.OBJ_BLOB + and + NGit.Constants.OBJ_TAG + ; objects returned by iterator may be + later reused by caller as object id and type are internally + copied in each iteration. + + when some I/O problem occur during reading objects. + + + +

Prepare the list of objects to be written to the pack stream. + + Prepare the list of objects to be written to the pack stream. +

+ Basing on these 2 sets, another set of objects to put in a pack file is + created: this set consists of all objects reachable (ancestors) from + interesting objects, except uninteresting objects and their ancestors. + This method uses class + NGit.Revwalk.ObjectWalk + extensively to find out that + appropriate set of output objects and their optimal order in output pack. + Order is consistent with general git in-pack rules: sort by object type, + recency, path and delta-base first. + + progress during object enumeration. + + collection of objects to be marked as interesting (start + points of graph traversal). + + + collection of objects to be marked as uninteresting (end + points of graph traversal). + + when some I/O problem occur during reading objects. + + + +

Prepare the list of objects to be written to the pack stream. + + Prepare the list of objects to be written to the pack stream. +

+ Basing on these 2 sets, another set of objects to put in a pack file is + created: this set consists of all objects reachable (ancestors) from + interesting objects, except uninteresting objects and their ancestors. + This method uses class + NGit.Revwalk.ObjectWalk + extensively to find out that + appropriate set of output objects and their optimal order in output pack. + Order is consistent with general git in-pack rules: sort by object type, + recency, path and delta-base first. + + progress during object enumeration. + ObjectWalk to perform enumeration. + + collection of objects to be marked as interesting (start + points of graph traversal). + + + collection of objects to be marked as uninteresting (end + points of graph traversal). + + when some I/O problem occur during reading objects. + + + +

Determine if the pack file will contain the requested object. + Determine if the pack file will contain the requested object. + the object to test the existence of. + true if the object will appear in the output pack file. + a cached pack cannot be examined. +
+ + Lookup the ObjectToPack object for a given ObjectId. + Lookup the ObjectToPack object for a given ObjectId. + the object to find in the pack. + the object we are packing, or null. + + + + Computes SHA-1 of lexicographically sorted objects ids written in this + pack, as used to name a pack file in repository. + + + Computes SHA-1 of lexicographically sorted objects ids written in this + pack, as used to name a pack file in repository. + + ObjectId representing SHA-1 name of a pack that was created. + + + Create an index file to match the pack file just written. + + Create an index file to match the pack file just written. +

+ This method can only be invoked after + WritePack(NGit.ProgressMonitor, NGit.ProgressMonitor, Sharpen.OutputStream) + has + been invoked and completed successfully. Writing a corresponding index is + an optional feature that not all pack users may require. + + + output for the index data. Caller is responsible for closing + this stream. + + the index data could not be written to the supplied stream. + + + +

Write the prepared pack to the supplied stream. + + Write the prepared pack to the supplied stream. +

+ At first, this method collects and sorts objects to pack, then deltas + search is performed if set up accordingly, finally pack stream is + written. +

+ All reused objects data checksum (Adler32/CRC32) is computed and + validated against existing checksum. + + progress monitor to report object compression work. + + progress monitor to report the number of objects written. + + + output stream of pack data. The stream should be buffered by + the caller. The caller is responsible for closing the stream. + + + an error occurred reading a local object's data to include in + the pack, or writing compressed object data to the output + stream. + + + + + description of what this PackWriter did in order to create the + final pack stream. The object is only available to callers after + WritePack(NGit.ProgressMonitor, NGit.ProgressMonitor, Sharpen.OutputStream) + + + + snapshot of the current state of this PackWriter. + + +

Release all resources used by this writer. + Release all resources used by this writer. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Include one object to the output file. + + Include one object to the output file. +

+ Objects are written in the order they are added. If the same object is + added twice, it may be written twice, creating a larger than necessary + file. + + the object to add. + the object is an unsupported type. + + + +

Select an object representation for this writer. + + Select an object representation for this writer. +

+ An + NGit.ObjectReader + implementation should invoke this method once for + each representation available for an object, to allow the writer to find + the most suitable one for the output. + + the object being packed. + the next available representation from the repository. + + +

Summary of how PackWriter created the pack. + Summary of how PackWriter created the pack. +
+ + + unmodifiable collection of objects to be included in the + pack. May be null if the pack was hand-crafted in a unit + test. + + + + + unmodifiable collection of objects that should be excluded + from the pack, as the peer that will receive the pack already + has these objects. + + + + + unmodifiable collection of the cached packs that were reused + in the output, if any were selected for reuse. + + + + + number of objects in the output pack that went through the + delta search process in order to find a potential delta base. + + + + + number of objects in the output pack that went through delta + base search and found a suitable base. This is a subset of + GetDeltaSearchNonEdgeObjects() + . + + + + + total number of objects output. This total includes the value + of + GetTotalDeltas() + . + + + + + total number of deltas output. This may be lower than the + actual number of deltas if a cached pack was reused. + + + + + number of objects whose existing representation was reused in + the output. This count includes + GetReusedDeltas() + . + + + + + number of deltas whose existing representation was reused in + the output, as their base object was also output or was + assumed present for a thin pack. This may be lower than the + actual number of reused deltas if a cached pack was reused. + + + + + total number of bytes written. This size includes the pack + header, trailer, thin pack, and reused cached pack(s). + + + + + size of the thin pack in bytes, if a thin pack was generated. + A thin pack is created when the client already has objects + and some deltas are created against those objects, or if a + cached pack is being used and some deltas will reference + objects in the cached pack. This size does not include the + pack header or trailer. + + + + object type code, e.g. OBJ_COMMIT or OBJ_TREE. + information about this type of object in the pack. + + + true if the resulting pack file was a shallow pack. + + + depth (in commits) the pack includes if shallow. 
+ + + + time in milliseconds spent enumerating the objects that need + to be included in the output. This time includes any restarts + that occur when a cached pack is selected for reuse. + + + + + time in milliseconds spent matching existing representations + against objects that will be transmitted, or that the client + can be assumed to already have. + + + + + time in milliseconds spent finding the sizes of all objects + that will enter the delta compression search window. The + sizes need to be known to better match similar objects + together and improve delta compression ratios. + + + + + time in milliseconds spent on delta compression. This is + observed wall-clock time and does not accurately track CPU + time used when multiple threads were used to perform the + delta compression. + + + + + time in milliseconds spent writing the pack output, from + start of header until end of trailer. The transfer speed can + be approximated by dividing + GetTotalBytes() + by this + value. + + + + total time spent processing this pack. + + + + get the average output speed in terms of bytes-per-second. + getTotalBytes() / (getTimeWriting() / 1000.0) + . + + + + formatted message string for display to clients. + + + Statistics about a single class of object. + Statistics about a single class of object. + + + + total number of objects output. This total includes the + value of + GetDeltas() + . + + + + + total number of deltas output. This may be lower than the + actual number of deltas if a cached pack was reused. + + + + + number of objects whose existing representation was + reused in the output. This count includes + GetReusedDeltas() + . + + + + + number of deltas whose existing representation was reused + in the output, as their base object was also output or + was assumed present for a thin pack. This may be lower + than the actual number of reused deltas if a cached pack + was reused. + + + + + total number of bytes written. 
This size includes the + object headers as well as the compressed data. This size + also includes all of + GetDeltaBytes() + . + + + + + number of delta bytes written. This size includes the + object headers for the delta objects. + + + + Estimated size of a single ObjectToPack instance. + Estimated size of a single ObjectToPack instance. + + + Possible states that a PackWriter can be in. + Possible states that a PackWriter can be in. + + + Summary of the current state of a PackWriter. + Summary of the current state of a PackWriter. + + + the PackConfig used to build the writer. + + + the current phase of the writer. + + + an estimate of the total memory used by the writer. + + + Parses raw Git trees from the canonical semi-text/semi-binary format. + Parses raw Git trees from the canonical semi-text/semi-binary format. + + + + First offset within + raw + of the prior entry. + + + + + First offset within + raw + of the current entry's data. + + + + Offset one past the current entry (first byte of next entry). + Offset one past the current entry (first byte of next entry). + + + Create a new parser. + Create a new parser. + + + Create a new parser for a tree appearing in a subset of a repository. + Create a new parser for a tree appearing in a subset of a repository. + + position of this iterator in the repository tree. The value + may be null or the empty array to indicate the prefix is the + root of the repository. A trailing slash ('/') is + automatically appended if the prefix does not end in '/'. + + reader to load the tree data from. + + identity of the tree being parsed; used only in exception + messages if data corruption is found. + + the object supplied is not available from the repository. + + + the object supplied as an argument is not actually a tree and + cannot be parsed as though it were a tree. + + a loose object or pack file could not be read. + + + + the parent of this tree parser + + + + Reset this parser to walk through the given tree data. 
+ Reset this parser to walk through the given tree data. + the raw tree content. + + + Reset this parser to walk through the given tree. + Reset this parser to walk through the given tree. + reader to use during repository access. + + identity of the tree being parsed; used only in exception + messages if data corruption is found. + + the root level parser. + the object supplied is not available from the repository. + + + the object supplied as an argument is not actually a tree and + cannot be parsed as though it were a tree. + + a loose object or pack file could not be read. + + + + this iterator, or its parent, if the tree is at eof. + + + Reset this parser to walk through the given tree. + Reset this parser to walk through the given tree. + reader to use during repository access. + + identity of the tree being parsed; used only in exception + messages if data corruption is found. + + the object supplied is not available from the repository. + + + the object supplied as an argument is not actually a tree and + cannot be parsed as though it were a tree. + + a loose object or pack file could not be read. + + + + + + + + Back door to quickly create a subtree iterator for any subtree. + + Back door to quickly create a subtree iterator for any subtree. +

+ Don't use this unless you are ObjectWalk. The method is meant to be + called only once the current entry has been identified as a tree and its + identity has been converted into an ObjectId. + + reader to load the tree data from. + ObjectId of the tree to open. + a new parser that walks over the current subtree. + a loose object or pack file could not be read. + + + + + + + +

Iterator over an empty tree (a directory with no files). + Iterator over an empty tree (a directory with no files). +
+ + Create a new iterator with no parent. + Create a new iterator with no parent. + + + Create an iterator for a subtree of an existing iterator. + + Create an iterator for a subtree of an existing iterator. +

+ The caller is responsible for setting up the path of the child iterator. + + parent tree iterator. + + path array to be used by the child iterator. This path must + contain the path from the top of the walk to the first child + and must end with a '/'. + + + position within childPath where the child can + insert its data. The value at + childPath[childPathOffset-1] must be '/'. + + + + + + + + + + + + + +

Working directory iterator for standard Java IO. + + Working directory iterator for standard Java IO. +

+ This iterator uses the standard java.io package to read the + specified working directory as part of a + TreeWalk + . + + + +

+ Walks a working directory tree as part of a + TreeWalk + . +

+ Most applications will want to use the standard implementation of this + iterator, + FileTreeIterator + , as that does all IO through the standard + java.io package. Plugins for a Java based IDE may however wish + to create their own implementations of this class to allow traversal of the + IDE's project space, as well as benefit from any caching the IDE may have. +

+ FileTreeIterator +
+ + Size we perform file IO in if we have to read and hash a file. + Size we perform file IO in if we have to read and hash a file. + + + + Maximum size of files which may be read fully into memory for performance + reasons. + + + Maximum size of files which may be read fully into memory for performance + reasons. + + + + + An empty entry array, suitable for + Init(Entry[]) + . + + + + Inherited state of this iterator, describing working tree, etc. + Inherited state of this iterator, describing working tree, etc. + + + + The + IdBuffer() + for the current entry. + + + + + Index within + entries + that + contentId + came from. + + + + List of entries obtained from the subclass. + List of entries obtained from the subclass. + + + + Total number of entries in + entries + that are valid. + + + + + Current position within + entries + . + + + + If there is a .gitignore file present, the parsed rules from it. + If there is a .gitignore file present, the parsed rules from it. + + + Repository that is the root level being iterated over + + + + Cached canonical length, initialized from + IdBuffer() + + + + + + The offset of the content id in + IdBuffer() + + + + + Cached value of isEntryIgnored(). + + Cached value of isEntryIgnored(). 0 if not ignored, 1 if ignored, -1 if + the value is not yet cached. + + + + Create a new iterator with no parent. + Create a new iterator with no parent. + working tree options to be used + + + Create a new iterator with no parent and a prefix. + + Create a new iterator with no parent and a prefix. +

+ The prefix path supplied is inserted in front of all paths generated by + this iterator. It is intended to be used when an iterator is being + created for a subsection of an overall repository and needs to be + combined with other iterators that are created to run over the entire + repository namespace. + + + position of this iterator in the repository tree. The value + may be null or the empty string to indicate the prefix is the + root of the repository. A trailing slash ('/') is + automatically appended if the prefix does not end in '/'. + + working tree options to be used + + +

Create an iterator for a subtree of an existing iterator. + Create an iterator for a subtree of an existing iterator. + parent tree iterator. +
+ + Initialize this iterator for the root level of a repository. + + Initialize this iterator for the root level of a repository. +

+ This method should only be invoked after calling + Init(Entry[]) + , + and only for the root iterator. + + the repository. + + +

+ Define the matching + NGit.Dircache.DirCacheIterator + , to optimize ObjectIds. + Once the DirCacheIterator has been set this iterator must only be + advanced by the TreeWalk that is supplied, as it assumes that itself and + the corresponding DirCacheIterator are positioned on the same file path + whenever + IdBuffer() + is invoked. + + the walk that will be advancing this iterator. + + index of the matching + NGit.Dircache.DirCacheIterator + . + +
+ + Get submodule id for given entry. + Get submodule id for given entry. + + non-null submodule id + + + + Get submodule id using the repository at the location of the entry + relative to the directory. + + + Get submodule id using the repository at the location of the entry + relative to the directory. + + + + non-null submodule id + + + + + + + + + + + + Returns the working tree options used by this iterator. + Returns the working tree options used by this iterator. + working tree options + + + + + + + + + Get the raw byte length of this entry. + Get the raw byte length of this entry. + size of this file, in bytes. + + + Get the filtered input length of this entry + size of the content, in bytes + System.IO.IOException + + + Get the last modified time of this entry. + Get the last modified time of this entry. + + last modified time of this file, in milliseconds since the epoch + (Jan 1, 1970 UTC). + + + + Obtain an input stream to read the file content. + + Obtain an input stream to read the file content. +

+ Efficient implementations are not required. The caller will usually + obtain the stream only once per entry, if at all. +

+ The input stream should not use buffering if the implementation can avoid + it. The caller will buffer as necessary to perform efficient block IO + operations. +

+ The caller will close the stream once complete. + + a stream to read from the file. + the file could not be opened for reading. + + + +

Determine if the current entry path is ignored by an ignore rule. + Determine if the current entry path is ignored by an ignore rule. + true if the entry was ignored by an ignore rule file. + a relevant ignore rule file exists but cannot be read. + +
+ + Determine if the entry path is ignored by an ignore rule. + Determine if the entry path is ignored by an ignore rule. + the length of the path in the path buffer. + true if the entry is ignored by an ignore rule. + a relevant ignore rule file exists but cannot be read. + + + + + + + Constructor helper. + Constructor helper. + + files in the subtree of the work tree this iterator operates + on + + + + Obtain the current entry from this iterator. + Obtain the current entry from this iterator. + the currently selected entry. + + + Is the file mode of the current entry different than the given raw mode? + + + true if different, false otherwise + + + + Compare the metadata (mode, length, modification-timestamp) of the + current entry and a + NGit.Dircache.DirCacheEntry + + + the + NGit.Dircache.DirCacheEntry + to compare with + + + a + MetadataDiff + which tells whether and how the entries + metadata differ + + + + + Checks whether this entry differs from a given entry from the + NGit.Dircache.DirCache + . + File status information is used and if status is same we consider the + file identical to the state in the working directory. Native git uses + more stat fields than we have accessible in Java. + + the entry from the dircache we want to compare against + + True if the actual file content should be checked if + modification time differs. + + true if content is most likely different. + + + + Get the file mode to use for the current entry when it is to be updated + in the index. + + + Get the file mode to use for the current entry when it is to be updated + in the index. + + + NGit.Dircache.DirCacheIterator + positioned at the same entry as this + iterator or null if no + NGit.Dircache.DirCacheIterator + is available + at this iterator's current entry + + index file mode + + + Compares the entries content with the content in the filesystem. + + Compares the entries content with the content in the filesystem. 
+ Unsmudges the entry when it is detected that it is clean. + + the entry to be checked + + true if the content matches, false + otherwise + + + + + + + + + + + The result of a metadata-comparison between the current entry and a + DirCacheEntry + + + + A single entry within a working directory tree. + A single entry within a working directory tree. + + + Get the type of this entry. + + Get the type of this entry. +

+ Note: Efficient implementation required. +

+ The implementation of this method must be efficient. If a subclass + needs to compute the value they should cache the reference within an + instance member instead. + + + a file mode constant from + NGit.FileMode + . + + + +

Get the byte length of this entry. + + Get the byte length of this entry. +

+ Note: Efficient implementation required. +

+ The implementation of this method must be efficient. If a subclass + needs to compute the value they should cache the reference within an + instance member instead. + + size of this file, in bytes. + + +

Get the last modified time of this entry. + + Get the last modified time of this entry. +

+ Note: Efficient implementation required. +

+ The implementation of this method must be efficient. If a subclass + needs to compute the value they should cache the reference within an + instance member instead. + + time since the epoch (in ms) of the last change. + + +

Get the name of this entry within its directory. + + Get the name of this entry within its directory. +

+ Efficient implementations are not required. The caller will obtain + the name only once and cache it once obtained. + + name of the entry. + + +

Obtain an input stream to read the file content. + + Obtain an input stream to read the file content. +

+ Efficient implementations are not required. The caller will usually + obtain the stream only once per entry, if at all. +

+ The input stream should not use buffering if the implementation can + avoid it. The caller will buffer as necessary to perform efficient + block IO operations. +

+ The caller will close the stream once complete. + + a stream to read from the file. + the file could not be opened for reading. + + + +

Magic type indicating we know rules exist, but they aren't loaded. + Magic type indicating we know rules exist, but they aren't loaded. +
+ + + + + Magic type indicating there may be rules for the top level. + Magic type indicating there may be rules for the top level. + + + + + + + + + + Options used to process the working tree. + Options used to process the working tree. + + + File name character encoder. + File name character encoder. + + + + Digest computer for + WorkingTreeIterator.contentId + computations. + + + + + Buffer used to perform + WorkingTreeIterator.contentId + computations. + + + + TreeWalk with a (supposedly) matching DirCacheIterator. + TreeWalk with a (supposedly) matching DirCacheIterator. + + + + Position of the matching + NGit.Dircache.DirCacheIterator + . + + + + the starting directory. + + the starting directory. This directory should correspond to the root of + the repository. + + + + + the file system abstraction which will be necessary to perform certain + file system operations. + + + the file system abstraction which will be necessary to perform certain + file system operations. + + + + Create a new iterator to traverse the work tree and its children. + Create a new iterator to traverse the work tree and its children. + the repository whose working tree will be scanned. + + + Create a new iterator to traverse the given directory and its children. + Create a new iterator to traverse the given directory and its children. + + the starting directory. This directory should correspond to + the root of the repository. + + + the file system abstraction which will be necessary to perform + certain file system operations. + + working tree options to be used + + + Create a new iterator to traverse a subdirectory. + Create a new iterator to traverse a subdirectory. + the parent iterator we were created from. + + the file system abstraction which will be necessary to perform + certain file system operations. + + + the subdirectory. This should be a directory contained within + the parent directory. 
+ + + + + + + + The root directory of this iterator + + + + The location of the working file. This is the same as + + new + File(getDirectory(), getEntryPath()) + + but may be faster by + reusing an internal File instance. + + + + Wrapper for a standard Java IO file + + + + + + Get the underlying file of this entry. + Get the underlying file of this entry. + the underlying file of this entry + + + Specialized TreeWalk to detect directory-file (D/F) name conflicts. + + Specialized TreeWalk to detect directory-file (D/F) name conflicts. +

+ Due to the way a Git tree is organized the standard + TreeWalk + won't + easily find a D/F conflict when merging two or more trees together. In the + standard TreeWalk the file will be returned first, and then much later the + directory will be returned. This makes it impossible for the application to + efficiently detect and handle the conflict. +

+ Using this walk implementation causes the directory to report earlier than + usual, at the same time as the non-directory entry. This permits the + application to handle the D/F conflict in a single step. The directory is + returned only once, so it does not get returned later in the iteration. +

+ When a D/F conflict is detected + TreeWalk.IsSubtree() + will return true + and + TreeWalk.EnterSubtree() + will recurse into the subtree, no matter + which iterator originally supplied the subtree. +

+ Because conflicted directories report early, using this walk implementation + to populate a + NGit.Dircache.DirCacheBuilder + may cause the automatic resorting to + run and fix the entry ordering. +

+ This walk implementation requires more CPU to implement a look-ahead and a + look-behind to merge a D/F pair together, or to skip a previously reported + directory. In typical Git repositories the look-ahead cost is 0 and the + look-behind doesn't trigger, as users tend not to create trees which contain + both "foo" as a directory and "foo.c" as a file. +

+ In the worst-case however several thousand look-ahead steps per walk step may + be necessary, making the overhead quite significant. Since this worst-case + should never happen this walk implementation has made the time/space tradeoff + in favor of more-time/less-space, as that better suits the typical case. + + + +

+ Walks one or more + AbstractTreeIterator + s in parallel. +

+ This class can perform n-way differences across as many trees as necessary. +

+ Each tree added must have the same root as existing trees in the walk. +

+ A TreeWalk instance can only be used once to generate results. Running a + second time requires creating a new TreeWalk instance, or invoking + Reset() + and adding new trees before starting again. Resetting an + existing instance may be faster for some applications as some internal + buffers may be recycled. +

+ TreeWalk instances are not thread-safe. Applications must either restrict + usage of a TreeWalk instance to a single thread, or implement their own + synchronization at a higher level. +

+ Multiple simultaneous TreeWalk instances per + NGit.Repository + are + permitted, even from concurrent threads. +

+
+ + Open a tree walk and filter to exactly one path. + + Open a tree walk and filter to exactly one path. +

+ The returned tree walk is already positioned on the requested path, so + the caller should not need to invoke + Next() + unless they are + looking for a possible directory/file name conflict. + + the reader the walker will obtain tree data from. + single path to advance the tree walk instance into. + one or more trees to walk through, all with the same root. + + a new tree walk configured for exactly this one path; null if no + path was found in any of the trees. + + reading a pack file or loose object failed. + + + an tree object could not be read as its data stream did not + appear to be a tree, or could not be inflated. + + an object we expected to be a tree was not a tree. + + a tree object was not found. + + + +

Open a tree walk and filter to exactly one path. + + Open a tree walk and filter to exactly one path. +

+ The returned tree walk is already positioned on the requested path, so + the caller should not need to invoke + Next() + unless they are + looking for a possible directory/file name conflict. + + repository to read tree object data from. + single path to advance the tree walk instance into. + one or more trees to walk through, all with the same root. + + a new tree walk configured for exactly this one path; null if no + path was found in any of the trees. + + reading a pack file or loose object failed. + + + an tree object could not be read as its data stream did not + appear to be a tree, or could not be inflated. + + an object we expected to be a tree was not a tree. + + a tree object was not found. + + + +

Open a tree walk and filter to exactly one path. + + Open a tree walk and filter to exactly one path. +

+ The returned tree walk is already positioned on the requested path, so + the caller should not need to invoke + Next() + unless they are + looking for a possible directory/file name conflict. + + repository to read tree object data from. + single path to advance the tree walk instance into. + the single tree to walk through. + + a new tree walk configured for exactly this one path; null if no + path was found in any of the trees. + + reading a pack file or loose object failed. + + + an tree object could not be read as its data stream did not + appear to be a tree, or could not be inflated. + + an object we expected to be a tree was not a tree. + + a tree object was not found. + + + +

Create a new tree walker for a given repository. + Create a new tree walker for a given repository. + the repository the walker will obtain data from. +
+ + Create a new tree walker for a given repository. + Create a new tree walker for a given repository. + the reader the walker will obtain tree data from. + + + Release any resources used by this walker's reader. + + Release any resources used by this walker's reader. +

+ A walker that has been released can be used again, but may need to be + released after the subsequent usage. + + + +

Reset this walker so new tree iterators can be added to it. + Reset this walker so new tree iterators can be added to it. +
+ + Reset this walker to run over a single existing tree. + Reset this walker to run over a single existing tree. + + the tree we need to parse. The walker will execute over this + single tree if the reset is successful. + + the given tree object does not exist in this repository. + + + the given object id does not denote a tree, but instead names + some other non-tree type of object. Note that commits are not + trees, even if they are sometimes called a "tree-ish". + + + the object claimed to be a tree, but its contents did not + appear to be a tree. The repository may have data corruption. + + a loose object or pack file could not be read. + + + + Reset this walker to run over a set of existing trees. + Reset this walker to run over a set of existing trees. + + the trees we need to parse. The walker will execute over this + many parallel trees if the reset is successful. + + the given tree object does not exist in this repository. + + + the given object id does not denote a tree, but instead names + some other non-tree type of object. Note that commits are not + trees, even if they are sometimes called a "tree-ish". + + + the object claimed to be a tree, but its contents did not + appear to be a tree. The repository may have data corruption. + + a loose object or pack file could not be read. + + + + Add an already existing tree object for walking. + + Add an already existing tree object for walking. +

+ The position of this tree is returned to the caller, in case the caller + has lost track of the order they added the trees into the walker. +

+ The tree must have the same root as existing trees in the walk. + + identity of the tree object the caller wants walked. + position of this tree within the walker. + the given tree object does not exist in this repository. + + + the given object id does not denote a tree, but instead names + some other non-tree type of object. Note that commits are not + trees, even if they are sometimes called a "tree-ish". + + + the object claimed to be a tree, but its contents did not + appear to be a tree. The repository may have data corruption. + + a loose object or pack file could not be read. + + + +

Add an already created tree iterator for walking. + + Add an already created tree iterator for walking. +

+ The position of this tree is returned to the caller, in case the caller + has lost track of the order they added the trees into the walker. +

+ The tree which the iterator operates on must have the same root as + existing trees in the walk. + + + an iterator to walk over. The iterator should be new, with no + parent, and should still be positioned before the first entry. + The tree which the iterator operates on must have the same root + as other trees in the walk. + + position of this tree within the walker. + + the iterator was unable to obtain its first entry, due to + possible data corruption within the backing data store. + + + +

Advance this walker to the next relevant entry. + Advance this walker to the next relevant entry. + + true if there is an entry available; false if all entries have + been walked and the walk of this set of tree iterators is over. + + + Recursive() + was enabled, a subtree was found, but + the subtree object does not exist in this repository. The + repository may be missing objects. + + + Recursive() + was enabled, a subtree was found, and + the subtree id does not denote a tree, but instead names some + other non-tree type of object. The repository may have data + corruption. + + + the contents of a tree did not appear to be a tree. The + repository may have data corruption. + + a loose object or pack file could not be read. + +
+ + Obtain the tree iterator for the current entry. + + Obtain the tree iterator for the current entry. +

+ Entering into (or exiting out of) a subtree causes the current tree + iterator instance to be changed for the nth tree. This allows the tree + iterators to manage only one list of items, with the diving handled by + recursive trees. + + tree to obtain the current iterator of. + type of the tree iterator expected by the caller. + + r the current iterator of the requested type; null if the tree + has no entry to match the current path. + + + +

+ Obtain the raw + NGit.FileMode + bits for the current entry. +

+ Every added tree supplies mode bits, even if the tree does not contain + the current entry. In the latter case + NGit.FileMode.MISSING + 's mode + bits (0) are returned. +

+ tree to obtain the mode bits from. + mode bits for the current entry of the nth tree. + NGit.FileMode.FromBits(int) +
+ + + Obtain the + NGit.FileMode + for the current entry. +

+ Every added tree supplies a mode, even if the tree does not contain the + current entry. In the latter case + NGit.FileMode.MISSING + is returned. +

+ tree to obtain the mode from. + mode for the current entry of the nth tree. +
+ + Obtain the ObjectId for the current entry. + + Obtain the ObjectId for the current entry. +

+ Using this method to compare ObjectId values between trees of this walker + is very inefficient. Applications should try to use + IdEqual(int, int) + or + GetObjectId(NGit.MutableObjectId, int) + + whenever possible. +

+ Every tree supplies an object id, even if the tree does not contain the + current entry. In the latter case + NGit.ObjectId.ZeroId() + is returned. + + tree to obtain the object identifier from. + object identifier for the current tree entry. + GetObjectId(NGit.MutableObjectId, int) + + IdEqual(int, int) + + +

Obtain the ObjectId for the current entry. + + Obtain the ObjectId for the current entry. +

+ Every tree supplies an object id, even if the tree does not contain the + current entry. In the latter case + NGit.ObjectId.ZeroId() + is supplied. +

+ Applications should try to use + IdEqual(int, int) + when possible + as it avoids conversion overheads. + + buffer to copy the object id into. + tree to obtain the object identifier from. + IdEqual(int, int) + + +

Compare two tree's current ObjectId values for equality. + Compare two tree's current ObjectId values for equality. + first tree to compare the object id from. + second tree to compare the object id from. + + result of + getObjectId(nthA).equals(getObjectId(nthB)). + + GetObjectId(int) +
+ + The path length of the current entry. + + + Test if the supplied path matches the current entry's path. + + Test if the supplied path matches the current entry's path. +

+ This method tests that the supplied path is exactly equal to the current + entry, or is one of its parent directories. It is faster to use this + method then to use + PathString() + to first create a String + object, then test startsWith or some other type of string + match function. + + + path buffer to test. Callers should ensure the path does not + end with '/' prior to invocation. + + number of bytes from buf to test. + + < 0 if p is before the current path; 0 if p matches the current + path; 1 if the current path is past p and p will never match + again on this tree walk. + + + +

+ Test if the supplied path matches (being suffix of) the current entry's + path. + + + Test if the supplied path matches (being suffix of) the current entry's + path. +

+ This method tests that the supplied path is exactly equal to the current + entry, or is relative to one of entry's parent directories. It is faster + to use this method then to use + PathString() + to first create + a String object, then test endsWith or some other type of + string match function. + + path buffer to test. + number of bytes from buf to test. + + true if p is suffix of the current path; + false if otherwise + + + +

Enter into the current subtree. + + Enter into the current subtree. +

+ If the current entry is a subtree this method arranges for its children + to be returned before the next sibling following the subtree is returned. + + + a subtree was found, but the subtree object does not exist in + this repository. The repository may be missing objects. + + + a subtree was found, and the subtree id does not denote a + tree, but instead names some other non-tree type of object. + The repository may have data corruption. + + + the contents of a tree did not appear to be a tree. The + repository may have data corruption. + + a loose object or pack file could not be read. + + + + + + + + + + + + + + + + + the reader this walker is using to load objects. + + +

Get the currently configured filter. + Get the currently configured filter. + the current filter. Never null as a filter is always needed. + Set the tree entry filter for this walker. + + Set the tree entry filter for this walker. +

+ Multiple filters may be combined by constructing an arbitrary tree of + AndTreeFilter or OrTreeFilter instances to + describe the boolean expression required by the application. Custom + filter implementations may also be constructed by applications. +

+ Note that filters are not thread-safe and may not be shared by concurrent + TreeWalk instances. Every TreeWalk must be supplied its own unique + filter, unless the filter implementation specifically states it is (and + always will be) thread-safe. Callers may use + NGit.Treewalk.Filter.TreeFilter.Clone() + + to create a unique filter tree for this TreeWalk instance. + + + the new filter. If null the special + NGit.Treewalk.Filter.TreeFilter.ALL + + filter will be used instead, as it matches every entry. + + NGit.Treewalk.Filter.AndTreeFilter + + NGit.Treewalk.Filter.OrTreeFilter + + + +

+ Is this walker automatically entering into subtrees? +

+ If the walker is recursive then the caller will not see a subtree node + and instead will only receive file nodes in all relevant subtrees. +

+ + Is this walker automatically entering into subtrees? +

+ If the walker is recursive then the caller will not see a subtree node + and instead will only receive file nodes in all relevant subtrees. + + true if automatically entering subtrees is enabled. +

Set the walker to enter (or not enter) subtrees automatically. + + Set the walker to enter (or not enter) subtrees automatically. +

+ If recursive mode is enabled the walker will hide subtree nodes from the + calling application and will produce only file level nodes. If a tree + (directory) is deleted then all of the file level nodes will appear to be + deleted, recursively, through as many levels as necessary to account for + all entries. + + true to skip subtree nodes and only obtain files nodes. + + +

+ Does this walker return a tree entry after it exits the subtree? +

+ If post order traversal is enabled then the walker will return a subtree + after it has returned the last entry within that subtree. +

+ + Does this walker return a tree entry after it exits the subtree? +

+ If post order traversal is enabled then the walker will return a subtree + after it has returned the last entry within that subtree. This may cause + a subtree to be seen by the application twice if + Recursive() + is false, as the application will see it once, call + EnterSubtree() + , and then see it again as it leaves the subtree. +

+ If an application does not enable + Recursive() + and it does not + call + EnterSubtree() + then the tree is returned only once as none + of the children were processed. + + true if subtrees are returned after entries within the subtree. +

Set the walker to return trees after their children. + Set the walker to return trees after their children. + true to get trees after their children. + PostOrderTraversal() +
+ + Get the number of trees known to this walker. + Get the number of trees known to this walker. + the total number of trees this walker is iterating over. + + + Get the current entry's name within its parent tree. + + Get the current entry's name within its parent tree. +

+ This method is not very efficient and is primarily meant for debugging + and final output generation. Applications should try to avoid calling it, + and if invoked do so only once per interesting entry, where the name is + absolutely required for correct function. + + + name of the current entry within the parent tree (or directory). + The name never includes a '/'. + + + +

Get the current entry's complete path. + + Get the current entry's complete path. +

+ This method is not very efficient and is primarily meant for debugging + and final output generation. Applications should try to avoid calling it, + and if invoked do so only once per interesting entry, where the name is + absolutely required for correct function. + + + complete path of the current entry, from the root of the + repository. If the current entry is in a subtree there will be at + least one '/' in the returned string. + + + +

Get the current entry's complete path as a UTF-8 byte array. + Get the current entry's complete path as a UTF-8 byte array. + + complete path of the current entry, from the root of the + repository. If the current entry is in a subtree there will be at + least one '/' in the returned string. + +
+ + Get the current subtree depth of this walker. + Get the current subtree depth of this walker. + the current subtree depth of this walker. + + + + Is the current entry a subtree? +

+ This method is faster then testing the raw mode bits of all trees to see + if any of them are a subtree. +

+ + Is the current entry a subtree? +

+ This method is faster then testing the raw mode bits of all trees to see + if any of them are a subtree. If at least one is a subtree then this + method will return true. + + + true if + EnterSubtree() + will work on the current node. + + + +

Is the current entry a subtree returned after its children? + + true if the current node is a tree that has been returned after + its children were already processed. + + PostOrderTraversal() +
+ + Create a new tree walker for a given repository. + Create a new tree walker for a given repository. + the repository the walker will obtain data from. + + + Create a new tree walker for a given repository. + Create a new tree walker for a given repository. + the reader the walker will obtain tree data from. + + + + + + + + + + + + + + + + + + True if the current entry is covered by a directory/file conflict. + + True if the current entry is covered by a directory/file conflict. + This means that for some prefix of the current entry's path, this walk + has detected a directory/file conflict. Also true if the current entry + itself is a directory/file conflict. + Example: If this TreeWalk points to foo/bar/a.txt and this method returns + true then you know that either for path foo or for path foo/bar files and + folders were detected. + + + true if the current entry is covered by a + directory/file conflict, false otherwise + + + + + Options used by the + WorkingTreeIterator + . + + + + + Key for + NGit.Config.Get<T>(NGit.Config.SectionParser<T>) + + . + + + + true if the execute bit on working files should be trusted. + + + how automatic CRLF conversion has been configured. + + + Includes a tree entry only if all subfilters include the same tree entry. + + + Includes a tree entry only if all subfilters include the same tree entry. +

+ Classic shortcut behavior is used, so evaluation of the + TreeFilter.Include(NGit.Treewalk.TreeWalk) + + method stops as soon as a false result + is obtained. Applications can improve filtering performance by placing faster + filters that are more likely to reject a result earlier in the list. + + + +

Create a filter with two filters, both of which must match. + Create a filter with two filters, both of which must match. + first filter to test. + second filter to test. + a filter that must match both input filters. +
+ + Create a filter around many filters, all of which must match. + Create a filter around many filters, all of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match all input filters. + + + Create a filter around many filters, all of which must match. + Create a filter around many filters, all of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match all input filters. + + + + + + + + + + + + + + Skip + NGit.Treewalk.WorkingTreeIterator + entries that appear in gitignore files. + + + + Construct a filter to ignore paths known to a particular iterator. + Construct a filter to ignore paths known to a particular iterator. + index of the workdir tree in the tree walk + + + + + + + + Includes an entry only if the subfilter does not include the entry. + Includes an entry only if the subfilter does not include the entry. + + + Create a filter that negates the result of another filter. + Create a filter that negates the result of another filter. + filter to negate. + a filter that does the reverse of a. + + + + + + + + Includes a tree entry if any subfilters include the same tree entry. + + Includes a tree entry if any subfilters include the same tree entry. +

+ Classic shortcut behavior is used, so evaluation of the + TreeFilter.Include(NGit.Treewalk.TreeWalk) + + method stops as soon as a true result is + obtained. Applications can improve filtering performance by placing faster + filters that are more likely to accept a result earlier in the list. + + + +

Create a filter with two filters, one of which must match. + Create a filter with two filters, one of which must match. + first filter to test. + second filter to test. + a filter that must match at least one input filter. +
+ + Create a filter around many filters, one of which must match. + Create a filter around many filters, one of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match at least one input filter. + + + Create a filter around many filters, one of which must match. + Create a filter around many filters, one of which must match. + + list of filters to match against. Must contain at least 2 + filters. + + a filter that must match at least one input filter. + + + + + + + + + + + + + Includes tree entries only if they match the configured path. + + Includes tree entries only if they match the configured path. +

+ Applications should use + PathFilterGroup + to connect these into a tree + filter graph, as the group supports breaking out of traversal once it is + known the path can never match. + + + +

Create a new tree filter for a user supplied path. + + Create a new tree filter for a user supplied path. +

+ Path strings are relative to the root of the repository. If the user's + input should be assumed relative to a subdirectory of the repository the + caller must prepend the subdirectory's path prior to creating the filter. +

+ Path strings use '/' to delimit directories on all platforms. + + + the path to filter on. Must not be the empty string. All + trailing '/' characters will be trimmed before string's length + is checked or is used as part of the constructed filter. + + a new filter for the requested path. + the path supplied was the empty string. + + + + the path this filter matches. + + + The walk to check against. + + + true + if the path length of this filter matches the length + of the current path of the supplied TreeWalk. + + + +

Includes tree entries only if they match one or more configured paths. + + Includes tree entries only if they match one or more configured paths. +

+ Operates like + PathFilter + but causes the walk to abort as soon as the + tree can no longer match any of the paths within the group. This may bypass + the boolean logic of a higher level AND or OR group, but does improve + performance for the common case of examining one or more modified paths. +

+ This filter is effectively an OR group around paths, with the early abort + feature described above. + + + +

Create a collection of path filters from Java strings. + + Create a collection of path filters from Java strings. +

+ Path strings are relative to the root of the repository. If the user's + input should be assumed relative to a subdirectory of the repository the + caller must prepend the subdirectory's path prior to creating the filter. +

+ Path strings use '/' to delimit directories on all platforms. +

+ Paths may appear in any order within the collection. Sorting may be done + internally when the group is constructed if doing so will improve path + matching performance. + + the paths to test against. Must have at least one entry. + a new filter for the list of paths supplied. + + +

Create a collection of path filters from Java strings. + + Create a collection of path filters from Java strings. +

+ Path strings are relative to the root of the repository. If the user's + input should be assumed relative to a subdirectory of the repository the + caller must prepend the subdirectory's path prior to creating the filter. +

+ Path strings use '/' to delimit directories on all platforms. +

+ Paths may appear in any order. Sorting may be done internally when the + group is constructed if doing so will improve path matching performance. + + the paths to test against. Must have at least one entry. + a new filter for the paths supplied. + + +

Create a collection of path filters. + + Create a collection of path filters. +

+ Paths may appear in any order within the collection. Sorting may be done + internally when the group is constructed if doing so will improve path + matching performance. + + the paths to test against. Must have at least one entry. + a new filter for the list of paths supplied. + + +

Includes tree entries only if they match the configured path. + Includes tree entries only if they match the configured path. +
+ + Create a new tree filter for a user supplied path. + + Create a new tree filter for a user supplied path. +

+ Path strings use '/' to delimit directories on all platforms. + + the path (suffix) to filter on. Must not be the empty string. + a new filter for the requested path. + the path supplied was the empty string. + + + + + + + + +

+ To be used in combination with a DirCacheIterator: includes only tree entries + for which 'skipWorkTree' flag is not set. + + + To be used in combination with a DirCacheIterator: includes only tree entries + for which 'skipWorkTree' flag is not set. + +
+ + Index of DirCacheIterator to work on. + Index of DirCacheIterator to work on. + + + Create a filter to work on the specified DirCacheIterator. + Create a filter to work on the specified DirCacheIterator. + + index of DirCacheIterator to work on. If the index does not + refer to a DirCacheIterator, the filter will include all + entries. + + + + Encodes and decodes to and from Base64 notation. + + Encodes and decodes to and from Base64 notation. +

+ I am placing this code in the Public Domain. Do with it as you will. This + software comes with no guarantees or warranties but with plenty of + well-wishing instead! Please visit <a + href="http://iharder.net/base64">http://iharder.net/base64</a> periodically + to check for updates or to contribute improvements. + + Robert Harder + rob@iharder.net + 2.1, stripped to minimum feature set used by JGit. + + +

The equals sign (=) as a byte. + The equals sign (=) as a byte. +
+ + Indicates equals sign in encoding. + Indicates equals sign in encoding. + + + Indicates white space in encoding. + Indicates white space in encoding. + + + Indicates an invalid byte during decoding. + Indicates an invalid byte during decoding. + + + Preferred encoding. + Preferred encoding. + + + The 64 valid Base64 values. + The 64 valid Base64 values. + + + + Translates a Base64 value to either its 6-bit reconstruction value or a + negative number indicating some other meaning. + + + Translates a Base64 value to either its 6-bit reconstruction value or a + negative number indicating some other meaning. The table is only 7 bits + wide, as the 8th bit is discarded during decoding. + + + + Defeats instantiation. + Defeats instantiation. + + + + Encodes up to three bytes of the array source and writes the + resulting four Base64 bytes to destination. + + + Encodes up to three bytes of the array source and writes the + resulting four Base64 bytes to destination. The source and + destination arrays can be manipulated anywhere along their length by + specifying srcOffset and destOffset. This method + does not check to make sure your arrays are large enough to accommodate + srcOffset + 3 for the source array or + destOffset + 4 for the destination array. The + actual number of significant bytes in your array is given by + numSigBytes. + + the array to convert + the index where conversion begins + the number of significant bytes in your array + the array to hold the conversion + the index where output will be put + + + Encodes a byte array into Base64 notation. + Encodes a byte array into Base64 notation. + The data to convert + encoded base64 representation of source. + + + Encodes a byte array into Base64 notation. + Encodes a byte array into Base64 notation. + The data to convert + Offset in array where conversion should begin + Length of data to convert + encoded base64 representation of source. 
+ + + + Decodes four bytes from array source and writes the resulting + bytes (up to three of them) to destination. + + + Decodes four bytes from array source and writes the resulting + bytes (up to three of them) to destination. The source and + destination arrays can be manipulated anywhere along their length by + specifying srcOffset and destOffset. This method + does not check to make sure your arrays are large enough to accommodate + srcOffset + 4 for the source array or + destOffset + 3 for the destination array. This + method returns the actual number of bytes that were converted from the + Base64 encoding. + + the array to convert + the index where conversion begins + the array to hold the conversion + the index where output will be put + the number of decoded bytes converted + + + Low-level decoding ASCII characters from a byte array. + Low-level decoding ASCII characters from a byte array. + The Base64 encoded data + The offset of where to begin decoding + The length of characters to decode + decoded data + the input is not a valid Base64 sequence. + + + + Decodes data from Base64 notation. + Decodes data from Base64 notation. + the string to decode + the decoded data + + + Abstract authenticator which remembers prior authentications. + Abstract authenticator which remembers prior authentications. + + + Add a cached authentication for future use. + Add a cached authentication for future use. + the information we should remember. + + + Prompt for and request authentication from the end-user. + Prompt for and request authentication from the end-user. + + the authentication data; null if the user canceled the request + and does not want to continue. + + + + Authentication data to remember and reuse. + Authentication data to remember and reuse. + + + Create a new cached authentication. + Create a new cached authentication. + system this is for. + port number of the service. + username at the service. + password at the service. 
+ + + + Utilities for creating and working with Change-Id's, like the one used by + Gerrit Code Review. + + + Utilities for creating and working with Change-Id's, like the one used by + Gerrit Code Review. +

+ A Change-Id is a SHA-1 computed from the content of a commit, in a similar + fashion to how the commit id is computed. Unlike the commit id a Change-Id is + retained in the commit and subsequent revised commits in the footer of the + commit text. + + + +

Compute a Change-Id. + Compute a Change-Id. + The id of the tree that would be committed + parent id of previous commit or null + + the + NGit.PersonIdent + for the presumed author and time + + + the + NGit.PersonIdent + for the presumed committer and time + + The commit message + + the change id SHA1 string (without the 'I') or null if the + message is not complete enough + + System.IO.IOException +
+ + Find the right place to insert a Change-Id and return it. + + Find the right place to insert a Change-Id and return it. +

+ The Change-Id is inserted before the first footer line but after a Bug + line. + + + + a commit message with an inserted Change-Id line + + +

Find the right place to insert a Change-Id and return it. + + Find the right place to insert a Change-Id and return it. +

+ If no Change-Id is found the Change-Id is inserted before + the first footer line but after a Bug line. + If Change-Id is found and replaceExisting is set to false, + the message is unchanged. + If Change-Id is found and replaceExisting is set to true, + the Change-Id is replaced with + changeId + . + + + + + a commit message with an inserted Change-Id line + + +

Abstraction to support various file system operations not in Java. + Abstraction to support various file system operations not in Java. +
+ + The auto-detected implementation selected for this operating system and JRE. + + The auto-detected implementation selected for this operating system and JRE. + + + + Auto-detect the appropriate file system abstraction. + Auto-detect the appropriate file system abstraction. + detected file system abstraction + + + + Auto-detect the appropriate file system abstraction, taking into account + the presence of a Cygwin installation on the system. + + + Auto-detect the appropriate file system abstraction, taking into account + the presence of a Cygwin installation on the system. Using jgit in + combination with Cygwin requires a more elaborate (and possibly slower) + resolution of file system paths. + + +
    +
  • Boolean.TRUE to assume that Cygwin is used in + combination with jgit
  • +
  • Boolean.FALSE to assume that Cygwin is + not used with jgit
  • +
  • null to auto-detect whether a Cygwin + installation is present on the system and in this case assume + that Cygwin is used
  • +
+ Note: this parameter is only relevant on Windows. + + detected file system abstraction +
+ + Constructs a file system abstraction. + Constructs a file system abstraction. + + + Initialize this FS using another's current settings. + Initialize this FS using another's current settings. + the source FS to copy from. + + + a new instance of the same type of FS. + + + Does this operating system and JRE support the execute flag on files? + + true if this implementation can provide reasonably accurate + executable bit information; false otherwise. + + + + Is this file system case sensitive + true if this implementation is case sensitive + + + Determine if the file is executable (or not). + + Determine if the file is executable (or not). +

+ Not all platforms and JREs support executable flags on files. If the + feature is unsupported this method will always return false. + + abstract path to test. + true if the file is believed to be executable by the user. + + +

Set a file to be executable by the user. + + Set a file to be executable by the user. +

+ Not all platforms and JREs support executable flags on files. If the + feature is unsupported this method will always return false and no + changes will be made to the file specified. + + path to modify the executable status of. + true to enable execution; false to disable it. + true if the change succeeded; false otherwise. + + +

Resolve this file to its actual path name that the JRE can use. + + Resolve this file to its actual path name that the JRE can use. +

+ This method can be relatively expensive. Computing a translation may + require forking an external process per path name translated. Callers + should try to minimize the number of translations necessary by caching + the results. +

+ Not all platforms and JREs require path name translation. Currently only + Cygwin on Win32 require translation for Cygwin based paths. + + directory relative to which the path name is. + path name to translate. + + the translated path. new File(dir,name) if this + platform does not require path name translation. + + + +

Determine the user's home directory (location where preferences are). + + Determine the user's home directory (location where preferences are). +

+ This method can be expensive on the first invocation if path name + translation is required. Subsequent invocations return a cached result. +

+ Not all platforms and JREs require path name translation. Currently only + Cygwin on Win32 requires translation of the Cygwin HOME directory. + + the user's home directory; null if the user does not have one. + + +

Set the user's home directory location. + Set the user's home directory location. + + the location of the user's preferences; null if there is no + home directory for the current user. + + + + this + . + +
+ + Does this file system have problems with atomic renames? + true if the caller should retry a failed rename of a lock file. + + + Determine the user's home directory (location where preferences are). + Determine the user's home directory (location where preferences are). + the user's home directory; null if the user does not have one. + + + Searches the given path to see if it contains one of the given files. + + Searches the given path to see if it contains one of the given files. + Returns the first it finds. Returns null if not found or if path is null. + + List of paths to search separated by File.pathSeparator + Files to search for in the given path + the first match found, or null + + + Execute a command and return a single line of output as a String + Working directory for the command + as component array + + the one-line output of the command + + + the $prefix directory C Git would use. + + + the $prefix directory C Git would use. + + + Set the $prefix directory C Git uses. + Set the $prefix directory C Git uses. + the directory. Null if C Git is not installed. + + + this + + + + Initialize a ProcesssBuilder to run a command using the system shell. + Initialize a ProcesssBuilder to run a command using the system shell. + + command to execute. This string should originate from the + end-user, and thus is platform specific. + + + arguments to pass to command. These should be protected from + shell evaluation. + + + a partially completed process builder. Caller should finish + populating directory, environment, and then start the process. + + + + A more efficient List<Integer> using a primitive integer array. + A more efficient List<Integer> using a primitive integer array. + + + Create an empty list with a default capacity. + Create an empty list with a default capacity. + + + Create an empty list with the specified capacity. + Create an empty list with the specified capacity. + number of entries the list can initially hold. 
+ + + number of entries in this list + + + + index to read, must be in the range [0, + Size() + ). + + the number at the specified index + the index outside the valid range + + + + Empty this list + + + Add an entry to the end of the list. + Add an entry to the end of the list. + the number to add. + + + Assign an entry in the list. + Assign an entry in the list. + + index to set, must be in the range [0, + Size() + ). + + value to store at the position. + + + Pad the list with entries. + Pad the list with entries. + + index position to stop filling at. 0 inserts no filler. 1 + ensures the list has a size of 1, adding val if + the list is currently empty. + + value to insert into padded positions. + + + A more efficient List<Long> using a primitive long array. + A more efficient List<Long> using a primitive long array. + + + Create an empty list with a default capacity. + Create an empty list with a default capacity. + + + Create an empty list with the specified capacity. + Create an empty list with the specified capacity. + number of entries the list can initially hold. + + + number of entries in this list + + + + index to read, must be in the range [0, + Size() + ). + + the number at the specified index + the index outside the valid range + + + + Determine if an entry appears in this collection. + Determine if an entry appears in this collection. + the value to search for. + + true of + value + appears in this list. + + + + Empty this list + + + Add an entry to the end of the list. + Add an entry to the end of the list. + the number to add. + + + Assign an entry in the list. + Assign an entry in the list. + + index to set, must be in the range [0, + Size() + ). + + value to store at the position. + + + Pad the list with entries. + Pad the list with entries. + + index position to stop filling at. 0 inserts no filler. 1 + ensures the list has a size of 1, adding val if + the list is currently empty. + + value to insert into padded positions. 
+ + + Sort the list of longs according to their natural ordering. + Sort the list of longs according to their natural ordering. + + + A boxed integer that can be modified. + A boxed integer that can be modified. + + + Current value of this boxed value. + Current value of this boxed value. + + + Conversion utilities for network byte order handling. + Conversion utilities for network byte order handling. + + + Compare a 32 bit unsigned integer stored in a 32 bit signed integer. + + Compare a 32 bit unsigned integer stored in a 32 bit signed integer. +

+ This function performs an unsigned compare operation, even though Java + does not natively support unsigned integer values. Negative numbers are + treated as larger than positive ones. + + the first value to compare. + the second value to compare. + < 0 if a < b; 0 if a == b; > 0 if a > b. + + +

Convert sequence of 2 bytes (network byte order) into unsigned value. + Convert sequence of 2 bytes (network byte order) into unsigned value. + buffer to acquire the 2 bytes of data from. + + position within the buffer to begin reading from. This + position and the next byte after it (for a total of 2 bytes) + will be read. + + unsigned integer value that matches the 16 bits read. +
+ + Convert sequence of 4 bytes (network byte order) into signed value. + Convert sequence of 4 bytes (network byte order) into signed value. + buffer to acquire the 4 bytes of data from. + + position within the buffer to begin reading from. This + position and the next 3 bytes after it (for a total of 4 + bytes) will be read. + + signed integer value that matches the 32 bits read. + + + Convert sequence of 4 bytes (network byte order) into unsigned value. + Convert sequence of 4 bytes (network byte order) into unsigned value. + buffer to acquire the 4 bytes of data from. + + position within the buffer to begin reading from. This + position and the next 3 bytes after it (for a total of 4 + bytes) will be read. + + unsigned integer value that matches the 32 bits read. + + + Convert sequence of 8 bytes (network byte order) into unsigned value. + Convert sequence of 8 bytes (network byte order) into unsigned value. + buffer to acquire the 8 bytes of data from. + + position within the buffer to begin reading from. This + position and the next 7 bytes after it (for a total of 8 + bytes) will be read. + + unsigned integer value that matches the 64 bits read. + + + Write a 16 bit integer as a sequence of 2 bytes (network byte order). + Write a 16 bit integer as a sequence of 2 bytes (network byte order). + buffer to write the 2 bytes of data into. + + position within the buffer to begin writing to. This position + and the next byte after it (for a total of 2 bytes) will be + replaced. + + the value to write. + + + Write a 32 bit integer as a sequence of 4 bytes (network byte order). + Write a 32 bit integer as a sequence of 4 bytes (network byte order). + buffer to write the 4 bytes of data into. + + position within the buffer to begin writing to. This position + and the next 3 bytes after it (for a total of 4 bytes) will be + replaced. + + the value to write. + + + Write a 64 bit integer as a sequence of 8 bytes (network byte order). 
+ Write a 64 bit integer as a sequence of 8 bytes (network byte order).
+ buffer to write the 8 bytes of data into.
+
+ position within the buffer to begin writing to. This position
+ and the next 7 bytes after it (for a total of 8 bytes) will be
+ replaced.
+
+ the value to write.
+
+
+ Utility functions related to quoted string handling.
+ Utility functions related to quoted string handling.
+
+
+ Quoting style that obeys the rules Git applies to file names
+
+
+ Quoting style used by the Bourne shell.
+
+ Quoting style used by the Bourne shell.
+

+ Quotes are unconditionally inserted during + Quote(string) + . This + protects shell meta-characters like $ or ~ from + being recognized as special. + + + +

Bourne style, but permits ~user at the start of the string. + + Bourne style, but permits ~user at the start of the string. + +
+ + Quote an input string by the quoting rules. + + Quote an input string by the quoting rules. +

+ If the input string does not require any quoting, the same String + reference is returned to the caller. +

+ Otherwise a quoted string is returned, including the opening and closing + quotation marks at the start and end of the string. If the style does not + permit raw Unicode characters then the string will first be encoded in + UTF-8, with unprintable sequences possibly escaped by the rules. + + any non-null Unicode string. + a quoted string. See above for details. + + +

Clean a previously quoted input, decoding the result via UTF-8. + + Clean a previously quoted input, decoding the result via UTF-8. +

+ This method must match quote such that: +

+            a.equals(dequote(quote(a)));
+            
+ is true for any a. +
+ a Unicode string to remove quoting from. + the cleaned string. + Dequote(byte[], int, int) +
+ + Decode a previously quoted input, scanning a UTF-8 encoded buffer. + + Decode a previously quoted input, scanning a UTF-8 encoded buffer. +

+ This method must match quote such that: +

+            a.equals(dequote(Constants.encode(quote(a))));
+            
+ is true for any a. +

+ This method removes any opening/closing quotation marks added by + Quote(string) + . + + the input buffer to parse. + first position within in to scan. + one position past in in to scan. + the cleaned string. + + +

Quoting style used by the Bourne shell. + + Quoting style used by the Bourne shell. +

+ Quotes are unconditionally inserted during + Quote(string) + . This + protects shell meta-characters like $ or ~ from + being recognized as special. + + + +

Bourne style, but permits ~user at the start of the string. + + Bourne style, but permits ~user at the start of the string. + +
+ + Quoting style that obeys the rules Git applies to file names + + + A rough character sequence around a raw byte buffer. + + A rough character sequence around a raw byte buffer. +

+ Characters are assumed to be 8-bit US-ASCII. + + + +

A zero-length character sequence. + A zero-length character sequence. +
+ + Create a rough character sequence around the raw byte buffer. + Create a rough character sequence around the raw byte buffer. + buffer to scan. + starting position for the sequence. + ending position for the sequence. + + + + Utility class for character functions on raw bytes +

+ Characters are assumed to be 8-bit US-ASCII. +

+ + Utility class for character functions on raw bytes +

+ Characters are assumed to be 8-bit US-ASCII. + + + +

Determine if an 8-bit US-ASCII encoded character represents whitespace
+
+ the 8-bit US-ASCII encoded character
+ true if c represents a whitespace character in 8-bit US-ASCII
+ + + Returns the new end point for the byte array passed in after trimming any + trailing whitespace characters, as determined by the isWhitespace() + function. + + + Returns the new end point for the byte array passed in after trimming any + trailing whitespace characters, as determined by the isWhitespace() + function. start and end are assumed to be within the bounds of raw. + + the byte array containing the portion to trim whitespace for + the start of the section of bytes + the end of the section of bytes + the new end point + + + + Returns the new start point for the byte array passed in after trimming + any leading whitespace characters, as determined by the isWhitespace() + function. + + + Returns the new start point for the byte array passed in after trimming + any leading whitespace characters, as determined by the isWhitespace() + function. start and end are assumed to be within the bounds of raw. + + the byte array containing the portion to trim whitespace for + the start of the section of bytes + the end of the section of bytes + the new start point + + + Handy utility functions to parse raw object contents. + Handy utility functions to parse raw object contents. + + + UTF-8 charset constant. + UTF-8 charset constant. + + + Determine if b[ptr] matches src. + Determine if b[ptr] matches src. + the buffer to scan. + first position within b, this should match src[0]. + the buffer to test for equality with b. + ptr + src.length if b[ptr..src.length] == src; else -1. + + + Format a base 10 numeric into a temporary buffer. + + Format a base 10 numeric into a temporary buffer. +

+ Formatting is performed backwards. The method starts at offset + o-1 and ends at o-1-digits, where + digits is the number of positions necessary to store the + base 10 value. +

+ The argument and return values from this method make it easy to chain + writing, for example: +

+            final byte[] tmp = new byte[64];
+            int ptr = tmp.length;
+            tmp[--ptr] = '\n';
+            ptr = RawParseUtils.formatBase10(tmp, ptr, 32);
+            tmp[--ptr] = ' ';
+            ptr = RawParseUtils.formatBase10(tmp, ptr, 18);
+            tmp[--ptr] = 0;
+            final String str = new String(tmp, ptr, tmp.length - ptr);
+            
+
+ buffer to write into. + + one offset past the location where writing will begin; writing + proceeds towards lower index values. + + the value to store. + + the new offset value o. This is the position of + the last byte written. Additional writing should start at one + position earlier. + +
+ + Parse a base 10 numeric from a sequence of ASCII digits into an int. + + Parse a base 10 numeric from a sequence of ASCII digits into an int. +

+ Digit sequences can begin with an optional run of spaces before the + sequence, and may start with a '+' or a '-' to indicate sign position. + Any other characters will cause the method to stop and return the current + result to the caller. + + buffer to scan. + position within buffer to start parsing digits at. + + optional location to return the new ptr value through. If null + the ptr value will be discarded. + + + the value at this location; 0 if the location is not a valid + numeric. + + + +

Parse a base 10 numeric from a sequence of ASCII digits into a long. + + Parse a base 10 numeric from a sequence of ASCII digits into a long. +

+ Digit sequences can begin with an optional run of spaces before the + sequence, and may start with a '+' or a '-' to indicate sign position. + Any other characters will cause the method to stop and return the current + result to the caller. + + buffer to scan. + position within buffer to start parsing digits at. + + optional location to return the new ptr value through. If null + the ptr value will be discarded. + + + the value at this location; 0 if the location is not a valid + numeric. + + + +

Parse 4 character base 16 (hex) formatted string to unsigned integer. + + Parse 4 character base 16 (hex) formatted string to unsigned integer. +

+ The number is read in network byte order, that is, most significant + nybble first. + + + buffer to parse digits from; positions + [p, p+4) + will + be parsed. + + first position within the buffer to parse. + the integer value. + if the string is not hex formatted. + + + +

Parse 8 character base 16 (hex) formatted string to unsigned integer. + + Parse 8 character base 16 (hex) formatted string to unsigned integer. +

+ The number is read in network byte order, that is, most significant + nybble first. + + + buffer to parse digits from; positions + [p, p+8) + will + be parsed. + + first position within the buffer to parse. + the integer value. + if the string is not hex formatted. + + + +

Parse a single hex digit to its numeric value (0-15). + Parse a single hex digit to its numeric value (0-15). + hex character to parse. + numeric value, in the range 0-15. + if the input digit is not a valid hex digit. + +
+ + Parse a Git style timezone string. + + Parse a Git style timezone string. +

+ The sequence "-0315" will be parsed as the numeric value -195, as the + lower two positions count minutes, not 100ths of an hour. + + buffer to scan. + position within buffer to start parsing digits at. + the timezone at this location, expressed in minutes. + + +

Locate the first position after a given character. + Locate the first position after a given character. + buffer to scan. + position within buffer to start looking for chrA at. + character to find. + new position just after chrA. +
+ + Locate the first position after the next LF. + + Locate the first position after the next LF. +

+ This method stops on the first '\n' it finds. + + buffer to scan. + position within buffer to start looking for LF at. + new position just after the first LF found. + + +

Locate the first position after either the given character or LF. + + Locate the first position after either the given character or LF. +

+ This method stops on the first match it finds from either chrA or '\n'. + + buffer to scan. + position within buffer to start looking for chrA or LF at. + character to find. + new position just after the first chrA or LF to be found. + + +

Locate the first position before a given character. + Locate the first position before a given character. + buffer to scan. + position within buffer to start looking for chrA at. + character to find. + new position just before chrA, -1 for not found +
+ + Locate the first position before the previous LF. + + Locate the first position before the previous LF. +

+ This method stops on the first '\n' it finds. + + buffer to scan. + position within buffer to start looking for LF at. + new position just before the first LF found, -1 for not found + + +

Locate the previous position before either the given character or LF. + + Locate the previous position before either the given character or LF. +

+ This method stops on the first match it finds from either chrA or '\n'. + + buffer to scan. + position within buffer to start looking for chrA or LF at. + character to find. + + new position just before the first chrA or LF to be found, -1 for + not found + + + +

Index the region between [ptr, end) to find line starts. + + Index the region between [ptr, end) to find line starts. +

+ The returned list is 1 indexed. Index 0 contains + int.MinValue + to pad the list out. +

+ Using a 1 indexed list means that line numbers can be directly accessed + from the list, so list.get(1) (aka get line 1) returns + ptr. +

+ The last element (index map.size()-1) always contains + end. + + buffer to scan. + + position within the buffer corresponding to the first byte of + line 1. + + 1 past the end of the content within buf. + a line map indexing the start position of each line. + + +

Locate the "author " header line data. + Locate the "author " header line data. + buffer to scan. + + position in buffer to start the scan at. Most callers should + pass 0 to ensure the scan starts from the beginning of the + commit buffer and does not accidentally look at message body. + + + position just after the space in "author ", so the first + character of the author's name. If no author header can be + located -1 is returned. + +
+ + Locate the "committer " header line data. + Locate the "committer " header line data. + buffer to scan. + + position in buffer to start the scan at. Most callers should + pass 0 to ensure the scan starts from the beginning of the + commit buffer and does not accidentally look at message body. + + + position just after the space in "committer ", so the first + character of the committer's name. If no committer header can be + located -1 is returned. + + + + Locate the "tagger " header line data. + Locate the "tagger " header line data. + buffer to scan. + + position in buffer to start the scan at. Most callers should + pass 0 to ensure the scan starts from the beginning of the tag + buffer and does not accidentally look at message body. + + + position just after the space in "tagger ", so the first + character of the tagger's name. If no tagger header can be + located -1 is returned. + + + + Locate the "encoding " header line. + Locate the "encoding " header line. + buffer to scan. + + position in buffer to start the scan at. Most callers should + pass 0 to ensure the scan starts from the beginning of the + buffer and does not accidentally look at the message body. + + + position just after the space in "encoding ", so the first + character of the encoding's name. If no encoding header can be + located -1 is returned (and UTF-8 should be assumed). + + + + Parse the "encoding " header into a character set reference. + + Parse the "encoding " header into a character set reference. +

+ Locates the "encoding " header (if present) by first calling + Encoding(byte[], int) + and then returns the proper character set + to apply to this buffer to evaluate its contents as character data. +

+ If no encoding header is present, + NGit.Constants.CHARSET + is assumed. + + buffer to scan. + the Java character set representation. Never null. + + +

Parse a name string (e.g. + + Parse a name string (e.g. author, committer, tagger) into a PersonIdent. +

+ Leading spaces won't be trimmed from the string, i.e. will show up in the + parsed name afterwards. + + the string to parse a name from. + + the parsed identity or null in case the identity could not be + parsed. + + + +

Parse a name line (e.g. + + Parse a name line (e.g. author, committer, tagger) into a PersonIdent. +

+ When passing in a value for nameB callers should use the + return value of + Author(byte[], int) + or + Committer(byte[], int) + , as these methods provide the proper + position within the buffer. + + the buffer to parse character data from. + + first position of the identity information. This should be the + first position after the space which delimits the header field + name (e.g. "author" or "committer") from the rest of the + identity line. + + + the parsed identity or null in case the identity could not be + parsed. + + + +

Parse a name data (e.g. + + Parse a name data (e.g. as within a reflog) into a PersonIdent. +

+ When passing in a value for nameB callers should use the + return value of + Author(byte[], int) + or + Committer(byte[], int) + , as these methods provide the proper + position within the buffer. + + the buffer to parse character data from. + + first position of the identity information. This should be the + first position after the space which delimits the header field + name (e.g. "author" or "committer") from the rest of the + identity line. + + the parsed identity. Never null. + + +

Locate the end of a footer line key string. + + Locate the end of a footer line key string. +

+ If the region at + raw[ptr] + matches + ^[A-Za-z0-9-]+: + (e.g. + "Signed-off-by: A. U. Thor\n") then this method returns the position of + the first ':'. +

+ If the region at + raw[ptr] + does not match + ^[A-Za-z0-9-]+: + then this method returns -1. + + buffer to scan. + first position within raw to consider as a footer line key. + + position of the ':' which terminates the footer line key if this + is otherwise a valid footer line key; otherwise -1. + + + +

Decode a buffer under UTF-8, if possible. + + Decode a buffer under UTF-8, if possible. + If the byte stream cannot be decoded that way, the platform default is tried + and if that too fails, the fail-safe ISO-8859-1 encoding is tried. + + buffer to pull raw bytes from. + + a string representation of the range [start,end), + after decoding the region through the specified character set. + +
+ + Decode a buffer under UTF-8, if possible. + + Decode a buffer under UTF-8, if possible. + If the byte stream cannot be decoded that way, the platform default is + tried and if that too fails, the fail-safe ISO-8859-1 encoding is tried. + + buffer to pull raw bytes from. + start position in buffer + + one position past the last location within the buffer to take + data from. + + + a string representation of the range [start,end), + after decoding the region through the specified character set. + + + + Decode a buffer under the specified character set if possible. + + Decode a buffer under the specified character set if possible. + If the byte stream cannot be decoded that way, the platform default is tried + and if that too fails, the fail-safe ISO-8859-1 encoding is tried. + + character set to use when decoding the buffer. + buffer to pull raw bytes from. + + a string representation of the range [start,end), + after decoding the region through the specified character set. + + + + Decode a region of the buffer under the specified character set if possible. + + + Decode a region of the buffer under the specified character set if possible. + If the byte stream cannot be decoded that way, the platform default is tried + and if that too fails, the fail-safe ISO-8859-1 encoding is tried. + + character set to use when decoding the buffer. + buffer to pull raw bytes from. + first position within the buffer to take data from. + + one position past the last location within the buffer to take + data from. + + + a string representation of the range [start,end), + after decoding the region through the specified character set. + + + + + Decode a region of the buffer under the specified character set if + possible. + + + Decode a region of the buffer under the specified character set if + possible. + If the byte stream cannot be decoded that way, the platform default is + tried and if that too fails, an exception is thrown. + + character set to use when decoding the buffer. 
+ buffer to pull raw bytes from. + first position within the buffer to take data from. + + one position past the last location within the buffer to take + data from. + + + a string representation of the range [start,end), + after decoding the region through the specified character set. + + the input is not in any of the tested character sets. + + + + Decode a region of the buffer under the ISO-8859-1 encoding. + + Decode a region of the buffer under the ISO-8859-1 encoding. + Each byte is treated as a single character in the 8859-1 character + encoding, performing a raw binary->char conversion. + + buffer to pull raw bytes from. + first position within the buffer to take data from. + + one position past the last location within the buffer to take + data from. + + a string representation of the range [start,end). + + + + + + Locate the position of the commit message body. + Locate the position of the commit message body. + buffer to scan. + + position in buffer to start the scan at. Most callers should + pass 0 to ensure the scan starts from the beginning of the + commit buffer. + + position of the user's message buffer. + + + Locate the position of the tag message body. + Locate the position of the tag message body. + buffer to scan. + + position in buffer to start the scan at. Most callers should + pass 0 to ensure the scan starts from the beginning of the tag + buffer. + + position of the user's message buffer. + + + Locate the end of a paragraph. + + Locate the end of a paragraph. +

+ A paragraph is ended by two consecutive LF bytes. + + buffer to scan. + + position in buffer to start the scan at. Most callers will + want to pass the first position of the commit message (as + found by + CommitMessage(byte[], int) + . + + + position of the LF at the end of the paragraph; + b.length if no paragraph end could be located. + + + +

Searches text using only substring search. + + Searches text using only substring search. +

+ Instances are thread-safe. Multiple concurrent threads may perform matches on + different character sequences at the same time. + + + +

Construct a new substring pattern. + Construct a new substring pattern. + + text to locate. This should be a literal string, as no + meta-characters are supported by this implementation. The + string may not be the empty string. + +
+ + Match a character sequence against this pattern. + Match a character sequence against this pattern. + + the sequence to match. Must not be null but the length of the + sequence is permitted to be 0. + + + offset within rcs of the first occurrence of this + pattern; -1 if this pattern does not appear at any position of + rcs. + + + + Get the literal pattern string this instance searches for. + Get the literal pattern string this instance searches for. + the pattern string given to our constructor. + + + Builder to facilitate fast construction of an immutable RefList. + Builder to facilitate fast construction of an immutable RefList. + + + Create an empty list ready for items to be added. + Create an empty list ready for items to be added. + + + Create an empty list with at least the specified capacity. + Create an empty list with at least the specified capacity. + the new capacity. + + + number of items in this builder's internal collection. + + + Get the reference at a particular index. + Get the reference at a particular index. + + the index to obtain. Must be + 0 <= idx < size() + . + + the reference value, never null. + + + Remove an item at a specific index. + Remove an item at a specific index. + position to remove the item from. + + + Add the reference to the end of the array. + + Add the reference to the end of the array. +

+ References must be added in sort order, or the array must be sorted + after additions are complete using + Builder<T>.Sort() + . + + + + +

Add all items from a source array. + + Add all items from a source array. +

+ References must be added in sort order, or the array must be sorted + after additions are complete using + Builder<T>.Sort() + . + + the source array. + + position within + src + to start copying from. + + + number of items to copy from + src + . + + + +

Replace a single existing element. + Replace a single existing element. + index, must have already been added previously. + the new reference. +
+ + Sort the list's backing array in-place. + Sort the list's backing array in-place. + + + an unmodifiable list using this collection's backing array. + + + + Specialized Map to present a + RefDatabase + namespace. +

+ Although not declared as a + Sharpen.SortedMap<K, V> + , iterators from this + map's projections always return references in + NGit.RefComparator + ordering. + The map's internal representation is a sorted array of + NGit.Ref + objects, + which means lookup and replacement is O(log N), while insertion and removal + can be as expensive as O(N + log N) while the list expands or contracts. + Since this is not a general map implementation, all entries must be keyed by + the reference name. +

+ This class is really intended as a helper for + RefDatabase + , which + needs to perform a merge-join of three sorted + RefList<T> + s in order to + present the unified namespace of the packed-refs file, the loose refs/ + directory tree, and the resolved form of any symbolic references. +

+
+ + Prefix denoting the reference subspace this map contains. + + Prefix denoting the reference subspace this map contains. +

+ All reference names in this map must start with this prefix. If the + prefix is not the empty string, it must end with a '/'. + + + +

Immutable collection of the packed references at construction time. + Immutable collection of the packed references at construction time. +
+ + Immutable collection of the loose references at construction time. + + Immutable collection of the loose references at construction time. +

+ If an entry appears here and in + packed + , this entry must take + precedence, as its more current. Symbolic references in this collection + are typically unresolved, so they only tell us who their target is, but + not the current value of the target. + + + +

Immutable collection of resolved symbolic references. + + Immutable collection of resolved symbolic references. +

+ This collection contains only the symbolic references we were able to + resolve at map construction time. Other loose references must be read + from + loose + . Every entry in this list must be matched by an entry + in + loose + , otherwise it might be omitted by the map. + + + +

Construct an empty map with a small initial capacity. + Construct an empty map with a small initial capacity. +
+ + Construct a map to merge 3 collections together. + Construct a map to merge 3 collections together. + + prefix used to slice the lists down. Only references whose + names start with this prefix will appear to reside in the map. + Must not be null, use + "" + (the empty string) to select + all list items. + + + items from the packed reference list, this is the last list + searched. + + + items from the loose reference list, this list overrides + packed + if a name appears in both. + + + resolved symbolic references. This list overrides the prior + list + loose + , if an item appears in both. Items in this + list must also appear in + loose + . + + + + Miscellaneous string comparison utility methods. + Miscellaneous string comparison utility methods. + + + Convert the input to lowercase. + + Convert the input to lowercase. +

+ This method does not honor the JVM locale, but instead always behaves as + though it is in the US-ASCII locale. Only characters in the range 'A' + through 'Z' are converted. All other characters are left as-is, even if + they otherwise would have a lowercase character equivalent. + + the input character. + lowercase version of the input. + + +

Convert the input string to lower case, according to the "C" locale. + + Convert the input string to lower case, according to the "C" locale. +

+ This method does not honor the JVM locale, but instead always behaves as + though it is in the US-ASCII locale. Only characters in the range 'A' + through 'Z' are converted, all other characters are left as-is, even if + they otherwise would have a lowercase character equivalent. + + the input string. Must not be null. + + a copy of the input string, after converting characters in the + range 'A'..'Z' to 'a'..'z'. + + + +

Test if two strings are equal, ignoring case. + + Test if two strings are equal, ignoring case. +

+ This method does not honor the JVM locale, but instead always behaves as + though it is in the US-ASCII locale. + + first string to compare. + second string to compare. + true if a equals b + + +

Compare two strings, ignoring case. + + Compare two strings, ignoring case. +

+ This method does not honor the JVM locale, but instead always behaves as + though it is in the US-ASCII locale. + + first string to compare. + second string to compare. + + negative, zero or positive if a sorts before, is equal to, or + sorts after b. + + 2.0 + + +

Compare two strings, honoring case. + + Compare two strings, honoring case. +

+ This method does not honor the JVM locale, but instead always behaves as + though it is in the US-ASCII locale. + + first string to compare. + second string to compare. + + negative, zero or positive if a sorts before, is equal to, or + sorts after b. + + 2.0 + + +

Parse a string as a standard Git boolean value. + + Parse a string as a standard Git boolean value. See + ToBooleanOrNull(string) + . + + the string to parse. + + the boolean interpretation of + value + . + + + if + value + is not recognized as one of the standard + boolean names. + +
+ + Parse a string as a standard Git boolean value. + + Parse a string as a standard Git boolean value. +

+ The terms + yes + , + true + , + 1 + , + on + can all be + used to mean + true + . +

+ The terms + no + , + false + , + 0 + , + off + can all be + used to mean + false + . +

+ Comparisons ignore case, via + EqualsIgnoreCase(string, string) + . + + the string to parse. + + the boolean interpretation of + value + or null in case the + string does not represent a boolean value + + + +

Join a collection of Strings together using the specified separator. + Join a collection of Strings together using the specified separator. + Strings to join + used to join + a String with all the joined parts +
+ + + Join a collection of Strings together using the specified separator and a + lastSeparator which is used for joining the second last and the last + part. + + + Join a collection of Strings together using the specified separator and a + lastSeparator which is used for joining the second last and the last + part. + + Strings to join + separator used to join all but the two last elements + separator to use for joining the last two elements + a String with all the joined parts + + + Test if a string is empty or null. + Test if a string is empty or null. + the string to check + true if the string is null or empty + + + Interface to read values from the system. + + Interface to read values from the system. +

+ When writing unit tests, extending this interface with a custom class + permits to simulate an access to a system variable or property and + permits to control the user's global configuration. + + + + the live instance to read system properties. + + + the new instance to use when accessing properties. + + +

Gets the hostname of the local host. + + Gets the hostname of the local host. If no hostname can be found, the + hostname is set to the default value "localhost". + + the canonical hostname +
+ + system variable to read + value of the system variable + + + of the system property to read + value of the system property + + + a config with values not found directly in the returned config + + + the file system abstraction which will be necessary to perform + certain file system operations. + + the git configuration found in the user home + + + + a config with values not found directly in the returned + config. Null is a reasonable value here. + + + the file system abstraction which will be necessary to perform + certain file system operations. + + + the gitonfig configuration found in the system-wide "etc" + directory + + + + the current system time + + + TODO + the local time zone + + + system time zone, possibly mocked for testing + 1.2 + + + the locale to use + 1.2 + + + Returns a simple date format instance as specified by the given pattern. + + Returns a simple date format instance as specified by the given pattern. + + + the pattern as defined in + Sharpen.SimpleDateFormat.SimpleDateFormat(string) + + + the simple date format + 2.0 + + + Returns a date/time format instance for the given styles. + Returns a date/time format instance for the given styles. + + the date style as specified in + Sharpen.DateFormat.GetDateTimeInstance(int, int) + + + + the time style as specified in + Sharpen.DateFormat.GetDateTimeInstance(int, int) + + + the date format + 2.0 + + + true if we are running on a Windows. + + + true if we are running on Mac OS X + + + A fully buffered output stream. + + A fully buffered output stream. +

+ Subclasses determine the behavior when the in-memory buffer capacity has been + exceeded and additional bytes are still being received for output. + + + +

Default limit for in-core storage. + Default limit for in-core storage. +
+ + Chain of data, if we are still completely in-core; otherwise null. + Chain of data, if we are still completely in-core; otherwise null. + + + Maximum number of bytes we will permit storing in memory. + + Maximum number of bytes we will permit storing in memory. +

+ When this limit is reached the data will be shifted to a file on disk, + preventing the JVM heap from growing out of control. + + + +

+ If + inCoreLimit + has been reached, remainder goes here. + +
+ + Create a new empty temporary buffer. + Create a new empty temporary buffer. + + maximum number of bytes to store in memory before entering the + overflow output path. + + + + + + + + + + Dumps the entire buffer into the overflow stream, and flushes it. + Dumps the entire buffer into the overflow stream, and flushes it. + + the overflow stream cannot be started, or the buffer contents + cannot be written to it, or it failed to flush. + + + + Copy all bytes remaining on the input stream into this buffer. + Copy all bytes remaining on the input stream into this buffer. + the stream to read from, until EOF is reached. + + an error occurred reading from the input stream, or while + writing to a local temporary file. + + + + Obtain the length (in bytes) of the buffer. + + Obtain the length (in bytes) of the buffer. +

+ The length is only accurate after + Close() + has been invoked. + + total length of the buffer, in bytes. + + +

Convert this buffer's contents into a contiguous byte array. + + Convert this buffer's contents into a contiguous byte array. +

+ The buffer is only complete after + Close() + has been invoked. + + + the complete byte array; length matches + Length() + . + + an error occurred reading from a local temporary file + + the buffer cannot fit in memory + + +

Send this buffer to an output stream. + + Send this buffer to an output stream. +

+ This method may only be invoked after + Close() + has completed + normally, to ensure all data is completely transferred. + + stream to send this buffer's complete content to. + + if not null progress updates are sent here. Caller should + initialize the task and the number of work units to + Length() + /1024. + + + an error occurred reading from a temporary file on the local + system, or writing to the output stream. + + + +

Open an input stream to read from the buffered data. + + Open an input stream to read from the buffered data. +

+ This method may only be invoked after + Close() + has completed + normally, to ensure all data is completely transferred. + + + a stream to read from the buffer. The caller must close the + stream when it is no longer useful. + + an error occurred opening the temporary file. + + + +

Reset this buffer for reuse, purging all buffered content. + Reset this buffer for reuse, purging all buffered content. +
+ + Open the overflow output stream, so the remaining output can be stored. + Open the overflow output stream, so the remaining output can be stored. + + the output stream to receive the buffered content, followed by + the remaining output. + + the buffer cannot create the overflow stream. + + + + + + + + + + + + + Clear this buffer so it has no data, and cannot be used again. + Clear this buffer so it has no data, and cannot be used again. + + + A fully buffered output stream using local disk storage for large data. + + A fully buffered output stream using local disk storage for large data. +

+ Initially this output stream buffers to memory and is therefore similar + to ByteArrayOutputStream, but it shifts to using an on disk temporary + file if the output gets too large. +

+ The content of this buffered stream may be sent to another OutputStream + only after this stream has been properly closed by + TemporaryBuffer.Close() + . + + + +

Directory to store the temporary file under. + Directory to store the temporary file under. +
+ + Location of our temporary file if we are on disk; otherwise null. + + Location of our temporary file if we are on disk; otherwise null. +

+ If we exceeded the + TemporaryBuffer.inCoreLimit + we nulled out + TemporaryBuffer.blocks + and created this file instead. All output goes here through + TemporaryBuffer.overflow + . + + + +

Create a new temporary buffer. + Create a new temporary buffer. +
+ + Create a new temporary buffer, limiting memory usage. + Create a new temporary buffer, limiting memory usage. + + maximum number of bytes to store in memory. Storage beyond + this limit will use the local file. + + + + Create a new temporary buffer, limiting memory usage. + Create a new temporary buffer, limiting memory usage. + + if the buffer has to spill over into a temporary file, the + directory where the file should be saved. If null the + system default temporary directory (for example /tmp) will + be used instead. + + + + Create a new temporary buffer, limiting memory usage. + Create a new temporary buffer, limiting memory usage. + + if the buffer has to spill over into a temporary file, the + directory where the file should be saved. If null the + system default temporary directory (for example /tmp) will + be used instead. + + + maximum number of bytes to store in memory. Storage beyond + this limit will use the local file. + + + + + + + + + + + + + + + + A temporary buffer that will never exceed its in-memory limit. + + A temporary buffer that will never exceed its in-memory limit. +

+ If the in-memory limit is reached an IOException is thrown, rather than + attempting to spool to local disk. + + + +

Create a new heap buffer with a maximum storage limit. + Create a new heap buffer with a maximum storage limit. + + maximum number of bytes that can be stored in this buffer. + Storing beyond this many will cause an IOException to be + thrown during write. + +
+ + + + + + + + + + + + + + An OutputStream which always throws IllegalStateException during write. An OutputStream which always throws IllegalStateException during write. + + The canonical instance which always throws IllegalStateException. The canonical instance which always throws IllegalStateException. + + + + + + An input stream which canonicalizes EOLs bytes on the fly to '\n'. + + An input stream which canonicalizes EOLs bytes on the fly to '\n'. + Optionally, a binary check on the first 8000 bytes is performed + and in case of binary files, canonicalization is turned off + (for the complete file). + + + + Creates a new InputStream, wrapping the specified stream + raw input stream + whether binaries should be detected + 2.0 + + + + + + + + + + + + + + + Triggers an interrupt on the calling thread if it doesn't complete a block. + + + Triggers an interrupt on the calling thread if it doesn't complete a block. 

+ Classes can use this to trip an alarm interrupting the calling thread if it + doesn't complete a block within the specified timeout. Typical calling + pattern is: +

+            private InterruptTimer myTimer = ...;
+            void foo() {
+            try {
+            myTimer.begin(timeout);
+            // work
+            } finally {
+            myTimer.end();
+            }
+            }
+            
+

+ An InterruptTimer is not recursive. To implement recursive timers, + independent InterruptTimer instances are required. A single InterruptTimer + may be shared between objects which won't recursively call each other. +

+ Each InterruptTimer spawns one background thread to sleep the specified time + and interrupt the thread which called + Begin(int) + . It is up to the + caller to ensure that the operations within the work block between the + matched begin and end calls tests the interrupt flag (most IO operations do). +

+ To terminate the background thread, use + Terminate() + . If the + application fails to terminate the thread, it will (eventually) terminate + itself when the InterruptTimer instance is garbage collected. + + TimeoutInputStream + + +

Create a new timer with a default thread name. + Create a new timer with a default thread name. +
+ + Create a new timer to signal on interrupt on the caller. + + Create a new timer to signal on interrupt on the caller. +

+ The timer thread is created in the calling thread's ThreadGroup. + + name of the timer thread. + + +

Arm the interrupt timer before entering a blocking operation. + Arm the interrupt timer before entering a blocking operation. + + number of milliseconds before the interrupt should trigger. + Must be > 0. + +
+ + Disable the interrupt timer, as the operation is complete. + Disable the interrupt timer, as the operation is complete. + + + Shutdown the timer thread, and wait for it to terminate. + Shutdown the timer thread, and wait for it to terminate. + + + Combines messages from an OutputStream (hopefully in UTF-8) and a Writer. + + + Combines messages from an OutputStream (hopefully in UTF-8) and a Writer. +

+ This class is primarily meant for + NGit.Transport.BaseConnection + in contexts where a + standard error stream from a command execution, as well as messages from a + side-band channel, need to be combined together into a buffer to represent + the complete set of messages from a remote repository. +

+ Writes made to the writer are re-encoded as UTF-8 and interleaved into the + buffer that + GetRawStream() + also writes to. +

+ ToString() + returns all written data, after converting it to a String + under the assumption of UTF-8 encoding. +

+ Internally + NGit.Util.RawParseUtils.Decode(byte[]) + + is used by + toString() + tries to work out a reasonably correct character set for the raw data. + + + +

Create an empty writer. + Create an empty writer. +
+ + + + + + the underlying byte stream that character writes to this writer + drop into. Writes to this stream should be in UTF-8. + + + + + + + + + + string version of all buffered data. + + + Thread to copy from an input stream to an output stream. Thread to copy from an input stream to an output stream. + + Create a thread to copy data from an input stream to an output stream. Create a thread to copy data from an input stream to an output stream. + stream to copy from. The thread terminates when this stream reaches EOF. The thread closes this stream before it exits. + + stream to copy into. The destination stream is automatically closed when the thread terminates. + + + Request the thread to flush the output stream as soon as possible. 

+ This is an asynchronous request to the thread. The actual flush will + happen at some future point in time, when the thread wakes up to process + the request. + + + +

Request that the thread terminate, and wait for it. + + Request that the thread terminate, and wait for it. +

+ This method signals to the copy thread that it should stop as soon as + there is no more IO occurring. + + the calling thread was interrupted. + + +

Input stream that copies data read to another output stream. + + Input stream that copies data read to another output stream. + This stream is primarily useful with a + NGit.Util.TemporaryBuffer + , where any + data read or skipped by the caller is also duplicated into the temporary + buffer. Later the temporary buffer can then be used instead of the original + source stream. + During close this stream copies any remaining data from the source stream + into the destination stream. + +
+ + Initialize a tee input stream. + Initialize a tee input stream. + source stream to consume. + + destination to copy the source to as it is consumed. Typically + this is a + NGit.Util.TemporaryBuffer + . + + + + + + + + + + + + + + + + InputStream with a configurable timeout. + InputStream with a configurable timeout. + + + Wrap an input stream with a timeout on all read operations. + Wrap an input stream with a timeout on all read operations. + + base input stream (to read from). The stream must be + interruptible (most socket streams are). + + timer to manage the timeouts during reads. + + + number of milliseconds before aborting a read. + + + number of milliseconds before aborting a read. Must be > 0. + + + + + + + + + + + + + + + + OutputStream with a configurable timeout. + OutputStream with a configurable timeout. + + + Wrap an output stream with a timeout on all write operations. + Wrap an output stream with a timeout on all write operations. + + base input stream (to write to). The stream must be + interruptible (most socket streams are). + + timer to manage the timeouts during writes. + + + number of milliseconds before aborting a write. + + + number of milliseconds before aborting a write. Must be > 0. + + + + + + + + + + + + + + + + + + + An InputStream which reads from one or more InputStreams. + + An InputStream which reads from one or more InputStreams. +

+ This stream may enter into an EOF state, returning -1 from any of the read + methods, and then later successfully read additional bytes if a new + InputStream is added after reaching EOF. +

+ Currently this stream does not support the mark/reset APIs. If mark and later + reset functionality is needed the caller should wrap this stream with a + Sharpen.BufferedInputStream + . + + + +

Create an empty InputStream that is currently at EOF state. + Create an empty InputStream that is currently at EOF state. +
+ + Create an InputStream that is a union of the individual streams. + + Create an InputStream that is a union of the individual streams. +

+ As each stream reaches EOF, it will be automatically closed before bytes + from the next stream are read. + + streams to be pushed onto this stream. + + + + + +

Add the given InputStream onto the end of the stream queue. + + Add the given InputStream onto the end of the stream queue. +

+ When the stream reaches EOF it will be automatically closed. + + the stream to add; must not be null. + + +

Returns true if there are no more InputStreams in the stream queue. + + Returns true if there are no more InputStreams in the stream queue. +

+ If this method returns + true + then all read methods will signal EOF + by returning -1, until another InputStream has been pushed into the queue + with + Add(Sharpen.InputStream) + . + + true if there are no more streams to read from. + + + + + + + + + + + + + + + + + + + + +

Indicates a local repository does not exist. + Indicates a local repository does not exist. +
+ + Indicates a protocol error has occurred while fetching/pushing objects. + Indicates a protocol error has occurred while fetching/pushing objects. + + + + Constructs an TransportException with the specified detail message + prefixed with provided URI. + + + Constructs an TransportException with the specified detail message + prefixed with provided URI. + + URI used for transport + message + + + + Constructs an TransportException with the specified detail message + prefixed with provided URI. + + + Constructs an TransportException with the specified detail message + prefixed with provided URI. + + URI used for transport + message + root cause exception + + + Constructs an TransportException with the specified detail message. + Constructs an TransportException with the specified detail message. + message + + + Constructs an TransportException with the specified detail message. + Constructs an TransportException with the specified detail message. + message + root cause exception + + + Constructs an exception indicating a local repository does not exist. + Constructs an exception indicating a local repository does not exist. + description of the repository not found, usually file path. + + + + Constructs an exception indicating a local repository does not exist. + Constructs an exception indicating a local repository does not exist. + description of the repository not found, usually file path. + + why the repository does not exist. + + + Constructs an exception indicating a local repository does not exist. + Constructs an exception indicating a local repository does not exist. + description of the repository not found, usually file path. + + + + Constructs an exception indicating a local repository does not exist. + Constructs an exception indicating a local repository does not exist. + description of the repository not found, usually file path. + + why the repository does not exist. 
+ + + + This URI like construct used for referencing Git archives over the net, as + well as locally stored archives. + + + This URI like construct used for referencing Git archives over the net, as + well as locally stored archives. It is similar to RFC 2396 URI's, but also + support SCP and the malformed file://<path> syntax (as opposed to the correct + file:<path> syntax. + + + + + Part of a pattern which matches the scheme part (git, http, ...) of an + URI. + + + Part of a pattern which matches the scheme part (git, http, ...) of an + URI. Defines one capturing group containing the scheme without the + trailing colon and slashes + + + + Part of a pattern which matches the optional user/password part (e.g. + + Part of a pattern which matches the optional user/password part (e.g. + root:pwd@ in git://root:pwd@host.xyz/a.git) of URIs. Defines two + capturing groups: the first containing the user and the second containing + the password + + + + Part of a pattern which matches the host part of URIs. + + Part of a pattern which matches the host part of URIs. Defines one + capturing group containing the host name. + + + + Part of a pattern which matches the optional port part of URIs. + + Part of a pattern which matches the optional port part of URIs. Defines + one capturing group containing the port without the preceding colon. + + + + Part of a pattern which matches the ~username part (e.g. + + Part of a pattern which matches the ~username part (e.g. /~root in + git://host.xyz/~root/a.git) of URIs. Defines no capturing group. + + + + Part of a pattern which matches the optional drive letter in paths (e.g. + + + Part of a pattern which matches the optional drive letter in paths (e.g. + D: in file:///D:/a.txt). Defines no capturing group. + + + + Part of a pattern which matches a relative path. + + Part of a pattern which matches a relative path. Relative paths don't + start with slash or drive letters. Defines no capturing group. 
+ + + + Part of a pattern which matches a relative or absolute path. + + Part of a pattern which matches a relative or absolute path. Defines no + capturing group. + + + + + A pattern matching standard URI:
+ scheme "://" user_password? hostname? portnumber? path +
+
+ + A pattern matching the reference to a local file. + + A pattern matching the reference to a local file. This may be an absolute + path (maybe even containing windows drive-letters) or a relative path. + + + + + A pattern matching a URI for the scheme 'file' which has only ':/' as + separator between scheme and path. + + + A pattern matching a URI for the scheme 'file' which has only ':/' as + separator between scheme and path. Standard file URIs have '://' as + separator, but java.io.File.toURI() constructs those URIs. + + + + A pattern matching a SCP URI's of the form user@host:path/to/repo.git + + + A pattern matching a SCP URI's of the form user@host:/path/to/repo.git + + + + Parse and construct an + URIish + from a string + + + Sharpen.URISyntaxException + + + + + + Escape unprintable characters optionally URI-reserved characters + The Java String to encode (may contain any character) + true to escape URI reserved characters + encode any non-ASCII characters + a URI-encoded string + + + Construct a URIish from a standard URL. + Construct a URIish from a standard URL. + the source URL to convert from. + + + Create an empty, non-configured URI. + Create an empty, non-configured URI. + + + true if this URI references a repository on another system. + + + host name part or null + + + Return a new URI matching this one, but with a different host. + Return a new URI matching this one, but with a different host. + the new value for host. + a new URI with the updated value. + + + protocol name or null for local references + + + Return a new URI matching this one, but with a different scheme. + Return a new URI matching this one, but with a different scheme. + the new value for scheme. + a new URI with the updated value. + + + path name component + + + path name component + + + Return a new URI matching this one, but with a different path. + Return a new URI matching this one, but with a different path. + the new value for path. + a new URI with the updated value. 
+ + + Return a new URI matching this one, but with a different (raw) path. + Return a new URI matching this one, but with a different (raw) path. + the new value for path. + a new URI with the updated value. + Sharpen.URISyntaxException + + + user name requested for transfer or null + + + Return a new URI matching this one, but with a different user. + Return a new URI matching this one, but with a different user. + the new value for user. + a new URI with the updated value. + + + password requested for transfer or null + + + Return a new URI matching this one, but with a different password. + Return a new URI matching this one, but with a different password. + the new value for password. + a new URI with the updated value. + + + port number requested for transfer or -1 if not explicit + + + Return a new URI matching this one, but with a different port. + Return a new URI matching this one, but with a different port. + the new value for port. + a new URI with the updated value. + + + Obtain the string form of the URI, with the password included. + Obtain the string form of the URI, with the password included. + the URI, including its password field, if any. + + + the URI as an ASCII string. Password is not included. + + + + the URI including password, formatted with only ASCII characters + such that it will be valid for use over the network. + + + + Get the "humanish" part of the path. + + Get the "humanish" part of the path. Some examples of a 'humanish' part + for a full path: + + + + + + + + + + + + + + + + + + + + + + +
Path                  Humanish part
/path/to/repo.git     repo
/path/to/repo.git/    repo
/path/to/repo/.git    repo
/path/to/repo/        repo
/path//to             an empty string
+
+ + the "humanish" part of the path. May be an empty string. Never + null + . + + + if it's impossible to determine a humanish part, or path is + null + or empty + + GetPath() +
+ + + Encapsulates the result of a + MergeCommand + . + + + + the object the head points at after the merge + + the common base which was used to produce a content-merge. May + be null if the merge-result was produced without + computing a common base + + all the commits which have been merged together + the status the merge resulted in + + the used + NGit.Merge.MergeStrategy + + + merge results as returned by + NGit.Merge.ResolveMerger.GetMergeResults() + + + 2.0 + + + the object the head points at after the merge + + the common base which was used to produce a content-merge. May + be null if the merge-result was produced without + computing a common base + + all the commits which have been merged together + the status the merge resulted in + + the used + NGit.Merge.MergeStrategy + + + merge results as returned by + NGit.Merge.ResolveMerger.GetMergeResults() + + + a user friendly description of the merge result + + + the object the head points at after the merge + + the common base which was used to produce a content-merge. May + be null if the merge-result was produced without + computing a common base + + all the commits which have been merged together + the status the merge resulted in + + the used + NGit.Merge.MergeStrategy + + + merge results as returned by + NGit.Merge.ResolveMerger.GetMergeResults() + + + + list of paths causing this merge to fail as returned by + NGit.Merge.ResolveMerger.GetFailingPaths() + + + a user friendly description of the merge result + + + the object the head points at after the merge + + + the status the merge resulted in + + + all the commits which have been merged together + + + + base the common base which was used to produce a content-merge. + May be null if the merge-result was produced without + computing a common base + + + + the conflicts to set + + + + the conflicts to set + + + + + + + + Returns information about the conflicts which occurred during a + MergeCommand + . 
The returned value maps the path of a conflicting + file to a two-dimensional int-array of line-numbers telling where in the + file conflict markers for which merged commit can be found. +

+ If the returned value contains a mapping "path"->[x][y]=z then this means +

    +
  • the file with path "path" contains conflicts
  • +
  • if y < "number of merged commits": for conflict number x in this file + the chunk which was copied from commit number y starts on line number z. + All numberings and line numbers start with 0.
  • +
  • if y == "number of merged commits": the first non-conflicting line + after conflict number x starts at line number z
  • +
+

+ Example code how to parse this data: +

 MergeResult m=...;
+            Map<String, int[][]> allConflicts = m.getConflicts();
+            for (String path : allConflicts.keySet()) {
+            int[][] c = allConflicts.get(path);
+            System.out.println("Conflicts in file " + path);
+            for (int i = 0; i < c.length; ++i) {
+            System.out.println("  Conflict #" + i);
+            for (int j = 0; j < (c[i].length) - 1; ++j) {
+            if (c[i][j] >= 0)
+            System.out.println("    Chunk for "
+            + m.getMergedCommits()[j] + " starts on line #"
+            + c[i][j]);
+            }
+            }
+            }
+
+ the conflicts or null if no conflict occurred +
+ + + Returns a list of paths causing this merge to fail as returned by + NGit.Merge.ResolveMerger.GetFailingPaths() + + + + the list of paths causing this merge to fail or null + if no failure occurred + + + + whether the status indicates a successful result + + + + Simple Map<long,Object> helper for + PackParser + . + + + + Number of entries currently in the map. + Number of entries currently in the map. + + + + Next + LongMap<V>.size + to trigger a + LongMap<V>.Grow() + . + + + + Input/Output utilities + + + Read an entire local file into memory as a byte array. + Read an entire local file into memory as a byte array. + location of the file to read. + complete contents of the requested local file. + the file does not exist. + the file exists, but its contents cannot be read. + + + + Read at most limit bytes from the local file into memory as a byte array. + + Read at most limit bytes from the local file into memory as a byte array. + + location of the file to read. + + maximum number of bytes to read, if the file is larger than + only the first limit number of bytes are returned + + + complete contents of the requested local file. If the contents + exceeds the limit, then only the limit is returned. + + the file does not exist. + the file exists, but its contents cannot be read. + + + + Read an entire local file into memory as a byte array. + Read an entire local file into memory as a byte array. + location of the file to read. + + maximum number of bytes to read, if the file is larger than + this limit an IOException is thrown. + + complete contents of the requested local file. + the file does not exist. + the file exists, but its contents cannot be read. + + + + Read an entire input stream into memory as a ByteBuffer. + + Read an entire input stream into memory as a ByteBuffer. + Note: The stream is read to its end and is not usable after calling this + method. The caller is responsible for closing the stream. + + input stream to be read. 
+ + a hint on the approximate number of bytes contained in the + stream, used to allocate temporary buffers more efficiently + + + complete contents of the input stream. The ByteBuffer always has + a writable backing array, with + position() == 0 + and + limit() + equal to the actual length read. Callers may rely + on obtaining the underlying array for efficient data access. If + sizeHint + was too large, the array may be over-allocated, + resulting in + limit() < array().length + . + + there was an error reading from the stream. + + + + Read the entire byte array into memory, or throw an exception. + Read the entire byte array into memory, or throw an exception. + input stream to read the data from. + buffer that must be fully populated, [off, off+len). + position within the buffer to start writing to. + number of bytes that must be read. + the stream ended before dst was fully populated. + + there was an error reading from the stream. + + + + Read the entire byte array into memory, unless input is shorter + input stream to read the data from. + buffer that must be fully populated, [off, off+len). + position within the buffer to start writing to. + number of bytes in buffer or stream, whichever is shortest + there was an error reading from the stream. + + + + Skip an entire region of an input stream. + + Skip an entire region of an input stream. +

+ The input stream's position is moved forward by the number of requested + bytes, discarding them from the input. This method does not return until + the exact number of bytes requested has been skipped. + + the stream to skip bytes from. + total number of bytes to be discarded. Must be >= 0. + + the stream ended before the requested number of bytes were + skipped. + + there was an error reading from the stream. + + + +

Divides the given string into lines. + Divides the given string into lines. + the string to read + the string divided into lines + 2.0 +
+ + A remembered remote repository, including URLs and RefSpecs. + + A remembered remote repository, including URLs and RefSpecs. +

+ A remote configuration remembers one or more URLs for a frequently accessed + remote repository as well as zero or more fetch and push specifications + describing how refs should be transferred between this repository and the + remote repository. + + + +

+ Default value for + UploadPack() + if not specified. + +
+ + + Default value for + ReceivePack() + if not specified. + + + + + Parse all remote blocks in an existing configuration file, looking for + remotes configuration. + + + Parse all remote blocks in an existing configuration file, looking for + remotes configuration. + + + the existing configuration to get the remote settings from. + The configuration must already be loaded into memory. + + + all remotes configurations existing in provided repository + configuration. Returned configurations are ordered + lexicographically by names. + + one of the URIs within the remote's configuration is invalid. + + + + Parse a remote block from an existing configuration file. + + Parse a remote block from an existing configuration file. +

+ This constructor succeeds even if the requested remote is not defined + within the supplied configuration file. If that occurs then there will be + no URIs and no ref specifications known to the new instance. + + + the existing configuration to get the remote settings from. + The configuration must already be loaded into memory. + + subsection key indicating the name of this remote. + one of the URIs within the remote's configuration is invalid. + + + +

Update this remote's definition within the configuration. + Update this remote's definition within the configuration. + the configuration file to store ourselves into. +
 + + Add a new URI to the end of the list of URIs. Add a new URI to the end of the list of URIs. the new URI to add to this remote. true if the URI was added; false if it already exists. + + Remove a URI from the list of URIs. Remove a URI from the list of URIs. the URI to remove from this remote. true if the URI was removed; false if it was not found. + + Add a new push-only URI to the end of the list of URIs. Add a new push-only URI to the end of the list of URIs. the new URI to add to this remote. true if the URI was added; false if it already exists. + + Remove a push-only URI from the list of URIs. Remove a push-only URI from the list of URIs. the URI to remove from this remote. true if the URI was removed; false if it was not found. + + Add a new fetch RefSpec to this remote. Add a new fetch RefSpec to this remote. the new specification to add. true if the specification was added; false if it already exists. + + Remove a fetch RefSpec from this remote. Remove a fetch RefSpec from this remote. the specification to remove. true if the specification existed and was removed. + + Add a new push RefSpec to this remote. Add a new push RefSpec to this remote. the new specification to add. true if the specification was added; false if it already exists. + + Remove a push RefSpec from this remote. Remove a push RefSpec from this remote. the specification to remove. true if the specification existed and was removed. + + Get the local name this remote configuration is recognized as. Get the local name this remote configuration is recognized as. name assigned by the user to this configuration block. Set the local name this remote configuration is recognized as. Set the local name this remote configuration is recognized as. the new name of this remote. + + Get all configured URIs under this remote. Get all configured URIs under this remote. the set of URIs known to this remote. 
+ + + Get all configured push-only URIs under this remote. + Get all configured push-only URIs under this remote. + the set of URIs known to this remote. + + + Remembered specifications for fetching from a repository. + Remembered specifications for fetching from a repository. + set of specs used by default when fetching. + Override existing fetch specifications with new ones. + Override existing fetch specifications with new ones. + + list of fetch specifications to set. List is copied, it can be + modified after this call. + + + + Override existing push specifications with new ones. + Override existing push specifications with new ones. + + list of push specifications to set. List is copied, it can be + modified after this call. + + Remembered specifications for pushing to a repository. + Remembered specifications for pushing to a repository. + set of specs used by default when pushing. + + + Override for the location of 'git-upload-pack' on the remote system. + + Override for the location of 'git-upload-pack' on the remote system. +

+ This value is only useful for an SSH style connection, where Git is + asking the remote system to execute a program that provides the necessary + network protocol. + + + location of 'git-upload-pack' on the remote system. If no + location has been configured the default of 'git-upload-pack' is + returned instead. + + + +

Override for the location of 'git-receive-pack' on the remote system. + + Override for the location of 'git-receive-pack' on the remote system. +

+ This value is only useful for an SSH style connection, where Git is + asking the remote system to execute a program that provides the necessary + network protocol. + + + location of 'git-receive-pack' on the remote system. If no + location has been configured the default of 'git-receive-pack' is + returned instead. + + + +

Get the description of how annotated tags should be treated during fetch. + + Get the description of how annotated tags should be treated during fetch. + + option indicating the behavior of annotated tags in fetch. + Set the description of how annotated tags should be treated on fetch. + Set the description of how annotated tags should be treated on fetch. + method to use when handling annotated tags. +
+ + + true if pushing to the remote automatically deletes remote refs + which don't exist on the source side. + + Set the mirror flag to automatically delete remote refs. + Set the mirror flag to automatically delete remote refs. + true to automatically delete remote refs during push. + + + timeout (in seconds) before aborting an IO operation. + Set the timeout before willing to abort an IO call. + Set the timeout before willing to abort an IO call. + + number of seconds to wait (with no data transfer occurring) + before aborting an IO read or write operation with this + remote. A timeout of 0 will block indefinitely. + + + + Describes how refs in one repository copy into another repository. + + Describes how refs in one repository copy into another repository. +

+ A ref specification provides matching support and limited rules to rewrite a + reference in one repository to another reference in another repository. + + + +

 + Suffix for wildcard ref spec component, that indicates matching all refs + with the specified prefix. + + Suffix for wildcard ref spec component, that indicates matching all refs + with the specified prefix. + 
+ + Check whether provided string is a wildcard ref spec component. + Check whether provided string is a wildcard ref spec component. + ref spec component - string to test. Can be null. + true if provided string is a wildcard ref spec component. + + + Does this specification ask for forced updated (rewind/reset)? + + + Is this specification actually a wildcard match? + + + Name of the ref(s) we would copy from. + Name of the ref(s) we would copy from. + + + Name of the ref(s) we would copy into. + Name of the ref(s) we would copy into. + + + Construct an empty RefSpec. + + Construct an empty RefSpec. +

+ A newly created empty RefSpec is not suitable for use in most + applications, as at least one field must be set to match a source name. + + + +

Parse a ref specification for use during transport operations. + + Parse a ref specification for use during transport operations. +

+ Specifications are typically one of the following forms: +

    +
  • refs/head/master
  • refs/head/master:refs/remotes/origin/master
  • refs/head/*:refs/remotes/origin/*
  • +refs/head/master
  • +refs/head/master:refs/remotes/origin/master
  • +refs/head/*:refs/remotes/origin/*
  • :refs/head/master
+
+ string describing the specification. + the specification is invalid. +
+ + Check if this specification wants to forcefully update the destination. + Check if this specification wants to forcefully update the destination. + true if this specification asks for updates without merge tests. + + + Create a new RefSpec with a different force update setting. + Create a new RefSpec with a different force update setting. + new value for force update in the returned instance. + a new RefSpec with force update as specified. + + + Check if this specification is actually a wildcard pattern. + + Check if this specification is actually a wildcard pattern. +

+ If this is a wildcard pattern then the source and destination names + returned by + GetSource() + and + GetDestination() + will not + be actual ref names, but instead will be patterns. + + true if this specification could match more than one ref. + + +

Get the source ref description. + + Get the source ref description. +

+ During a fetch this is the name of the ref on the remote repository we + are fetching from. During a push this is the name of the ref on the local + repository we are pushing out from. + + name (or wildcard pattern) to match the source ref. + + +

Create a new RefSpec with a different source name setting. + Create a new RefSpec with a different source name setting. + new value for source in the returned instance. + a new RefSpec with source as specified. + + There is already a destination configured, and the wildcard + status of the existing destination disagrees with the + wildcard status of the new source. + +
+ + Get the destination ref description. + + Get the destination ref description. +

+ During a fetch this is the local tracking branch that will be updated + with the new ObjectId after fetching is complete. During a push this is + the remote ref that will be updated by the remote's receive-pack process. +

+ If null during a fetch no tracking branch should be updated and the + ObjectId should be stored transiently in order to prepare a merge. +

+ If null during a push, use + GetSource() + instead. + + name (or wildcard) pattern to match the destination ref. + + +

Create a new RefSpec with a different destination name setting. + Create a new RefSpec with a different destination name setting. + new value for destination in the returned instance. + a new RefSpec with destination as specified. + + There is already a source configured, and the wildcard status + of the existing source disagrees with the wildcard status of + the new destination. + +
+ + Create a new RefSpec with a different source/destination name setting. + Create a new RefSpec with a different source/destination name setting. + new value for source in the returned instance. + new value for destination in the returned instance. + a new RefSpec with destination as specified. + + The wildcard status of the new source disagrees with the + wildcard status of the new destination. + + + + Does this specification's source description match the ref name? + ref name that should be tested. + true if the names match; false otherwise. + + + Does this specification's source description match the ref? + ref whose name should be tested. + true if the names match; false otherwise. + + + Does this specification's destination description match the ref name? + ref name that should be tested. + true if the names match; false otherwise. + + + Does this specification's destination description match the ref? + ref whose name should be tested. + true if the names match; false otherwise. + + + Expand this specification to exactly match a ref name. + + Expand this specification to exactly match a ref name. +

+ Callers must first verify the passed ref name matches this specification, + otherwise expansion results may be unpredictable. + + + a ref name that matched our source specification. Could be a + wildcard also. + + + a new specification expanded from provided ref name. Result + specification is wildcard if and only if provided ref name is + wildcard. + + + +

Expand this specification to exactly match a ref. + + Expand this specification to exactly match a ref. +

+ Callers must first verify the passed ref matches this specification, + otherwise expansion results may be unpredictable. + + + a ref that matched our source specification. Could be a + wildcard also. + + + a new specification expanded from provided ref name. Result + specification is wildcard if and only if provided ref name is + wildcard. + + + +

Expand this specification to exactly match a ref name. + + Expand this specification to exactly match a ref name. +

+ Callers must first verify the passed ref name matches this specification, + otherwise expansion results may be unpredictable. + + + a ref name that matched our destination specification. Could + be a wildcard also. + + + a new specification expanded from provided ref name. Result + specification is wildcard if and only if provided ref name is + wildcard. + + + +

Expand this specification to exactly match a ref. + + Expand this specification to exactly match a ref. +

+ Callers must first verify the passed ref matches this specification, + otherwise expansion results may be unpredictable. + + a ref that matched our destination specification. + + a new specification expanded from provided ref name. Result + specification is wildcard if and only if provided ref name is + wildcard. + + + +

Specification of annotated tag behavior during fetch. + Specification of annotated tag behavior during fetch. +
+ + Automatically follow tags if we fetch the thing they point at. + + Automatically follow tags if we fetch the thing they point at. +

 + This is the default behavior and tries to balance the benefit of having + an annotated tag against the cost of possibly downloading objects that are only on + branches we care nothing about. Annotated tags are fetched only if we can + prove that we already have (or will have when the fetch completes) the + object the annotated tag peels (dereferences) to. + + + 

Never fetch tags, even if we have the thing it points at. + + Never fetch tags, even if we have the thing it points at. +

+ This option must be requested by the user and always avoids fetching + annotated tags. It is most useful if the location you are fetching from + publishes annotated tags, but you are not interested in the tags and only + want their branches. + + + +

Always fetch tags, even if we do not have the thing it points at. + + Always fetch tags, even if we do not have the thing it points at. +

+ Unlike + AUTO_FOLLOW + the tag is always obtained. This may cause + hundreds of megabytes of objects to be fetched if the receiving + repository does not yet have the necessary dependencies. + + + +

Get the command line/configuration file text for this value. + Get the command line/configuration file text for this value. + text that appears in the configuration file to activate this. +
+ + Convert a command line/configuration file text into a value instance. + Convert a command line/configuration file text into a value instance. + the configuration file text value. + the option that matches the passed parameter. + + + Base helper class for implementing operations connections. + Base helper class for implementing operations connections. + BasePackConnection + BaseFetchConnection + + + Represent connection for operation on a remote repository. + + Represent connection for operation on a remote repository. +

+ Currently all operations on remote repository (fetch and push) provide + information about remote refs. Every connection is able to be closed and + should be closed - this is a connection client responsibility. + + Transport + + +

+ Get the complete map of refs advertised as available for fetching or + pushing. + + + Get the complete map of refs advertised as available for fetching or + pushing. + + + available/advertised refs: map of refname to ref. Never null. Not + modifiable. The collection can be empty if the remote side has no + refs (it is an empty/newly created repository). + +
+ + + Get the complete list of refs advertised as available for fetching or + pushing. + + + Get the complete list of refs advertised as available for fetching or + pushing. +

+ The returned refs may appear in any order. If the caller needs these to + be sorted, they should be copied into a new array or List and then sorted + by the caller as necessary. + + + available/advertised refs. Never null. Not modifiable. The + collection can be empty if the remote side has no refs (it is an + empty/newly created repository). + + + +

Get a single advertised ref by name. + + Get a single advertised ref by name. +

+ The name supplied should be valid ref name. To get a peeled value for a + ref (aka refs/tags/v1.0^{}) use the base name (without + the ^{} suffix) and look at the peeled object id. + + name of the ref to obtain. + the requested ref; null if the remote did not advertise this ref. + + +

Close any resources used by this connection. + + Close any resources used by this connection. +

+ If the remote repository is contacted by a network socket this method + must close that network socket, disconnecting the two peers. If the + remote repository is actually local (same system) this method must close + any open file handles used to read the "remote" repository. +

+ If additional messages were produced by the remote peer, these should + still be retained in the connection instance for + GetMessages() + . + + + +

Get the additional messages, if any, returned by the remote process. + + Get the additional messages, if any, returned by the remote process. +

+ These messages are most likely informational or error messages, sent by + the remote peer, to help the end-user correct any problems that may have + prevented the operation from completing successfully. Application UIs + should try to show these in an appropriate context. +

+ The message buffer is available after + Close() + has been called. + Prior to closing the connection, the message buffer may be empty. + + + the messages returned by the remote, most likely terminated by a + newline (LF) character. The empty string is returned if the + remote produced no additional messages. + + + +

Denote the list of refs available on the remote repository. + + Denote the list of refs available on the remote repository. +

+ Implementors should invoke this method once they have obtained the refs + that are available from the remote repository. + + + the complete list of refs the remote has to offer. This map + will be wrapped in an unmodifiable way to protect it, but it + does not get copied. + + + +

Helper method for ensuring one-operation per connection. + + Helper method for ensuring one-operation per connection. Check whether + operation was already marked as started, and mark it as started. + + if operation was already marked as started. + +
+ + Get the writer that buffers messages from the remote side. + Get the writer that buffers messages from the remote side. + writer to store messages from the remote. + + + Set the writer that buffers messages from the remote side. + Set the writer that buffers messages from the remote side. + + the writer that messages will be delivered to. The writer's + toString() + method should be overridden to return the + complete contents. + + + + Lists known refs from the remote and copies objects of selected refs. + + Lists known refs from the remote and copies objects of selected refs. +

+ A fetch connection typically connects to the git-upload-pack + service running where the remote repository is stored. This provides a + one-way object transfer service to copy objects from the remote repository + into this local repository. +

+ Instances of a FetchConnection must be created by a + Transport + that + implements a specific object transfer protocol that both sides of the + connection understand. +

+ FetchConnection instances are not thread safe and may be accessed by only one + thread at a time. + + Transport + + +

Fetch objects we don't have but that are reachable from advertised refs. + + + Fetch objects we don't have but that are reachable from advertised refs. +

+ Only one call per connection is allowed. Subsequent calls will result in + NGit.Errors.TransportException + . +

 + Implementations are free to use network connections as necessary to + efficiently (for both client and server) transfer objects from the remote + repository into this repository. When possible implementations should + avoid replacing/overwriting/duplicating an object already available in + the local destination repository. Locally available objects and packs + should always be preferred over remotely available objects and packs. + Transport.IsFetchThin() + should be honored if applicable. + + progress monitor to inform the end-user about the amount of + work completed, or to indicate cancellation. Implementations + should poll the monitor at regular intervals to look for + cancellation requests from the user. + + + one or more refs advertised by this connection that the caller + wants to store locally. + + + additional objects known to exist in the destination + repository, especially if they aren't yet reachable by the ref + database. Connections should take this set as an addition to + what is reachable through all Refs, not in place of it. + + + objects could not be copied due to a network failure, + protocol error, or error on remote side, or connection was + already used for fetch. + + + 

+ Did the last + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + get tags? +

+ Some Git aware transports are able to implicitly grab an annotated tag if + TagOpt.AUTO_FOLLOW + or + TagOpt.FETCH_TAGS + was selected and + the object the tag peels to (references) was transferred as part of the + last + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + call. If it is + possible for such tags to have been included in the transfer this method + returns true, allowing the caller to attempt tag discovery. +

+ By returning only true/false (and not the actual list of tags obtained) + the transport itself does not need to be aware of whether or not tags + were included in the transfer. +

+ + true if the last fetch call implicitly included tag objects; + false if tags were not implicitly obtained. + +
+ + + Did the last + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + validate + graph? +

+ Some transports walk the object graph on the client side, with the client + looking for what objects it is missing and requesting them individually + from the remote peer. By virtue of completing the fetch call the client + implicitly tested the object connectivity, as every object in the graph + was either already local or was requested successfully from the peer. In + such transports this method returns true. +

 + Some transports assume the remote peer knows the Git object graph and is + able to supply a fully connected graph to the client (although it may + only be transferring the parts the client does not yet have). It is faster + to assume such remote peers are well behaved and send the correct + response to the client. In such transports this method returns false. 

+ + true if the last fetch had to perform a connectivity check on the + client side in order to succeed; false if the last fetch assumed + the remote peer supplied a complete graph. + +
+ + Set the lock message used when holding a pack out of garbage collection. + + + Set the lock message used when holding a pack out of garbage collection. +

+ Callers that set a lock message must ensure they call + GetPackLocks() + after + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + , even if an exception + was thrown, and release the locks that are held. + + message to use when holding a pack in place. + + +

+ All locks created by the last + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + call. + + + collection (possibly empty) of locks created by the last call to + fetch. The caller must release these after refs are updated in + order to safely permit garbage collection. + +
+ + Base helper class for fetch connection implementations. + + Base helper class for fetch connection implementations. Provides some common + typical structures and methods used during fetch connection. +

+ Implementors of fetch over pack-based protocols should consider using + BasePackFetchConnection + instead. + + + + + + +

+ Default implementation of + FetchConnection.DidFetchIncludeTags() + + - + returning false. + +
+ + + Implementation of + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + without checking for multiple fetch. + + + as in + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + + + as in + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + + + as in + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + + + as in + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + , but + implementation doesn't have to care about multiple + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>, System.Collections.Generic.ICollection<E>) + + calls, as it + is checked in this class. + + + + Read Git style pkt-line formatting from an input stream. + + Read Git style pkt-line formatting from an input stream. +

+ This class is not thread safe and may issue multiple reads to the underlying + stream for each method call made. +

+ This class performs no buffering on its own. This makes it suitable to + interleave reads performed by this class with reads performed directly + against the underlying InputStream. + + + +

+ Magic return from + ReadString() + when a flush packet is found. + +
+ + Create a new packet line reader. + Create a new packet line reader. + the input stream to consume. + + + + + + Read a single UTF-8 encoded string packet from the input stream. + + Read a single UTF-8 encoded string packet from the input stream. +

+ If the string ends with an LF, it will be removed before returning the + value to the caller. If this automatic trimming behavior is not desired, + use + ReadStringRaw() + instead. + + + the string. + END + if the string was the magic flush + packet. + + the stream cannot be read. + + +

Read a single UTF-8 encoded string packet from the input stream. + + Read a single UTF-8 encoded string packet from the input stream. +

+ Unlike + ReadString() + a trailing LF will be retained. + + + the string. + END + if the string was the magic flush + packet. + + the stream cannot be read. + + + + + +

Unmultiplexes the data portion of a side-band channel. + + Unmultiplexes the data portion of a side-band channel. +

+ Reading from this input stream obtains data from channel 1, which is + typically the bulk data stream. +

+ Channel 2 is transparently unpacked and "scraped" to update a progress + monitor. The scraping is performed behind the scenes as part of any of the + read methods offered by this stream. +

+ Channel 3 results in an exception being thrown, as the remote side has issued + an unrecoverable error. + + SideBandOutputStream + + + + + + + + + + + + + + + + + + + + +

Multiplexes data and progress messages. + + Multiplexes data and progress messages. +

+ This stream is buffered at packet sizes, so the caller doesn't need to wrap + it in yet another buffered stream. + + 2.0 + + +

Channel used for pack data. + Channel used for pack data. +
+ + Channel used for progress messages. + Channel used for progress messages. + + + Channel used for error messages. + Channel used for error messages. + + + Default buffer size for a small amount of data. + Default buffer size for a small amount of data. + + + Maximum buffer size for a single packet of sideband data. + Maximum buffer size for a single packet of sideband data. + + + + Number of bytes in + buffer + that are valid data. +

+ Initialized to + HDR_SIZE + if there is no application data in the + buffer, as the packet header always appears at the start of the buffer. +

+
+ + Create a new stream to write side band packets. + Create a new stream to write side band packets. + + channel number to prefix all packets with, so the remote side + can demultiplex the stream and get back the original data. + Must be in the range [0, 255]. + + + maximum size of a data packet within the stream. The remote + side needs to agree to the packet size to prevent buffer + overflows. Must be in the range [HDR_SIZE + 1, MAX_BUF). + + + stream that the packets are written onto. This stream should + be attached to a SideBandInputStream on the remote side. + + + + + + + + + + + + + + + + + + + Write Git style pkt-line formatting to an output stream. + + Write Git style pkt-line formatting to an output stream. +

+ This class is not thread safe and may issue multiple writes to the underlying + stream for each method call made. +

+ This class performs no buffering on its own. This makes it suitable to + interleave writes performed by this class with writes performed directly + against the underlying OutputStream. + + + +

Create a new packet line writer. + Create a new packet line writer. + stream. +
+ + + Set the flush behavior during + End() + . + + + if true, a flush-pkt written during + End() + also + flushes the underlying stream. + + + + Write a UTF-8 encoded string as a single length-delimited packet. + Write a UTF-8 encoded string as a single length-delimited packet. + string to write. + + the packet could not be written, the stream is corrupted as + the packet may have been only partially written. + + + + Write a binary packet to the stream. + Write a binary packet to the stream. + + the packet to write; the length of the packet is equal to the + size of the byte array. + + + the packet could not be written, the stream is corrupted as + the packet may have been only partially written. + + + + Write a packet end marker, sometimes referred to as a flush command. + + Write a packet end marker, sometimes referred to as a flush command. +

+ Technically this is a magical packet type which can be detected + separately from an empty string or an empty packet. +

+ Implicitly performs a flush on the underlying OutputStream to ensure the + peer will receive all data written thus far. + + + the end marker could not be written, the stream is corrupted + as the end marker may have been only partially written. + + + +

Flush the underlying OutputStream. + + Flush the underlying OutputStream. +

+ Performs a flush on the underlying OutputStream to ensure the peer will + receive all data written thus far. + + the underlying stream failed to flush. + + +

Connects two Git repositories together and copies objects between them. + + Connects two Git repositories together and copies objects between them. +

+ A transport can be used for either fetching (copying objects into the + caller's repository from the remote repository) or pushing (copying objects + into the remote repository from the caller's repository). Each transport + implementation is responsible for the details associated with establishing + the network connection(s) necessary for the copy, as well as actually + shuffling data back and forth. +

+ Transport instances and the connections they create are not thread-safe. + Callers must ensure a transport is accessed by only one thread at a time. + + + +

+ Default setting for + fetchThin + option. + +
+ + + Default setting for + pushThin + option. + + + + Register a TransportProtocol instance for use during open. + + Register a TransportProtocol instance for use during open. +

+ Protocol definitions are held by WeakReference, allowing them to be + garbage collected when the calling application drops all strongly held + references to the TransportProtocol. Therefore applications should use a + singleton pattern as described in + TransportProtocol + 's class + documentation to ensure their protocol does not get disabled by garbage + collection earlier than expected. +

+ The new protocol is registered in front of all earlier protocols, giving + it higher priority than the built-in protocol definitions. + + the protocol definition. Must not be null. + + +

Unregister a TransportProtocol instance. + + Unregister a TransportProtocol instance. +

+ Unregistering a protocol usually isn't necessary, as protocols are held + by weak references and will automatically clear when they are garbage + collected by the JVM. Matching is handled by reference equality, so the + exact reference given to + Register(TransportProtocol) + must be + used. + + the exact object previously given to register. + + +

Obtain a copy of the registered protocols. + Obtain a copy of the registered protocols. + an immutable copy of the currently registered protocols. +
+ + Open a new transport instance to connect two repositories. + + Open a new transport instance to connect two repositories. +

+ This method assumes + Operation.FETCH + . + + existing local repository. + + location of the remote repository - may be URI or remote + configuration name. + + + the new transport instance. Never null. In case of multiple URIs + in remote configuration, only the first is chosen. + + + the location is not a remote defined in the configuration + file and is not a well-formed URL. + + the protocol specified is not supported. + + the transport cannot open this URI. + + + +

Open a new transport instance to connect two repositories. + Open a new transport instance to connect two repositories. + existing local repository. + + location of the remote repository - may be URI or remote + configuration name. + + + planned use of the returned Transport; the URI may differ + based on the type of connection desired. + + + the new transport instance. Never null. In case of multiple URIs + in remote configuration, only the first is chosen. + + + the location is not a remote defined in the configuration + file and is not a well-formed URL. + + the protocol specified is not supported. + + the transport cannot open this URI. + +
+ + Open new transport instances to connect two repositories. + + Open new transport instances to connect two repositories. +

+ This method assumes + Operation.FETCH + . + + existing local repository. + + location of the remote repository - may be URI or remote + configuration name. + + + the list of new transport instances for every URI in remote + configuration. + + + the location is not a remote defined in the configuration + file and is not a well-formed URL. + + the protocol specified is not supported. + + the transport cannot open this URI. + + + +

Open new transport instances to connect two repositories. + Open new transport instances to connect two repositories. + existing local repository. + + location of the remote repository - may be URI or remote + configuration name. + + + planned use of the returned Transport; the URI may differ + based on the type of connection desired. + + + the list of new transport instances for every URI in remote + configuration. + + + the location is not a remote defined in the configuration + file and is not a well-formed URL. + + the protocol specified is not supported. + + the transport cannot open this URI. + +
+ + Open a new transport instance to connect two repositories. + + Open a new transport instance to connect two repositories. +

+ This method assumes + Operation.FETCH + . + + existing local repository. + + configuration describing how to connect to the remote + repository. + + + the new transport instance. Never null. In case of multiple URIs + in remote configuration, only the first is chosen. + + the protocol specified is not supported. + + the transport cannot open this URI. + + + if provided remote configuration doesn't have any URI + associated. + + + +

Open a new transport instance to connect two repositories. + Open a new transport instance to connect two repositories. + existing local repository. + + configuration describing how to connect to the remote + repository. + + + planned use of the returned Transport; the URI may differ + based on the type of connection desired. + + + the new transport instance. Never null. In case of multiple URIs + in remote configuration, only the first is chosen. + + the protocol specified is not supported. + + the transport cannot open this URI. + + + if provided remote configuration doesn't have any URI + associated. + +
+ + Open new transport instances to connect two repositories. + + Open new transport instances to connect two repositories. +

+ This method assumes + Operation.FETCH + . + + existing local repository. + + configuration describing how to connect to the remote + repository. + + + the list of new transport instances for every URI in remote + configuration. + + the protocol specified is not supported. + + the transport cannot open this URI. + + + +

Open new transport instances to connect two repositories. + Open new transport instances to connect two repositories. + existing local repository. + + configuration describing how to connect to the remote + repository. + + + planned use of the returned Transport; the URI may differ + based on the type of connection desired. + + + the list of new transport instances for every URI in remote + configuration. + + the protocol specified is not supported. + + the transport cannot open this URI. + +
+ + Open a new transport instance to connect two repositories. + Open a new transport instance to connect two repositories. + existing local repository. + location of the remote repository. + the new transport instance. Never null. + the protocol specified is not supported. + + the transport cannot open this URI. + + + + Open a new transport instance to connect two repositories. + Open a new transport instance to connect two repositories. + existing local repository. + location of the remote repository. + + name of the remote, if the remote as configured in + local + ; otherwise null. + + the new transport instance. Never null. + the protocol specified is not supported. + + the transport cannot open this URI. + + + + Open a new transport with no local repository. + Open a new transport with no local repository. + + new Transport instance + System.NotSupportedException + NGit.Errors.TransportException + + + + Convert push remote refs update specification from + RefSpec + form + to + RemoteRefUpdate + . Conversion expands wildcards by matching + source part to local refs. expectedOldObjectId in RemoteRefUpdate is + always set as null. Tracking branch is configured if RefSpec destination + matches source of any fetch ref spec for this transport remote + configuration. + + local database. + collection of RefSpec to convert. + + fetch specifications used for finding localtracking refs. May + be null or empty collection. + + + collection of set up + RemoteRefUpdate + . + + + when problem occurred during conversion or specification set + up: most probably, missing objects or refs. + + + + Specification for fetch or push operations, to fetch or push all tags. + + Specification for fetch or push operations, to fetch or push all tags. + Acts as --tags. + + + + Specification for push operation, to push all refs under refs/heads. + + Specification for push operation, to push all refs under refs/heads. Acts + as --all. 
+ + + + The repository this transport fetches into, or pushes out of. + The repository this transport fetches into, or pushes out of. + + + The URI used to create this transport. + The URI used to create this transport. + + + Name of the upload pack program, if it must be executed. + Name of the upload pack program, if it must be executed. + + + Specifications to apply during fetch. + Specifications to apply during fetch. + + + + How + Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>) + should handle tags. +

+ We default to + TagOpt.NO_TAGS + so as to avoid fetching annotated + tags during one-shot fetches used for later merges. This prevents + dragging down tags from repositories that we do not have established + tracking branches for. If we do not track the source repository, we most + likely do not care about any tags it publishes. +

+
+ + Should fetch request thin-pack if remote repository can produce it. + Should fetch request thin-pack if remote repository can produce it. + + + Name of the receive pack program, if it must be executed. + Name of the receive pack program, if it must be executed. + + + Specifications to apply during push. + Specifications to apply during push. + + + Should push produce thin-pack when sending objects to remote repository. + + Should push produce thin-pack when sending objects to remote repository. + + + + Should push just check for operation result, not really push. + Should push just check for operation result, not really push. + + + Should an incoming (fetch) transfer validate objects? + + + Should refs no longer on the source be pruned from the destination? + + + Timeout in seconds to wait before aborting an IO read or write. + Timeout in seconds to wait before aborting an IO read or write. + + + Pack configuration used by this transport to make pack file. + Pack configuration used by this transport to make pack file. + + + Assists with authentication the connection. + Assists with authentication the connection. + + + Create a new transport instance. + Create a new transport instance. + + the repository this instance will fetch into, or push out of. + This must be the repository passed to + Open(NGit.Repository, URIish) + . + + + the URI used to access the remote repository. This must be the + URI passed to + Open(NGit.Repository, URIish) + . + + + + Create a minimal transport instance not tied to a single repository. + Create a minimal transport instance not tied to a single repository. + + + + Get the URI this transport connects to. + + Get the URI this transport connects to. +

+ Each transport instance connects to at most one URI at any point in time. + + the URI describing the location of the remote repository. + + +

Get the name of the remote executable providing upload-pack service. + Get the name of the remote executable providing upload-pack service. + typically "git-upload-pack". +
+ + Set the name of the remote executable providing upload-pack services. + Set the name of the remote executable providing upload-pack services. + name of the executable. + + + Get the description of how annotated tags should be treated during fetch. + + Get the description of how annotated tags should be treated during fetch. + + option indicating the behavior of annotated tags in fetch. + + + Set the description of how annotated tags should be treated on fetch. + Set the description of how annotated tags should be treated on fetch. + method to use when handling annotated tags. + + + + Default setting is: + DEFAULT_FETCH_THIN + + + true if fetch should request thin-pack when possible; false + otherwise + + PackTransport + + + Set the thin-pack preference for fetch operation. + + Set the thin-pack preference for fetch operation. Default setting is: + DEFAULT_FETCH_THIN + + + true when fetch should request thin-pack when possible; false + when it shouldn't + + PackTransport + + + + true if fetch will verify received objects are formatted + correctly. Validating objects requires more CPU time on the + client side of the connection. + + + + + true to enable checking received objects; false to assume all + received objects are valid. + + + + + Default setting is: + RemoteConfig.DEFAULT_RECEIVE_PACK + + + remote executable providing receive-pack service for pack + transports. + + PackTransport + + + Set remote executable providing receive-pack service for pack transports. + + + Set remote executable providing receive-pack service for pack transports. + Default setting is: + RemoteConfig.DEFAULT_RECEIVE_PACK + + remote executable, if null or empty default one is set; + + + + + Default setting is: + #DEFAULT_PUSH_THIN + + true if push should produce thin-pack in pack transports + PackTransport + + + Set thin-pack preference for push operation. + + Set thin-pack preference for push operation. 
Default setting is: + #DEFAULT_PUSH_THIN + + + true when push should produce thin-pack in pack transports; + false when it shouldn't + + PackTransport + + + + true if destination refs should be removed if they no longer + exist at the source repository. + + + + Set whether or not to remove refs which no longer exist in the source. + + Set whether or not to remove refs which no longer exist in the source. +

+ If true, refs at the destination repository (local for fetch, remote for + push) are deleted if they no longer exist on the source side (remote for + fetch, local for push). +

+ False by default, as this may cause data to become unreachable, and + eventually be deleted on the next GC. + + true to remove refs that no longer exist. + + +

Apply provided remote configuration on this transport. + Apply provided remote configuration on this transport. + configuration to apply on this transport. +
+ + + true if push operation should just check for possible result and + not really update remote refs, false otherwise - when push should + act normally. + + + + Set dry run option for push operation. + Set dry run option for push operation. + + true if push operation should just check for possible result + and not really update remote refs, false otherwise - when push + should act normally. + + + + timeout (in seconds) before aborting an IO operation. + + + Set the timeout before willing to abort an IO call. + Set the timeout before willing to abort an IO call. + + number of seconds to wait (with no data transfer occurring) + before aborting an IO read or write operation with this + remote. + + + + Get the configuration used by the pack generator to make packs. + + Get the configuration used by the pack generator to make packs. + If + SetPackConfig(NGit.Storage.Pack.PackConfig) + + was previously given null a new + PackConfig is created on demand by this method using the source + repository's settings. + + the pack configuration. Never null. + + + Set the configuration used by the pack generator. + Set the configuration used by the pack generator. + + configuration controlling packing parameters. If null the + source repository's settings will be used. + + + + A credentials provider to assist with authentication connections.. + A credentials provider to assist with authentication connections.. + the credentials provider, or null if there is none + + + + The configured credentials provider. + The configured credentials provider. + + the credentials provider, or null if no credentials provider is + associated with this transport. + + + + Fetch objects and refs from the remote repository to the local one. + + Fetch objects and refs from the remote repository to the local one. +

+ This is a utility function providing standard fetch behavior. Local + tracking refs associated with the remote repository are automatically + updated if this transport was created from a + RemoteConfig + with + fetch RefSpecs defined. + + + progress monitor to inform the user about our processing + activity. Must not be null. Use + NGit.NullProgressMonitor + if + progress updates are not interesting or necessary. + + + specification of refs to fetch locally. May be null or the + empty collection to use the specifications from the + RemoteConfig. Source for each RefSpec can't be null. + + information describing the tracking refs updated. + + this transport implementation does not support fetching + objects. + + + the remote connection could not be established or object + copying (if necessary) failed or update specification was + incorrect. + + + +

Push objects and refs from the local repository to the remote one. + + Push objects and refs from the local repository to the remote one. +

+ This is a utility function providing standard push behavior. It updates + remote refs and send there necessary objects according to remote ref + update specification. After successful remote ref update, associated + locally stored tracking branch is updated if set up accordingly. Detailed + operation result is provided after execution. +

+ For setting up remote ref update specification from ref spec, see helper + method + FindRemoteRefUpdatesFor(System.Collections.Generic.ICollection<E>) + + , predefined refspecs + ( + REFSPEC_TAGS + , + REFSPEC_PUSH_ALL + ) or consider using + directly + RemoteRefUpdate + for more possibilities. +

+ When + IsDryRun() + is true, result of this operation is just + estimation of real operation result, no real action is performed. + + RemoteRefUpdate + + progress monitor to inform the user about our processing + activity. Must not be null. Use + NGit.NullProgressMonitor + if + progress updates are not interesting or necessary. + + + specification of refs to push. May be null or the empty + collection to use the specifications from the RemoteConfig + converted by + FindRemoteRefUpdatesFor(System.Collections.Generic.ICollection<E>) + + . No + more than 1 RemoteRefUpdate with the same remoteName is + allowed. These objects are modified during this call. + + + information about results of remote refs updates, tracking refs + updates and refs advertised by remote repository. + + + this transport implementation does not support pushing + objects. + + + the remote connection could not be established or object + copying (if necessary) failed at I/O or protocol level or + update specification was incorrect. + + + +

+ Convert push remote refs update specification from + RefSpec + form + to + RemoteRefUpdate + . Conversion expands wildcards by matching + source part to local refs. expectedOldObjectId in RemoteRefUpdate is + always set as null. Tracking branch is configured if RefSpec destination + matches source of any fetch ref spec for this transport remote + configuration. +

+ Conversion is performed for context of this transport (database, fetch + specifications). +

+ collection of RefSpec to convert. + + collection of set up + RemoteRefUpdate + . + + + when problem occurred during conversion or specification set + up: most probably, missing objects or refs. + +
+ + Begins a new connection for fetching from the remote repository. + Begins a new connection for fetching from the remote repository. + a fresh connection to fetch from the remote repository. + the implementation does not support fetching. + + the remote connection could not be established. + + + + Begins a new connection for pushing into the remote repository. + Begins a new connection for pushing into the remote repository. + a fresh connection to push into the remote repository. + the implementation does not support pushing. + + the remote connection could not be established + + + + Close any resources used by this transport. + + Close any resources used by this transport. +

+ If the remote repository is contacted by a network socket this method + must close that network socket, disconnecting the two peers. If the + remote repository is actually local (same system) this method must close + any open file handles used to read the "remote" repository. + + + +

Type of operation a Transport is being opened for. + Type of operation a Transport is being opened for. +
+ + Represent request and status of a remote ref update. + + Represent request and status of a remote ref update. Specification is + provided by client, while status is handled by + PushProcess + class, + being read-only for client. +

+ Client can create instances of this class directly, based on user + specification and advertised refs ( + Connection + or through + Transport + helper methods. Apply this specification on remote + repository using + Transport.Push(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>) + + method. + + + +

Construct remote ref update request by providing an update specification. + + + Construct remote ref update request by providing an update specification. + Object is created with default + Status.NOT_ATTEMPTED + status and no + message. + + local repository to push from. + + source revision - any string resolvable by + NGit.Repository.Resolve(string) + . This resolves to the new + object that the caller want remote ref to be after update. Use + null or + NGit.ObjectId.ZeroId() + string for delete request. + + + full name of a remote ref to update, e.g. "refs/heads/master" + (no wildcard, no short name). + + + true when caller want remote ref to be updated regardless + whether it is fast-forward update (old object is ancestor of + new object). + + + optional full name of a local stored tracking branch, to + update after push, e.g. "refs/remotes/zawir/dirty" (no + wildcard, no short name); null if no local tracking branch + should be updated. + + + optional object id that caller is expecting, requiring to be + advertised by remote side before update; update will take + place ONLY if remote side advertise exactly this expected id; + null if caller doesn't care what object id remote side + advertise. Use + NGit.ObjectId.ZeroId() + when expecting no + remote ref with this name. + + + when I/O error occurred during creating + TrackingRefUpdate + for local tracking branch or srcRef + can't be resolved to any object. + + if some required parameter was null +
+ + Construct remote ref update request by providing an update specification. + + + Construct remote ref update request by providing an update specification. + Object is created with default + Status.NOT_ATTEMPTED + status and no + message. + + local repository to push from. + source revision. Use null to delete. + + full name of a remote ref to update, e.g. "refs/heads/master" + (no wildcard, no short name). + + + true when caller want remote ref to be updated regardless + whether it is fast-forward update (old object is ancestor of + new object). + + + optional full name of a local stored tracking branch, to + update after push, e.g. "refs/remotes/zawir/dirty" (no + wildcard, no short name); null if no local tracking branch + should be updated. + + + optional object id that caller is expecting, requiring to be + advertised by remote side before update; update will take + place ONLY if remote side advertise exactly this expected id; + null if caller doesn't care what object id remote side + advertise. Use + NGit.ObjectId.ZeroId() + when expecting no + remote ref with this name. + + + when I/O error occurred during creating + TrackingRefUpdate + for local tracking branch or srcRef + can't be resolved to any object. + + if some required parameter was null + + + Construct remote ref update request by providing an update specification. + + + Construct remote ref update request by providing an update specification. + Object is created with default + Status.NOT_ATTEMPTED + status and no + message. + + local repository to push from. + + source revision to label srcId with. If null srcId.name() will + be used instead. + + + The new object that the caller wants remote ref to be after + update. Use null or + NGit.ObjectId.ZeroId() + for delete + request. + + + full name of a remote ref to update, e.g. "refs/heads/master" + (no wildcard, no short name). 
+ + + true when caller want remote ref to be updated regardless + whether it is fast-forward update (old object is ancestor of + new object). + + + optional full name of a local stored tracking branch, to + update after push, e.g. "refs/remotes/zawir/dirty" (no + wildcard, no short name); null if no local tracking branch + should be updated. + + + optional object id that caller is expecting, requiring to be + advertised by remote side before update; update will take + place ONLY if remote side advertise exactly this expected id; + null if caller doesn't care what object id remote side + advertise. Use + NGit.ObjectId.ZeroId() + when expecting no + remote ref with this name. + + + when I/O error occurred during creating + TrackingRefUpdate + for local tracking branch or srcRef + can't be resolved to any object. + + if some required parameter was null + + + + Create a new instance of this object basing on existing instance for + configuration. + + + Create a new instance of this object basing on existing instance for + configuration. State (like + GetMessage() + , + GetStatus() + ) + of base object is not shared. Expected old object id is set up from + scratch, as this constructor may be used for 2-stage push: first one + being dry run, second one being actual push. + + configuration base. + new expected object id value. + + when I/O error occurred during creating + TrackingRefUpdate + for local tracking branch or srcRef + of base object no longer can be resolved to any object. + + + + + expectedOldObjectId required to be advertised by remote side, as + set in constructor; may be null. + + + + + true if some object is required to be advertised by remote side, + as set in constructor; false otherwise. + + + + newObjectId for remote ref, as set in constructor. + + + true if this update is deleting update; false otherwise. + + + name of remote ref to update, as set in constructor. + + + local tracking branch update if localName was set in constructor. 
+ + + + source revision as specified by user (in constructor), could be + any string parseable by + NGit.Repository.Resolve(string) + ; can + be null if specified that way in constructor - this stands for + delete request. + + + + + true if user specified a local tracking branch for remote update; + false otherwise. + + + + + true if this update is forced regardless of old remote ref + object; false otherwise. + + + + status of remote ref update operation. + + + Check whether update was fast-forward. + + Check whether update was fast-forward. Note that this result is + meaningful only after successful update (when status is + Status.OK + ). + + true if update was fast-forward; false otherwise. + + + + message describing reasons of status when needed/possible; may be + null. + + + + Update locally stored tracking branch with the new object. + Update locally stored tracking branch with the new object. + walker used for checking update properties. + when I/O error occurred during update + + + Represent current status of a remote ref update. + Represent current status of a remote ref update. + + + Final status after a successful fetch from a remote repository. + Final status after a successful fetch from a remote repository. + Transport.Fetch(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>) + + + + Class holding result of operation on remote repository. + + Class holding result of operation on remote repository. This includes refs + advertised by remote repo and local tracking refs updates. + + + + Get the URI this result came from. + + Get the URI this result came from. +

+ Each transport instance connects to at most one URI at any point in time. + + the URI describing the location of the remote repository. + + +

Get the complete list of refs advertised by the remote. + + Get the complete list of refs advertised by the remote. +

+ The returned refs may appear in any order. If the caller needs these to + be sorted, they should be copied into a new array or List and then sorted + by the caller as necessary. + + + available/advertised refs. Never null. Not modifiable. The + collection can be empty if the remote side has no refs (it is an + empty/newly created repository). + + + +

Get a single advertised ref by name. + + Get a single advertised ref by name. +

+ The name supplied should be valid ref name. To get a peeled value for a + ref (aka refs/tags/v1.0^{}) use the base name (without + the ^{} suffix) and look at the peeled object id. + + name of the ref to obtain. + the requested ref; null if the remote did not advertise this ref. + + +

Get the status of all local tracking refs that were updated. + Get the status of all local tracking refs that were updated. + + unmodifiable collection of local updates. Never null. Empty if + there were no local tracking refs updated. + +
+ + Get the status for a specific local tracking ref update. + Get the status for a specific local tracking ref update. + name of the local ref (e.g. "refs/remotes/origin/master"). + + + status of the local ref; null if this local ref was not touched + during this operation. + + + + Get the additional messages, if any, returned by the remote process. + + Get the additional messages, if any, returned by the remote process. +

+ These messages are most likely informational or error messages, sent by + the remote peer, to help the end-user correct any problems that may have + prevented the operation from completing successfully. Application UIs + should try to show these in an appropriate context. + + + the messages returned by the remote, most likely terminated by a + newline (LF) character. The empty string is returned if the + remote produced no additional messages. + + + +

Result of push operation to the remote repository. + + Result of push operation to the remote repository. Holding information of + OperationResult + and remote refs updates status. + + Transport.Push(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>) + +
+ + Get status of remote refs updates. + + Get status of remote refs updates. Together with + OperationResult.GetAdvertisedRefs() + + it provides full description/status of each + ref update. +

+ Returned collection is not sorted in any order. + + collection of remote refs updates + + +

Get status of specific remote ref update by remote ref name. + + Get status of specific remote ref update by remote ref name. Together + with + OperationResult.GetAdvertisedRef(string) + + it provides full description/status + of this ref update. + + remote ref name + status of remote ref update +
+ + Lists known refs from the remote and sends objects to the remote. + + Lists known refs from the remote and sends objects to the remote. +

+ A push connection typically connects to the git-receive-pack + service running where the remote repository is stored. This provides a + one-way object transfer service to copy objects from the local repository + into the remote repository, as well as a way to modify the refs stored by the + remote repository. +

+ Instances of a PushConnection must be created by a + Transport + that + implements a specific object transfer protocol that both sides of the + connection understand. +

+ PushConnection instances are not thread safe and may be accessed by only one + thread at a time. + + Transport + + +

Pushes to the remote repository based on the provided specification. + + Pushes to the remote repository based on the provided specification. This + may result in update/creation/deletion of refs on the remote repository + and sending objects that the remote repository needs to have a consistent + objects graph from new refs. +

+

+ Only one call per connection is allowed. Subsequent calls will result in + NGit.Errors.TransportException + . +

+ Implementation may use local repository to send a minimum set of objects + needed by remote repository in efficient way. + Transport.IsPushThin() + should be honored if applicable. + refUpdates should be filled with information about status of each update. + + + progress monitor to update the end-user about the amount of + work completed, or to indicate cancellation. Implementors + should poll the monitor at regular intervals to look for + cancellation requests from the user. + + + map of remote refnames to remote refs update + specifications/statuses. Can't be empty. This indicate what + refs caller want to update on remote side. Only refs updates + with + Status.NOT_ATTEMPTED + should passed. + Implementation must ensure that and appropriate status with + optional message should be set during call. No refUpdate with + Status.AWAITING_REPORT + or + Status.NOT_ATTEMPTED + can be leaved by implementation after return from this call. + + + objects could not be copied due to a network failure, + critical protocol error, or error on remote side, or + connection was already used for push - new connection must be + created. Non-critical errors concerning only isolated refs + should be placed in refUpdates. + + + +

Update of a locally stored tracking branch. + Update of a locally stored tracking branch. +
+ + Get the name of the remote ref. + + Get the name of the remote ref. +

+ Usually this is of the form "refs/heads/master". + + the name used within the remote repository. + + +

Get the name of the local tracking ref. + + Get the name of the local tracking ref. +

+ Usually this is of the form "refs/remotes/origin/master". + + the name used within this local repository. + + +

Get the new value the ref will be (or was) updated to. + Get the new value the ref will be (or was) updated to. + new value. Null if the caller has not configured it. +
+ + The old value of the ref, prior to the update being attempted. + + The old value of the ref, prior to the update being attempted. +

+ This value may differ before and after the update method. Initially it is + populated with the value of the ref before the lock is taken, but the old + value may change if someone else modified the ref between the time we + last read it and when the ref was locked for update. + + the value of the ref prior to the update being attempted. + + +

Get the status of this update. + Get the status of this update. + the status of the update. +
+ + + A command being processed by + BaseReceivePack + . +

+ This command instance roughly translates to the server side representation of + the + RemoteRefUpdate + created by the client. +

+
+ + Filter a list of commands according to result. + Filter a list of commands according to result. + commands to filter. + desired status to filter by. + + a copy of the command list containing only those commands with + the desired status. + + 2.0 + + + + Create a new command for + BaseReceivePack + . + + + the old object id; must not be null. Use + NGit.ObjectId.ZeroId() + to indicate a ref creation. + + + the new object id; must not be null. Use + NGit.ObjectId.ZeroId() + to indicate a ref deletion. + + name of the ref being affected. + + + + Create a new command for + BaseReceivePack + . + + + the old object id; must not be null. Use + NGit.ObjectId.ZeroId() + to indicate a ref creation. + + + the new object id; must not be null. Use + NGit.ObjectId.ZeroId() + to indicate a ref deletion. + + name of the ref being affected. + type of the command. + 2.0 + + + the old value the client thinks the ref has. + + + the requested new value for this ref. + + + the name of the ref being updated. + + + + the type of this command; see + Type + . + + + + the ref, if this was advertised by the connection. + + + the current status code of this command. + + + the message associated with a failure status. + + + Set the status of this command. + Set the status of this command. + the new status code for this command. + + + Set the status of this command. + Set the status of this command. + new status code for this command. + optional message explaining the new status. + + + Update the type of this command by checking for fast-forward. + + Update the type of this command by checking for fast-forward. +

+ If the command's current type is UPDATE, a merge test will be performed + using the supplied RevWalk to determine if + GetOldId() + is fully + merged into + GetNewId() + . If some commits are not merged the + update type is changed to + Type.UPDATE_NONFASTFORWARD + . + + + an instance to perform the merge test with. The caller must + allocate and release this object. + + + either oldId or newId is not accessible in the repository + used by the RevWalk. This usually indicates data corruption, + and the command cannot be processed. + + + +

Execute this command during a receive-pack session. + + Execute this command during a receive-pack session. +

+ Sets the status of the command as a side effect. + + receive-pack session. + 2.0 + + +

Set the result of this command. + Set the result of this command. + the new result code for this command. +
+ + Type of operation requested. + Type of operation requested. + + + Result of the update command. + Result of the update command. + + + + + + Transport we will fetch over. + Transport we will fetch over. + + + List of things we want to fetch from the remote repository. + List of things we want to fetch from the remote repository. + + + Set of refs we will actually wind up asking to obtain. + Set of refs we will actually wind up asking to obtain. + + + Objects we know we have locally. + Objects we know we have locally. + + + Updates to local tracking branches (if any). + Updates to local tracking branches (if any). + + + Records to be recorded into FETCH_HEAD. + Records to be recorded into FETCH_HEAD. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Class performing push operation on remote repository. + Class performing push operation on remote repository. + Transport.Push(NGit.ProgressMonitor, System.Collections.Generic.ICollection<E>) + + + + + Task name for + NGit.ProgressMonitor + used during opening connection. + + + + Transport used to perform this operation. + Transport used to perform this operation. + + + Push operation connection created to perform this operation + + + Refs to update on remote side. + Refs to update on remote side. + + + Revision walker for checking some updates properties. + Revision walker for checking some updates properties. + + + Create process for specified transport and refs updates specification. + Create process for specified transport and refs updates specification. + + transport between remote and local repository, used to create + connection. + + specification of refs updates (and local tracking branches). + + NGit.Errors.TransportException + + + + Perform push operation between local and remote repository - set remote + refs appropriately, send needed objects and update local tracking refs. 
+ + + Perform push operation between local and remote repository - set remote + refs appropriately, send needed objects and update local tracking refs. +

+ When + Transport.IsDryRun() + is true, result of this operation is + just estimation of real operation result, no real action is performed. + + progress monitor used for feedback about operation. + result of push operation with complete status description. + when push operation is not supported by provided transport. + + + when some error occurred during operation, like I/O, protocol + error, or local database consistency error. + + + + + + +

Base helper class for pack-based operations implementations. + + Base helper class for pack-based operations implementations. Provides partial + implementation of pack-protocol - refs advertising and capabilities support, + and some other helper methods. + + BasePackFetchConnection + BasePackPushConnection +
+ + The repository this transport fetches into, or pushes out of. + The repository this transport fetches into, or pushes out of. + + + Remote repository location. + Remote repository location. + + + + A transport connected to + uri + . + + + + Low-level input stream, if a timeout was configured. + Low-level input stream, if a timeout was configured. + + + Low-level output stream, if a timeout was configured. + Low-level output stream, if a timeout was configured. + + + + Timer to manage + timeoutIn + and + timeoutOut + . + + + + Input stream reading from the remote. + Input stream reading from the remote. + + + Output stream sending to the remote. + Output stream sending to the remote. + + + + Packet line decoder around + @in + . + + + + + Packet line encoder around + @out + . + + + + + Send + PacketLineOut.End() + before closing + @out + ? + + + + True if this is a stateless RPC connection. + True if this is a stateless RPC connection. + + + Capability tokens advertised by the remote side. + Capability tokens advertised by the remote side. + + + Extra objects the remote has, but which aren't offered as refs. + Extra objects the remote has, but which aren't offered as refs. + + + Configure this connection with the directional pipes. + Configure this connection with the directional pipes. + + input stream to receive data from the peer. Caller must ensure + the input is buffered, otherwise read performance may suffer. + + + output stream to transmit data to the peer. Caller must ensure + the output is buffered, otherwise write performance may + suffer. + + + + Reads the advertised references through the initialized stream. + + Reads the advertised references through the initialized stream. +

+ Subclass implementations may call this method only after setting up the + input and output streams with + Init(Sharpen.InputStream, Sharpen.OutputStream) + + . +

+ If any errors occur, this connection is automatically closed by invoking + Close() + and the exception is wrapped (if necessary) and thrown + as a + NGit.Errors.TransportException + . + + the reference list could not be scanned. + + + + + + +

Create an exception to indicate problems finding a remote repository. + + Create an exception to indicate problems finding a remote repository. The + caller is expected to throw the returned exception. + Subclasses may override this method to provide better diagnostics. + + + a TransportException saying a repository cannot be found and + possibly why. + +
+ + Tell the peer we are disconnecting, if it cares to know. Tell the peer we are disconnecting, if it cares to know. + + Marker interface for an object transport using Git pack transfers. + + Marker interface for an object transport using Git pack transfers. 

+ Implementations of PackTransport setup connections and move objects back and + forth by creating pack files on the source side and indexing them on the + receiving side. + + BasePackFetchConnection + BasePackPushConnection + + +

Contains a message from the remote repository indicating a problem. + + Contains a message from the remote repository indicating a problem. +

+ Some remote repositories may send customized error messages describing why + they cannot be accessed. These messages are wrapped up in this exception and + thrown to the caller of the transport operation. + + + +

Constructs a RemoteRepositoryException for a message. + Constructs a RemoteRepositoryException for a message. + URI used for transport + + message, exactly as supplied by the remote repository. May + contain LFs (newlines) if the remote formatted it that way. + +
+ + Indicates a remote repository does not exist. Indicates a remote repository does not exist. + + Constructs an exception indicating a repository does not exist. Constructs an exception indicating a repository does not exist. URI used for transport message + + Indicates a protocol error has occurred while fetching/pushing objects. Indicates a protocol error has occurred while fetching/pushing objects. + + + Constructs a PackProtocolException with the specified detail message prefixed with provided URI. + + Constructs a PackProtocolException with the specified detail message prefixed with provided URI. + URI used for transport message + + + Constructs a PackProtocolException with the specified detail message prefixed with provided URI. + + Constructs a PackProtocolException with the specified detail message prefixed with provided URI. + URI used for transport message root cause exception + + Constructs a PackProtocolException with the specified detail message. Constructs a PackProtocolException with the specified detail message. message + + Constructs a PackProtocolException with the specified detail message. Constructs a PackProtocolException with the specified detail message. message root cause exception + + Indicates a base/common object was required, but is not found. Indicates a base/common object was required, but is not found. + + Constructs a MissingBundlePrerequisiteException for a set of objects. Constructs a MissingBundlePrerequisiteException for a set of objects. URI used for transport + the Map of the base/common object(s) we don't have. Keys are ids of the missing objects and values are short descriptions. + + + Fetch implementation using the native Git pack transfer service. + + Fetch implementation using the native Git pack transfer service. 

+ This is the canonical implementation for transferring objects from the remote + repository to the local repository by talking to the 'git-upload-pack' + service. Objects are packed on the remote side into a pack file and then sent + down the pipe to us. +

+ This connection requires only a bi-directional pipe or socket, and thus is + easily wrapped up into a local process pipe, anonymous TCP socket, or a + command executed through an SSH tunnel. +

+ If + BasePackConnection.statelessRPC + is + true + , this connection + can be tunneled over a request-response style RPC system like HTTP. The RPC + call boundary is determined by this class switching from writing to the + OutputStream to reading from the InputStream. +

+ Concrete implementations should just call + BasePackConnection.Init(Sharpen.InputStream, Sharpen.OutputStream) + + and + BasePackConnection.ReadAdvertisedRefs() + + methods in constructor or before any use. They + should also handle resources releasing in + Close() + method if needed. + + + +

Maximum number of 'have' lines to send before giving up. + + Maximum number of 'have' lines to send before giving up. +

+ During + Negotiate(NGit.ProgressMonitor) + we send at most this many + commits to the remote peer as 'have' lines without an ACK response before + we give up. + + + +

Amount of data the client sends before starting to read. + + Amount of data the client sends before starting to read. +

+ Any output stream given to the client must be able to buffer this many + bytes before the client will stop writing and start reading from the + input stream. If the output stream blocks before this many bytes are in + the send queue, the system will deadlock. + + + +

Include tags if we are also including the referenced objects. + Include tags if we are also including the referenced objects. + 2.0 +
+ + Multi-ACK support for improved negotiation. Multi-ACK support for improved negotiation. 2.0 + + Multi-ACK detailed support for improved negotiation. Multi-ACK detailed support for improved negotiation. 2.0 + + The client supports packs with deltas but not their bases. The client supports packs with deltas but not their bases. 2.0 + + The client supports using the side-band for progress messages. The client supports using the side-band for progress messages. 2.0 + + The client supports using the 64K side-band for progress messages. The client supports using the 64K side-band for progress messages. 2.0 + + The client supports packs with OFS deltas. The client supports packs with OFS deltas. 2.0 + + The client supports shallow fetches. The client supports shallow fetches. 2.0 + + The client does not want progress messages and will ignore them. The client does not want progress messages and will ignore them. 2.0 + + The client supports receiving a pack before it has sent "done". The client supports receiving a pack before it has sent "done". 2.0 + + All commits that are immediately reachable by a local ref. All commits that are immediately reachable by a local ref. + + Marks an object as having all its dependencies. Marks an object as having all its dependencies. + + Marks a commit known to both sides of the connection. Marks a commit known to both sides of the connection. + + + Like + COMMON + but means it is also in + pckState + . + + + Marks a commit listed in the advertised refs. Marks a commit listed in the advertised refs. + + + RPC state, if + BasePackConnection.statelessRPC + is true. + + + Create a new connection to fetch using the native git transport. Create a new connection to fetch using the native git transport. the transport. + + + + + + Execute common ancestor negotiation and fetch the objects. Execute common ancestor negotiation and fetch the objects. 
+ progress monitor to receive status updates. + the advertised remote references the caller wants to fetch. + + additional objects to assume that already exist locally. This + will be added to the set of objects reachable from the + destination repository's references. + + if any exception occurs. + + + + + + + + + + + + + + + + + + + + + + + + + + Notification event delivered just before the pack is received from the + network. + + + Notification event delivered just before the pack is received from the + network. This event can be used by RPC such as + TransportHttp + to + disable its request magic and ensure the pack stream is read correctly. + + 2.0 + + + Push implementation using the native Git pack transfer service. + + Push implementation using the native Git pack transfer service. +

+ This is the canonical implementation for transferring objects to the remote + repository from the local repository by talking to the 'git-receive-pack' + service. Objects are packed on the local side into a pack file and then sent + to the remote repository. +

+ This connection requires only a bi-directional pipe or socket, and thus is + easily wrapped up into a local process pipe, anonymous TCP socket, or a + command executed through an SSH tunnel. +

+ This implementation honors + Transport.IsPushThin() + option. +

+ Concrete implementations should just call + BasePackConnection.Init(Sharpen.InputStream, Sharpen.OutputStream) + + and + BasePackConnection.ReadAdvertisedRefs() + + methods in constructor or before any use. They + should also handle resources releasing in + BasePackConnection.Close() + method if needed. + + + +

The client expects a status report after the server processes the pack. + The client expects a status report after the server processes the pack. + 2.0 +
+ + The server supports deleting refs. + The server supports deleting refs. + 2.0 + + + The server supports packs with OFS deltas. + The server supports packs with OFS deltas. + 2.0 + + + The client supports using the 64K side-band for progress messages. + The client supports using the 64K side-band for progress messages. + 2.0 + + + Time in milliseconds spent transferring the pack data. + Time in milliseconds spent transferring the pack data. + + + Create a new connection to push using the native git transport. + Create a new connection to push using the native git transport. + the transport. + + + + + + Push one or more objects and update the remote repository. + Push one or more objects and update the remote repository. + progress monitor to receive status updates. + update commands to be applied to the remote repository. + if any exception occurs. + + + + + + + + + + + + + + + Fetch connection for bundle based classes. + + Fetch connection for bundle based classes. It used by + instances of + TransportBundle + + + + + + + + + + + + + + + + + + + + + + + Marker interface for transports that supports fetching from a git bundle + (sneaker-net object transport). + + + Marker interface for transports that supports fetching from a git bundle + (sneaker-net object transport). +

+ Push support for a bundle is complex, as one does not have a peer to + communicate with to decide what the peer already knows. So push is not + supported by the bundle transport. + + + +

Bundle signature +
+ + Creates a Git bundle file, for sneaker-net transport to another system. + + Creates a Git bundle file, for sneaker-net transport to another system. +

+ Bundles generated by this class can be later read in from a file URI using + the bundle transport, or from an application controlled buffer by the more + generic + TransportBundleStream + . +

+ Applications creating bundles need to call one or more include + calls to reflect which objects should be available as refs in the bundle for + the other side to fetch. At least one include is required to create a valid + bundle file, and duplicate names are not permitted. +

+ Optional assume calls can be made to declare commits which the + recipient must have in order to fetch from the bundle file. Objects reachable + from these assumed commits can be used as delta bases in order to reduce the + overall bundle size. + + + +

Create a writer for a bundle. + Create a writer for a bundle. + repository where objects are stored. +
+ + Set the configuration used by the pack generator. + Set the configuration used by the pack generator. + + configuration controlling packing parameters. If null the + source repository's settings will be used. + + + + Include an object (and everything reachable from it) in the bundle. + Include an object (and everything reachable from it) in the bundle. + + name the recipient can discover this object as from the + bundle's list of advertised refs . The name must be a valid + ref format and must not have already been included in this + bundle writer. + + object to pack. Multiple refs may point to the same object. + + + Include a single ref (a name/object pair) in the bundle. + + Include a single ref (a name/object pair) in the bundle. +

+ This is a utility function for: + include(r.getName(), r.getObjectId()). + + the ref to include. + + +

Assume a commit is available on the recipient's side. + + Assume a commit is available on the recipient's side. +

+ In order to fetch from a bundle the recipient must have any assumed + commit. Each assumed commit is explicitly recorded in the bundle header + to permit the recipient to validate it has these objects. + + + the commit to assume being available. This commit should be + parsed and not disposed in order to maximize the amount of + debugging information available in the bundle stream. + + + +

Generate and write the bundle to the output stream. + + Generate and write the bundle to the output stream. +

+ This method can only be called once per BundleWriter instance. + + progress monitor to report bundle writing status to. + + the stream the bundle is written to. The stream should be + buffered by the caller. The caller is responsible for closing + the stream. + + + an error occurred reading a local object's data to include in + the bundle, or writing compressed object data to the output + stream. + + + +

The base class for transports that use HTTP as underlying protocol. + + The base class for transports that use HTTP as underlying protocol. This class + allows customizing HTTP connection settings. + +
+ + Create a new transport instance. + Create a new transport instance. + + the repository this instance will fetch into, or push out of. + This must be the repository passed to + Transport.Open(NGit.Repository, URIish) + + . + + + the URI used to access the remote repository. This must be the + URI passed to + Transport.Open(NGit.Repository, URIish) + + . + + + + Create a minimal HTTP transport instance not tied to a single repository. + + Create a minimal HTTP transport instance not tied to a single repository. + + + + + + Hook invoked by + ReceivePack + after all updates are executed. +

+ The hook is called after all commands have been processed. Only commands with + a status of + Result.OK + are passed into the hook. To get + all commands within the hook, see + BaseReceivePack.GetAllCommands() + . +

+ Any post-receive hook implementation should not update the status of a + command, as the command has already completed or failed, and the status has + already been returned to the client. +

+ Hooks should execute quickly, as they block the server and the client from + completing the connection. +

+
+ + A simple no-op hook. + A simple no-op hook. + + + Invoked after all commands are executed and status has been returned. + Invoked after all commands are executed and status has been returned. + + the process handling the current receive. Hooks may obtain + details about the destination repository through this handle. + + + unmodifiable set of successfully completed commands. May be + the empty set. + + + + + Hook invoked by + ReceivePack + before any updates are executed. +

+ The hook is called with any commands that are deemed valid after parsing them + from the client and applying the standard receive configuration options to + them: +

    +
  • receive.denyDeletes
  • +
  • receive.denyNonFastForwards
  • +
+ This means the hook will not receive a non-fast-forward update command if + denyNonFastForwards is set to true in the configuration file. To get all + commands within the hook, see + BaseReceivePack.GetAllCommands() + . +

+ As the hook is invoked prior to the commands being executed, the hook may + choose to block any command by setting its result status with + ReceiveCommand.SetResult(Result) + . +

+ The hook may also choose to perform the command itself (or merely pretend + that it has performed the command), by setting the result status to + Result.OK + . +

+ Hooks should run quickly, as they block the caller thread and the client + process from completing. +

+ Hooks may send optional messages back to the client via methods on + ReceivePack + . Implementors should be aware that not all network + transports support this output, so some (or all) messages may simply be + discarded. These messages should be advisory only. +

+
+ + A simple no-op hook. + A simple no-op hook. + + + Invoked just before commands are executed. + + Invoked just before commands are executed. +

+ See the class description for how this method can impact execution. + + + the process handling the current receive. Hooks may obtain + details about the destination repository through this handle. + + + unmodifiable set of valid commands still pending execution. + May be the empty set. + + + +

Implements the server side of a push connection, receiving objects. + Implements the server side of a push connection, receiving objects. +
+ + Base implementation of the side of a push connection that receives objects. + + + Base implementation of the side of a push connection that receives objects. +

+ Contains high-level operations for initializing and closing streams, + advertising refs, reading commands, and receiving and applying a pack. + Subclasses compose these operations into full service implementations. + + + +

Database we write the stored objects into. + Database we write the stored objects into. +
+ + + Revision traversal support over + db + . + + + + + Is the client connection a bi-directional socket or pipe? +

+ If true, this class assumes it can perform multiple read and write cycles + with the client over the input and output streams. +

+ + Is the client connection a bi-directional socket or pipe? +

+ If true, this class assumes it can perform multiple read and write cycles + with the client over the input and output streams. This matches the + functionality available with a standard TCP/IP connection, or a local + operating system or in-memory pipe. +

+ If false, this class runs in a read everything then output results mode, + making it suitable for single round-trip systems RPCs such as HTTP. + + + +

Expecting data after the pack footer +
+ + Should an incoming transfer validate objects? + + + Should an incoming transfer permit create requests? + + + Should an incoming transfer permit delete requests? + + + Should an incoming transfer permit non-fast-forward requests? + + + Identity to record action as within the reflog. + Identity to record action as within the reflog. + + + Hook used while advertising the refs to the client. + Hook used while advertising the refs to the client. + + + Filter used while advertising the refs to the client. + Filter used while advertising the refs to the client. + + + Timeout in seconds to wait for client interaction. + Timeout in seconds to wait for client interaction. + + + + Timer to manage + timeout + . + + + + Raw input stream. + Raw input stream. + + + Raw output stream. + Raw output stream. + + + Optional message output stream. + Optional message output stream. + + + + Packet line input stream around + rawIn + . + + + + + Packet line output stream around + rawOut + . + + + + The refs we advertised as existing at the start of the connection. + The refs we advertised as existing at the start of the connection. + + + All SHA-1s shown to the client, which can be possible edges. + All SHA-1s shown to the client, which can be possible edges. + + + Capabilities requested by the client. + Capabilities requested by the client. + + + + If + BasePackPushConnection.CAPABILITY_SIDE_BAND_64K + + is enabled. + + + + Lock around the received pack file, while updating refs. + Lock around the received pack file, while updating refs. + + + Git object size limit + + + Create a new pack receive for an open repository. + Create a new pack receive for an open repository. + the destination repository. + + + the process name used for pack lock messages. + + + the repository this receive completes into. + + + the RevWalk instance used by this connection. + + + Get refs which were advertised to the client. + Get refs which were advertised to the client. 
+ + all refs which were advertised to the client, or null if + SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>, System.Collections.Generic.ICollection<E>) + + has not been called yet. + + + + Set the refs advertised by this ReceivePack. + + Set the refs advertised by this ReceivePack. +

+ Intended to be called from a + PreReceiveHook + . + + + explicit set of references to claim as advertised by this + ReceivePack instance. This overrides any references that + may exist in the source repository. The map is passed + to the configured + GetRefFilter() + . If null, assumes + all refs were advertised. + + + explicit set of additional haves to claim as advertised. If + null, assumes the default set of additional haves from the + repository. + + + +

Get objects advertised to the client. + Get objects advertised to the client. + + the set of objects advertised to the as present in this repository, + or null if + SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>, System.Collections.Generic.ICollection<E>) + + has not been called + yet. + +
+ + + true if this instance will validate all referenced, but not + supplied by the client, objects are reachable from another + reference. + + + + Validate all referenced but not supplied objects are reachable. + + Validate all referenced but not supplied objects are reachable. +

+ If enabled, this instance will verify that references to objects not + contained within the received pack are already reachable through at least + one other reference displayed as part of + GetAdvertisedRefs() + . +

+ This feature is useful when the application doesn't trust the client to + not provide a forged SHA-1 reference to an object, in an attempt to + access parts of the DAG that they aren't allowed to see and which have + been hidden from them via the configured + AdvertiseRefsHook + or + RefFilter + . +

+ Enabling this feature may imply at least some, if not all, of the same + functionality performed by + SetCheckReceivedObjects(bool) + . + Applications are encouraged to enable both features, if desired. + + + true + to enable the additional check. + + + + + true if this class expects a bi-directional pipe opened between + the client and itself. The default is true. + + + + + if true, this class will assume the socket is a fully + bidirectional pipe between the two peers and takes advantage + of that by first transmitting the known refs, then waiting to + read commands. If false, this class assumes it must read the + commands before writing output and does not perform the + initial advertising. + + + + true if there is data expected after the pack footer. + + + true if there is additional data in InputStream after pack. + + + + true if this instance will verify received objects are formatted + correctly. Validating objects requires more CPU time on this side + of the connection. + + + + + true to enable checking received objects; false to assume all + received objects are valid. + + + + true if the client can request refs to be created. + + + true to permit create ref commands to be processed. + + + true if the client can request refs to be deleted. + + + true to permit delete ref commands to be processed. + + + + true if the client can request non-fast-forward updates of a ref, + possibly making objects unreachable. + + + + + true to permit the client to ask for non-fast-forward updates + of an existing ref. + + + + identity of the user making the changes in the reflog. + + +

Set the identity of the user appearing in the affected reflogs. + + Set the identity of the user appearing in the affected reflogs. +

+ The timestamp portion of the identity is ignored. A new identity with the + current timestamp will be created automatically when the updates occur + and the log records are written. + + + identity of the user. If null the identity will be + automatically determined based on the repository + configuration. + + + + the hook used while advertising the refs to the client + + + the filter used while advertising the refs to the client + + +

Set the hook used while advertising the refs to the client. + + Set the hook used while advertising the refs to the client. +

+ If the + AdvertiseRefsHook + chooses to call + SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>, System.Collections.Generic.ICollection<E>) + + , only refs set by this hook + and selected by the + RefFilter + will be shown to the client. + Clients may still attempt to create or update a reference not advertised by + the configured + AdvertiseRefsHook + . These attempts should be rejected + by a matching + PreReceiveHook + . + + the hook; may be null to show all refs. + + +

Set the filter used while advertising the refs to the client. + + Set the filter used while advertising the refs to the client. +

+ Only refs allowed by this filter will be shown to the client. + The filter is run against the refs specified by the + AdvertiseRefsHook + (if applicable). + + the filter; may be null to show all refs. + + + timeout (in seconds) before aborting an IO operation. + + +

Set the timeout before willing to abort an IO call. + Set the timeout before willing to abort an IO call. + + number of seconds to wait (with no data transfer occurring) + before aborting an IO read or write operation with the + connected client. + +
+ + Set the maximum allowed Git object size. + + Set the maximum allowed Git object size. +

+ If an object is larger than the given size the pack-parsing will throw an
+ exception aborting the receive-pack operation.
+ 
+ the Git object size limit. If zero then there is no limit.
+ 
+ 
+ 

Check whether the client expects a side-band stream. + Check whether the client expects a side-band stream. + + true if the client has advertised a side-band capability, false + otherwise. + + + if the client's request has not yet been read from the wire, so + we do not know if they expect side-band. Note that the client + may have already written the request, it just has not been + read. + + +
+ + all of the command received by the current request. + + + Send an error message to the client. + + Send an error message to the client. +

+ If any error messages are sent before the references are advertised to + the client, the errors will be sent instead of the advertisement and the + receive operation will be aborted. All clients should receive and display + such early stage errors. +

+ If the reference advertisements have already been sent, messages are sent + in a side channel. If the client doesn't support receiving messages, the + message will be discarded, with no other indication to the caller or to + the client. +

+ PreReceiveHook + s should always try to use + ReceiveCommand.SetResult(Result, string) + + with a result status of + Result.REJECTED_OTHER_REASON + to indicate any reasons for + rejecting an update. Messages attached to a command are much more likely + to be returned to the client. + + + string describing the problem identified by the hook. The + string must not end with an LF, and must not contain an LF. + + + +

Send a message to the client, if it supports receiving them. + + Send a message to the client, if it supports receiving them. +

+ If the client doesn't support receiving messages, the message will be + discarded, with no other indication to the caller or to the client. + + + string describing the problem identified by the hook. The + string must not end with an LF, and must not contain an LF. + + + + an underlying stream for sending messages to the client. + + + true if any commands to be executed have been read. + + + true if an error occurred that should be advertised. + + +

Initialize the instance with the given streams. + Initialize the instance with the given streams. + + raw input to read client commands and pack data from. Caller + must ensure the input is buffered, otherwise read performance + may suffer. + + + response back to the Git network client. Caller must ensure + the output is buffered, otherwise write performance may + suffer. + + + secondary "notice" channel to send additional messages out + through. When run over SSH this should be tied back to the + standard error channel of the command execution. For most + other network connections this should be null. + +
+ + advertised refs, or the default if not explicitly advertised. + + + Receive a pack from the stream and check connectivity if necessary. + Receive a pack from the stream and check connectivity if necessary. + an error occurred during unpacking or connectivity checking. + + + + Unlock the pack written by this object. + Unlock the pack written by this object. + the pack could not be unlocked. + + + Generate an advertisement of available refs and capabilities. + Generate an advertisement of available refs and capabilities. + the advertisement formatter. + the formatter failed to write an advertisement. + + the hook denied advertisement. + + + + Receive a list of commands from the input. + Receive a list of commands from the input. + System.IO.IOException + + + Enable capabilities based on a previously read capabilities line. + Enable capabilities based on a previously read capabilities line. + + + Check if the peer requested a capability. + Check if the peer requested a capability. + protocol name identifying the capability. + true if the peer requested the capability to be enabled. + + + true if a pack is expected based on the list of commands. + + + Receive a pack from the input and store it in the repository. + Receive a pack from the input and store it in the repository. + an error occurred reading or indexing the pack. + + + + + + + Validate the command list. + Validate the command list. + + + Filter the list of commands according to result. + Filter the list of commands according to result. + desired status to filter by. + + a copy of the command list containing only those commands with the + desired status. + + + + Execute commands to update references. + Execute commands to update references. + + + Send a status report. + Send a status report. + + true if this report is for a Git client, false if it is for an + end-user. + + + an error that occurred during unpacking, or + null + + the reporter for sending the status strings. 
+ an error occurred writing the status report. + + + + Close and flush (if necessary) the underlying streams. + Close and flush (if necessary) the underlying streams. + System.IO.IOException + + + Release any resources used by this object. + Release any resources used by this object. + the pack could not be unlocked. + + + Data in the first line of a request, the line itself plus capabilities. + Data in the first line of a request, the line itself plus capabilities. + + + Parse the first line of a receive-pack request. + Parse the first line of a receive-pack request. + line from the client. + + + non-capabilities part of the line. + + + capabilities parsed from the line. + + + Configuration for receive operations. + Configuration for receive operations. + + + + Output stream that wraps the current + BaseReceivePack.msgOut + . +

+ We don't want to expose + BaseReceivePack.msgOut + directly because it can change + several times over the course of a session. +

+
+ + Interface for reporting status messages. + Interface for reporting status messages. + + + + + + Hook to validate the update commands before execution. + Hook to validate the update commands before execution. + + + Hook to report on the commands after execution. + Hook to report on the commands after execution. + + + + If + BasePackPushConnection.CAPABILITY_REPORT_STATUS + + is enabled. + + + + Create a new pack receive for an open repository. + Create a new pack receive for an open repository. + the destination repository. + + + the hook invoked before updates occur. + + + Set the hook which is invoked prior to commands being executed. + + Set the hook which is invoked prior to commands being executed. +

+ Only valid commands (those which have no obvious errors according to the + received input and this instance's configuration) are passed into the + hook. The hook may mark a command with a result of any value other than + Result.NOT_ATTEMPTED + to block its execution. +

+ The hook may be called with an empty command collection if the current + set is completely invalid. + + the hook instance; may be null to disable the hook. + + + the hook invoked after updates occur. + + +

Set the hook which is invoked after commands are executed. + + Set the hook which is invoked after commands are executed. +

+ Only successful commands (type is + Result.OK + ) are passed into the + hook. The hook may be called with an empty command collection if the + current set all resulted in an error. + + the hook instance; may be null to disable the hook. + + + + if true this class will report command failures as warning + messages before sending the command results. This is usually + not necessary, but may help buggy Git clients that discard the + errors when all branches fail. + + + +

Execute the receive task on the socket. + Execute the receive task on the socket. + + raw input to read client commands and pack data from. Caller + must ensure the input is buffered, otherwise read performance + may suffer. + + + response back to the Git network client. Caller must ensure + the output is buffered, otherwise write performance may + suffer. + + + secondary "notice" channel to send additional messages out + through. When run over SSH this should be tied back to the + standard error channel of the command execution. For most + other network connections this should be null. + + System.IO.IOException +
+ + + + + + + + + + + + + + Filters the list of refs that are advertised to the client. + + Filters the list of refs that are advertised to the client. +

+ The filter is called by + ReceivePack + and + UploadPack + to ensure + that the refs are filtered before they are advertised to the client. +

+ This can be used by applications to control visibility of certain refs based + on a custom set of rules. + + + +

The default filter, allows all refs to be shown. + The default filter, allows all refs to be shown. +
+ + + Filters a + Map + of refs before it is advertised to the client. + + the refs which this method need to consider. + the filtered map of refs. + + + + Support for the start of + UploadPack + and + ReceivePack + . + + + + Initialize this advertiser with a repository for peeling tags. + Initialize this advertiser with a repository for peeling tags. + the repository to read from. + + + Toggle tag peeling. + + Toggle tag peeling. +

+

+ This method must be invoked prior to any of the following: +

    +
  • + Send(System.Collections.Generic.IDictionary<K, V>) +
  • +
+
+ + true to show the dereferenced value of a tag as the special + ref $tag^{} ; false to omit it from the output. + +
+ + Add one protocol capability to the initial advertisement. + + Add one protocol capability to the initial advertisement. +

+ This method must be invoked prior to any of the following: +

    +
  • + Send(System.Collections.Generic.IDictionary<K, V>) +
  • +
  • + AdvertiseHave(NGit.AnyObjectId)
  • +
+
+ + the name of a single protocol capability supported by the + caller. The set of capabilities are sent to the client in the + advertisement, allowing the client to later selectively enable + features it recognizes. + +
+ + Format an advertisement for the supplied refs. + Format an advertisement for the supplied refs. + + zero or more refs to format for the client. The collection is + sorted before display if necessary, and therefore may appear + in any order. + + set of ObjectIds that were advertised to the client. + + the underlying output stream failed to write out an + advertisement record. + + + + + Advertise one object is available using the magic + .have + . +

+ The magic + .have + advertisement is not available for fetching by a + client, but can be used by a client when considering a delta base + candidate before transferring data in a push. Within the record created + by this method the ref name is simply the invalid string + .have + . +

+ identity of the object that is assumed to exist. + + the underlying output stream failed to write out an + advertisement record. + +
+ + true if no advertisements have been sent yet. + + + + + + + + + Advertise one object under a specific name. + + Advertise one object under a specific name. +

+ If the advertised object is a tag, this method does not advertise the + peeled version of it. + + the object to advertise. + + name of the reference to advertise the object as, can be any + string not including the NUL byte. + + + the underlying output stream failed to write out an + advertisement record. + + + +

Write a single advertisement line. + Write a single advertisement line. + + the advertisement line to be written. The line always ends + with LF. Never null or the empty string. + + + the underlying output stream failed to write out an + advertisement record. + +
+ + Mark the end of the advertisements. + Mark the end of the advertisements. + + the underlying output stream failed to write out an + advertisement record. + + + + + Advertiser which frames lines in a + PacketLineOut + format. + + + + Create a new advertiser for the supplied stream. + Create a new advertiser for the supplied stream. + the output stream. + + + + + + + + + Write progress messages out to the sideband channel. + Write progress messages out to the sideband channel. + + + The base class for transports based on TCP sockets. + + The base class for transports based on TCP sockets. This class + holds settings common for all TCP based transports. + + + + Create a new transport instance. + Create a new transport instance. + + the repository this instance will fetch into, or push out of. + This must be the repository passed to + Transport.Open(NGit.Repository, URIish) + + . + + + the URI used to access the remote repository. This must be the + URI passed to + Transport.Open(NGit.Repository, URIish) + + . + + + + + + + + + + + Describes a way to connect to another Git repository. + + Describes a way to connect to another Git repository. +

+ Implementations of this class are typically immutable singletons held by + static class members, for example: +

+            package com.example.my_transport;
+            class MyTransport extends Transport {
+            public static final TransportProtocol PROTO = new TransportProtocol() {
+            public String getName() {
+            return "My Protocol";
+            }
+            };
+            }
+            
+

+ Applications may register additional protocols for use by JGit by calling + Transport.Register(TransportProtocol) + + . Because that API holds onto + the protocol object by a WeakReference, applications must ensure their own + ClassLoader retains the TransportProtocol for the life of the application. + Using a static singleton pattern as above will ensure the protocol is valid + so long as the ClassLoader that defines it remains valid. +

+ Applications may automatically register additional protocols by filling in + the names of their TransportProtocol defining classes using the services file + META-INF/services/org.eclipse.jgit.transport.Transport + . For each + class name listed in the services file, any static fields of type + TransportProtocol + will be automatically registered. For the above + example the string + com.example.my_transport.MyTransport + should be + listed in the file, as that is the name of the class that defines the static + PROTO singleton. + + + + text name of the protocol suitable for display to a user. + + + immutable set of schemes supported by this protocol. + + + immutable set of URIishFields that must be filled in. + + + immutable set of URIishFields that may be filled in. + + + if a port is supported, the default port, else -1. + + +

Determine if this protocol can handle a particular URI. + + Determine if this protocol can handle a particular URI. +

+ Implementations should try to avoid looking at the local filesystem, but + may look at implementation specific configuration options in the remote + block of + local.getConfig() + using + remoteName + if the name + is non-null. +

+ The default implementation of this method matches the scheme against + GetSchemes() + , required fields against + GetRequiredFields() + , and optional fields against + GetOptionalFields() + , returning true only if all of the fields + match the specification. + + address of the Git repository; never null. + true if this protocol can handle this URI; false otherwise. + + +

Determine if this protocol can handle a particular URI. + + Determine if this protocol can handle a particular URI. +

+ Implementations should try to avoid looking at the local filesystem, but + may look at implementation specific configuration options in the remote + block of + local.getConfig() + using + remoteName + if the name + is non-null. +

+ The default implementation of this method matches the scheme against + GetSchemes() + , required fields against + GetRequiredFields() + , and optional fields against + GetOptionalFields() + , returning true only if all of the fields + match the specification. + + address of the Git repository; never null. + + the local repository that will communicate with the other Git + repository. May be null if the caller is only asking about a + specific URI and does not have a local Repository. + + + name of the remote, if the remote as configured in + local + ; otherwise null. + + true if this protocol can handle this URI; false otherwise. + + +

Open a Transport instance to the other repository. + + Open a Transport instance to the other repository. +

+ Implementations should avoid making remote connections until an operation + on the returned Transport is invoked, however they may fail fast here if + they know a connection is impossible, such as when using the local + filesystem and the target path does not exist. +

+ Implementations may access implementation-specific configuration options + within + local.getConfig() + using the remote block named by the + remoteName + , if the name is non-null. + + address of the Git repository. + + the local repository that will communicate with the other Git + repository. + + + name of the remote, if the remote as configured in + local + ; otherwise null. + + the transport. + this protocol does not support the URI. + + the transport cannot open this URI. + + + +

Open a new transport instance to the remote repository. + + Open a new transport instance to the remote repository. Use default + configuration instead of reading from configuration files. + + + new Transport + System.NotSupportedException + NGit.Errors.TransportException +
+ + + Fields within a + URIish + that a transport uses. + + + + + + + + Single shot fetch from a streamed Git bundle. + + Single shot fetch from a streamed Git bundle. +

+ The bundle is read from an unbuffered input stream, which limits the + transport to opening at most one FetchConnection before needing to recreate + the transport instance. + + + +

Create a new transport to fetch objects from a streamed bundle. + + Create a new transport to fetch objects from a streamed bundle. +

+ The stream can be unbuffered (buffering is automatically provided + internally to smooth out short reads) and unpositionable (the stream is + read from only once, sequentially). +

+ When the FetchConnection or the this instance is closed the supplied + input stream is also automatically closed. This frees callers from + needing to keep track of the supplied stream. + + repository the fetched objects will be loaded into. + + symbolic name of the source of the stream. The URI can + reference a non-existent resource. It is used only for + exception reporting. + + the stream to read the bundle from. + + + + + + + + +

Implements the server side of a fetch connection, transmitting objects. + Implements the server side of a fetch connection, transmitting objects. +
+ + Database we read the objects from. + Database we read the objects from. + + + + Revision traversal support over + db + . + + + + Configuration to pass into the PackWriter. + Configuration to pass into the PackWriter. + + + Timeout in seconds to wait for client interaction. + Timeout in seconds to wait for client interaction. + + + + Is the client connection a bi-directional socket or pipe? +

+ If true, this class assumes it can perform multiple read and write cycles + with the client over the input and output streams. +

+ + Is the client connection a bi-directional socket or pipe? +

+ If true, this class assumes it can perform multiple read and write cycles + with the client over the input and output streams. This matches the + functionality available with a standard TCP/IP connection, or a local + operating system or in-memory pipe. +

+ If false, this class runs in a read everything then output results mode, + making it suitable for single round-trip systems RPCs such as HTTP. + + + +

+ Timer to manage + timeout + . + +
+ + The refs we advertised as existing at the start of the connection. + The refs we advertised as existing at the start of the connection. + + + Hook used while advertising the refs to the client. + Hook used while advertising the refs to the client. + + + Filter used while advertising the refs to the client. + Filter used while advertising the refs to the client. + + + Hook handling the various upload phases. + Hook handling the various upload phases. + + + Capabilities requested by the client. + Capabilities requested by the client. + + + Raw ObjectIds the client has asked for, before validating them. + Raw ObjectIds the client has asked for, before validating them. + + + Objects the client wants to obtain. + Objects the client wants to obtain. + + + Objects on both sides, these don't have to be sent. + Objects on both sides, these don't have to be sent. + + + Shallow commits the client already has. + Shallow commits the client already has. + + + Shallow commits on the client which are now becoming unshallow + + + Desired depth from the client on a shallow request. + Desired depth from the client on a shallow request. + + + Commit time of the oldest common commit, in seconds. + Commit time of the oldest common commit, in seconds. + + + + null if + commonBase + should be examined again. + + + + Objects we sent in our advertisement list, clients can ask for these. + Objects we sent in our advertisement list, clients can ask for these. + + + Marked on objects the client has asked us to give them. + Marked on objects the client has asked us to give them. + + + Marked on objects both we and the client have. + Marked on objects both we and the client have. + + + + Marked on objects in + commonBase + . + + + + Objects where we found a path from the want list to a common base. + Objects where we found a path from the want list to a common base. + + + Create a new pack upload for an open repository. + Create a new pack upload for an open repository. 
+ the source repository. + + + the repository this upload is reading from. + + + the RevWalk instance used by this connection. + + + Get refs which were advertised to the client. + Get refs which were advertised to the client. + + all refs which were advertised to the client, or null if + SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>) + + has not been called yet. + + + + Set the refs advertised by this UploadPack. + + Set the refs advertised by this UploadPack. +

+ Intended to be called from a + PreUploadHook + . + + + explicit set of references to claim as advertised by this + UploadPack instance. This overrides any references that + may exist in the source repository. The map is passed + to the configured + GetRefFilter() + . If null, assumes + all refs were advertised. + + + + timeout (in seconds) before aborting an IO operation. + + +

Set the timeout before willing to abort an IO call. + Set the timeout before willing to abort an IO call. + + number of seconds to wait (with no data transfer occurring) + before aborting an IO read or write operation with the + connected client. + +
+ + + true if this class expects a bi-directional pipe opened between + the client and itself. The default is true. + + + + + if true, this class will assume the socket is a fully + bidirectional pipe between the two peers and takes advantage + of that by first transmitting the known refs, then waiting to + read commands. If false, this class assumes it must read the + commands before writing output and does not perform the + initial advertising. + + + + policy used by the service to validate client requests. + + + + the policy used to enforce validation of a client's want list. + By default the policy is + RequestPolicy.ADVERTISED + , + which is the Git default requiring clients to only ask for an + object that a reference directly points to. This may be relaxed + to + RequestPolicy.REACHABLE_COMMIT + when callers + have + SetBiDirectionalPipe(bool) + set to false. + + + + the hook used while advertising the refs to the client + + + the filter used while advertising the refs to the client + + + Set the hook used while advertising the refs to the client. + + Set the hook used while advertising the refs to the client. +

+ If the + AdvertiseRefsHook + chooses to call + SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>) + + , only refs set by this hook and + selected by the + RefFilter + will be shown to the client. + + the hook; may be null to show all refs. + + +

Set the filter used while advertising the refs to the client. + + Set the filter used while advertising the refs to the client. +

+ Only refs allowed by this filter will be sent to the client. + The filter is run against the refs specified by the + AdvertiseRefsHook + (if applicable). + + the filter; may be null to show all refs. + + + the configured upload hook. + + +

Set the hook that controls how this instance will behave. + Set the hook that controls how this instance will behave. + the hook; if null no special actions are taken. +
+ + Set the configuration used by the pack generator. + Set the configuration used by the pack generator. + + configuration controlling packing parameters. If null the + source repository's settings will be used. + + + + the configured logger. + + + Set the logger. + Set the logger. + the logger instance. If null, no logging occurs. + + + Check whether the client expects a side-band stream. + Check whether the client expects a side-band stream. + + true if the client has advertised a side-band capability, false + otherwise. + + + if the client's request has not yet been read from the wire, so + we do not know if they expect side-band. Note that the client + may have already written the request, it just has not been + read. + + + + + Execute the upload task on the socket. + Execute the upload task on the socket. + + raw input to read client commands from. Caller must ensure the + input is buffered, otherwise read performance may suffer. + + + response back to the Git network client, to write the pack + data onto. Caller must ensure the output is buffered, + otherwise write performance may suffer. + + + secondary "notice" channel to send additional messages out + through. When run over SSH this should be tied back to the + standard error channel of the command execution. For most + other network connections this should be null. + + System.IO.IOException + + + Get the PackWriter's statistics if a pack was sent to the client. + Get the PackWriter's statistics if a pack was sent to the client. + + statistics about pack output, if a pack was sent. Null if no pack + was sent, such as during the negotation phase of a smart HTTP + connection, or if the client was already up-to-date. + + + + + + + + + + Generate an advertisement of available refs and capabilities. + Generate an advertisement of available refs and capabilities. + the advertisement formatter. + the formatter failed to write an advertisement. + + the hook denied advertisement. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Policy the server uses to validate client requests + + + Data in the first line of a request, the line itself plus options. + Data in the first line of a request, the line itself plus options. + + + Parse the first line of a receive-pack request. + Parse the first line of a receive-pack request. + line from the client. + + + non-capabilities part of the line. + + + options parsed from the line. + + + Generic fetch support for dumb transport protocols. + + Generic fetch support for dumb transport protocols. +

+ Since there are no Git-specific smarts on the remote side of the connection + the client side must determine which objects it needs to copy in order to + completely fetch the requested refs and their history. The generic walk + support in this class parses each individual object (once it has been copied + to the local repository) and examines the list of objects that must also be + copied to create a complete history. Objects which are already available + locally are retained (and not copied), saving bandwidth for incremental + fetches. Pack files are copied from the remote repository only as a last + resort, as the entire pack must be copied locally in order to access any + single object. +

+ This fetch connection does not actually perform the object data transfer. + Instead it delegates the transfer to a + WalkRemoteObjectDatabase + , + which knows how to read individual files from the remote repository and + supply the data as a standard Java InputStream. + + WalkRemoteObjectDatabase + + +

The repository this transport fetches into, or pushes out of. + The repository this transport fetches into, or pushes out of. +
+ + If not null the validator for received objects. + If not null the validator for received objects. + + + List of all remote repositories we may need to get objects out of. + + List of all remote repositories we may need to get objects out of. +

+ The first repository in the list is the one we were asked to fetch from; + the remaining repositories point to the alternate locations we can fetch + objects through. + + + +

+ Most recently used item in + remotes + . + +
+ + Objects whose direct dependents we know we have (or will have). + Objects whose direct dependents we know we have (or will have). + + + + Objects that have already entered + workQueue + . + + + + + Commits that have already entered + localCommitQueue + . + + + + Commits already reachable from all local refs. + Commits already reachable from all local refs. + + + Objects we need to copy from the remote repository. + Objects we need to copy from the remote repository. + + + Databases we have not yet obtained the list of packs from. + Databases we have not yet obtained the list of packs from. + + + Databases we have not yet obtained the alternates from. + Databases we have not yet obtained the alternates from. + + + Packs we have discovered, but have not yet fetched locally. + Packs we have discovered, but have not yet fetched locally. + + + + Packs whose indexes we have looked at in + unfetchedPacks + . +

+ We try to avoid getting duplicate copies of the same pack through + multiple alternates by only looking at packs whose names are not yet in + this collection. +

+
+ + Errors received while trying to obtain an object. + + Errors received while trying to obtain an object. +

+ If the fetch winds up failing because we cannot locate a specific object + then we need to report all errors related to that object back to the + caller as there may be cascading failures. + + + +

+ Inserter to write objects onto + local + . + +
+ + + Inserter to read objects from + local + . + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Transfers object data through a dumb transport. + + Transfers object data through a dumb transport. +

+ Implementations are responsible for resolving path names relative to the + objects/ subdirectory of a single remote Git repository or + naked object database and make the content available as a Java input stream + for reading during fetch. The actual object traversal logic to determine the + names of files to retrieve is handled through the generic, protocol + independent + WalkFetchConnection + . + + + +

Obtain the list of available packs (if any). + + Obtain the list of available packs (if any). +

+ Pack names should be the file name in the packs directory, that is + pack-035760ab452d6eebd123add421f253ce7682355a.pack. Index + names should not be included in the returned collection. + + list of pack names; null or empty list if none are available. + + The connection is unable to read the remote repository's list + of available pack files. + + + +

Obtain alternate connections to alternate object databases (if any). + + Obtain alternate connections to alternate object databases (if any). +

+ Alternates are typically read from the file + INFO_ALTERNATES + or + INFO_HTTP_ALTERNATES + . The content of each line must be resolved + by the implementation and a new database reference should be returned to + represent the additional location. +

+ Alternates may reuse the same network connection handle, however the + fetch connection will + Close() + each created alternate. + + + list of additional object databases the caller could fetch from; + null or empty list if none are configured. + + + The connection is unable to read the remote repository's list + of configured alternates. + + + +

Open a single file for reading. + + Open a single file for reading. +

+ Implementors should make every attempt possible to ensure + System.IO.FileNotFoundException + is used when the remote object does not + exist. However when fetching over HTTP some misconfigured servers may + generate a 200 OK status message (rather than a 404 Not Found) with an + HTML formatted message explaining the requested resource does not exist. + Callers such as + WalkFetchConnection + are prepared to handle this + by validating the content received, and assuming content that fails to + match its hash is an incorrectly phrased FileNotFoundException. + + + location of the file to read, relative to this objects + directory (e.g. + cb/95df6ab7ae9e57571511ef451cf33767c26dd2 or + pack/pack-035760ab452d6eebd123add421f253ce7682355a.pack). + + a stream to read from the file. Never null. + the requested file does not exist at the given location. + + + The connection is unable to read the remote's file, and the + failure occurred prior to being able to determine if the file + exists, or after it was determined to exist but before the + stream could be created. + + + +

+ Create a new connection for a discovered alternate object database +

+ This method is typically called by + ReadAlternates(string) + when + subclasses us the generic alternate parsing logic for their + implementation of + GetAlternates() + . +

+ + the location of the new alternate, relative to the current + object database. + + + a new database connection that can read from the specified + alternate. + + + The database connection cannot be established with the + alternate, such as if the alternate location does not + actually exist and the connection's constructor attempts to + verify that. + +
+ + Close any resources used by this connection. + + Close any resources used by this connection. +

+ If the remote repository is contacted by a network socket this method + must close that network socket, disconnecting the two peers. If the + remote repository is actually local (same system) this method must close + any open file handles used to read the "remote" repository. + + + +

Delete a file from the object database. + + Delete a file from the object database. +

+ Path may start with ../ to request deletion of a file that + resides in the repository itself. +

+ When possible empty directories must be removed, up to but not including + the current object database directory itself. +

+ This method does not support deletion of directories. + + + name of the item to be removed, relative to the current object + database. + + deletion is not supported, or deletion failed. + + + +

Open a remote file for writing. + + Open a remote file for writing. +

+ Path may start with ../ to request writing of a file that + resides in the repository itself. +

+ The requested path may or may not exist. If the path already exists as a + file the file should be truncated and completely replaced. +

+ This method creates any missing parent directories, if necessary. + + + name of the file to write, relative to the current object + database. + + + stream to write into this file. Caller must close the stream to + complete the write request. The stream is not buffered and each + write may cause a network request/response so callers should + buffer to smooth out small writes. + + + (optional) progress monitor to post write completion to during + the stream's close method. + + (optional) task name to display during the close method. + + + writing is not supported, or attempting to write the file + failed, possibly due to permissions or remote disk full, etc. + + + +

Atomically write a remote file. + + Atomically write a remote file. +

+ This method attempts to perform as atomic of an update as it can, + reducing (or eliminating) the time that clients might be able to see + partial file content. This method is not suitable for very large + transfers as the complete content must be passed as an argument. +

+ Path may start with ../ to request writing of a file that + resides in the repository itself. +

+ The requested path may or may not exist. If the path already exists as a + file the file should be truncated and completely replaced. +

+ This method creates any missing parent directories, if necessary. + + + name of the file to write, relative to the current object + database. + + complete new content of the file. + + writing is not supported, or attempting to write the file + failed, possibly due to permissions or remote disk full, etc. + + + +

Delete a loose ref from the remote repository. + Delete a loose ref from the remote repository. + + name of the ref within the ref space, for example + refs/heads/pu. + + deletion is not supported, or deletion failed. + +
+ + Delete a reflog from the remote repository. + Delete a reflog from the remote repository. + + name of the ref within the ref space, for example + refs/heads/pu. + + deletion is not supported, or deletion failed. + + + + Overwrite (or create) a loose ref in the remote repository. + + Overwrite (or create) a loose ref in the remote repository. +

+ This method creates any missing parent directories, if necessary. + + + name of the ref within the ref space, for example + refs/heads/pu. + + new value to store in this ref. Must not be null. + + writing is not supported, or attempting to write the file + failed, possibly due to permissions or remote disk full, etc. + + + +

+ Rebuild the + INFO_PACKS + for dumb transport clients. +

+ This method rebuilds the contents of the + INFO_PACKS + file to + match the passed list of pack names. +

+ + names of available pack files, in the order they should appear + in the file. Valid pack name strings are of the form + pack-035760ab452d6eebd123add421f253ce7682355a.pack. + + + writing is not supported, or attempting to write the file + failed, possibly due to permissions or remote disk full, etc. + +
+ + Open a buffered reader around a file. + + Open a buffered reader around a file. +

+ This is shorthand for calling + Open(string) + and then wrapping it + in a reader suitable for line oriented files like the alternates list. + + a stream to read from the file. Never null. + + location of the file to read, relative to this objects + directory (e.g. info/packs). + + the requested file does not exist at the given location. + + + The connection is unable to read the remote's file, and the + failure occurred prior to being able to determine if the file + exists, or after it was determined to exist but before the + stream could be created. + + + +

Read a standard Git alternates file to discover other object databases. + + Read a standard Git alternates file to discover other object databases. +

+ This method is suitable for reading the standard formats of the + alternates file, such as found in objects/info/alternates + or objects/info/http-alternates within a Git repository. +

+ Alternates appear one per line, with paths expressed relative to this + object database. + + + location of the alternate file to read, relative to this + object database (e.g. info/alternates). + + + the list of discovered alternates. Empty list if the file exists, + but no entries were discovered. + + the requested file does not exist at the given location. + + + The connection is unable to read the remote's file, and the + failure occurred prior to being able to determine if the file + exists, or after it was determined to exist but before the + stream could be created. + + + +

Read a standard Git packed-refs file to discover known references. + Read a standard Git packed-refs file to discover known references. + + return collection of references. Any existing entries will be + replaced if they are found in the packed-refs file. + + an error occurred reading from the packed refs file. + +
+ + + + + Create a new stream of unknown length. + Create a new stream of unknown length. + + stream containing the file data. This stream will be + closed by the caller when reading is complete. + + + + Create a new stream of known length. + Create a new stream of known length. + + stream containing the file data. This stream will be + closed by the caller when reading is complete. + + + total number of bytes available for reading through + i. + + + + + + + Marker interface for an object transport walking transport. + + Marker interface for an object transport walking transport. +

+ Implementations of WalkTransport transfer individual objects one at a time + from the loose objects directory, or entire packs if the source side does not + have the object as a loose object. +

+ WalkTransports are not as efficient as + PackTransport + instances, but + can be useful in situations where a pack transport is not acceptable. + + WalkFetchConnection + + +

Generic push support for dumb transport protocols. + + Generic push support for dumb transport protocols. +

+ Since there are no Git-specific smarts on the remote side of the connection + the client side must handle everything on its own. The generic push support + requires being able to delete, create and overwrite files on the remote side, + as well as create any missing directories (if necessary). Typically this can + be handled through an FTP style protocol. +

+ Objects not on the remote side are uploaded as pack files, using one pack + file per invocation. This simplifies the implementation as only two data + files need to be written to the remote repository. +

+ Push support supplied by this class is not multiuser safe. Concurrent pushes + to the same repository may yield an inconsistent reference database which may + confuse fetch clients. +

+ A single push is concurrently safe with multiple fetch requests, due to the + careful order of operations used to update the repository. Clients fetching + may receive transient failures due to short reads on certain files if the + protocol does not support atomic file replacement. + + WalkRemoteObjectDatabase + + +

The repository this transport pushes out of. + The repository this transport pushes out of. +
+ + Location of the remote repository we are writing to. + Location of the remote repository we are writing to. + + + Database connection to the remote repository. + Database connection to the remote repository. + + + The configured transport we were constructed by. + The configured transport we were constructed by. + + + Packs already known to reside in the remote repository. + + Packs already known to reside in the remote repository. +

+ This is a LinkedHashMap to maintain the original order. + + + +

Complete listing of refs the remote will have after our push. + Complete listing of refs the remote will have after our push. +
+ + Updates which require altering the packed-refs file to complete. + + Updates which require altering the packed-refs file to complete. +

+ If this collection is non-empty then any refs listed in + newRefs + with a storage class of + NGit.RefStorage.PACKED + will be written. + + + + + + + + + + + + + + + +

Basic daemon for the anonymous git:// transport protocol. + Basic daemon for the anonymous git:// transport protocol. +
+ + 9418: IANA assigned port number for Git. + 9418: IANA assigned port number for Git. + + + Configure a daemon to listen on any available network port. + Configure a daemon to listen on any available network port. + + + Configure a new daemon for the specified network address. + Configure a new daemon for the specified network address. + + address to listen for connections on. If null, any available + port will be chosen on all network interfaces. + + + + the address connections are received on. + + + Lookup a supported service so it can be reconfigured. + Lookup a supported service so it can be reconfigured. + + name of the service; e.g. "receive-pack"/"git-receive-pack" or + "upload-pack"/"git-upload-pack". + + + the service; null if this daemon implementation doesn't support + the requested service type. + + + + timeout (in seconds) before aborting an IO operation. + + + Set the timeout before willing to abort an IO call. + Set the timeout before willing to abort an IO call. + + number of seconds to wait (with no data transfer occurring) + before aborting an IO read or write operation with the + connected client. + + + + configuration controlling packing, may be null. + + + Set the configuration used by the pack generator. + Set the configuration used by the pack generator. + + configuration controlling packing parameters. If null the + source repository's settings will be used. + + + + Set the resolver used to locate a repository by name. + Set the resolver used to locate a repository by name. + the resolver instance. + + + Set the factory to construct and configure per-request UploadPack. + Set the factory to construct and configure per-request UploadPack. + the factory. If null upload-pack is disabled. + + + Set the factory to construct and configure per-request ReceivePack. + Set the factory to construct and configure per-request ReceivePack. + the factory. If null receive-pack is disabled. + + + Start this daemon on a background thread. 
+ Start this daemon on a background thread. + the server socket could not be opened. + the daemon is already running. + + + + true if this daemon is receiving connections. + + + Stop this daemon. + Stop this daemon. + + + + + + + Create and configure + NGit.Transport.UploadPack + service instance. + + + + A factory disabling the UploadPack service for all repositories. + A factory disabling the UploadPack service for all repositories. + + + Create and configure a new UploadPack instance for a repository. + Create and configure a new UploadPack instance for a repository. + + current request, in case information from the request may help + configure the UploadPack instance. + + the repository the upload would read from. + the newly configured UploadPack instance, must not be null. + + this factory refuses to create the instance because it is not + allowed on the target repository, by any user. + + + this factory refuses to create the instance for this HTTP + request and repository, such as due to a permission error. + + + + + + + + + + + + + + Create and configure + NGit.Transport.ReceivePack + service instance. + + + + A factory disabling the ReceivePack service for all repositories + + + Create and configure a new ReceivePack instance for a repository. + Create and configure a new ReceivePack instance for a repository. + + current request, in case information from the request may help + configure the ReceivePack instance. + + the repository the receive would write into. + the newly configured ReceivePack instance, must not be null. + + this factory refuses to create the instance because it is not + allowed on the target repository, by any user. + + + this factory refuses to create the instance for this HTTP + request and repository, such as due to a permission error. + + + + + + + + + + + + + + A service exposed by + Daemon + over anonymous git://. + + + + is this service enabled for invocation? + + + true to allow this service to be used; false to deny it. 
+ + + can this service be configured in the repository config file? + + + + true to permit repositories to override this service's enabled + state with the daemon.servicename config setting. + + + + name of the command requested by clients. + + + Determine if this service can handle the requested command. + Determine if this service can handle the requested command. + input line from the client. + true if this command can accept the given command line. + + + + + + + + + + + + + + + + + + + + + + + + Active network client of + Daemon + . + + + + the daemon which spawned this client. + + + Internet address of the remote client. + + + input stream to read from the connected client. + + + output stream to send data to the connected client. + + + + + + + + Simple configuration parser for the OpenSSH ~/.ssh/config file. + + Simple configuration parser for the OpenSSH ~/.ssh/config file. +

+ Since JSch does not (currently) have the ability to parse an OpenSSH + configuration file this is a simple parser to read that file and make the + critical options available to + SshSessionFactory + . + + + +

IANA assigned port number for SSH. + IANA assigned port number for SSH. +
+ + Obtain the user's configuration data. + + Obtain the user's configuration data. +

+ The configuration file is always returned to the caller, even if no file + exists in the user's home directory at the time the call was made. Lookup + requests are cached and are automatically updated if the user modifies + the configuration file since the last time it was cached. + + + the file system abstraction which will be necessary to + perform certain file system operations. + + a caching reader of the user's configuration file. + + +

The user's home directory, as key files may be relative to here. + The user's home directory, as key files may be relative to here. +
+ + The .ssh/config file we read and monitor for updates. + The .ssh/config file we read and monitor for updates. + + + + Modification time of + configFile + when + hosts + loaded. + + + + Cached entries read out of the configuration file. + Cached entries read out of the configuration file. + + + Locate the configuration for a specific host request. + Locate the configuration for a specific host request. + + the name the user has supplied to the SSH tool. This may be a + real host name, or it may just be a "Host" block in the + configuration file. + + r configuration for the requested name. Never null. + + + + + + Configuration of one "Host" block in the configuration file. + + Configuration of one "Host" block in the configuration file. +

+ If returned from + OpenSshConfig.Lookup(string) + some or all of the + properties may not be populated. The properties which are not populated + should be defaulted by the caller. +

+ When returned from + OpenSshConfig.Lookup(string) + any wildcard + entries which appear later in the configuration file will have been + already merged into this block. + + + + + the value StrictHostKeyChecking property, the valid values + are "yes" (unknown hosts are not accepted), "no" (unknown + hosts are always accepted), and "ask" (user should be asked + before accepting the host) + + + + the real IP address or host name to connect to; never null. + + + the real port number to connect to; never 0. + + + + path of the private key file to use for authentication; null + if the caller should use default authentication strategies. + + + + the real user name to connect as; never null. + + + + the preferred authentication methods, separated by commas if + more than one authentication method is preferred. + + + + + true if batch (non-interactive) mode is preferred for this + host connection. + + + +

Transport through a git-daemon waiting for anonymous TCP connections. + + Transport through a git-daemon waiting for anonymous TCP connections. +

+ This transport supports the git:// protocol, usually run on + the IANA registered port 9418. It is a popular means for distributing open + source projects, as there are no authentication or authorization overheads. + + + + + + + + + + + + + + + + + + + + + + + + +

Transport over HTTP and FTP protocols. + + Transport over HTTP and FTP protocols. +

+ If the transport is using HTTP and the remote HTTP service is Git-aware + (speaks the "smart-http protocol") this client will automatically take + advantage of the additional Git-specific HTTP extensions. If the remote + service does not support these extensions, the client will degrade to direct + file fetching. +

+ If the remote (server side) repository does not have the specialized Git + support, object files are retrieved directly through standard HTTP GET (or + binary FTP GET) requests. This make it easy to serve a Git repository through + a standard web host provider that does not offer specific support for Git. + + WalkFetchConnection + + + + + +

Create a minimal HTTP transport with default configuration values. + Create a minimal HTTP transport with default configuration values. + + System.NotSupportedException +
+ + Toggle whether or not smart HTTP transport should be used. + + Toggle whether or not smart HTTP transport should be used. +

+ This flag exists primarily to support backwards compatibility testing + within a testing framework, there is no need to modify it in most + applications. + + + if + true + (default), smart HTTP is enabled. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Basic service for sending and receiving HTTP requests. + Basic service for sending and receiving HTTP requests. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + State required to speak multiple HTTP requests with the remote. + + State required to speak multiple HTTP requests with the remote. +

+ A service wrapper provides a normal looking InputStream and OutputStream + pair which are connected via HTTP to the named remote service. Writing to + the OutputStream is buffered until either the buffer overflows, or + reading from the InputStream occurs. If overflow occurs HTTP/1.1 and its + chunked transfer encoding is used to stream the request data to the + remote service. If the entire request fits in the memory buffer, the + older HTTP/1.0 standard and a fixed content length is used instead. +

+ It is an error to attempt to read without there being outstanding data + ready for transmission on the OutputStream. +

+ No state is preserved between write-read request pairs. The caller is + responsible for replaying state vector information as part of the request + data written to the OutputStream. Any session HTTP cookies may or may not + be preserved between requests, it is left up to the JVM's implementation + of the HTTP client. + + + +

Keep opening send-receive pairs to the given URI. + Keep opening send-receive pairs to the given URI. + +
+ + Service for maintaining a single long-poll connection. + Service for maintaining a single long-poll connection. + + + + + + Only open one send-receive request. + Only open one send-receive request. + + + + Extra utilities to support usage of HTTP. + Extra utilities to support usage of HTTP. + + + + The + GET + HTTP method. + + + + + The + POST + HTTP method. + + + + + The + Cache-Control + header. + + + + + The + Pragma + header. + + + + + The + User-Agent + header. + + + + + The + Date + header. + + + + + The + Expires + header. + + + + + The + ETag + header. + + + + + The + If-None-Match + header. + + + + + The + Last-Modified + header. + + + + + The + If-Modified-Since + header. + + + + + The + Accept + header. + + + + + The + Content-Type + header. + + + + + The + Content-Length + header. + + + + + The + Content-Encoding + header. + + + + + The + Content-Range + header. + + + + + The + Accept-Ranges + header. + + + + + The + If-Range + header. + + + + + The + Range + header. + + + + + The + Accept-Encoding + header. + + + + + The + gzip + encoding value for + HDR_ACCEPT_ENCODING + . + + + + + The standard + text/plain + MIME type. + + + + + The + Authorization + header. + + + + + The + WWW-Authenticate + header. + + + + URL encode a value string into an output buffer. + URL encode a value string into an output buffer. + the output buffer. + value which must be encoded to protected special characters. + + + Get the HTTP response code from the request. + + Get the HTTP response code from the request. +

+ Roughly the same as c.getResponseCode() but the + ConnectException is translated to be more understandable. + + connection the code should be obtained from. + + r HTTP status code, usually 200 to indicate success. See + Sharpen.HttpURLConnection + for other defined constants. + + communications error prevented obtaining the response code. + + + +

Determine the proxy server (if any) needed to obtain a URL. + Determine the proxy server (if any) needed to obtain a URL. + proxy support for the caller. + location of the server caller wants to talk to. + proxy to communicate with the supplied URL. + + the proxy could not be computed as the supplied URL could not + be read. This failure should never occur. + +
+ + Transport to access a local directory as though it were a remote peer. + + Transport to access a local directory as though it were a remote peer. +

+ This transport is suitable for use on the local system, where the caller has + direct read or write access to the "remote" repository. +

+ By default this transport works by spawning a helper thread within the same + JVM, and processes the data transfer using a shared memory buffer between the + calling thread and the helper thread. This is a pure-Java implementation + which does not require forking an external process. +

+ However, during + OpenFetch() + , if the Transport has configured + Transport.GetOptionUploadPack() + to be anything other than + "git-upload-pack" or "git upload-pack", this + implementation will fork and execute the external process, using an operating + system pipe to transfer data. +

+ Similarly, during + OpenPush() + , if the Transport has configured + Transport.GetOptionReceivePack() + to be anything other than + "git-receive-pack" or "git receive-pack", this + implementation will fork and execute the external process, using an operating + system pipe to transfer data. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Loads known hosts and private keys from $HOME/.ssh. + + Loads known hosts and private keys from $HOME/.ssh. +

+ This is the default implementation used by JGit and provides most of the + compatibility necessary to match OpenSSH, a popular implementation of SSH + used by C Git. +

+ If user interactivity is required by SSH (e.g. to obtain a password), the + connection will immediately fail. + + + +

+ The base session factory that loads known hosts and private keys from + $HOME/.ssh. + + + The base session factory that loads known hosts and private keys from + $HOME/.ssh. +

+ This is the default implementation used by JGit and provides most of the + compatibility necessary to match OpenSSH, a popular implementation of SSH + used by C Git. +

+ The factory does not provide UI behavior. Override the method + Configure(Host, NSch.Session) + to supply appropriate + NSch.UserInfo + to the session. + + + +

Creates and destroys SSH connections to a remote system. + + Creates and destroys SSH connections to a remote system. +

+ Different implementations of the session factory may be used to control + communicating with the end-user as well as reading their personal SSH + configuration settings, such as known hosts and private keys. +

+ A + RemoteSession + must be returned to the factory that created it. + Callers are encouraged to retain the SshSessionFactory for the duration of + the period they are using the Session. + + + +

Get the currently configured JVM-wide factory. + + Get the currently configured JVM-wide factory. +

+ A factory is always available. By default the factory will read from the + user's $HOME/.ssh and assume OpenSSH compatibility. + + factory the current factory for this JVM. + + +

Change the JVM-wide factory to a different implementation. + Change the JVM-wide factory to a different implementation. + + factory for future sessions to be created through. If null the + default factory will be restored.s + +
+ + Open (or reuse) a session to a host. + + Open (or reuse) a session to a host. +

+ A reasonable UserInfo that can interact with the end-user (if necessary) + is installed on the returned session by this method. +

+ The caller must connect the session by invoking connect() + if it has not already been connected. + + URI information about the remote host + provider to support authentication, may be null. + + + the file system abstraction which will be necessary to + perform certain file system operations. + + Timeout value, in milliseconds. + a session that can contact the remote host. + the session could not be created. + + + +

Close (or recycle) a session to a host. + Close (or recycle) a session to a host. + + a session previously obtained from this factory's + GetSession(URIish, CredentialsProvider, NGit.Util.FS, int) + + method. + +
+ + + + + + + + Create a new remote session for the requested address. + Create a new remote session for the requested address. + host configuration + login to authenticate as. + server name to connect to. + port number of the SSH daemon (typically 22). + + the file system abstraction which will be necessary to + perform certain file system operations. + + new session instance, but otherwise unconfigured. + the session could not be created. + + + + Provide additional configuration for the session based on the host + information. + + + Provide additional configuration for the session based on the host + information. This method could be used to supply + NSch.UserInfo + . + + host configuration + session to configure + + + Obtain the JSch used to create new sessions. + Obtain the JSch used to create new sessions. + host configuration + + the file system abstraction which will be necessary to + perform certain file system operations. + + the JSch instance to use. + the user configuration could not be created. + + + + + the file system abstraction which will be necessary to + perform certain file system operations. + + the new default JSch implementation. + known host keys cannot be loaded. + + + + + + The base class for transports that use SSH protocol. + + The base class for transports that use SSH protocol. This class allows + customizing SSH connection settings. + + + + The open SSH session + + + Create a new transport instance. + Create a new transport instance. + + the repository this instance will fetch into, or push out of. + This must be the repository passed to + Transport.Open(NGit.Repository, URIish) + + . + + + the URI used to access the remote repository. This must be the + URI passed to + Transport.Open(NGit.Repository, URIish) + + . + + + + + Set SSH session factory instead of the default one for this instance of + the transport. + + + Set SSH session factory instead of the default one for this instance of + the transport. 
+ + a factory to set, must not be null + if session has been already created. + + + + the SSH session factory that will be used for creating SSH sessions + + + Get the default SSH session + a remote session + in case of error with opening SSH session + + + + Transport through an SSH tunnel. + + Transport through an SSH tunnel. +

+ The SSH transport requires the remote side to have Git installed, as the + transport logs into the remote system and executes a Git helper program on + the remote side to read (or write) the remote repository's files. +

+ This transport does not support direct SCP style of copying files, as it + assumes there are Git specific smarts on the remote side to perform object + enumeration, save file modification and hook execution. + + + + + + + + + + + + + + + + + + +

Create a remote "session" for executing remote commands. + + Create a remote "session" for executing remote commands. +

+ Clients should subclass RemoteSession to create an alternate way for JGit to + execute remote commands. (The client application may already have this + functionality available.) Note that this class is just a factory for creating + remote processes. If the application already has a persistent connection to + the remote machine, RemoteSession may do nothing more than return a new + RemoteProcess when exec is called. + + + +

Generate a new remote process to execute the given command. + + Generate a new remote process to execute the given command. This function + should also start execution and may need to create the streams prior to + execution. + + command to execute + timeout value, in seconds, for command execution + a new remote process + + may be thrown in several cases. For example, on problems + opening input or output streams or on problems connecting or + communicating with the remote host. For the latter two cases, + a TransportException may be thrown (a subclass of + IOException). + +
+ + Disconnect the remote session + + + + + + + + + + + + Transport over the non-Git aware SFTP (SSH based FTP) protocol. + + Transport over the non-Git aware SFTP (SSH based FTP) protocol. +

+ The SFTP transport does not require any specialized Git support on the remote + (server side) repository. Object files are retrieved directly through secure + shell's FTP protocol, making it possible to copy objects from a remote + repository that is available over SSH, but whose remote host does not have + Git installed. +

+ Unlike the HTTP variant (see + TransportHttp + ) we rely upon being able + to list files in directories, as the SFTP protocol supports this function. By + listing files through SFTP we can avoid needing to have current + objects/info/packs or info/refs files on the + remote repository and access the data directly, much as Git itself would. +

+ Concurrent pushing over this transport is not supported. Multiple concurrent + push operations may cause confusion in the repository state. + + WalkFetchConnection + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

+ Supplies the content of a file for + DiffFormatter + . +

+ A content source is not thread-safe. Sources may contain state, including + information about the last ObjectLoader they returned. Callers must be + careful to ensure there is no more than one ObjectLoader pending on any + source, at any time. +

+
+ + Construct a content source for an ObjectReader. + Construct a content source for an ObjectReader. + the reader to obtain blobs from. + a source wrapping the reader. + + + Construct a content source for a working directory. + + Construct a content source for a working directory. + If the iterator is a + NGit.Treewalk.FileTreeIterator + an optimized version is + used that doesn't require seeking through a TreeWalk. + + the iterator to obtain source files through. + a content source wrapping the iterator. + + + Determine the size of the object. + Determine the size of the object. + the path of the file, relative to the root of the repository. + blob id of the file, if known. + the size in bytes. + the file cannot be accessed. + + + Open the object. + Open the object. + the path of the file, relative to the root of the repository. + blob id of the file, if known. + + a loader that can supply the content of the file. The loader must + be used before another loader can be obtained from this same + source. + + the file cannot be accessed. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + A pair of sources to access the old and new sides of a DiffEntry. + A pair of sources to access the old and new sides of a DiffEntry. + + + Construct a pair of sources. + Construct a pair of sources. + source to read the old side of a DiffEntry. + source to read the new side of a DiffEntry. + + + Determine the size of the object. + Determine the size of the object. + which side of the entry to read (OLD or NEW). + the entry to examine. + the size in bytes. + the file cannot be accessed. + + + Open the object. + Open the object. + which side of the entry to read (OLD or NEW). + the entry to examine. + + a loader that can supply the content of the file. The loader + must be used before another loader can be obtained from this + same source. + + the file cannot be accessed. + + + + Wraps a + Sequence + to assign hash codes to elements. +

+ This sequence acts as a proxy for the real sequence, caching element hash + codes so they don't need to be recomputed each time. Sequences of this type + must be used with a + HashedSequenceComparator<S> + . +

+ To construct an instance of this type use + HashedSequencePair<S> + . +

+
+ + + Wrap another comparator for use with + HashedSequence<S> + . +

+ This comparator acts as a proxy for the real comparator, evaluating the + cached hash code before testing the underlying comparator's equality. + Comparators of this type must be used with a + HashedSequence<S> + . +

+ To construct an instance of this type use + HashedSequencePair<S> + . +

+
+ + + Equivalence function for a + Sequence + compared by difference algorithm. +

+ Difference algorithms can use a comparator to compare portions of two + sequences and discover the minimal edits required to transform from one + sequence to the other sequence. +

+ Indexes within a sequence are zero-based. +

+
+ + Compare two items to determine if they are equivalent. + + Compare two items to determine if they are equivalent. + It is permissible to compare sequence + a + with itself (by passing + a + again in position + b + ). + + the first sequence. + + item of + ai + to compare. + + the second sequence. + + item of + bi + to compare. + + + true if the two items are identical according to this function's + equivalence rule. + + + + Get a hash value for an item in a sequence. + + Get a hash value for an item in a sequence. + If two items are equal according to this comparator's + SequenceComparator<S>.Equals(Sequence, int, Sequence, int) + + method, then this hash + method must produce the same integer result for both items. + It is not required for two items to have different hash values if they + are are unequal according to the + equals() + method. + + the sequence. + the item to obtain the hash for. + hash the hash value. + + + Modify the edit to remove common leading and trailing items. + + Modify the edit to remove common leading and trailing items. + The supplied edit + e + is reduced in size by moving the beginning A + and B points so the edit does not cover any items that are in common + between the two sequences. The ending A and B points are also shifted to + remove common items from the end of the region. + + the first sequence. + the second sequence. + the edit to start with and update. + + + e + if it was updated in-place, otherwise a new edit + containing the reduced region. + + + + + Wraps two + Sequence + instances to cache their element hash codes. +

+ This pair wraps two sequences that contain cached hash codes for the input + sequences. +

+
+ + Construct a pair to provide fast hash codes. + Construct a pair to provide fast hash codes. + the base comparator for the sequence elements. + the A sequence. + the B sequence. + + + obtain a comparator that uses the cached hash codes. + + + wrapper around A that includes cached hash codes. + + + wrapper around B that includes cached hash codes. + + + An extended form of Bram Cohen's patience diff algorithm. + + An extended form of Bram Cohen's patience diff algorithm. +

+ This implementation was derived by using the 4 rules that are outlined in + Bram Cohen's blog, + and then was further extended to support low-occurrence common elements. +

+ The basic idea of the algorithm is to create a histogram of occurrences for + each element of sequence A. Each element of sequence B is then considered in + turn. If the element also exists in sequence A, and has a lower occurrence + count, the positions are considered as a candidate for the longest common + subsequence (LCS). After scanning of B is complete the LCS that has the + lowest number of occurrences is chosen as a split point. The region is split + around the LCS, and the algorithm is recursively applied to the sections + before and after the LCS. +

+ By always selecting a LCS position with the lowest occurrence count, this + algorithm behaves exactly like Bram Cohen's patience diff whenever there is a + unique common element available between the two sequences. When no unique + elements exist, the lowest occurrence element is chosen instead. This offers + more readable diffs than simply falling back on the standard Myers' O(ND) + algorithm would produce. +

+ To prevent the algorithm from having an O(N^2) running time, an upper limit + on the number of unique elements in a histogram bucket is configured by + SetMaxChainLength(int) + . If sequence A has more than this many + elements that hash into the same hash bucket, the algorithm passes the region + to + SetFallbackAlgorithm(DiffAlgorithm) + + . If no fallback algorithm is + configured, the region is emitted as a replace edit. +

+ During scanning of sequence B, any element of A that occurs more than + SetMaxChainLength(int) + times is never considered for an LCS match + position, even if it is common between the two sequences. This limits the + number of locations in sequence A that must be considered to find the LCS, + and helps maintain a lower running time bound. +

+ So long as + SetMaxChainLength(int) + is a small constant (such as 64), + the algorithm runs in O(N * D) time, where N is the sum of the input lengths + and D is the number of edits in the resulting EditList. If the supplied + SequenceComparator<S> + has a good hash function, this implementation + typically out-performs + MyersDiff<S> + , even though its theoretical running + time is the same. +

+ This implementation has an internal limitation that prevents it from handling + sequences with more than 268,435,456 (2^28) elements. + + + +

Algorithm to use when there are too many element occurrences. + Algorithm to use when there are too many element occurrences. +
+ + Maximum number of positions to consider for a given element hash. + + Maximum number of positions to consider for a given element hash. + All elements with the same hash are stored into a single chain. The chain + size is capped to ensure search is linear time at O(len_A + len_B) rather + than quadratic at O(len_A * len_B). + + + + Set the algorithm used when there are too many element occurrences. + Set the algorithm used when there are too many element occurrences. + + the secondary algorithm. If null the region will be denoted as + a single REPLACE block. + + + + Maximum number of positions to consider for a given element hash. + + Maximum number of positions to consider for a given element hash. + All elements with the same hash are stored into a single chain. The chain + size is capped to ensure search is linear time at O(len_A + len_B) rather + than quadratic at O(len_A * len_B). + + new maximum length. + + + Result edits we have determined that must be made to convert a to b. + Result edits we have determined that must be made to convert a to b. + + + + Support + HistogramDiff + by computing occurrence counts of elements. +

+ Each element in the range being considered is put into a hash table, tracking + the number of times that distinct element appears in the sequence. Once all + elements have been inserted from sequence A, each element of sequence B is + probed in the hash table and the longest common subsequence with the lowest + occurrence count in A is used as the result. +

+
+ + + Keyed by + HistogramDiffIndex<S>.Hash(HashedSequence<S>, int) + + for + HistogramDiffIndex<S>.recs + index. + + + + + Number of low bits to discard from a key to index + HistogramDiffIndex<S>.table + . + + + + Describes a unique element in sequence A. + + Describes a unique element in sequence A. + The records in this table are actually 3-tuples of: +
    +
  • index of next record in this table that has same hash code
  • +
  • index of first element in this occurrence chain
  • +
  • occurrence count for this element (length of locs list)
  • +
+ The occurrence count is capped at + HistogramDiffIndex<S>.MAX_CNT + , as the field is only + a few bits wide. Elements that occur more frequently will have their + count capped. +
+
+ + + Number of elements in + HistogramDiffIndex<S>.recs + ; also is the unique element count. + + + + + For + ptr + , + next[ptr - ptrShift] + has subsequent index. + For the sequence element + ptr + , the value stored at location + next[ptr - ptrShift] + is the next occurrence of the exact same + element in the sequence. + Chains always run from the lowest index to the largest index. Therefore + the array will store + next[1] = 2 + , but never + next[2] = 1 + . + This allows a chain to terminate with + 0 + , as + 0 + would never + be a valid next element. + The array is sized to be + region.getLengthA() + and element indexes + are converted to array indexes by subtracting + HistogramDiffIndex<S>.ptrShift + , which is + just a cached version of + region.beginA + . + + + + + For element + ptr + in A, index of the record in + HistogramDiffIndex<S>.recs + array. + The record at + recs[recIdx[ptr - ptrShift]] + is the record + describing all occurrences of the element appearing in sequence A at + position + ptr + . The record is needed to get the occurrence count of + the element, or to locate all other occurrences of that element within + sequence A. This index provides constant-time access to the record, and + avoids needing to scan the hash chain. + + + + + Value to subtract from element indexes to key + HistogramDiffIndex<S>.next + array. + + + + + Equivalence function for + RawText + . + + + + No special treatment. + No special treatment. + + + Ignores all whitespace. + Ignores all whitespace. + + + Ignores leading whitespace. + Ignores leading whitespace. + + + Ignores trailing whitespace. + Ignores trailing whitespace. + + + Ignores whitespace occurring between non-whitespace characters. + Ignores whitespace occurring between non-whitespace characters. + + + Compute a hash code for a region. + Compute a hash code for a region. + the raw file content. + first byte of the region to hash. + 1 past the last byte of the region. 
+ hash code for the region [ptr, end) of raw. + + + + Wraps a + Sequence + to have a narrower range of elements. +

+ This sequence acts as a proxy for the real sequence, translating element + indexes on the fly by adding + begin + to them. Sequences of this type + must be used with a + SubsequenceComparator<S> + . +

+
+ + Construct a subsequence around the A region/base sequence. + Construct a subsequence around the A region/base sequence. + the A sequence. + + the region of + a + to create a subsequence around. + + + subsequence of + base + as described by A in + region + . + + + + Construct a subsequence around the B region/base sequence. + Construct a subsequence around the B region/base sequence. + the B sequence. + + the region of + b + to create a subsequence around. + + + subsequence of + base + as described by B in + region + . + + + + Adjust the Edit to reflect positions in the base sequence. + Adjust the Edit to reflect positions in the base sequence. + + edit to adjust in-place. Prior to invocation the indexes are + in terms of the two subsequences; after invocation the indexes + are in terms of the base sequences. + + the A sequence. + the B sequence. + + + Adjust the Edits to reflect positions in the base sequence. + Adjust the Edits to reflect positions in the base sequence. + + edits to adjust in-place. Prior to invocation the indexes are + in terms of the two subsequences; after invocation the indexes + are in terms of the base sequences. + + the A sequence. + the B sequence. + + always + edits + (as the list was updated in-place). + + + + Construct a subset of another sequence. + + Construct a subset of another sequence. + The size of the subsequence will be + end - begin + . + + the real sequence. + + First element index of + base + that will be part of this + new subsequence. The element at + begin + will be this + sequence's element 0. + + + One past the last element index of + base + that will be + part of this new subsequence. + + + + + Wrap another comparator for use with + Subsequence<S> + . +

+ This comparator acts as a proxy for the real comparator, translating element + indexes on the fly by adding the subsequence's begin offset to them. + Comparators of this type must be used with a + Subsequence<S> + . +

+
+ + Construct a comparator wrapping another comparator. + Construct a comparator wrapping another comparator. + the real comparator. + + + + A class used to execute a + Fetch + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. + + Git documentation about Fetch + + + + Base class for commands that use a + NGit.Transport.Transport + during execution. +

+ This class provides standard configuration of a transport for options such as + a + NGit.Transport.CredentialsProvider + + , a timeout, and a + TransportConfigCallback + . +

+
+ + Configured credentials provider + + + Configured transport timeout + + + Configured callback for transport configuration + + + + + + + the + NGit.Transport.CredentialsProvider + + to use + + + + this + + + + the timeout used for the transport step + + + this + + + + + if set, the callback will be invoked after the + NGit.Transport.Transport + has created, but before the + NGit.Transport.Transport + is used. The callback can use this + opportunity to set additional type-specific configuration on + the + NGit.Transport.Transport + instance. + + + + this + + + + + + this + + + + + + Configure transport with credentials provider, timeout, and config + callback + + + + + this + + + + + Configure a child command with the current configuration set in + this + command + + + + + this + + + + + + + + Executes the + fetch + command with all the options and parameters + collected by the setter methods of this class. Each instance of this + class should only be used for one invocation of the command (means: one + call to + Call() + ) + + + a + NGit.Transport.FetchResult + object representing the successful fetch + result + + when called with an invalid remote uri + + when an error occurs during transport + + + + + The remote (uri or name) used for the fetch operation. + + The remote (uri or name) used for the fetch operation. If no remote is + set, the default value of Constants.DEFAULT_REMOTE_NAME will + be used. + + NGit.Constants.DEFAULT_REMOTE_NAME + + + + + this + + + + the remote used for the remote operation + + + the timeout used for the fetch operation + + + whether to check received objects checked for validity + + + If set to true, objects received will be checked for validity + + + + this + + + + whether or not to remove refs which no longer exist in the source + + + If set to true, refs are removed which no longer exist in the source + + + + this + + + + the progress monitor for the fetch operation + + + The progress monitor associated with the fetch operation. 
+ + The progress monitor associated with the fetch operation. By default, + this is set to NullProgressMonitor + + NGit.NullProgressMonitor + + + + this + + + + the ref specs + + + The ref specs to be used in the fetch operation + + + + this + + + + The ref specs to be used in the fetch operation + + + + this + + + + the dry run preference for the fetch operation + + + Sets whether the fetch operation should be a dry run + + + + this + + + + the thin-pack preference for fetch operation + + + Sets the thin-pack preference for fetch operation. + + Sets the thin-pack preference for fetch operation. + Default setting is Transport.DEFAULT_FETCH_THIN + + + + + this + + + + Sets the specification of annotated tag behavior during fetch + + + + this + + + + Indicates a ReceivePack failure while scanning the pack stream. + Indicates a ReceivePack failure while scanning the pack stream. + + + Creates an exception with a root cause. + Creates an exception with a root cause. + the root cause of the unpacking failure. + + + Exception thrown when a fetch command was called with an invalid remote + + + message describing the invalid remote. + + + message describing the invalid remote. + why the remote is invalid. + + + + A class used to execute a + Push + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. + + Git documentation about Push + + + + + + + Executes the + push + command with all the options and parameters + collected by the setter methods of this class. Each instance of this + class should only be used for one invocation of the command (means: one + call to + Call() + ) + + + an iteration over + NGit.Transport.PushResult + objects + + when called with an invalid remote uri + + when an error occurs with the transport + + NGit.Api.Errors.GitAPIException + + + + The remote (uri or name) used for the push operation. + + The remote (uri or name) used for the push operation. 
If no remote is + set, the default value of Constants.DEFAULT_REMOTE_NAME will + be used. + + NGit.Constants.DEFAULT_REMOTE_NAME + + + + + this + + + + the remote used for the remote operation + + + The remote executable providing receive-pack service for pack transports. + + + The remote executable providing receive-pack service for pack transports. + If no receive-pack is set, the default value of + RemoteConfig.DEFAULT_RECEIVE_PACK will be used. + + NGit.Transport.RemoteConfig.DEFAULT_RECEIVE_PACK + + + + + this + + + + the receive-pack used for the remote operation + + + the timeout used for the push operation + + + the progress monitor for the push operation + + + The progress monitor associated with the push operation. + + The progress monitor associated with the push operation. By default, this + is set to NullProgressMonitor + + NGit.NullProgressMonitor + + + + this + + + + the ref specs + + + The ref specs to be used in the push operation + + + + this + + + + The ref specs to be used in the push operation + + + + this + + + + Push all branches under refs/heads/*. + Push all branches under refs/heads/*. + {code this} + + + Push all tags under refs/tags/*. + Push all tags under refs/tags/*. + {code this} + + + Add a reference to push. + Add a reference to push. + the source reference. The remote name will match. + + + this + . + + + + Add a reference to push. + Add a reference to push. + any reference name, or a reference specification. + + + this + . + + the reference name cannot be resolved. + + + + the dry run preference for the push operation + + + Sets whether the push operation should be a dry run + + + + this + + + + the thin-pack preference for push operation + + + Sets the thin-pack preference for push operation. + + Sets the thin-pack preference for push operation. + Default setting is Transport.DEFAULT_PUSH_THIN + + + + + this + + + + the force preference for push operation + + + Sets the force preference for push operation. 
+ Sets the force preference for push operation. + + + + this + + + + The Pull command + Git documentation about Pull + + + + + + a progress monitor + this instance + + + + Executes the + Pull + command with all the options and parameters + collected by the setter methods (e.g. + SetProgressMonitor(NGit.ProgressMonitor) + + ) of this class. Each + instance of this class should only be used for one invocation of the + command. Don't call this method twice on an instance. + + the result of the pull + NGit.Api.Errors.WrongRepositoryStateException + + NGit.Api.Errors.InvalidConfigurationException + + NGit.Api.Errors.DetachedHeadException + + NGit.Api.Errors.InvalidRemoteException + + NGit.Api.Errors.CanceledException + + NGit.Api.Errors.RefNotFoundException + + NGit.Api.Errors.NoHeadException + + NGit.Api.Errors.TransportException + + NGit.Api.Errors.GitAPIException + + + + + Encapsulates the result of a + PullCommand + + + + the fetch result, or null + + + the merge result, or null + + + the rebase result, or null + + + + the name of the remote configuration from which fetch was tried, + or null + + + + whether the pull was successful + + + Used to create a local branch. + Used to create a local branch. 
+ Git documentation about Branch + + + + + + + when trying to create (without force) a branch with a name + that already exists + + if the start point can not be found + + + if the provided name is null or otherwise + invalid + + the newly created branch + + + + + + + + + + + + the name of the new branch + this instance + + + + if true and the branch with the given name + already exists, the start-point of an existing branch will be + set to a new start-point; if false, the existing branch will + not be changed + + this instance + + + + corresponds to the start-point option; if null, + the current HEAD will be used + + this instance + + + + corresponds to the start-point option; if null, + the current HEAD will be used + + this instance + + + + corresponds to the --track/--no-track/--set-upstream options; + may be null + + this instance + + + + The modes available for setting up the upstream configuration + (corresponding to the --set-upstream, --track, --no-track options + + + + Used to delete one or several branches. + + Used to delete one or several branches. + The result of + Call() + is a list with the (full) names of the deleted + branches. + Note that we don't have a setter corresponding to the -r option; remote + tracking branches are simply deleted just like local branches. + + Git documentation about Branch + + + + + + + when trying to delete a branch which has not been merged into + the currently checked out branch without force + + NGit.Api.Errors.CannotDeleteCurrentBranchException + + the list with the (full) names of the deleted branches + + + + + the names of the branches to delete; if not set, this will do + nothing; invalid branch names will simply be ignored + + this instance + + + + true corresponds to the -D option, + false to the -d option (default)
+ if false a check will be performed whether the + branch to be deleted is already merged into the current branch + and deletion will be refused in this case + + this instance +
+ + Used to obtain a list of branches. + Used to obtain a list of branches. + Git documentation about Branch + + + + + + + + + + optional: corresponds to the -r/-a options; by default, only + local branches will be listed + + this instance + + + + The modes available for listing branches (corresponding to the -r and -a + options) + + + + Used to rename branches. + Used to rename branches. + Git documentation about Branch + + + + + + + if the old branch can not be found (branch with provided old + name does not exist or old name resolves to a tag) + + + if the provided new name is null or otherwise + invalid + + if a branch with the new name already exists + + + if rename is tried without specifying the old name and HEAD + is detached + + + + + the new name + this instance + + + + the name of the branch to rename; if not set, the currently + checked out branch (if any) will be renamed + + this instance + + + Support class to populate user authentication data on a connection. + + Support class to populate user authentication data on a connection. +

+ Instances of an HttpAuthMethod are not thread-safe, as some implementations + may need to maintain per-connection state information. + + + +

No authentication is configured. + No authentication is configured. +
+ + Handle an authentication failure and possibly return a new response. + Handle an authentication failure and possibly return a new response. + the connection that failed. + new authentication method to try. + + + Update this method with the credentials from the URIish. + Update this method with the credentials from the URIish. + the URI used to create the connection. + + the credentials provider, or null. If provided, + credentials in the URI + are ignored. + + + true if the authentication method is able to provide + authorization for the given URI + + + + Update this method with the given username and password pair. + Update this method with the given username and password pair. + + + + + Update connection properties based on this authentication method. + Update connection properties based on this authentication method. + + System.IO.IOException + + + Performs no user authentication. + Performs no user authentication. + + + + + + Performs HTTP basic authentication (plaintext username/password). + Performs HTTP basic authentication (plaintext username/password). + + + + + + Performs HTTP digest authentication. + Performs HTTP digest authentication. + + + + + + + Exception thrown when a command expected a non-detached + HEAD + reference + + + + The default constructor with a default message + + + + + + + + + + Exception thrown when an operation was canceled + + + + + + Thrown when trying to delete a branch which is currently checked out + + + the message + + + Exception thrown when a command fails due to an invalid configuration + + + + + + + + + + Exception thrown when an invalid Ref name was encountered + + + + + + + + + + Thrown when branch deletion fails due to unmerged data + + + The default constructor with a default message + + + + Thrown when trying to create a + NGit.Ref + with the same name as an existing + one + + + + + + + Thrown when a Ref can not be resolved + + + + + + + A class used to execute a + cherry-pick + command. 
It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) + + Git documentation about cherry-pick + + + + + + + Executes the + Cherry-Pick + command with all the options and + parameters collected by the setter methods (e.g. + Include(NGit.Ref) + of + this class. Each instance of this class should only be used for one + invocation of the command. Don't call this method twice on an instance. + + the result of the cherry-pick + NGit.Api.Errors.GitAPIException + + NGit.Api.Errors.WrongRepositoryStateException + + NGit.Api.Errors.ConcurrentRefUpdateException + + NGit.Api.Errors.UnmergedPathsException + + NGit.Api.Errors.NoMessageException + + NGit.Api.Errors.NoHeadException + + + + + a reference to a commit which is cherry-picked to the current + head + + + + this + + + + the Id of a commit which is cherry-picked to the current head + + + + this + + + + a name given to the commit + the Id of a commit which is cherry-picked to the current head + + + + this + + + + + the name that should be used in the "OURS" place for conflict + markers + + + + this + + + + The commit to be cherry-pick'ed did not have exactly one parent + + + + + + + + + + Mutable formatter to construct a single tree object. + + Mutable formatter to construct a single tree object. + This formatter does not process subtrees. Callers must handle creating each + subtree on their own. + To maintain good performance for bulk operations, this formatter does not + validate its input. Callers are responsible for ensuring the resulting tree + object is correctly well formed by writing entries in the correct order. + + + + Compute the size of a tree entry record. + + Compute the size of a tree entry record. + This method can be used to estimate the correct size of a tree prior to + allocating a formatter. 
Getting the size correct at allocation time + ensures the internal buffer is sized correctly, reducing copying. + + the mode the entry will have. + the length of the name, in bytes. + the length of the record. + + + Create an empty formatter with a default buffer size. + Create an empty formatter with a default buffer size. + + + Create an empty formatter with the specified buffer size. + Create an empty formatter with the specified buffer size. + + estimated size of the tree, in bytes. Callers can use + EntrySize(FileMode, int) + to estimate the size of each + entry in advance of allocating the formatter. + + + + + Add a link to a submodule commit, mode is + FileMode.GITLINK + . + + name of the entry. + the ObjectId to store in this entry. + + + + Add a subtree, mode is + FileMode.TREE + . + + name of the entry. + the ObjectId to store in this entry. + + + + Add a regular file, mode is + FileMode.REGULAR_FILE + . + + name of the entry. + the ObjectId to store in this entry. + + + Append any entry to the tree. + Append any entry to the tree. + name of the entry. + + mode describing the treatment of + id + . + + the ObjectId to store in this entry. + + + Append any entry to the tree. + Append any entry to the tree. + + name of the entry. The name should be UTF-8 encoded, but file + name encoding is not a well defined concept in Git. + + + mode describing the treatment of + id + . + + the ObjectId to store in this entry. + + + Append any entry to the tree. + Append any entry to the tree. + + buffer holding the name of the entry. The name should be UTF-8 + encoded, but file name encoding is not a well defined concept + in Git. + + + first position within + nameBuf + of the name data. + + + number of bytes from + nameBuf + to use as the name. + + + mode describing the treatment of + id + . + + the ObjectId to store in this entry. + + + Append any entry to the tree. + Append any entry to the tree. + + buffer holding the name of the entry. 
The name should be UTF-8 + encoded, but file name encoding is not a well defined concept + in Git. + + + first position within + nameBuf + of the name data. + + + number of bytes from + nameBuf + to use as the name. + + + mode describing the treatment of + id + . + + buffer holding the raw ObjectId of the entry. + + first position within + idBuf + to copy the id from. + + + + + + + Insert this tree and obtain its ObjectId. + Insert this tree and obtain its ObjectId. + the inserter to store the tree. + computed ObjectId of the tree + the tree could not be stored. + + + Compute the ObjectId for this tree + + ObjectId for this tree + + + Copy this formatter's buffer into a byte array. + + Copy this formatter's buffer into a byte array. + This method is not efficient, as it needs to create a copy of the + internal buffer in order to supply an array of the correct size to the + caller. If the buffer is just to pass to an ObjectInserter, consider + using + ObjectInserter.Insert(TreeFormatter) + + instead. + + a copy of this formatter's buffer. + + + Formatter for constructing the commit message for a merge commit. + + Formatter for constructing the commit message for a merge commit. +

+ The format should be the same as C Git does it, for compatibility. + + + +

Construct the merge commit message. + Construct the merge commit message. + the refs which will be merged + the branch ref which will be merged into + merge commit message +
+ + Add section with conflicting paths to merge message. + Add section with conflicting paths to merge message. + the original merge message + the paths with conflicts + merge message with conflicting paths added + + + + A credential requested from a + CredentialsProvider + . + Most users should work with the specialized subclasses: +
    +
  • + Username + for usernames
  • +
  • + Password + for passwords
  • +
  • + StringType + for other general string information
  • +
  • + CharArrayType + for other general secret information
  • +
+ This class is not thread-safe. Applications should construct their own + instance for each use, as the value is held within the CredentialItem object. +
+
+ + Initialize a prompt. + Initialize a prompt. + + prompt to display to the user alongside of the input field. + Should be sufficient text to indicate what to supply for this + item. + + + true if the value should be masked from displaying during + input. This should be true for passwords and other secrets, + false for names and other public data. + + + + prompt to display to the user. + + + true if the value should be masked when entered. + + + Clear the stored value, destroying it as much as possible. + Clear the stored value, destroying it as much as possible. + + + An item whose value is stored as a string. + + An item whose value is stored as a string. + When working with secret data, consider + CharArrayType + instead, as + the internal members of the array can be cleared, reducing the chances + that the password is left in memory after authentication is completed. + + + + Initialize a prompt for a single string. + Initialize a prompt for a single string. + + prompt to display to the user alongside of the input + field. Should be sufficient text to indicate what to + supply for this item. + + + true if the value should be masked from displaying during + input. This should be true for passwords and other + secrets, false for names and other public data. + + + + the current value + + + + + + An item whose value is stored as a char[] and is therefore clearable. + An item whose value is stored as a char[] and is therefore clearable. + + + Initialize a prompt for a secure value stored in a character array. + Initialize a prompt for a secure value stored in a character array. + + prompt to display to the user alongside of the input + field. Should be sufficient text to indicate what to + supply for this item. + + + true if the value should be masked from displaying during + input. This should be true for passwords and other + secrets, false for names and other public data. + + + + Destroys the current value, clearing the internal array. 
+ Destroys the current value, clearing the internal array. + + + Get the current value. + + Get the current value. + The returned array will be cleared out when + Clear() + is + called. Callers that need the array elements to survive should delay + invoking + clear() + until the value is no longer necessary. + + + the current value array. The actual internal array is + returned, reducing the number of copies present in memory. + + + + Set the new value, clearing the old value array. + Set the new value, clearing the old value array. + if not null, the array is copied. + + + Set the new value, clearing the old value array. + Set the new value, clearing the old value array. + the new internal array. The array is NOT copied. + + + An item whose value is a boolean choice, presented as Yes/No. + An item whose value is a boolean choice, presented as Yes/No. + + + Initialize a prompt for a single boolean answer. + Initialize a prompt for a single boolean answer. + + prompt to display to the user alongside of the input + field. Should be sufficient text to indicate what to + supply for this item. + + + + the current value + + + Set the new value. + Set the new value. + + + + An advice message presented to the user, with no response required. + An advice message presented to the user, with no response required. + + + Initialize an informational message. + Initialize an informational message. + message to display to the user. + + + Prompt for a username, which is not masked on input. + Prompt for a username, which is not masked on input. + + + Initialize a new username item, with a default username prompt. + Initialize a new username item, with a default username prompt. + + + Prompt for a password, which is masked on input. + Prompt for a password, which is masked on input. + + + Initialize a new password item, with a default password prompt. + Initialize a new password item, with a default password prompt. + + + Initialize a new password item, with given prompt. 
+ Initialize a new password item, with given prompt. + prompt message + + + Provide credentials for use in connecting to Git repositories. + + Provide credentials for use in connecting to Git repositories. + Implementors are strongly encouraged to support at least the minimal + Username + and + Password + items. + More sophisticated implementors may implement additional types, such as + StringType + . + CredentialItems are usually presented in bulk, allowing implementors to + combine them into a single UI widget and streamline the authentication + process for an end-user. + + UsernamePasswordCredentialsProvider + + + + the default credentials provider, or null. + + + Set the default credentials provider. + Set the default credentials provider. + the new default provider, may be null to select no default. + + + Check if the provider is interactive with the end-user. + + Check if the provider is interactive with the end-user. + An interactive provider may try to open a dialog box, or prompt for input + on the terminal, and will wait for a user response. A non-interactive + provider will either populate CredentialItems, or fail. + + + + true + if the provider is interactive with the end-user. + + + + + Check if the provider can supply the necessary + CredentialItem + s. + + the items the application requires to complete authentication. + + + + true + if this + CredentialsProvider + supports all of + the items supplied. + + + + Ask for the credential items to be populated. + Ask for the credential items to be populated. + the URI of the remote resource that needs authentication. + the items the application requires to complete authentication. + + + + true + if the request was successful and values were + supplied; + false + if the user canceled the request and did + not supply all requested values. + + if one of the items supplied is not supported. + + + + Ask for the credential items to be populated. + Ask for the credential items to be populated. 
+ the URI of the remote resource that needs authentication. + the items the application requires to complete authentication. + + + + true + if the request was successful and values were + supplied; + false + if the user canceled the request and did + not supply all requested values. + + if one of the items supplied is not supported. + + + + Reset the credentials provider for the given URI + + + + + A JSch + NSch.UserInfo + adapter for a + CredentialsProvider + . + + + + Wrap a CredentialsProvider to make it suitable for use with JSch. + Wrap a CredentialsProvider to make it suitable for use with JSch. + the JSch session this UserInfo will support authentication on. + + the provider that will perform the authentication. + + + + + Simple + CredentialsProvider + that always uses the same information. + + + + Initialize the provider with a single username and password. + Initialize the provider with a single username and password. + + + + + Initialize the provider with a single username and password. + Initialize the provider with a single username and password. + + + + + + + + Destroy the saved username and password.. + Destroy the saved username and password.. + + + + An exception thrown when a + NGit.Transport.CredentialItem + is requested from a + NGit.Transport.CredentialsProvider + + which is not supported by this provider. + + + + + Constructs an UnsupportedCredentialItem with the specified detail message + prefixed with provided URI. + + + Constructs an UnsupportedCredentialItem with the specified detail message + prefixed with provided URI. + + URI used for transport + message + + + + Key for + NGit.Config.Get<T>(NGit.Config.SectionParser<T>) + + . + + + + A note tree holding only note subtrees, each named using a 2 digit hex name. + + + A note tree holding only note subtrees, each named using a 2 digit hex name. 
+ The fanout buckets/trees contain on average 256 subtrees, naming the subtrees + by a slice of the ObjectId contained within them, from "00" through "ff". + Each fanout bucket has a + InMemoryNoteBucket.prefixLen + that defines how many digits it + skips in an ObjectId before it gets to the digits matching + table + . + The root tree has + prefixLen == 0 + , and thus does not skip any digits. + For ObjectId "c0ffee...", the note (if it exists) will be stored within the + bucket + table[0xc0] + . + The first level tree has + prefixLen == 2 + , and thus skips the first two + digits. For the same example "c0ffee..." object, its note would be found + within the + table[0xff] + bucket (as first 2 digits "c0" are skipped). + Each subtree is loaded on-demand, reducing startup latency for reads that + only need to examine a few objects. However, due to the rather uniform + distribution of the SHA-1 hash that is used for ObjectIds, accessing 256 + objects is very likely to load all of the subtrees into memory. + A FanoutBucket must be parsed from a tree object by + NoteParser + . + + + + A note bucket that has been loaded into the process. + A note bucket that has been loaded into the process. + + + A tree that stores note objects. + A tree that stores note objects. + FanoutBucket + LeafBucket + + + + + + + + + + + + + + + + + + Number of leading digits that leads to this bucket in the note path. + + Number of leading digits that leads to this bucket in the note path. + This is counted in terms of hex digits, not raw bytes. Each bucket level + is typically 2 higher than its parent, placing about 256 items in each + level of the tree. + + + + Chain of non-note tree entries found at this path in the tree. + + Chain of non-note tree entries found at this path in the tree. + During parsing of a note tree into the in-memory representation, + NoteParser + keeps track of all non-note tree entries and stores + them here as a sorted linked list. 
That list can be merged back with the + note data that is held by the subclass, allowing the tree to be + recreated. + + + + Fan-out table similar to the PackIndex structure. + + Fan-out table similar to the PackIndex structure. + Notes for an object are stored within the sub-bucket that is held here as + table[ objectId.getByte( prefixLen / 2 ) ] + . If the slot is null + there are no notes with that prefix. + + + + + Number of non-null slots in + table + . + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + A note tree holding only notes, with no subtrees. + + A note tree holding only notes, with no subtrees. + The leaf bucket contains on average less than 256 notes, all of whom share + the same leading prefix. If a notes branch has less than 256 notes, the top + level tree of the branch should be a LeafBucket. Once a notes branch has more + than 256 notes, the root should be a + FanoutBucket + and the LeafBucket + will appear only as a cell of a FanoutBucket. + Entries within the LeafBucket are stored sorted by ObjectId, and lookup is + performed using binary search. As the entry list should contain fewer than + 256 elements, the average number of compares to find an element should be + less than 8 due to the O(log N) lookup behavior. + A LeafBucket must be parsed from a tree object by + NoteParser + . + + + + All note blobs in this bucket, sorted sequentially. + All note blobs in this bucket, sorted sequentially. + + + + Number of items in + notes + . + + + + + + + + + + + + + A tree entry found in a note branch that isn't a valid note. + A tree entry found in a note branch that isn't a valid note. + + + Name of the entry in the tree, in raw format. + Name of the entry in the tree, in raw format. + + + Mode of the entry as parsed from the tree. + Mode of the entry as parsed from the tree. + + + The next non-note entry in the same tree, as defined by tree order. 
+ The next non-note entry in the same tree, as defined by tree order. + + + In-memory representation of a single note attached to one object. + In-memory representation of a single note attached to one object. + + + + A Git note about the object referenced by + noteOn + . + + the object that has a note attached to it. + the actual note data contained in this note + + + the note content + + + Index of notes from a note branch. + + Index of notes from a note branch. + This class is not thread-safe, and relies on an + NGit.ObjectReader + that it + borrows/shares with the caller. The reader can be used during any call, and + is not released by this class. The caller should arrange for releasing the + shared + ObjectReader + at the proper times. + + + + Construct a new empty note map. + Construct a new empty note map. + an empty note map. + + + + Shorten the note ref name by trimming off the + NGit.Constants.R_NOTES + prefix if it exists. + + + a more user friendly note name + + + Load a collection of notes from a branch. + Load a collection of notes from a branch. + + reader to scan the note branch with. This reader may be + retained by the NoteMap for the life of the map in order to + support lazy loading of entries. + + the revision of the note branch to read. + the note map read from the commit. + the repository cannot be accessed through the reader. + + a tree object is corrupt and cannot be read. + + a tree object wasn't actually a tree. + + a reference tree object doesn't exist. + + + + Load a collection of notes from a tree. + Load a collection of notes from a tree. + + reader to scan the note branch with. This reader may be + retained by the NoteMap for the life of the map in order to + support lazy loading of entries. + + the note tree to read. + the note map read from the tree. + the repository cannot be accessed through the reader. + + a tree object is corrupt and cannot be read. + + a tree object wasn't actually a tree. 
+ + a reference tree object doesn't exist. + + + + Load a collection of notes from a tree. + Load a collection of notes from a tree. + + reader to scan the note branch with. This reader may be + retained by the NoteMap for the life of the map in order to + support lazy loading of entries. + + the note tree to read. + the note map read from the tree. + the repository cannot be accessed through the reader. + + a tree object is corrupt and cannot be read. + + a tree object wasn't actually a tree. + + a reference tree object doesn't exist. + + + + Construct a new note map from an existing note bucket. + Construct a new note map from an existing note bucket. + the root bucket of this note map + + reader to scan the note branch with. This reader may be + retained by the NoteMap for the life of the map in order to + support lazy loading of entries. + + the note map built from the note bucket + + + Borrowed reader to access the repository. + Borrowed reader to access the repository. + + + All of the notes that have been loaded. + All of the notes that have been loaded. + + + + an iterator that iterates over notes of this NoteMap. Non note + entries are ignored by this iterator. + + + + Lookup a note for a specific ObjectId. + Lookup a note for a specific ObjectId. + the object to look for. + the note's blob ObjectId, or null if no note exists. + a portion of the note space is not accessible. + + + + Lookup a note for a specific ObjectId. + Lookup a note for a specific ObjectId. + the object to look for. + the note for the given object id, or null if no note exists. + a portion of the note space is not accessible. + + + + Determine if a note exists for the specified ObjectId. + Determine if a note exists for the specified ObjectId. + the object to look for. + true if a note exists; false if there is no note. + a portion of the note space is not accessible. + + + + Open and return the content of an object's note. + + Open and return the content of an object's note. 
+ This method assumes the note is fairly small and can be accessed + efficiently. Larger notes should be accessed by streaming: +
+            ObjectId dataId = thisMap.get(id);
+            if (dataId != null)
+            reader.open(dataId).openStream();
+            
+
+ object to lookup the note of. + + maximum number of bytes to return. If the note data size is + larger than this limit, LargeObjectException will be thrown. + + + if a note is defined for + id + , the note content. If no note + is defined, null. + + + the note data is larger than + sizeLimit + . + + the note's blob does not exist in the repository. + + the note's blob cannot be read from the repository + +
+ + Attach (or remove) a note on an object. + + Attach (or remove) a note on an object. + If no note exists, a new note is stored. If a note already exists for the + given object, it is replaced (or removed). + This method only updates the map in memory. + If the caller wants to attach a UTF-8 encoded string message to an + object, + Set(NGit.AnyObjectId, string, NGit.ObjectInserter) + + is a convenient + way to encode and update a note in one step. + + + the object to attach the note to. This same ObjectId can later + be used as an argument to + Get(NGit.AnyObjectId) + or + GetCachedBytes(NGit.AnyObjectId, int) + + to read back the + noteData + . + + + data to associate with the note. This must be the ObjectId of + a blob that already exists in the repository. If null the note + will be deleted, if present. + + a portion of the note space is not accessible. + + + + Attach a note to an object. + + Attach a note to an object. + If no note exists, a new note is stored. If a note already exists for the + given object, it is replaced (or removed). + + + the object to attach the note to. This same ObjectId can later + be used as an argument to + Get(NGit.AnyObjectId) + or + GetCachedBytes(NGit.AnyObjectId, int) + + to read back the + noteData + . + + + text to store in the note. The text will be UTF-8 encoded when + stored in the repository. If null the note will be deleted, if + the empty string a note with the empty string will be stored. + + + inserter to write the encoded + noteData + out as a blob. + The caller must ensure the inserter is flushed before the + updated note map is made available for reading. + + the note data could not be stored in the repository. + + + + Remove a note from an object. + + Remove a note from an object. + If no note exists, no action is performed. + This method only updates the map in memory. + + the object to remove the note from. + a portion of the note space is not accessible. + + + + Write this note map as a tree. 
+ Write this note map as a tree. + + inserter to use when writing trees to the object database. + Caller is responsible for flushing the inserter before trying + to read the objects, or exposing them through a reference. + + the top level tree. + a tree could not be written. + + + the root note bucket + + + + + + + + + Custom tree parser to select note bucket type and load it. + Custom tree parser to select note bucket type and load it. + + + + Parse a tree object into a + NoteBucket + instance. + The type of note tree is automatically detected by examining the items + within the tree, and allocating the proper storage type based on the + first note-like entry encountered. Since the method parses by guessing + the type on the first element, malformed note trees can be read as the + wrong type of tree. + This method is not recursive, it parses the one tree given to it and + returns the bucket. If there are subtrees for note storage, they are + setup as lazy pointers that will be resolved at a later time. + + + common hex digits that all notes within this tree share. The + root tree has + prefix.length() == 0 + , the first-level + subtrees should be + prefix.length()==2 + , etc. + + the tree to read from the repository. + reader to access the tree object. + bucket to holding the notes of the specified tree. + + treeId + cannot be accessed. + + + + + + + + + A class used to execute a + Rebase + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) +

+

+ Git documentation about Rebase +
+ + The name of the "rebase-merge" folder + + + The name of the "stopped-sha" file + + + + + + + Executes the + Rebase + command with all the options and parameters + collected by the setter methods of this class. Each instance of this + class should only be used for one invocation of the command. Don't call + this method twice on an instance. + + an object describing the result of this command + NGit.Api.Errors.GitAPIException + + NGit.Api.Errors.WrongRepositoryStateException + + NGit.Api.Errors.NoHeadException + + NGit.Api.Errors.RefNotFoundException + + + + + + + + + + + the commit if we had to do a commit, otherwise null + NGit.Api.Errors.GitAPIException + + System.IO.IOException + + + + + + + + + + Removes the number of lines given in the parameter from the + git-rebase-todo file but preserves comments and other lines + that can not be parsed as steps + + + System.IO.IOException + + + + + + + checks if we can fast-forward and returns the new head if it is possible + + + the new head, or null + System.IO.IOException + NGit.Api.Errors.GitAPIException + + + + + + + + + + + + + + + + + + + + + + + + + + the upstream commit + + + this + + + + id of the upstream commit + + + this + + + + the upstream branch + + + this + + NGit.Api.Errors.RefNotFoundException + + + + Optionally override the name of the upstream. + + Optionally override the name of the upstream. If this is used, it has to + come after any + SetUpstream(NGit.Revwalk.RevCommit) + + call. 
+ + the name which will be used to refer to upstream in conflicts + + + + this + + + + the operation to perform + + + this + + + + a progress monitor + this instance + + + The available operations + + + + The result of a + RebaseCommand + execution + + + + + Create RebaseResult with status + Status.STOPPED + + current commit + + + + Create RebaseResult with status + Status.FAILED + + list of paths causing this rebase to fail + + + the overall status + + + + the current commit if status is + Status.STOPPED + , otherwise + null + + + + + the list of paths causing this rebase to fail (see + NGit.Merge.ResolveMerger.GetFailingPaths() + + for details) if status is + Status.FAILED + , otherwise null + + + + + A class used to execute a + Rm + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) + + Git documentation about Rm + + + + + + File to remove. + + + this + + + + + Executes the + Rm + command. Each instance of this class should only + be used for one invocation of the command. Don't call this method twice + on an instance. + + the DirCache after Rm + + + + + Checkout a branch to the working tree. + + Checkout a branch to the working tree. +

+ Examples (git is a + Git + instance): +

+ Check out an existing branch: +

+            git.checkout().setName("feature").call();
+            
+

+ Check out paths from the index: +

+            git.checkout().addPath("file1.txt").addPath("file2.txt").call();
+            
+

+ Check out a path from a commit: +

+            git.checkout().setStartPoint("HEAD&circ;").addPath("file1.txt").call();
+            
+

+ Create a new branch and check it out: +

+            git.checkout().setCreateBranch(true).setName("newbranch").call();
+            
+

+ Create a new tracking branch for a remote branch and check it out: +

+            git.checkout().setCreateBranch(true).setName("stable")
+            .setUpstreamMode(SetupUpstreamMode.SET_UPSTREAM)
+            .setStartPoint("origin/stable").call();
+            
+
+ Git documentation about Checkout +
+ + + + + + when trying to create (without force) a branch with a name + that already exists + + if the start point or branch can not be found + + + if the provided name is null or otherwise + invalid + + if the checkout results in a conflict + + the newly created branch + + + + Add a single path to the list of paths to check out. + + Add a single path to the list of paths to check out. To check out all + paths, use + SetAllPaths(bool) + . +

+ If this option is set, neither the + SetCreateBranch(bool) + nor + SetName(string) + option is considered. In other words, these + options are exclusive. + + path to update in the working tree and index + + + this + + + +

Set whether to checkout all paths. + + Set whether to checkout all paths. +

+ This options should be used when you want to do a path checkout on the + entire repository and so calling + AddPath(string) + is not possible + since empty paths are not allowed. +

+ If this option is set, neither the + SetCreateBranch(bool) + nor + SetName(string) + option is considered. In other words, these + options are exclusive. + + + true to checkout all paths, false + otherwise + + + + this + + 2.0 + + +

Checkout paths into index and working directory + this instance + System.IO.IOException + NGit.Api.Errors.RefNotFoundException + +
+ + + + + + + + + + + Specify the name of the branch or commit to check out, or the new branch + name. + + + Specify the name of the branch or commit to check out, or the new branch + name. +

+ When only checking out paths and not switching branches, use + SetStartPoint(string) + or + SetStartPoint(NGit.Revwalk.RevCommit) + + to + specify from which branch or commit to check out files. +

+ When + SetCreateBranch(bool) + is set to true, use + this method to set the name of the new branch to create and + SetStartPoint(string) + or + SetStartPoint(NGit.Revwalk.RevCommit) + + to + specify the start point of the branch. + + the name of the branch or commit + this instance + + +

Specify whether to create a new branch. + + Specify whether to create a new branch. +

+ If true is used, the name of the new branch must be set + using + SetName(string) + . The commit at which to start the new + branch can be set using + SetStartPoint(string) + or + SetStartPoint(NGit.Revwalk.RevCommit) + + ; if not specified, HEAD is used. Also + see + SetUpstreamMode(SetupUpstreamMode) + + for setting up branch tracking. + + + if true a branch will be created as part of the + checkout and set to the specified start point + + this instance + + +

Specify to force the ref update in case of a branch switch. + Specify to force the ref update in case of a branch switch. + + if true and the branch with the given name + already exists, the start-point of an existing branch will be + set to a new start-point; if false, the existing branch will + not be changed + + this instance +
+ + Set the name of the commit that should be checked out. + + Set the name of the commit that should be checked out. +

+ When checking out files and this is not specified or null, + the index is used. +

+ When creating a new branch, this will be used as the start point. If not + specified or null, the current HEAD is used. + + commit name to check out + this instance + + +

Set the commit that should be checked out. + + Set the commit that should be checked out. +

+ When creating a new branch, this will be used as the start point. If not + specified or null, the current HEAD is used. +

+ When checking out files and this is not specified or null, + the index is used. + + commit to check out + this instance + + +

+ When creating a branch with + SetCreateBranch(bool) + , this can + be used to configure branch tracking. + + + corresponds to the --track/--no-track options; may be + null + + this instance +
+ + the result, never null + + + Create an empty git repository or reinitalize an existing one + Git documentation about init + + + + Executes the + Init + command. + + + the newly created + Git + object with associated repository + + + + + The optional directory associated with the init operation. + + The optional directory associated with the init operation. If no + directory is set, we'll use the current directory + + the directory to init to + this instance + + + whether the repository is bare or not + this instance + + + File Utilities + + + + Option to delete given + File + + + + + Option to recursively delete given + File + + + + Option to retry deletion if not successful + + + Option to skip deletion if file doesn't exist + + + Option not to throw exceptions when a deletion finally doesn't succeed. + Option not to throw exceptions when a deletion finally doesn't succeed. + 2.0 + + + Delete file or empty folder + + File + to be deleted + + + if deletion of + f + fails. This may occur if + f + didn't exist when the method was called. This can therefore + cause IOExceptions during race conditions when multiple + concurrent threads all try to delete the same file. + + + + Delete file or folder + + File + to be deleted + + + deletion options, + RECURSIVE + for recursive deletion of + a subtree, + RETRY + to retry when deletion failed. + Retrying may help if the underlying file system doesn't allow + deletion of files being read by another thread. + + + if deletion of + f + fails. This may occur if + f + didn't exist when the method was called. This can therefore + cause IOExceptions during race conditions when multiple + concurrent threads all try to delete the same file. This + exception is not thrown when IGNORE_ERRORS is set. + + + + Creates the directory named by this abstract pathname. + Creates the directory named by this abstract pathname. + directory to be created + + if creation of + d + fails. 
This may occur if + d + did exist when the method was called. This can therefore + cause IOExceptions during race conditions when multiple + concurrent threads all try to create the same directory. + + + + Creates the directory named by this abstract pathname. + Creates the directory named by this abstract pathname. + directory to be created + + if + true + skip creation of the given directory if it + already exists in the file system + + + if creation of + d + fails. This may occur if + d + did exist when the method was called. This can therefore + cause IOExceptions during race conditions when multiple + concurrent threads all try to create the same directory. + + + + + Creates the directory named by this abstract pathname, including any + necessary but nonexistent parent directories. + + + Creates the directory named by this abstract pathname, including any + necessary but nonexistent parent directories. Note that if this operation + fails it may have succeeded in creating some of the necessary parent + directories. + + directory to be created + + if creation of + d + fails. This may occur if + d + did exist when the method was called. This can therefore + cause IOExceptions during race conditions when multiple + concurrent threads all try to create the same directory. + + + + + Creates the directory named by this abstract pathname, including any + necessary but nonexistent parent directories. + + + Creates the directory named by this abstract pathname, including any + necessary but nonexistent parent directories. Note that if this operation + fails it may have succeeded in creating some of the necessary parent + directories. + + directory to be created + + if + true + skip creation of the given directory if it + already exists in the file system + + + if creation of + d + fails. This may occur if + d + did exist when the method was called. 
This can therefore + cause IOExceptions during race conditions when multiple + concurrent threads all try to create the same directory. + + + + + Atomically creates a new, empty file named by this abstract pathname if + and only if a file with this name does not yet exist. + + + Atomically creates a new, empty file named by this abstract pathname if + and only if a file with this name does not yet exist. The check for the + existence of the file and the creation of the file if it does not exist + are a single operation that is atomic with respect to all other + filesystem activities that might affect the file. +

+ Note: this method should not be used for file-locking, as the resulting + protocol cannot be made to work reliably. The + Sharpen.FileLock + facility + should be used instead. + + the file to be created + if the named file already exists or if an I/O error occurred + + + +

+ A performance optimized variant of + TreeFilter.ANY_DIFF + which should + be used when among the walked trees there is a + NGit.Dircache.DirCacheIterator + and a + NGit.Treewalk.WorkingTreeIterator + . Please see the documentation of + TreeFilter.ANY_DIFF + for a basic description of the semantics. +

+ This filter tries to avoid computing content ids of the files in the + working-tree. In contrast to + TreeFilter.ANY_DIFF + this filter takes + care to first compare the entry from the + NGit.Dircache.DirCacheIterator + with the + entries from all other iterators besides the + NGit.Treewalk.WorkingTreeIterator + . + Since all those entries have fast access to content ids that is very fast. If + a difference is detected in this step this filter decides to include that + path before even looking at the working-tree entry. +

+ If no difference is found then we have to compare index and working-tree as + the last step. By making use of + NGit.Treewalk.WorkingTreeIterator.IsModified(NGit.Dircache.DirCacheEntry, bool) + + we can avoid the computation of the content id if the file is not dirty. +

+ Instances of this filter should not be used for multiple + NGit.Treewalk.TreeWalk + s. + Always construct a new instance of this filter for each TreeWalk. +

+
+ + Creates a new instance of this filter. + + Creates a new instance of this filter. Do not use an instance of this + filter in multiple treewalks. + + + the index of the + NGit.Dircache.DirCacheIterator + in the associated + treewalk + + + the index of the + NGit.Treewalk.WorkingTreeIterator + in the associated + treewalk + + + + Creates a new instance of this filter. + + Creates a new instance of this filter. Do not use an instance of this + filter in multiple treewalks. + + + the index of the + NGit.Dircache.DirCacheIterator + in the associated + treewalk + + + the index of the + NGit.Treewalk.WorkingTreeIterator + in the associated + treewalk + + + true if the filter should skip working tree files that are + declared as ignored by the standard exclude mechanisms.. + + + + + + + + + + Copy all entries which are still in untrackedParentFolders and which + belong to a path this treewalk has left into untrackedFolders. + + + Copy all entries which are still in untrackedParentFolders and which + belong to a path this treewalk has left into untrackedFolders. It is sure + that we will not find any tracked files underneath these paths. Therefore + these paths definitely belong to untracked folders. + + the current path of the treewalk + + + The method returns the list of ignored files and folders. + + The method returns the list of ignored files and folders. Only the root + folder of an ignored folder hierarchy is reported. If a/b/c is listed in + the .gitignore then you should not expect a/b/c/d/e/f to be reported + here. Only a/b/c will be reported. Furthermore only ignored files / + folders are returned that are NOT in the index. + + ignored paths + + + + all paths of folders which contain only untracked files/folders. + If on the associated treewalk postorder traversal was turned on + (see + NGit.Treewalk.TreeWalk.PostOrderTraversal(bool) + + ) then an + empty list will be returned. 
+ + + + Thrown when branch deletion fails due to unmerged data + + + The default constructor with a default message + + + The default constructor with a default message + original exception + + + Caches when a file was last read, making it possible to detect future edits. + + + Caches when a file was last read, making it possible to detect future edits. +

+ This object tracks the last modified time of a file. Later during an + invocation of + IsModified(Sharpen.FilePath) + the object will return true if the + file may have been modified and should be re-read from disk. +

+ A snapshot does not "live update" when the underlying filesystem changes. + Callers must poll for updates by periodically invoking + IsModified(Sharpen.FilePath) + . +

+ To work around the "racy git" problem (where a file may be modified multiple + times within the granularity of the filesystem modification clock) this class + may return true from isModified(File) if the last modification time of the + file is less than 3 seconds ago. + + + +

A FileSnapshot that is considered to always be modified. + + A FileSnapshot that is considered to always be modified. +

+ This instance is useful for application code that wants to lazily read a + file, but only after + IsModified(Sharpen.FilePath) + gets invoked. The returned + snapshot contains only invalid status information. + + + +

A FileSnapshot that is clean if the file does not exist. + + A FileSnapshot that is clean if the file does not exist. +

+ This instance is useful if the application wants to consider a missing + file to be clean. + IsModified(Sharpen.FilePath) + will return false if the file + path does not exist. + + + +

Record a snapshot for a specific file path. + + Record a snapshot for a specific file path. +

+ This method should be invoked before the file is accessed. + + + the path to later remember. The path's current status + information is saved. + + the snapshot. + + +

+ Record a snapshot for a file for which the last modification time is + already known. + + + Record a snapshot for a file for which the last modification time is + already known. +

+ This method should be invoked before the file is accessed. + + the last modification time of the file + the snapshot. + + +

Last observed modification time of the path. + Last observed modification time of the path. +
+ + Last wall-clock time the path was read. + Last wall-clock time the path was read. + + + + True once + lastRead + is far later than + lastModified + . + + + + time of last snapshot update + + + Check if the path may have been modified since the snapshot was saved. + Check if the path may have been modified since the snapshot was saved. + the path the snapshot describes. + true if the path needs to be read again. + + + Update this snapshot when the content hasn't changed. + + Update this snapshot when the content hasn't changed. +

+ If the caller gets true from + IsModified(Sharpen.FilePath) + , re-reads the + content, discovers the content is identical, and + Equals(FileSnapshot) + is true, it can use + SetClean(FileSnapshot) + to make a future + IsModified(Sharpen.FilePath) + return false. The logic goes something like + this: +

+            if (snapshot.isModified(path)) {
+            FileSnapshot other = FileSnapshot.save(path);
+            Content newContent = ...;
+            if (oldContent.equals(newContent) && snapshot.equals(other))
+            snapshot.setClean(other);
+            }
+            
+
+ the other snapshot. +
+ + Compare two snapshots to see if they cache the same information. + Compare two snapshots to see if they cache the same information. + the other snapshot. + true if the two snapshots share the same information. + + + + Encapsulates the result of a + CheckoutCommand + + + + + The + Status.ERROR + result; + + + + + The + Status.NOT_TRIED + result; + + + + Create a new fail result. + + Create a new fail result. If status is + Status.CONFLICTS + , + fileList is a list of conflicting files, if status is + Status.NONDELETED + , fileList is a list of not deleted + files. All other values ignore fileList. To create a result + for + Status.OK + , see + CheckoutResult(System.Collections.Generic.IList<E>, System.Collections.Generic.IList<E>) + + . + + the failure status + + the list of files to store, status has to be either + Status.CONFLICTS + or + Status.NONDELETED + . + + + + Create a new OK result with modified and removed files. + Create a new OK result with modified and removed files. + the modified files + the removed files. + + + the status + + + + the list of files that created a checkout conflict, or an empty + list if + GetStatus() + is not + Status.CONFLICTS + ; + + + + + the list of files that could not be deleted during checkout, or + an empty list if + GetStatus() + is not + Status.NONDELETED + ; + + + + + the list of files that where modified during checkout, or an + empty list if + GetStatus() + is not + Status.OK + + + + + the list of files that where removed during checkout, or an empty + list if + GetStatus() + is not + Status.OK + + + + The status + + + + A class used to execute a + revert + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. 
Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) + + Git documentation about revert + + + + + + + Executes the + revert + command with all the options and parameters + collected by the setter methods (e.g. + Include(NGit.Ref) + of this + class. Each instance of this class should only be used for one invocation + of the command. Don't call this method twice on an instance. + + + on success the + NGit.Revwalk.RevCommit + pointed to by the new HEAD is + returned. If a failure occurred during revert null + is returned. The list of successfully reverted + NGit.Ref + 's can + be obtained by calling + GetRevertedRefs() + + NGit.Api.Errors.GitAPIException + + NGit.Api.Errors.WrongRepositoryStateException + + NGit.Api.Errors.ConcurrentRefUpdateException + + NGit.Api.Errors.UnmergedPathsException + + NGit.Api.Errors.NoMessageException + + + + + a reference to a commit which is reverted into the current + head + + + + this + + + + the Id of a commit which is reverted into the current head + + + this + + + + a name given to the commit + the Id of a commit which is reverted into the current head + + + this + + + + + the list of successfully reverted + NGit.Ref + 's. Never + null but maybe an empty list if no commit was + successfully cherry-picked + + + + + the result of the merge failure, null if no merge + failure occurred during the revert + + + + the unmerged paths, will be null if no merge conflicts + + + + Default implementation of the + NoteMerger + . +

+ If ours and theirs are both non-null, which means they are either both edits + or both adds, then this merger will simply join the content of ours and + theirs (in that order) and return that as the merge result. +

+ If one of ours/theirs is non-null and the other one is null then the non-null
+ value is returned as the merge result. This means that an edit/delete
+ conflict is resolved by keeping the edit version.

+ If both ours and theirs are null then the result of the merge is also null. +

+
+ + Three-way note merge operation. + + Three-way note merge operation. +

+ This operation takes three versions of a note: base, ours and theirs, + performs the three-way merge and returns the merge result. + + + +

Merges the conflicting note changes. + + Merges the conflicting note changes. +

+ base, ours and their are all notes on the same object. + + version of the Note + version of the Note + version of the Note + the object reader that must be used to read Git objects + the object inserter that must be used to insert Git objects + + the merge result + + in case there was a merge conflict which this note merger + couldn't resolve + + + in case the reader or the inserter would throw an IOException + the implementor will most likely want to propagate it as it + can't do much to recover from it + + + + + + + +

Three-way note tree merge. + + Three-way note tree merge. +

+ Direct implementation of NoteMap merger without using + NGit.Treewalk.TreeWalk + and + NGit.Treewalk.AbstractTreeIterator + + + + +

+ Constructs a NoteMapMerger with custom + NoteMerger + and custom + NGit.Merge.MergeStrategy + . + + Git repository + note merger for merging conflicting changes on a note + merge strategy for merging non-note entries +
+ + + Constructs a NoteMapMerger with + DefaultNoteMerger + as the merger + for notes and the + NGit.Merge.MergeStrategy.RESOLVE + as the strategy for + resolving conflicts on non-notes + + Git repository + + + Performs the merge. + Performs the merge. + base version of the note tree + ours version of the note tree + theirs version of the note tree + merge result as a new NoteMap + System.IO.IOException + + + + This method is called only when it is known that there is some difference + between base, ours and theirs. + + + This method is called only when it is known that there is some difference + between base, ours and theirs. + + + + + + merge result as an InMemoryBucket + System.IO.IOException + + + + + + + + + + + + + + + + + + + + This exception will be thrown from the + NoteMerger + when a conflict on + Notes content is found during merge. + + + + + Construct a NotesMergeConflictException for the specified base, ours and + theirs note versions. + + + Construct a NotesMergeConflictException for the specified base, ours and + theirs note versions. + + note version + note version + note version + + + + Constructs a NotesMergeConflictException for the specified base, ours and + theirs versions of the root note tree. + + + Constructs a NotesMergeConflictException for the specified base, ours and + theirs versions of the root note tree. + + version of the root note tree + version of the root note tree + version of the root note tree + + + The standard "transfer", "fetch" and "receive" configuration parameters. + + The standard "transfer", "fetch" and "receive" configuration parameters. + + + + + Key for + NGit.Config.Get<T>(NGit.Config.SectionParser<T>) + + . + + + + strictly verify received objects? + + + + Parses a pack stream and imports it for an + NGit.ObjectInserter + . +

+ Applications can acquire an instance of a parser from ObjectInserter's + NGit.ObjectInserter.NewPackParser(Sharpen.InputStream) + + method. +

+ Implementations of + NGit.ObjectInserter + should subclass this type and + provide their own logic for the various + on*() + event methods declared + to be abstract. +

+
+ + Size of the internal stream buffer. + Size of the internal stream buffer. + + + Object database used for loading existing objects. + Object database used for loading existing objects. + + + + Position in the input stream of + buf[0] + . + + + + Every object contained within the incoming pack. + + Every object contained within the incoming pack. +

+ This is a subset of + entries + , as thin packs can add additional + objects to + entries + by copying already existing objects from the + repository onto the end of the thin pack to make it self-contained. + + + +

Objects referenced by their name from deltas, that aren't in this pack. + + Objects referenced by their name from deltas, that aren't in this pack. +

+ This is the set of objects that were copied onto the end of this pack to + make it complete. These objects were not transmitted by the remote peer, + but instead were assumed to already exist in the local repository. + + + +

Blobs whose contents need to be double-checked after indexing. + Blobs whose contents need to be double-checked after indexing. +
+ + Message to protect the pack data from garbage collection. + Message to protect the pack data from garbage collection. + + + Git object size limit + + + Initialize a pack parser. + Initialize a pack parser. + database the parser will write its objects into. + the stream the parser will read. + + + true if a thin pack (missing base objects) is permitted. + + + Configure this index pack instance to allow a thin pack. + + Configure this index pack instance to allow a thin pack. +

+ Thin packs are sometimes used during network transfers to allow a delta + to be sent without a base object. Such packs are not permitted on disk. + + true to enable a thin pack. + + +

Configure this index pack instance to keep track of new objects. + + Configure this index pack instance to keep track of new objects. +

+ By default an index pack doesn't save the new objects that were created + when it was instantiated. Setting this flag to + true + allows the + caller to use + GetNewObjectIds() + to retrieve that list. + + + true + to enable keeping track of new objects. + + + +

+ Configure this index pack instance to keep track of the objects assumed + for delta bases. + + + Configure this index pack instance to keep track of the objects assumed + for delta bases. +

+ By default an index pack doesn't save the objects that were used as delta + bases. Setting this flag to + true + will allow the caller to use + GetBaseObjectIds() + to retrieve that list. + + + true + to enable keeping track of delta bases. + + + + true if the EOF should be read from the input after the footer. + + +

Ensure EOF is read from the input stream after the footer. + Ensure EOF is read from the input stream after the footer. + true if the EOF should be read; false if it is not checked. +
+ + true if there is data expected after the pack footer. + + + + true if there is additional data in InputStream after pack. + This requires the InputStream to support the mark and reset + functions. + + + + the new objects that were sent by the user + + + set of objects the incoming pack assumed for delta purposes + + + Configure the checker used to validate received objects. + + Configure the checker used to validate received objects. +

+ Usually object checking isn't necessary, as Git implementations only + create valid objects in pack files. However, additional checking may be + useful if processing data from an untrusted source. + + the checker instance; null to disable object checking. + + +

Configure the checker used to validate received objects. + + Configure the checker used to validate received objects. +

+ Usually object checking isn't necessary, as Git implementations only + create valid objects in pack files. However, additional checking may be + useful if processing data from an untrusted source. +

+ This is shorthand for: +

+            setObjectChecker(on ? new ObjectChecker() : null);
+            
+
+ true to enable the default checker; false to disable it. +
+ + the message to record with the pack lock. + + + Set the lock message for the incoming pack data. + Set the lock message for the incoming pack data. + + if not null, the message to associate with the incoming data + while it is locked to prevent garbage collection. + + + + Set the maximum allowed Git object size. + + Set the maximum allowed Git object size. +

+ If an object is larger than the given size the pack-parsing will throw an
+ exception aborting the parsing.
+ the Git object size limit. If zero then there is no limit.
+
+

Get the number of objects in the stream. + + Get the number of objects in the stream. +

+ The object count is only available after + Parse(NGit.ProgressMonitor) + has returned. The count may have been increased if the stream was a thin + pack, and missing bases objects were appending onto it by the subclass. + + number of objects parsed out of the stream. + + +

Get the information about the requested object. + + Get the information about the requested object. +

+ The object information is only available after + Parse(NGit.ProgressMonitor) + has returned. + + + index of the object in the stream. Must be between 0 and + GetObjectCount() + -1. + + the object information. + + +

Get all of the objects, sorted by their name. + + Get all of the objects, sorted by their name. +

+ The object information is only available after + Parse(NGit.ProgressMonitor) + has returned. +

+ To maintain lower memory usage and good runtime performance, this method + sorts the objects in-place and therefore impacts the ordering presented + by + GetObject(int) + . + + comparison function, if null objects are stored by ObjectId. + sorted list of objects in this pack stream. + + +

Parse the pack stream. + Parse the pack stream. + + callback to provide progress feedback during parsing. If null, + NGit.NullProgressMonitor + will be used. + + + the pack lock, if one was requested by setting + SetLockMessage(string) + . + + the stream is malformed, or contains corrupt objects. + +
+ + Parse the pack stream. + Parse the pack stream. + + receives progress feedback during the initial receiving + objects phase. If null, + NGit.NullProgressMonitor + will be + used. + + receives progress feedback during the resolving objects phase. + + + the pack lock, if one was requested by setting + SetLockMessage(string) + . + + the stream is malformed, or contains corrupt objects. + + + + + + + + + + + + + + + + Read the header of the current object. + + Read the header of the current object. +

+ After the header has been parsed, this method automatically invokes + OnObjectHeader(Source, byte[], int, int) + + to allow the + implementation to update its internal checksums for the bytes read. +

+ When this method returns the database will be positioned on the first + byte of the deflated data stream. + + the info object to populate. + + + info + , after populating. + + the size cannot be read. + + + + + + + + + + + + + + + + + + + + + + + + current position of the input stream being parsed. + + + + + + + + + + + + + + + + + + a temporary byte array for use by the caller. + + +

Construct a PackedObjectInfo instance for this parser. + Construct a PackedObjectInfo instance for this parser. + identity of the object to be tracked. + + if the object was previously an unresolved delta, this is the + delta object that was tracking it. Otherwise null. + + + if the object was previously an unresolved delta, this is the + ObjectId of the base of the delta. The base may be outside of + the pack stream if the stream was a thin-pack. + + info object containing this object's data. +
+ + Store bytes received from the raw stream. + + Store bytes received from the raw stream. +

+ This method is invoked during + Parse(NGit.ProgressMonitor) + as data is + consumed from the incoming stream. Implementors may use this event to + archive the raw incoming stream to the destination repository in large + chunks, without paying attention to object boundaries. +

+ The only component of the pack not supplied to this method is the last 20 + bytes of the pack that comprise the trailing SHA-1 checksum. Those are + passed to + OnPackFooter(byte[]) + . + + buffer to copy data out of. + first offset within the buffer that is valid. + number of bytes in the buffer that are valid. + the stream cannot be archived. + + +

Store (and/or checksum) an object header. + + Store (and/or checksum) an object header. +

+ Invoked after any of the + onBegin() + events. The entire header is + supplied in a single invocation, before any object data is supplied. + + where the data came from + buffer to read data from. + first offset within buffer that is valid. + number of bytes in buffer that are valid. + the stream cannot be archived. + + +

Store (and/or checksum) a portion of an object's data. + + Store (and/or checksum) a portion of an object's data. +

+ This method may be invoked multiple times per object, depending on the + size of the object, the size of the parser's internal read buffer, and + the alignment of the object relative to the read buffer. +

+ Invoked after + OnObjectHeader(Source, byte[], int, int) + + . + + where the data came from + buffer to read data from. + first offset within buffer that is valid. + number of bytes in buffer that are valid. + the stream cannot be archived. + + +

Invoked for commits, trees, tags, and small blobs. + Invoked for commits, trees, tags, and small blobs. + the object info, populated. + the type of the object. + inflated data for the object. + the object cannot be archived. +
+ + Provide the implementation with the original stream's pack header. + Provide the implementation with the original stream's pack header. + number of objects expected in the stream. + the implementation refuses to work with this many objects. + + + + Provide the implementation with the original stream's pack footer. + Provide the implementation with the original stream's pack footer. + + the trailing 20 bytes of the pack, this is a SHA-1 checksum of + all of the pack data. + + the stream cannot be archived. + + + Provide the implementation with a base that was outside of the pack. + + Provide the implementation with a base that was outside of the pack. +

+ This event only occurs on a thin pack for base objects that were outside + of the pack and came from the local repository. Usually an implementation + uses this event to compress the base and append it onto the end of the + pack, so the pack stays self-contained. + + type of the base object. + complete content of the base object. + + packed object information for this base. Implementors must + populate the CRC and offset members if returning true. + + + true if the + info + should be included in the object list + returned by + GetSortedObjectList(System.Collections.Generic.IComparer<T>) + + , false if it + should not be included. + + the base could not be included into the pack. + + + +

Event indicating a thin pack has been completely processed. + + Event indicating a thin pack has been completely processed. +

+ This event is invoked only if a thin pack has delta references to objects + external from the pack. The event is called after all of those deltas + have been resolved. + + the pack cannot be archived. + + +

Reposition the database to re-read a previously stored object. + + Reposition the database to re-read a previously stored object. +

+ If the database is computing CRC-32 checksums for object data, it should + reset its internal CRC instance during this method call. + + + the object position to begin reading from. This is from + NewInfo(NGit.AnyObjectId, UnresolvedDelta, NGit.ObjectId) + + . + + object to populate with type and size. + + the + info + object. + + the database cannot reposition to this location. + + + +

Reposition the database to re-read a previously stored object. + + Reposition the database to re-read a previously stored object. +

+ If the database is computing CRC-32 checksums for object data, it should + reset its internal CRC instance during this method call. + + + the object position to begin reading from. This is an instance + previously returned by + OnEndDelta() + . + + object to populate with type and size. + + the + info + object. + + the database cannot reposition to this location. + + + +

Read from the database's current position into the buffer. + Read from the database's current position into the buffer. + the buffer to copy read data into. + + position within + dst + to start copying data into. + + + ideal target number of bytes to read. Actual read length may + be shorter. + + number of bytes stored. + the database cannot be accessed. +
+ + Check the current CRC matches the expected value. + + Check the current CRC matches the expected value. +

+ This method is invoked when an object is read back in from the database + and its data is used during delta resolution. The CRC is validated after + the object has been fully read, allowing the parser to verify there was + no silent data corruption. +

+ Implementations are free to ignore this check by always returning true if + they are performing other data integrity validations at a lower level. + + + the prior CRC that was recorded during the first scan of the + object from the pack stream. + + true if the CRC matches; false if it does not. + + +

Event notifying the start of an object stored whole (not as a delta). + Event notifying the start of an object stored whole (not as a delta). + position of this object in the incoming stream. + + type of the object; one of + NGit.Constants.OBJ_COMMIT + , + NGit.Constants.OBJ_TREE + , + NGit.Constants.OBJ_BLOB + , or + NGit.Constants.OBJ_TAG + . + + + size of the object when fully inflated. The size stored within + the pack may be larger or smaller, and is not yet known. + + the object cannot be recorded. +
+ + Event notifying the the current object. + Event notifying the the current object. + object information. + the object cannot be recorded. + + + Event notifying start of a delta referencing its base by offset. + Event notifying start of a delta referencing its base by offset. + position of this object in the incoming stream. + + + position of the base object in the incoming stream. The base + must be before the delta, therefore + + baseStreamPosition + < deltaStreamPosition + + . This is not the position + returned by a prior end object event. + + + size of the delta when fully inflated. The size stored within + the pack may be larger or smaller, and is not yet known. + + the object cannot be recorded. + + + Event notifying start of a delta referencing its base by ObjectId. + Event notifying start of a delta referencing its base by ObjectId. + position of this object in the incoming stream. + + + name of the base object. This object may be later in the + stream, or might not appear at all in the stream (in the case + of a thin-pack). + + + size of the delta when fully inflated. The size stored within + the pack may be larger or smaller, and is not yet known. + + the object cannot be recorded. + + + Event notifying the the current object. + Event notifying the the current object. + + object information that must be populated with at least the + offset. + + the object cannot be recorded. + + + + + + + + + + + + Location data is being obtained from. + Location data is being obtained from. + + + Type and size information about an object in the database buffer. + Type and size information about an object in the database buffer. + + + The type of the object. + The type of the object. + + + The inflated size of the object. + The inflated size of the object. + + + Information about an unresolved delta in this pack stream. + Information about an unresolved delta in this pack stream. + + + offset within the input stream. + + + the CRC-32 checksum of the stored delta data. 
+ + + the CRC-32 checksum of the stored delta data. + + + + + + + + + + + + + + + + + + + Consumes a pack stream and stores as a pack file in + ObjectDirectory + . +

+ To obtain an instance of a parser, applications should use + NGit.ObjectInserter.NewPackParser(Sharpen.InputStream) + + . +

+
+ + CRC-32 computation for objects that are appended onto the pack. + CRC-32 computation for objects that are appended onto the pack. + + + + Running SHA-1 of any base objects appended after + origEnd + . + + + + Preferred format version of the pack-*.idx file to generate. + Preferred format version of the pack-*.idx file to generate. + + + If true, pack with 0 objects will be stored. + If true, pack with 0 objects will be stored. Usually these are deleted. + + + Path of the temporary file holding the pack data. + Path of the temporary file holding the pack data. + + + + Path of the index created for the pack, to find objects quickly at read + time. + + + Path of the index created for the pack, to find objects quickly at read + time. + + + + + Read/write handle to + tmpPack + while it is being parsed. + + + + Length of the original pack stream, before missing bases were appended. + Length of the original pack stream, before missing bases were appended. + + + + The original checksum of data up to + origEnd + . + + + + Current end of the pack file. + Current end of the pack file. + + + Checksum of the entire pack file. + Checksum of the entire pack file. + + + Compresses delta bases when completing a thin pack. + Compresses delta bases when completing a thin pack. + + + The pack that was created, if parsing was successful. + The pack that was created, if parsing was successful. + + + Set the pack index file format version this instance will create. + Set the pack index file format version this instance will create. + + the version to write. The special version 0 designates the + oldest (most compatible) format available for the objects. + + PackIndexWriter + + + Configure this index pack instance to keep an empty pack. + + Configure this index pack instance to keep an empty pack. +

+ By default an empty pack (a pack with no objects) is not kept, as doing so 
+ is completely pointless. With no objects in the pack there is no data stored 
+ by it, so the pack is unnecessary. 
+ 
+ true to enable keeping an empty pack. 
+ 
+ 

+ Get the imported + PackFile + . +

+ This method is supplied only to support testing; applications shouldn't + be using it directly to access the imported data. +

+ the imported PackFile, if parsing was successful. +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Iterates through an open object list. + + Iterates through an open object list. +

+ A cached object list should be constructed by enumerating from a single + stable commit back to the beginning of the project, using an ObjectWalk: +

+            ObjectWalk walk = new ObjectWalk(repository);
+            walk.markStart(walk.parseCommit(listName));
+            RevCommit commit;
+            while ((commit = walk.next()) != null)
+            list.addCommit(commit);
+            RevObject object;
+            while ((object = walk.nextObject()) != null)
+            list.addObject(object, walk.getPathHashCode());
+            
+

+ NGit.Storage.Pack.PackWriter + relies on the list covering a single commit, and going all + the way back to the root. If a list contains multiple starting commits the + PackWriter will include all of those objects, even if the client did not ask + for them, or should not have been given the objects. + + + +

Initialize the list iterator. + Initialize the list iterator. + + the revision pool the iterator will use when allocating the + returned objects. + +
+ + Lookup an object from the revision pool. + Lookup an object from the revision pool. + the object to allocate. + + the type of the object. The type must be accurate, as it is + used to allocate the proper RevObject instance. + + the object. + + + Pop the next most recent commit. + + Pop the next most recent commit. +

+ Commits should be returned in descending commit time order, or in + topological order. Either ordering is acceptable for a list to use. + + next most recent commit; null if traversal is over. + the list cannot be read. + + +

Pop the next most recent object. + + Pop the next most recent object. +

+ Only RevTree and RevBlob may be returned from this method, as these are + the only non-commit types reachable from a RevCommit. Lists may return + the objects clustered by type, or clustered by order of first-discovery + when walking from the most recent to the oldest commit. + + the next object. Null at the end of the list. + the list cannot be read. + + +

Get the current object's path hash code. + + Get the current object's path hash code. +

+ The path hash code should be cached from the ObjectWalk. + + path hash code; any integer may be returned. + + +

Release the resources associated with this iterator. + Release the resources associated with this iterator. +
+ + Clone a repository into a new working directory + Git documentation about Clone + + + Create clone command with no repository set + + + + Executes the + Clone + command. + + + the newly created + Git + object with associated repository + + NGit.Api.Errors.InvalidRemoteException + + NGit.Api.Errors.TransportException + + NGit.Api.Errors.GitAPIException + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + the uri to clone from + this instance + + + The optional directory associated with the clone operation. + + The optional directory associated with the clone operation. If the + directory isn't set, a name associated with the source uri will be used. + + NGit.Transport.URIish.GetHumanishName() + + the directory to clone to + this instance + + + whether the cloned repository is bare or not + this instance + + + + The remote name used to keep track of the upstream repository for the + clone operation. + + + The remote name used to keep track of the upstream repository for the + clone operation. If no remote name is set, the default value of + Constants.DEFAULT_REMOTE_NAME will be used. + + NGit.Constants.DEFAULT_REMOTE_NAME + + name that keeps track of the upstream repository + this instance + + + the initial branch to check out when cloning the repository + this instance + + + The progress monitor associated with the clone operation. + + The progress monitor associated with the clone operation. By default, + this is set to NullProgressMonitor + + NGit.NullProgressMonitor + + + + this + + + + + true when all branches have to be fetched (indicates wildcard + in created fetch refspec), false otherwise. + + + + this + + + + + true to initialize and update submodules. Ignored when + SetBare(bool) + is set to true. + + + + this + + + + + collection of branches to clone. Ignored when allSelected is + true. + + + + this + + + + + if set to true no branch will be checked out + after the clone. 
This enhances performance of the clone + command when there is no need for a checked out branch. + + + + this + + + + + + + + + + + + + Describes a pack file + ObjectReuseAsIs + can append onto a stream. + + + + Objects that start this pack. + + Objects that start this pack. +

+ All objects reachable from the tips are contained within this pack. If + PackWriter + is going to include everything reachable from all of + these objects, this cached pack is eligible to be appended directly onto + the output pack stream. + + the tip objects that describe this pack. + + +

Get the number of objects in this pack. + Get the number of objects in this pack. + the total object count for the pack. + if the object count cannot be read. +
+ + Get the number of delta objects stored in this pack. + + Get the number of delta objects stored in this pack. +

+ This is an optional method, not every cached pack storage system knows + the precise number of deltas stored within the pack. This number must be + smaller than + GetObjectCount() + as deltas are not supposed to span + across pack files. +

+ This method must be fast, if the only way to determine delta counts is to + scan the pack file's contents one object at a time, implementors should + return 0 and avoid the high cost of the scan. + + + the number of deltas; 0 if the number is not known or there are + no deltas. + + if the delta count cannot be read. + + +

Determine if this pack contains the object representation given. + + Determine if this pack contains the object representation given. +

+ PackWriter uses this method during the finding sources phase to prune + away any objects from the leading thin-pack that already appear within + this pack and should not be sent twice. +

+ Implementors are strongly encouraged to rely on looking at + rep + only and using its internal state to decide if this object is within this + pack. Implementors should ensure a representation from this cached pack + is tested as part of + ObjectReuseAsIs.SelectObjectRepresentation(PackWriter, NGit.ProgressMonitor, Sharpen.Iterable<T>) + + , ensuring this method would eventually return true if the object would + be included by this cached pack. + + the object being packed. Can be used as an ObjectId. + + representation from the + ObjectReuseAsIs + instance that + originally supplied this CachedPack. + + true if this pack contains this object. + + + + + + + + + + + + + + +

+ Logs activity that occurred within + UploadPack + . +

+ Implementors of the interface are responsible for associating the current + thread to a particular connection, if they need to also include connection + information. One method is to use a + Sharpen.ThreadLocal<T> + to remember + the connection information before invoking UploadPack. +

+
+ + A simple no-op logger. + A simple no-op logger. + + + Notice to the logger after a pack has been sent. + Notice to the logger after a pack has been sent. + the statistics after sending a pack to the client. + + + Default resolver serving from the local filesystem. + Default resolver serving from the local filesystem. + + + + Locate a Git + NGit.Repository + by name from the URL. + + + + Resolver configured to open nothing. + Resolver configured to open nothing. + + + + Locate and open a reference to a + NGit.Repository + . +

+ The caller is responsible for closing the returned Repository. +

+ + the current request, may be used to inspect session state + including cookies or user authentication. + + name of the repository, as parsed out of the URL. + the opened repository instance, never null. + + the repository does not exist or the name is incorrectly + formatted as a repository name. + + + the repository may exist, but HTTP access is not allowed + without authentication, i.e. this corresponds to an HTTP 401 + Unauthorized. + + + the repository may exist, but HTTP access is not allowed on the + target repository, for the current user. + + + the repository may exist, but HTTP access is not allowed for + the current request. The exception message contains a detailed + message that should be shown to the user. + + + +
+ + + + + Initialize an empty file based resolver. + Initialize an empty file based resolver. + + + Create a new resolver for the given path. + Create a new resolver for the given path. + the base path all repositories are rooted under. + + if true, exports all repositories, ignoring the check for the + git-daemon-export-ok + files. + + + + + + + + + false if git-daemon-export-ok is required to export + a repository; true if git-daemon-export-ok is + ignored. + + FileResolver<C>.SetExportAll(bool) + + + + Set whether or not to export all repositories. + + Set whether or not to export all repositories. +

+ If false (the default), repositories must have a + git-daemon-export-ok file to be accessed through this + daemon. +

+ If true, all repositories are available through the daemon, whether or + not git-daemon-export-ok exists. + + + + +

Add a single repository to the set that is exported by this daemon. + + Add a single repository to the set that is exported by this daemon. +

+ The existence (or lack-thereof) of git-daemon-export-ok is + ignored by this method. The repository is always published. + + name the repository will be published under. + the repository instance. + + +

Recursively export all Git repositories within a directory. + Recursively export all Git repositories within a directory. + + the directory to export. This directory must not itself be a + git repository, but any directory below it which has a file + named git-daemon-export-ok will be published. + +
+ + Check if this repository can be served. + + Check if this repository can be served. +

+ The default implementation of this method returns true only if either + FileResolver<C>.IsExportAll() + is true, or the + git-daemon-export-ok + file + is present in the repository's directory. + + the current HTTP request. + name of the repository, as present in the URL. + the opened repository instance. + true if the repository is accessible; false if not. + + the repository could not be accessed, the caller will claim + the repository does not exist. + + + +

Indicates the request service is not authorized for current user. + Indicates the request service is not authorized for current user. +
+ + Indicates the request service is not available. + Indicates the request service is not available. + + + Indicates the request service is not enabled on a repository. + Indicates the request service is not enabled on a repository. + + + Indicates the request service is not available. + Indicates the request service is not available. + + + + A class used to execute a + Reset + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) + + Git documentation about Reset + + + + + + + Executes the + Reset + command. Each instance of this class should + only be used for one invocation of the command. Don't call this method + twice on an instance. + + the Ref after reset + NGit.Api.Errors.GitAPIException + + + + + the ref to reset to + this instance + + + the mode of the reset command + this instance + + + the file to add + this instance + + + + + + + + + + + + + + + + Kind of reset + + + Random access list that allocates entries in blocks. + + Random access list that allocates entries in blocks. +

+ Unlike + System.Collections.ArrayList<E> + + , this type does not need to reallocate the + internal array in order to expand the capacity of the list. Access to any + element is constant time, but requires two array lookups instead of one. +

+ To handle common usages, + BlockList<T>.AddItem(object) + and + BlockList<T>.Iterator() + use + internal code paths to amortize out the second array lookup, making addition + and simple iteration closer to one array operation per element processed. +

+ Similar to + ArrayList + , adding or removing from any position except the + end of the list requires O(N) time to copy all elements between the + modification point and the end of the list. Applications are strongly + encouraged to not use this access pattern with this list implementation. + + + +

Initialize an empty list. + Initialize an empty list. +
+ + Initialize an empty list with an expected capacity. + Initialize an empty list with an expected capacity. + number of elements expected to be in the list. + + + Quickly append all elements of another BlockList. + Quickly append all elements of another BlockList. + the list to copy elements from. + + + Quickly append all elements from an array. + Quickly append all elements from an array. + the source array. + first index to copy. + number of elements to copy. + + + + Fast, efficient map for + ObjectId + subclasses in only one map. +

+ To use this map type, applications must have their entry value type extend + from + Entry + , which itself extends from ObjectId. +

+ Object instances may only be stored in ONE ObjectIdOwnerMap. This + restriction exists because the map stores internal map state within each + object instance. If an instance is be placed in another ObjectIdOwnerMap it + could corrupt one or both map's internal state. +

+ If an object instance must be in more than one map, applications may use + ObjectIdOwnerMap for one of the maps, and + ObjectIdSubclassMap<V> + for the + other map(s). It is encouraged to use ObjectIdOwnerMap for the map that is + accessed most often, as this implementation runs faster than the more general + ObjectIdSubclassMap implementation. +

+
+ + Size of the initial directory, will grow as necessary. + Size of the initial directory, will grow as necessary. + + + Number of bits in a segment's index. + Number of bits in a segment's index. Segments are 2^11 in size. + + + Top level directory of the segments. + + Top level directory of the segments. +

+ The low + ObjectIdOwnerMap<V>.bits + of the SHA-1 are used to select the segment from + this directory. Each segment is constant sized at 2^SEGMENT_BITS. + + + +

Total number of objects in this map. + Total number of objects in this map. +
+ + + The map doubles in capacity when + ObjectIdOwnerMap<V>.size + reaches this target. + + + + + Number of low bits used to form the index into + ObjectIdOwnerMap<V>.directory + . + + + + + Low bit mask to index into + ObjectIdOwnerMap<V>.directory + , + 2^bits-1 + . + + + + Create an empty map. + Create an empty map. + + + Remove all entries from this map. + Remove all entries from this map. + + + Lookup an existing mapping. + Lookup an existing mapping. + the object identifier to find. + the instance mapped to toFind, or null if no mapping exists. + + + Returns true if this map contains the specified object. + Returns true if this map contains the specified object. + object to find. + true if the mapping exists for this object; false otherwise. + + + Store an object for future lookup. + + Store an object for future lookup. +

+ An existing mapping for must not be in this map. Callers must + first call + ObjectIdOwnerMap<V>.Get(AnyObjectId) + + to verify there is no current + mapping prior to adding a new mapping, or use + ObjectIdOwnerMap<V>.AddIfAbsent<Q>(Entry) + + . + + the object to store. + + +

Store an object for future lookup. + + Store an object for future lookup. +

+ Stores + newValue + , but only if there is not already an object for + the same object name. Callers can tell if the value is new by checking + the return value with reference equality: +

+            V obj = ...;
+            boolean wasNew = map.addIfAbsent(obj) == obj;
+            
+
+ the object to store. + + + newValue + if stored, or the prior value already stored and + that would have been returned had the caller used + get(newValue) + first. + +
+ + number of objects in this map. + + + + true if + ObjectIdOwnerMap<V>.Size() + is 0. + + + + Run remote commands using Jsch. + + Run remote commands using Jsch. +

+ This class is the default session implementation using Jsch. Note that + JschConfigSessionFactory + is used to create the actual session passed + to the constructor. + + + +

+ Create a new session object by passing the real Jsch session and the URI + information. + + + Create a new session object by passing the real Jsch session and the URI + information. + + the real Jsch session created elsewhere. + the URI information for the remote connection +
+ + + + + + A kludge to allow + TransportSftp + to get an Sftp channel from Jsch. + Ideally, this method would be generic, which would require implementing + generic Sftp channel operations in the RemoteSession class. + + a channel suitable for Sftp operations. + on problems getting the channel. + + + Implementation of Process for running a single command using Jsch. + + Implementation of Process for running a single command using Jsch. +

+ Uses the Jsch session to do actual command execution and manage the + execution. + + + +

+ Opens a channel on the session ("sock") for executing the given + command, opens streams, and starts command execution. + + + Opens a channel on the session ("sock") for executing the given + command, opens streams, and starts command execution. + + the command to execute + the timeout value, in seconds, for the command. + + on problems opening a channel or connecting to the remote + host + + on problems opening streams +
+ + + + + + + + + + + + + + + Encapsulates the result of a + CherryPickCommand + . + + + + commit the head points at after this cherry-pick + list of successfully cherry-picked Ref's + + + + + list of paths causing this cherry-pick to fail (see + NGit.Merge.ResolveMerger.GetFailingPaths() + + for details) + + + + + A CherryPickResult with status + CherryPickStatus.CONFLICTING + + + + the status this cherry-pick resulted in + + + + the commit the head points at after this cherry-pick, + null if + GetStatus() + is not + CherryPickStatus.OK + + + + + the list of successfully cherry-picked Ref's, + null if + GetStatus() + is not + CherryPickStatus.OK + + + + + the list of paths causing this cherry-pick to fail (see + NGit.Merge.ResolveMerger.GetFailingPaths() + + for details), + null if + GetStatus() + is not + CherryPickStatus.FAILED + + + + The cherry-pick status + + + + A class telling where the working-tree, the index and the current HEAD differ + from each other. + + + A class telling where the working-tree, the index and the current HEAD differ + from each other. Collections are exposed containing the paths of the modified + files. E.g. to find out which files are dirty in the working tree (modified + but not added) you would inspect the collection returned by + GetModified() + . +

+ The same path can be returned by multiple getters. E.g. if a modification has 
+ been added to the index and afterwards the corresponding working tree file is 
+ again modified this path will be returned by 
+ GetModified() 
+ and 
+ GetChanged() 
+ 
+ 
+ 
+ 
+ 
+ true if no differences exist between the working-tree, the index, 
+ and the current HEAD, false if differences do exist 
+ 
+ 
+ 
+ list of files added to the index, not in HEAD (e.g. what you get 
+ if you call 'git add ...' on a newly created file) 
+ 
+ 
+ 
+ list of files changed from HEAD to index (e.g. what you get if 
+ you modify an existing file and call 'git add ...' on it) 
+ 
+ 
+ 
+ list of files removed from index, but in HEAD (e.g. what you get 
+ if you call 'git rm ...' on an existing file) 
+ 
+ 
+ 
+ list of files in index, but not filesystem (e.g. what you get if 
+ you call 'rm ...' on an existing file) 
+ 
+ 
+ 
+ list of files modified on disk relative to the index (e.g. what 
+ you get if you modify an existing file without adding it to the 
+ index) 
+ 
+ 
+ 
+ list of files that are not ignored, and not in the index. (e.g. 
+ what you get if you create a new file without adding it to the 
+ index) 
+ 
+ 
+ 
+ list of files that are in conflict. (e.g. what you get if you 
+ modify a file that was modified by someone else in the meantime) 
+ 
+ 

+ A class used to execute a + Status + command. It has setters for all + supported options and arguments of this command and a + Call() + method + to finally execute the command. Each instance of this class should only be + used for one invocation of the command (means: one call to + Call() + ) + + Git documentation about Status +
+ + + + + + Executes the + Status + command with all the options and parameters + collected by the setter methods of this class. Each instance of this + class should only be used for one invocation of the command. Don't call + this method twice on an instance. + + + a + Status + object telling about each path where working + tree, index or HEAD differ from each other. + + + + + + + To set the + NGit.Treewalk.WorkingTreeIterator + which should be used. If this + method is not called a standard + NGit.Treewalk.FileTreeIterator + is used. + + a working tree iterator + + + this + + + + + Hook invoked by + UploadPack + before during critical phases. +

+ If any hook function throws + ServiceMayNotContinueException + then + processing stops immediately and the exception is thrown up the call stack. + Most phases of UploadPack will try to report the exception's message text to + the end-user over the client's protocol connection. +

+
+ + A simple no-op hook. + A simple no-op hook. + + + Invoked before negotiation round is started. + Invoked before negotiation round is started. + the upload pack instance handling the connection. + the list of wanted objects. + number of objects the client has offered. + abort; the message will be sent to the user. + + + + + Invoked after a negotiation round is completed. + Invoked after a negotiation round is completed. + the upload pack instance handling the connection. + the list of wanted objects. + + number of objects this round found to be common. In a smart + HTTP transaction this includes the objects that were + previously found to be common. + + + number of objects in this round the local repository does not + have, but that were offered as potential common bases. + + + true if a pack is ready to be sent (the commit graph was + successfully cut). + + abort; the message will be sent to the user. + + + + + Invoked just before a pack will be sent to the client. + Invoked just before a pack will be sent to the client. + the upload pack instance handling the connection. + + the list of wanted objects. These may be RevObject or + RevCommit if the processed parsed them. Implementors should + not rely on the values being parsed. + + + the list of common objects. Empty on an initial clone request. + These may be RevObject or RevCommit if the processed parsed + them. Implementors should not rely on the values being parsed. + + abort; the message will be sent to the user. + + + + + + + + + + + + + + Indicates UploadPack may not continue execution. + Indicates UploadPack may not continue execution. + + + Indicates a transport service may not continue execution. + Indicates a transport service may not continue execution. + 2.0 + + + Initialize with no message. + Initialize with no message. + + + + a message explaining why it cannot continue. This message may + be shown to an end-user. + + + + true if the message was already output to the client. 
+ + + Mark this message has being sent to the client. + Mark this message has being sent to the client. + + + Initialize with no message. + Initialize with no message. + + + + a message explaining why it cannot continue. This message may + be shown to an end-user. + + + + Add object notes. + Add object notes. + Git documentation about Notes + + + + + + + + + Sets the object id of object you want a note on. + + Sets the object id of object you want a note on. If the object already + has a note, the existing note will be replaced. + + + + + this + + + + the notes message used when adding a note + + + this + + + + + + + + the ref to read notes from. Note, the default value of + NGit.Constants.R_NOTES_COMMITS + will be used if nothing is + set + + + + this + + NGit.Constants.R_NOTES_COMMITS + + + List object notes. + List object notes. + Git documentation about Notes + + + + + + the requested notes + + + + + the ref to read notes from. Note, the default value of + NGit.Constants.R_NOTES_COMMITS + will be used if nothing is + set + + + + this + + NGit.Constants.R_NOTES_COMMITS + + + Remove object notes. + Remove object notes. + Git documentation about Notes + + + + + + + + + Sets the object id of object you want to remove a note + + + + this + + + + + + + + the ref to read notes from. Note, the default value of + NGit.Constants.R_NOTES_COMMITS + will be used if nothing is + set + + + + this + + NGit.Constants.R_NOTES_COMMITS + + + Show an object note. + Show an object note. + Git documentation about Notes + + + + + + + + + Sets the object id of object you want a note on + + + + this + + + + + the ref to read notes from. Note, the default value of + NGit.Constants.R_NOTES_COMMITS + will be used if nothing is + set + + + + this + + NGit.Constants.R_NOTES_COMMITS + + + The ls-remote command + Git documentation about ls-remote + + + + + + The remote (uri or name) used for the fetch operation. + + The remote (uri or name) used for the fetch operation. 
If no remote is + set, the default value of Constants.DEFAULT_REMOTE_NAME will + be used. + + NGit.Constants.DEFAULT_REMOTE_NAME + + + + + this + + + + Include refs/heads in references results + + + + this + + + + Include refs/tags in references results + + + + this + + + + The full path of git-upload-pack on the remote host + + + + this + + + + + Executes the + LsRemote + command with all the options and parameters + collected by the setter methods (e.g. + SetHeads(bool) + ) of this + class. Each instance of this class should only be used for one invocation + of the command. Don't call this method twice on an instance. + + a collection of references in the remote repository + when called with an invalid remote uri + + for errors that occurs during transport + + + + + + Formatter to format timestamps relative to the current time using time units + in the format defined by + git log --relative-date + . + + + + + System.DateTime + to format + + + age of given + System.DateTime + compared to now formatted in the same + relative format as returned by + git log --relative-date + + + + + Blame command for building a + NGit.Blame.BlameResult + for a file path. + + + + + + + Set file path + + this command + + + Set diff algorithm + + this command + + + Set raw text comparator + + this command + + + Set start commit id + + this command + + + Enable (or disable) following file renames. + + Enable (or disable) following file renames. +

+ If true renames are followed using the standard FollowFilter behavior + used by RevWalk (which matches + git log --follow + in the C + implementation). This is not the same as copy/move detection as + implemented by the C implementation's of + git blame -M -C + . + + enable following. + + + this + + + +

Configure the command to compute reverse blame (history of deletes). + Configure the command to compute reverse blame (history of deletes). + + oldest commit to traverse from. The result file will be loaded + from this commit's tree. + + + most recent commit to stop traversal at. Usually an active + branch tip, tag, or HEAD. + + + + this + + the repository cannot be read. +
+ + Configure the generator to compute reverse blame (history of deletes). + Configure the generator to compute reverse blame (history of deletes). + + oldest commit to traverse from. The result file will be loaded + from this commit's tree. + + + most recent commits to stop traversal at. Usually an active + branch tip, tag, or HEAD. + + + + this + + the repository cannot be read. + + + + Generate a list of lines with information about when the lines were + introduced into the file path. + + + Generate a list of lines with information about when the lines were + introduced into the file path. + + list of lines + + + + Remove untracked files from the working tree + Git documentation about Clean + + + + + + + Executes the + clean + command with all the options and parameters + collected by the setter methods of this class. Each instance of this + class should only be used for one invocation of the command (means: one + call to + Call() + ) + + a set of strings representing each file cleaned. + NGit.Api.Errors.GitAPIException + + NGit.Errors.NoWorkTreeException + + + + If paths are set, only these paths are affected by the cleaning. + If paths are set, only these paths are affected by the cleaning. + the paths to set + + + this + + + + If dryRun is set, the paths in question will not actually be deleted. + If dryRun is set, the paths in question will not actually be deleted. + whether to do a dry run or not + + + this + + + + Generate author information for lines based on a provided file. + + Generate author information for lines based on a provided file. +

+ Applications that want a simple one-shot computation of blame for a file + should use + ComputeBlameResult() + to prepare the entire result in one + method call. This may block for significant time as the history of the + repository must be traversed until information is gathered for every line. +

+ Applications that want more incremental update behavior may use either the + raw + Next() + streaming approach supported by this class, or construct + a + BlameResult + using + BlameResult.Create(BlameGenerator) + + and + incrementally construct the result with + BlameResult.ComputeNext() + . +

+ This class is not thread-safe. +

+ An instance of BlameGenerator can only be used once. To blame multiple files + the application must create a new BlameGenerator. +

+ During blame processing there are two files involved: +

    +
  • result - The file whose lines are being examined. This is the revision + the user is trying to view blame/annotation information alongside of.
  • +
  • source - The file that was blamed with supplying one or more lines of + data into result. The source may be a different file path (due to copy or + rename). Source line numbers may differ from result line numbers due to lines + being added/removed in intermediate revisions.
  • +
+

+ The blame algorithm is implemented by initially assigning responsibility for + all lines of the result to the starting commit. A difference against the + commit's ancestor is computed, and responsibility is passed to the ancestor + commit for any lines that are common. The starting commit is blamed only for + the lines that do not appear in the ancestor, if any. The loop repeats using + the ancestor, until there are no more lines to acquire information on, or the + file's creation point is discovered in history. + + + +

Revision pool used to acquire commits from. + Revision pool used to acquire commits from. +
+ + Indicates the commit has already been processed. + Indicates the commit has already been processed. + + + Potential candidates, sorted by commit time descending. + Potential candidates, sorted by commit time descending. + + + Number of lines that still need to be discovered. + Number of lines that still need to be discovered. + + + Blame is currently assigned to this source. + Blame is currently assigned to this source. + + + + Create a blame generator for the repository and path (relative to + repository) + + repository to access revision data from. + + initial path of the file to start scanning (relative to the + repository). + + + + repository being scanned for revision history. + + + path file path being processed. + + + Difference algorithm to use when comparing revisions. + Difference algorithm to use when comparing revisions. + + + + this + + + + Text comparator to use when comparing revisions. + Text comparator to use when comparing revisions. + + + + this + + + + Enable (or disable) following file renames, on by default. + + Enable (or disable) following file renames, on by default. +

+ If true renames are followed using the standard FollowFilter behavior + used by RevWalk (which matches + git log --follow + in the C + implementation). This is not the same as copy/move detection as + implemented by the C implementation's + git blame -M -C + . + + enable following. + + + this + + + + 

+ Obtain the RenameDetector if + setFollowFileRenames(true) + . + + + the rename detector, allowing the application to configure its + settings for rename score and breaking behavior. + +
+ + Push a candidate blob onto the generator's traversal stack. + + Push a candidate blob onto the generator's traversal stack. +

+ Candidates should be pushed in history order from oldest-to-newest. + Applications should push the starting commit first, then the index + revision (if the index is interesting), and finally the working tree + copy (if the working tree is interesting). + + description of the blob revision, such as "Working Tree". + + contents of the file. + + + this + + the repository cannot be read. + + +

Push a candidate blob onto the generator's traversal stack. + + Push a candidate blob onto the generator's traversal stack. +

+ Candidates should be pushed in history order from oldest-to-newest. + Applications should push the starting commit first, then the index + revision (if the index is interesting), and finally the working tree copy + (if the working tree is interesting). + + description of the blob revision, such as "Working Tree". + + contents of the file. + + + this + + the repository cannot be read. + + +

Push a candidate object onto the generator's traversal stack. + + Push a candidate object onto the generator's traversal stack. +

+ Candidates should be pushed in history order from oldest-to-newest. + Applications should push the starting commit first, then the index + revision (if the index is interesting), and finally the working tree copy + (if the working tree is interesting). + + description of the blob revision, such as "Working Tree". + + may be a commit or a blob. + + + this + + the repository cannot be read. + + +

Configure the generator to compute reverse blame (history of deletes). + + Configure the generator to compute reverse blame (history of deletes). +

+ This method is expensive as it immediately runs a RevWalk over the + history spanning the expression + start..end + (end being more recent + than start) and then performs the equivalent operation as + Push(string, NGit.AnyObjectId) + to begin blame traversal from the + commit named by + start + walking forwards through history until + end + blaming line deletions. +

+ A reverse blame may produce multiple sources for the same result line; + each of these is a descendant commit that removed the line. Typically + this occurs when the same deletion appears in multiple side branches, such + as with a cherry-pick. Applications relying on reverse should use + BlameResult + as it filters these duplicate sources and only + remembers the first (oldest) deletion. + + + oldest commit to traverse from. The result file will be loaded + from this commit's tree. + + + most recent commit to stop traversal at. Usually an active + branch tip, tag, or HEAD. + + + + this + + the repository cannot be read. + + 

Configure the generator to compute reverse blame (history of deletes). + + Configure the generator to compute reverse blame (history of deletes). +

+ This method is expensive as it immediately runs a RevWalk over the + history spanning the expression + start..end + (end being more recent + than start) and then performs the equivalent operation as + Push(string, NGit.AnyObjectId) + to begin blame traversal from the + commit named by + start + walking forwards through history until + end + blaming line deletions. +

+ A reverse blame may produce multiple sources for the same result line; + each of these is a descendant commit that removed the line. Typically + this occurs when the same deletion appears in multiple side branches, such + as with a cherry-pick. Applications relying on reverse should use + BlameResult + as it filters these duplicate sources and only + remembers the first (oldest) deletion. + + + oldest commit to traverse from. The result file will be loaded + from this commit's tree. + + + most recent commits to stop traversal at. Usually an active + branch tip, tag, or HEAD. + + + + this + + the repository cannot be read. + + 

Execute the generator in a blocking fashion until all data is ready. + Execute the generator in a blocking fashion until all data is ready. + the complete result. Null if no file exists for the given path. + the repository cannot be read. +
+ + Step the blame algorithm one iteration. + Step the blame algorithm one iteration. + + true if the generator has found a region's source. The getSource + and + GetResultStart() + , + GetResultEnd() + methods + can be used to inspect the region found. False if there are no + more regions to describe. + + repository cannot be read. + + + + + + + + + + + + + + + + + + Get the revision blamed for the current region. + + Get the revision blamed for the current region. +

+ The source commit may be null if the line was blamed to an uncommitted + revision, such as the working tree copy, or during a reverse blame if the + line survives to the end revision (e.g. the branch tip). + + current revision being blamed. + + + current author being blamed. + + + current committer being blamed. + + + path of the file being blamed. + + + + rename score if a rename occurred in + GetSourceCommit() + . + + + + + first line of the source data that has been blamed for the + current region. This is line number of where the region was added + during + GetSourceCommit() + in file + GetSourcePath() + . + + + + + one past the range of the source data that has been blamed for + the current region. This is line number of where the region was + added during + GetSourceCommit() + in file + GetSourcePath() + . + + + + + first line of the result that + GetSourceCommit() + has been + blamed for providing. Line numbers use 0 based indexing. + + + + + one past the range of the result that + GetSourceCommit() + has been blamed for providing. Line numbers use 0 based indexing. + Because a source cannot be blamed for an empty region of the + result, + GetResultEnd() + is always at least one larger + than + GetResultStart() + . + + + + + number of lines in the current region being blamed to + GetSourceCommit() + . This is always the value of the + expression + getResultEnd() - getResultStart() + , but also + getSourceEnd() - getSourceStart() + . + + + + + complete contents of the source file blamed for the current + output region. This is the contents of + GetSourcePath() + within + GetSourceCommit() + . The source contents is + temporarily available as an artifact of the blame algorithm. Most + applications will want the result contents for display to users. + + + + + complete file contents of the result file blame is annotating. + This value is accessible only after being configured and only + immediately before the first call to + Next() + . 
Returns + null if the path does not exist. + + repository cannot be read. + + Next() + has already been invoked. + + + +

Release the current blame session. + Release the current blame session. +
+ + + + + + + + Collects line annotations for inspection by applications. + + Collects line annotations for inspection by applications. +

+ A result is usually updated incrementally as the BlameGenerator digs back + further through history. Applications that want to lay annotations down text + to the original source file in a viewer may find the BlameResult structure an + easy way to acquire the information, at the expense of keeping tables in + memory tracking every line of the result file. +

+ This class is not thread-safe. +

+ During blame processing there are two files involved: +

    +
  • result - The file whose lines are being examined. This is the revision + the user is trying to view blame/annotation information alongside of.
  • +
  • source - The file that was blamed with supplying one or more lines of + data into result. The source may be a different file path (due to copy or + rename). Source line numbers may differ from result line numbers due to lines + being added/removed in intermediate revisions.
  • +
+
+
+ + Construct a new BlameResult for a generator. + Construct a new BlameResult for a generator. + the generator the result will consume records from. + + the new result object. null if the generator cannot find the path + it starts from. + + the repository cannot be read. + + + Warning: these are actually 1-based. + Warning: these are actually 1-based. + + + path of the file this result annotates. + + + contents of the result file, available for display. + + + + Throw away the + GetResultContents() + . + + + + Check if the given result line has been annotated yet. + Check if the given result line has been annotated yet. + line to read data of, 0 based. + true if the data has been annotated, false otherwise. + + + Check if the given result line has been annotated yet. + Check if the given result line has been annotated yet. + first index to examine. + last index to examine. + true if the data has been annotated, false otherwise. + + + Get the commit that provided the specified line of the result. + + Get the commit that provided the specified line of the result. +

+ The source commit may be null if the line was blamed to an uncommitted + revision, such as the working tree copy, or during a reverse blame if the + line survives to the end revision (e.g. the branch tip). + + line to read data of, 0 based. + + commit that provided line + idx + . May be null. + + + +

Get the author that provided the specified line of the result. + Get the author that provided the specified line of the result. + line to read data of, 0 based. + + author that provided line + idx + . May be null. + +
+ + Get the committer that provided the specified line of the result. + Get the committer that provided the specified line of the result. + line to read data of, 0 based. + + committer that provided line + idx + . May be null. + + + + Get the file path that provided the specified line of the result. + Get the file path that provided the specified line of the result. + line to read data of, 0 based. + + source file path that provided line + idx + . + + + + Get the corresponding line number in the source file. + Get the corresponding line number in the source file. + line to read data of, 0 based. + matching line number in the source file. + + + Compute all pending information. + Compute all pending information. + the repository cannot be read. + + + Compute the next available segment and return the first index. + + Compute the next available segment and return the first index. +

+ Computes one segment and returns to the caller the first index that is + available. After return the caller can also inspect + LastLength() + to determine how many lines of the result were computed. + + index that is now available. -1 if no more are available. + the repository cannot be read. + + + + length of the last segment found by + ComputeNext() + . + + + +

Compute until the entire range has been populated. + Compute until the entire range has been populated. + first index to examine. + last index to examine. + the repository cannot be read. +
+ + A source that may have supplied some (or all) of the result file. + + A source that may have supplied some (or all) of the result file. +

+ Candidates are kept in a queue by BlameGenerator, allowing the generator to + perform a parallel search down the parents of any merges that are discovered + during the history traversal. Each candidate retains a + regionList + describing sections of the result file the candidate has taken responsibility + for either directly or indirectly through its history. Actual blame from this + region list will be assigned to the candidate when its ancestor commit(s) are + themselves converted into Candidate objects and the ancestor's candidate uses + TakeBlame(NGit.Diff.EditList, Candidate) + + to accept responsibility for sections + of the result. + + + +

Next candidate in the candidate queue. + Next candidate in the candidate queue. +
+ + Commit being considered (or blamed, depending on state). + Commit being considered (or blamed, depending on state). + + + + Path of the candidate file in + sourceCommit + . + + + + + Unique name of the candidate blob in + sourceCommit + . + + + + + Complete contents of the file in + sourceCommit + . + + + + Chain of regions this candidate may be blamed for. + + Chain of regions this candidate may be blamed for. +

+ This list is always kept sorted by resultStart order, making it simple to + merge-join with the sorted EditList during blame assignment. + + + +

Score assigned to the rename to this candidate. + + Score assigned to the rename to this candidate. +

+ Consider the history "A<-B<-C". If the result file S in C was renamed to + R in B, the rename score for this rename will be held in this field by + the candidate object for B. By storing the score with B, the application + can see what the rename score was as it makes the transition from C/S to + B/R. This may seem backwards since it was C that performed the rename, + but the application doesn't learn about path R until B. + + + + + + +

Special candidate type used for reverse blame. + + Special candidate type used for reverse blame. +

+ Reverse blame inverts the commit history graph to follow from a commit to + its descendant children, rather than the normal history direction of + child to parent. These types require a + ReverseCommit + which keeps + children pointers, allowing reverse navigation of history. + + + +

Candidate loaded from a file source, and not a commit. + + Candidate loaded from a file source, and not a commit. +

+ The + Candidate.sourceCommit + field is always null on this type of + candidate. Instead history traversal follows the single + parent + field to discover the next Candidate. Often this is a normal Candidate + type that has a valid sourceCommit. + + + +

Next candidate to pass blame onto. + + Next candidate to pass blame onto. +

+ When computing the differences that this candidate introduced to the + file content, the parent's sourceText is used as the base. + + + +

Author name to refer to this blob with. + Author name to refer to this blob with. +
+ + Region of the result that still needs to be computed. + + Region of the result that still needs to be computed. +

+ Regions are held in a singly-linked-list by + Candidate + using the + Candidate.regionList + field. The list is kept in sorted order by + resultStart + . + + + +

Next entry in the region linked list. + Next entry in the region linked list. +
+ + First position of this region in the result file blame is computing. + First position of this region in the result file blame is computing. + + + + First position in the + Candidate + that owns this Region. + + + + Length of the region, always >= 1. + Length of the region, always >= 1. + + + Copy the entire result region, but at a new source position. + Copy the entire result region, but at a new source position. + the new source position. + the same result region, but offset for a new source. + + + Split the region, assigning a new source position to the first half. + Split the region, assigning a new source position to the first half. + the new source position. + length of the new region. + the first half of the region, at the new source. + + + + Edit this region to remove the first + d + elements. + + number of elements to remove from the start of this region. + + + + + + + + A DiffFormatter used to calculate the patch-id of the diff. + A DiffFormatter used to calculate the patch-id of the diff. + + + Initialize a formatter to compute a patch id. + Initialize a formatter to compute a patch id. + + + Should be called after having called one of the format methods + the patch id calculated for the provided diff. + + + + + + + + + An OutputStream which ignores everything written to it. + An OutputStream which ignores everything written to it. + + + The canonical instance. + The canonical instance. + + + Used to delete one or several tags. + + Used to delete one or several tags. + The result of + Call() + is a list with the (full) names of the deleted + tags. + + Git documentation about Tag + + + + + + the list with the full names of the deleted tags + + + + + the names of the tags to delete; if not set, this will do + nothing; invalid tag names will simply be ignored + + this instance + + + Show changes between commits, commit and working tree, etc. + Show changes between commits, commit and working tree, etc. 
+ Git documentation about diff + + + + + + + Executes the + Diff + command with all the options and parameters + collected by the setter methods (e.g. + SetCached(bool) + of this + class. Each instance of this class should only be used for one invocation + of the command. Don't call this method twice on an instance. + + a DiffEntry for each path which is different + + + + whether to view the changes you staged for the next commit + this instance + + + parameter, used to limit the diff to the named path + this instance + + + the previous state + this instance + + + the updated state + this instance + + + whether to return only names and status of changed files + + this instance + + + the stream to write line data + this instance + + + Set number of context lines instead of the usual three. + Set number of context lines instead of the usual three. + the number of context lines + this instance + + + Set the given source prefix instead of "a/". + Set the given source prefix instead of "a/". + the prefix + this instance + + + Set the given destination prefix instead of "b/". + Set the given destination prefix instead of "b/". + the prefix + this instance + + + The progress monitor associated with the diff operation. + + The progress monitor associated with the diff operation. By default, this + is set to NullProgressMonitor + + NGit.NullProgressMonitor + a progress monitor + this instance + + + Used to obtain a list of tags. + Used to obtain a list of tags. + Git documentation about Tag + + + + + + the tags available + + + + The reflog command + Git documentation about reflog + + + + + + The ref used for the reflog operation. + + The ref used for the reflog operation. If no ref is set, the default + value of HEAD will be used. + + + + + this + + + + Run the reflog command + NGit.Api.Errors.GitAPIException + + NGit.Api.Errors.InvalidRefNameException + + + + Command class to list the stashed commits in a repository. 
+ Command class to list the stashed commits in a repository. + Git documentation about Stash + + + Create a new stash list command + + + + + + + + A class used to execute a submodule add command. + + A class used to execute a submodule add command. + This will clone the configured submodule, register the submodule in the + .gitmodules file and the repository config file, and also add the submodule + and .gitmodules file to the index. + + Git documentation about submodules + + + + + + Set repository-relative path of submodule + + this command + + + Set URI to clone submodule from + + this command + + + The progress monitor associated with the clone operation. + + The progress monitor associated with the clone operation. By default, + this is set to NullProgressMonitor + + NGit.NullProgressMonitor + + this command + + + Is the configured already a submodule in the index? + true if submodule exists in index, false otherwise + System.IO.IOException + + + + + + A class used to execute a submodule init command. + + A class used to execute a submodule init command. + This will copy the 'url' and 'update' fields from the working tree + .gitmodules file to a repository's config file for each submodule not + currently present in the repository's config file. + + Git documentation about submodules + + + + + + Add repository-relative submodule path to initialize + + this command + + + + + + A class used to execute a submodule status command. + A class used to execute a submodule status command. + Git documentation about submodules + + + + + + Add repository-relative submodule path to limit status reporting to + + this command + + + + + + + + + + A class used to execute a submodule sync command. + + A class used to execute a submodule sync command. + This will set the remote URL in a submodule's repository to the current value + in the .gitmodules file. 
+ + Git documentation about submodules + + + + + + Add repository-relative submodule path to synchronize + + this command + + + Get branch that HEAD currently points to + + shortened branch name, null on failures + System.IO.IOException + + + + + + A class used to execute a submodule update command. + A class used to execute a submodule update command. + Git documentation about submodules + + + + + + The progress monitor associated with the clone operation. + + The progress monitor associated with the clone operation. By default, + this is set to NullProgressMonitor + + NGit.NullProgressMonitor + + this command + + + Add repository-relative submodule path to initialize + + this command + + + Execute the SubmoduleUpdateCommand command. + Execute the SubmoduleUpdateCommand command. + a collection of updated submodule paths + NGit.Api.Errors.ConcurrentRefUpdateException + + NGit.Api.Errors.CheckoutConflictException + + NGit.Api.Errors.InvalidMergeHeadsException + + NGit.Api.Errors.InvalidConfigurationException + + NGit.Api.Errors.NoHeadException + + NGit.Api.Errors.NoMessageException + + NGit.Api.Errors.RefNotFoundException + + NGit.Api.Errors.WrongRepositoryStateException + + NGit.Api.Errors.GitAPIException + + + + + Receives a callback allowing type-specific configuration to be set + on the Transport instance after it's been created. + + + Receives a callback allowing type-specific configuration to be set + on the Transport instance after it's been created. +

+ This allows consumers of the JGit command API to perform custom + configuration that would be difficult anticipate and expose on the + API command builders. +

+ For instance, if a client needs to replace the SshSessionFactorys + on any SSHTransport used (eg to control available SSH identities), + they can set the TransportConfigCallback on the JGit API command - + once the transport has been created by the command, the callback + will be invoked and passed the transport instance, which the + client can then inspect and configure as necessary. + + + +

Add any additional transport-specific configuration required. + Add any additional transport-specific configuration required. + +
+ + Parsed information about a checkout. + Parsed information about a checkout. + + + the name of the branch before checkout + + + the name of the branch after checkout + + + Parsed reflog entry + + + the commit id before the change + + + the commit id after the change + + + user performing the change + + + textual description of the change + + + + a + CheckoutEntry + with parsed information about a branch + switch, or null if the entry is not a checkout + + + + Status class containing the type, path, and commit id of the submodule. + Status class containing the type, path, and commit id of the submodule. + + + Create submodule status + + + + + + Create submodule status + + + + + + + type + + + path + + + index object id + + + HEAD object id + + + Enumeration of different statuses that a submodule can be in + + + Walker that visits all submodule entries found in a tree + + + + Create a generator to walk over the submodule entries currently in the + index + The + .gitmodules + file is read from the index. + + + generator over submodule index entries + System.IO.IOException + + + + Create a generator and advance it to the submodule entry at the given + path + + + + the root of a tree containing both a submodule at the given path + and .gitmodules at the root. + + + generator at given path, null if no submodule at given path + System.IO.IOException + + + + Create a generator and advance it to the submodule entry at the given + path + + + + the root of a tree containing both a submodule at the given path + and .gitmodules at the root. + + + generator at given path, null if no submodule at given path + System.IO.IOException + + + Get submodule directory + + + directory + + + Get submodule repository + + + repository or null if repository doesn't exist + System.IO.IOException + + + Get submodule repository at path + + + repository or null if repository doesn't exist + System.IO.IOException + + + Resolve submodule repository URL. + + Resolve submodule repository URL. +

+ This handles relative URLs that are typically specified in the + '.gitmodules' file by resolving them against the remote URL of the parent + repository. +

+ Relative URLs will be resolved against the parent repository's working + directory if the parent repository has no configured remote URL. + + parent repository + absolute or relative URL of the submodule repository + resolved URL + System.IO.IOException + + +

Create submodule generator + + System.IO.IOException +
+ + Set the config used by this walk. + + Set the config used by this walk. + This method need only be called if constructing a walk manually instead of + with one of the static factory methods above. + + .gitmodules config object + this generator + + + + Set the tree used by this walk for finding + .gitmodules + . +

+ The root tree is not read until the first submodule is encountered by the + walk. +

+ This method need only be called if constructing a walk manually instead of + with one of the static factory methods above. +

+ tree containing .gitmodules + this generator +
+ + + Set the tree used by this walk for finding + .gitmodules + . +

+ The root tree is not read until the first submodule is encountered by the + walk. +

+ This method need only be called if constructing a walk manually instead of + with one of the static factory methods above. +

+ ID of a tree containing .gitmodules + this generator + System.IO.IOException +
+ + + Load the config for this walk from + .gitmodules + . +

+ Uses the root tree if + SetRootTree(NGit.Treewalk.AbstractTreeIterator) + + was + previously called, otherwise uses the working tree. +

+ If no submodule config is found, loads an empty config. +

+ this generator + if an error occurred, or if the repository is bare + + NGit.Errors.ConfigInvalidException + +
+ + + + + + Set tree filter + + this generator + + + Set the tree iterator used for finding submodule entries + + this generator + NGit.Errors.CorruptObjectException + + + + Set the tree used for finding submodule entries + + this generator + System.IO.IOException + NGit.Errors.IncorrectObjectTypeException + + NGit.Errors.MissingObjectException + + + + Reset generator and start new submodule walk + this generator + + + Get directory that will be the root of the submodule's local repository + submodule repository directory + + + Advance to next submodule in the index tree. + + Advance to next submodule in the index tree. + The object id and path of the next entry can be obtained by calling + GetObjectId() + and + GetPath() + . + + true if entry found, false otherwise + System.IO.IOException + + + Get path of current submodule entry + path + + + Get object id of current submodule entry + object id + + + Get the configured path for current entry. + + Get the configured path for current entry. This will be the value from + the .gitmodules file in the current repository's working tree. + + configured path + NGit.Errors.ConfigInvalidException + + System.IO.IOException + + + Get the configured remote URL for current entry. + + Get the configured remote URL for current entry. This will be the value + from the repository's config. + + configured URL + NGit.Errors.ConfigInvalidException + + System.IO.IOException + + + Get the configured remote URL for current entry. + + Get the configured remote URL for current entry. This will be the value + from the .gitmodules file in the current repository's working tree. + + configured URL + NGit.Errors.ConfigInvalidException + + System.IO.IOException + + + Get the configured update field for current entry. + + Get the configured update field for current entry. This will be the value + from the repository's config. 
+ + update value + NGit.Errors.ConfigInvalidException + + System.IO.IOException + + + Get the configured update field for current entry. + + Get the configured update field for current entry. This will be the value + from the .gitmodules file in the current repository's working tree. + + update value + NGit.Errors.ConfigInvalidException + + System.IO.IOException + + + Get repository for current submodule entry + repository or null if non-existent + System.IO.IOException + + + Get commit id that HEAD points to in the current submodule's repository + object id of HEAD reference + System.IO.IOException + + + Get ref that HEAD points to in the current submodule's repository + ref name, null on failures + System.IO.IOException + + + Get the resolved remote URL for the current submodule. + + Get the resolved remote URL for the current submodule. +

+ This method resolves the value of + GetModulesUrl() + to an absolute + URL + + resolved remote URL + System.IO.IOException + NGit.Errors.ConfigInvalidException + + + +

Release any resources used by this walker's reader. + Release any resources used by this walker's reader. +
+ + Only produce commits which are below a specified depth. + Only produce commits which are below a specified depth. + DepthWalk + + + + Commits which used to be shallow in the client, but which are + being extended as part of this fetch. + + + Commits which used to be shallow in the client, but which are + being extended as part of this fetch. These commits should be + returned to the caller as UNINTERESTING so that their blobs/trees + can be marked appropriately in the pack writer. + + + + + Commits which the normal framework has marked as UNINTERESTING, + but which we now care about again. + + + Commits which the normal framework has marked as UNINTERESTING, + but which we now care about again. This happens if a client is + extending a shallow checkout to become deeper--the new commits at + the bottom of the graph need to be sent, even though they are + below other commits which the client already has. + + + + + Parent generator + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException + + + + + + + + Interface for revision walkers that perform depth filtering. + Interface for revision walkers that perform depth filtering. + + + Depth to filter to. + + + flag marking commits that should become unshallow. + + + flag marking commits that are interesting again. + + + RevCommit with a depth (in commits) from a root. + RevCommit with a depth (in commits) from a root. + + + Depth of this commit in the graph, via shortest path. + Depth of this commit in the graph, via shortest path. + + + depth of this commit, as found by the shortest path. + + + Initialize a new commit. + Initialize a new commit. + object name for the commit. + + + Subclass of RevWalk that performs depth filtering. + Subclass of RevWalk that performs depth filtering. + + + Repository to walk + Maximum depth to return + + + ObjectReader to use + Maximum depth to return + + + Mark a root commit (i.e., one whose depth should be considered 0.) 
+ Commit to mark + System.IO.IOException + NGit.Errors.IncorrectObjectTypeException + + NGit.Errors.MissingObjectException + + + + Subclass of ObjectWalk that performs depth filtering. + Subclass of ObjectWalk that performs depth filtering. + + + Repository to walk + Maximum depth to return + + + Object Reader + Maximum depth to return + + + Mark a root commit (i.e., one whose depth should be considered 0.) + Commit to mark + System.IO.IOException + NGit.Errors.IncorrectObjectTypeException + + NGit.Errors.MissingObjectException + + + + + Mark an element which used to be shallow in the client, but which + should now be considered a full commit. + + + Mark an element which used to be shallow in the client, but which + should now be considered a full commit. Any ancestors of this commit + should be included in the walk, even if they are the ancestor of an + uninteresting commit. + + Commit to mark + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException + + + + An instance of this class can be used in conjunction with a + FollowFilter + . Whenever a rename has been detected during a revision + walk, it will be reported here. + + FollowFilter.SetRenameCallback(RenameCallback) + + + + + Called whenever a diff was found that is actually a rename or copy of a + file. + + + Called whenever a diff was found that is actually a rename or copy of a + file. + + the entry representing the rename/copy + + + + Utility methods for + RevWalk + . + + + + + Count the number of commits that are reachable from start + until a commit that is reachable from end is encountered. + + + Count the number of commits that are reachable from start + until a commit that is reachable from end is encountered. In + other words, count the number of commits that are in start, + but not in end. +

+ Note that this method calls + RevWalk.Reset() + at the beginning. + Also note that the existing rev filter on the walk is left as-is, so be + sure to set the right rev filter before calling this method. + + the rev walk to use + the commit to start counting from + + the commit where counting should end, or null if counting + should be done until there are no more commits + + the number of commits + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException + + +

+ Find commits that are reachable from start until a commit + that is reachable from end is encountered. + + + Find commits that are reachable from start until a commit + that is reachable from end is encountered. In other words, + Find of commits that are in start, but not in + end. +

+ Note that this method calls + RevWalk.Reset() + at the beginning. + Also note that the existing rev filter on the walk is left as-is, so be + sure to set the right rev filter before calling this method. + + the rev walk to use + the commit to start counting from + + the commit where counting should end, or null if counting + should be done until there are no more commits + + the commits found + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException + + +

+ Find the list of branches a given commit is reachable from when following + parent.s +

+ Note that this method calls + RevWalk.Reset() + at the beginning. +

+ In order to improve performance this method assumes clock skew among + committers is never larger than 24 hours. +

+ the commit we are looking at + The RevWalk to be used. + the set of branches we want to see reachability from + the list of branches a given commit is reachable from + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException +
+ + An OutputStream that expands LF to CRLF. + + An OutputStream that expands LF to CRLF. +

+ Existing CRLF are not expanded to CRCRLF, but retained as is. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Counts the number of bytes written. + Counts the number of bytes written. +
+ + Initialize a new counting stream. + Initialize a new counting stream. + stream to output all writes to. + + + current number of bytes written. + + + + + + + + + + + + + + + + A BufferedOutputStream that throws an error if the final flush fails on + close. + + + A BufferedOutputStream that throws an error if the final flush fails on + close. +

+ Java's BufferedOutputStream swallows errors that occur when the output stream + tries to write the final bytes to the output during close. This may result in + corrupted files without notice. + + + + Sharpen.BufferedOutputStream.BufferedOutputStream(Sharpen.OutputStream) + underlying output stream + + + Sharpen.BufferedOutputStream.BufferedOutputStream(Sharpen.OutputStream, int) + underlying output stream + buffer size + + + + + +

Limits the number of commits output. + Limits the number of commits output. +
+ + Create a new max count filter. + Create a new max count filter. + the limit + a new filter + + + + + + + + + Filter that includes commits after a configured number are skipped. + Filter that includes commits after a configured number are skipped. + + + Create a new skip filter. + Create a new skip filter. + the number of commits to skip + a new filter + + + + + + + + + An exception occurring when a file cannot be locked + + + Construct a CannotLockException for the given file and message + file that could not be locked + exception message + + + Construct a CannotLockException for the given file + file that could not be locked + + + Get the file that could not be locked + file + + + JGit encountered a case that it knows it cannot yet handle. + JGit encountered a case that it knows it cannot yet handle. + + + + Construct a NotSupportedException for some issue JGit cannot + yet handle. + + + Construct a NotSupportedException for some issue JGit cannot + yet handle. + + message describing the issue + + + Construct a NotSupportedException for some issue JGit cannot yet handle. + + Construct a NotSupportedException for some issue JGit cannot yet handle. + + message describing the issue + a lower level implementation specific issue. + + + Thrown when PackParser finds an object larger than a predefined limit + + + + Construct a too large object in pack exception when the exact size of the + too large object is not available. + + + Construct a too large object in pack exception when the exact size of the + too large object is not available. This will be used when we find out + that a delta sequence is already larger than the maxObjectSizeLimit but + don't want to inflate the delta just to find out the exact size of the + resulting object. + + the maximum object size limit + + + + Construct a too large object in pack exception when the exact size of the + too large object is known. 
+ + + Construct a too large object in pack exception when the exact size of the + too large object is known. + + + + + + + PostReceiveHook + that delegates to a list of other hooks. +

+ Hooks are run in the order passed to the constructor. +

+
+ + Create a new hook chaining the given hooks together. + Create a new hook chaining the given hooks together. + hooks to execute, in order. + a new hook chain of the given hooks. + + + + PreReceiveHook + that delegates to a list of other hooks. +

+ Hooks are run in the order passed to the constructor. +

+
+ + Create a new hook chaining the given hooks together. + Create a new hook chaining the given hooks together. + hooks to execute, in order. + a new hook chain of the given hooks. + + + + PreUploadHook + that delegates to a list of other hooks. +

+ Hooks are run in the order passed to the constructor. If running a method on + one hook throws an exception, execution of remaining hook methods is aborted. +

+
+ + Create a new hook chaining the given hooks together. + Create a new hook chaining the given hooks together. + hooks to execute, in order. + a new hook chain of the given hooks. + + + + + + + + + + + + UploadPack has already reported an error to the client. + UploadPack has already reported an error to the client. + + + Initialize a new exception. + Initialize a new exception. + root cause. + + + + UploadPackLogger + that delegates to a list of other loggers. +

+ loggers are run in the order passed to the constructor. +

+
+ + Create a new logger chaining the given loggers together. + Create a new logger chaining the given loggers together. + loggers to execute, in order. + a new logger chain of the given loggers. + + + + + + + + + + + + + + + + + + Branch section of a Git configuration file. + Branch section of a Git configuration file. + + + + Create a new branch config, which will read configuration from config + about specified branch. + + + Create a new branch config, which will read configuration from config + about specified branch. + + the config to read from + the short branch name of the section to read + + + + the full tracking branch name or null if it could + not be determined + + + + + the full remote-tracking branch name or + null + if it could + not be determined. If you also want local tracked branches use + GetTrackingBranch() + instead. + + + + Finds the tracked remote tracking branch + Remote name + + merge Ref of the local branch tracking the remote tracking + branch + + full remote tracking branch name or null + + + Status of a branch's relation to its remote-tracking branch. + Status of a branch's relation to its remote-tracking branch. + + + + Compute the tracking status for the branchName in + repository. + + + Compute the tracking status for the branchName in + repository. + + the git repository to compute the status from + the local branch + the tracking status, or null if it is not known + System.IO.IOException + + + full remote-tracking branch name + + + + number of commits that the local branch is ahead of the + remote-tracking branch + + + + + number of commits that the local branch is behind of the + remote-tracking branch + + + + + Implementation of + AdvertiseRefsHook + that advertises the same refs for + upload-pack and receive-pack. + + 2.0 + + + Hook to allow callers to take over advertising refs to the client. + Hook to allow callers to take over advertising refs to the client. + 2.0 + + + A simple hook that advertises the default refs. 
+ + A simple hook that advertises the default refs. +

+ The method implementations do nothing to preserve the default behavior; see + UploadPack.SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>) + + and + BaseReceivePack.SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>, System.Collections.Generic.ICollection<E>) + + . + + + +

Advertise refs for upload-pack. + Advertise refs for upload-pack. + + instance on which to call + UploadPack.SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>) + + if necessary. + + abort; the message will be sent to the user. + + +
+ + Advertise refs for receive-pack. + Advertise refs for receive-pack. + + instance on which to call + BaseReceivePack.SetAdvertisedRefs(System.Collections.Generic.IDictionary<K, V>, System.Collections.Generic.ICollection<E>) + + if necessary. + + abort; the message will be sent to the user. + + + + + + + + + + + Get the refs to advertise. + Get the refs to advertise. + repository instance. + open rev walk on the repository. + set of refs to advertise. + abort; the message will be sent to the user. + + + + + Get the additional haves to advertise. + Get the additional haves to advertise. + repository instance. + open rev walk on the repository. + + set of additional haves; see + BaseReceivePack.GetAdvertisedObjects() + + . + + abort; the message will be sent to the user. + + + + + + AdvertiseRefsHook + that delegates to a list of other hooks. +

+ Hooks are run in the order passed to the constructor. A hook may inspect or + modify the results of the previous hooks in the chain by calling + UploadPack.GetAdvertisedRefs() + , or + BaseReceivePack.GetAdvertisedRefs() + + or + BaseReceivePack.GetAdvertisedObjects() + + . +

+
+ + Create a new hook chaining the given hooks together. + Create a new hook chaining the given hooks together. + hooks to execute, in order. + a new hook chain of the given hooks. + + + + + + + + + Apply a patch to files and/or to the index. + Apply a patch to files and/or to the index. + Git documentation about apply + 2.0 + + + Constructs the command if the patch is to be applied to the index. + Constructs the command if the patch is to be applied to the index. + + + + the patch to apply + this instance + + + + Executes the + ApplyCommand + command with all the options and + parameters collected by the setter methods (e.g. + SetPatch(Sharpen.InputStream) + of this class. Each instance of this class + should only be used for one invocation of the command. Don't call this + method twice on an instance. + + + an + ApplyResult + object representing the command result + + NGit.Api.Errors.GitAPIException + + NGit.Api.Errors.PatchFormatException + + NGit.Api.Errors.PatchApplyException + + + + + + + + + System.IO.IOException + NGit.Api.Errors.PatchApplyException + + + + + Encapsulates the result of a + ApplyCommand + + 2.0 + + + an updated file + this instance + + + updated files + + + Command class to apply a stashed commit. + Command class to apply a stashed commit. + Git documentation about Stash + 2.0 + + + Create command to apply the changes of a stashed commit + + + + + Set the stash reference to apply +

+ This will default to apply the latest stashed commit (stash@{0}) if + unspecified +

+ + + + this + +
+ + Would unstashing overwrite local changes? + + + + + + true if unstash conflict, false otherwise + + + + + + + + + + + + + + + Apply the changes in a stashed commit to the working directory and index + + id of stashed commit that was applied + NGit.Api.Errors.GitAPIException + + NGit.Api.Errors.WrongRepositoryStateException + + + + + Stash diff filter that looks for differences in the first three trees + which must be the stash head tree, stash index tree, and stash working + directory tree in any order. + + + Stash diff filter that looks for differences in the first three trees + which must be the stash head tree, stash index tree, and stash working + directory tree in any order. + + + + + Command class to stash changes in the working directory and index in a + commit. + + + Command class to stash changes in the working directory and index in a + commit. + + Git documentation about Stash + 2.0 + + + Create a command to stash changes in the working directory and index + + + + + Set the message used when committing index changes +

+ The message will be formatted with the current branch, abbreviated commit + id, and short commit message when used. +

+ + Set the message used when committing index changes +

+ The message will be formatted with the current branch, abbreviated commit + id, and short commit message when used. + + + + + this + + + +

+ Set the message used when committing working directory changes +

+ The message will be formatted with the current branch, abbreviated commit + id, and short commit message when used. +

+ + Set the message used when committing working directory changes +

+ The message will be formatted with the current branch, abbreviated commit + id, and short commit message when used. + + + + + this + + + +

Set the person to use as the author and committer in the commits made + + + + this + +
+ + + Set the reference to update with the stashed commit id +

+ This value defaults to + NGit.Constants.R_STASH +

+ + + + this + +
+ + + + + + + + + + + + Stash the contents on the working directory and index in separate commits + and reset to the current HEAD commit. + + + Stash the contents on the working directory and index in separate commits + and reset to the current HEAD commit. + + stashed commit or null if no changes to stash + NGit.Api.Errors.GitAPIException + + + + + A utility for formatting dates according to the Git log.date formats plus + extensions. + + + A utility for formatting dates according to the Git log.date formats plus + extensions. +

+ The enum + Format + defines the available types. + + + +

Create a new Git oriented date formatter + +
+ + + Format committer, author or tagger ident according to this formatter's + specification. + + + Format committer, author or tagger ident according to this formatter's + specification. + + + formatted version of date, time and time zone + + + Git and JGit formats + + + Exception thrown when applying a patch fails + 2.0 + + + + + + + + + + Exception thrown when applying a patch fails due to an invalid format + 2.0 + + + + + + all the errors where unresolved conflicts have been detected + + + Indicates that a client request has not yet been read from the wire. + Indicates that a client request has not yet been read from the wire. + 2.0 + + + Initialize with no message. + Initialize with no message. + + + + a message explaining the state. This message should not + be shown to an end-user. + + + + Translation bundle for JGit core + + + an instance of this translation bundle + + + + A line in a Git + Config + file. + + + + The text content before entry. + The text content before entry. + + + The section name for the entry. + The section name for the entry. + + + Subsection name. + Subsection name. + + + The key name. + The key name. + + + The value. + The value. + + + The text content after entry. + The text content after entry. + + + Command class to delete a stashed commit reference + Git documentation about Stash + 2.0 + + + + + + Set the stash reference to drop (0-based). + + Set the stash reference to drop (0-based). +

+ This will default to drop the latest stashed commit (stash@{0}) if + unspecified + + + + + this + + + +

Set wheter drop all stashed commits + + true to drop all stashed commits, false to drop only the + stashed commit set via calling + SetStashRef(int) + + + + this + +
+ + + + + + + + + Drop the configured entry from the stash reflog and return value of the + stash reference after the drop occurs + + commit id of stash reference or null if no more stashed changes + NGit.Api.Errors.GitAPIException + + + + Utility for writing reflog entries + 2.0 + + + Get the ref name to be used for when locking a ref's log for rewriting + + name of the ref, relative to the Git repository top level + directory (so typically starts with refs/). + + the name of the ref's lock ref + + + Create write for repository + + + + Create write for repository + + + true to write to disk all entries logged, false to respect the + repository's config and current log file status + + + + Get repository that reflog is being written for + file repository + + + Create the log directories + System.IO.IOException + this writer + + + Locate the log file on disk for a single reference name. + Locate the log file on disk for a single reference name. + + name of the ref, relative to the Git repository top level + directory (so typically starts with refs/). + + the log file location. + + + + Write the given + ReflogEntry + entry to the ref's log + + + + this writer + System.IO.IOException + + + Write the given entry information to the ref's log + + + + + + this writer + System.IO.IOException + + + Write the given ref update to the ref's log + + + + this writer + System.IO.IOException + + + + + + Thrown when JGit detects and refuses to use an invalid path + 2.0 + + + + + + Batch of reference updates to be applied to a repository. + + Batch of reference updates to be applied to a repository. +

+ The batch update is primarily useful in the transport code, where a client or + server is making changes to more than one reference at a time. + + + +

Commands to apply during this batch. + Commands to apply during this batch. +
+ + Does the caller permit a forced update on a reference? + + + Identity to record action as within the reflog. + Identity to record action as within the reflog. + + + Message the caller wants included in the reflog. + Message the caller wants included in the reflog. + + + + Should the result value be appended to + refLogMessage + . + + + + Initialize a new batch update. + Initialize a new batch update. + the reference database of the repository to be updated. + + + + true if the batch update will permit a non-fast-forward update to + an existing reference. + + + + Set if this update wants to permit a forced update. + Set if this update wants to permit a forced update. + true if this update batch should ignore merge tests. + + + this + . + + + + identity of the user making the change in the reflog. + + + Set the identity of the user appearing in the reflog. + + Set the identity of the user appearing in the reflog. +

+ The timestamp portion of the identity is ignored. A new identity with the + current timestamp will be created automatically when the update occurs + and the log record is written. + + + identity of the user. If null the identity will be + automatically determined based on the repository + configuration. + + + + this + . + + + +

Get the message to include in the reflog. + Get the message to include in the reflog. + + message the caller wants to include in the reflog; null if the + update should not be logged. + +
+ + + + true + if the ref log message should show the result. + + + + Set the message to include in the reflog. + Set the message to include in the reflog. + + the message to describe this change. It may be null if + appendStatus is null in order not to append to the reflog + + + true if the status of the ref change (fast-forward or + forced-update) should be appended to the user supplied + message. + + + + this + . + + + + Don't record this update in the ref's associated reflog. + Don't record this update in the ref's associated reflog. + + + this + . + + + + + true if log has been disabled by + DisableRefLog() + . + + + + commands this update will process. + + + Add a single command to this batch update. + Add a single command to this batch update. + the command to add, must not be null. + + + this + . + + + + Add commands to this batch update. + Add commands to this batch update. + the commands to add, must not be null. + + + this + . + + + + Add commands to this batch update. + Add commands to this batch update. + the commands to add, must not be null. + + + this + . + + + + Execute this batch update. + + Execute this batch update. +

+ The default implementation of this method performs a sequential reference + update over each reference. + + + a RevWalk to parse tags in case the storage system wants to + store them pre-peeled, a common performance optimization. + + progress monitor to receive update status on. + + the database is unable to accept the update. Individual + command status must be tested to determine if there is a + partial failure, or a total failure. + + + +

Create a new RefUpdate copying the batch settings. + Create a new RefUpdate copying the batch settings. + specific command the update should be created to copy. + a single reference update command. + + the reference database cannot make a new update object for + the given reference. + +
+ + Exception thrown when transport operation failed + + + message describing the transport failure. + + + message describing the transport exception. + why the transport failed. + + + Formatter for constructing the commit message for a squashed commit. + + Formatter for constructing the commit message for a squashed commit. +

+ The format should be the same as C Git does it, for compatibility. + + + +

Create a new squash message formatter. + Create a new squash message formatter. +
+ + Construct the squashed commit message. + Construct the squashed commit message. + the squashed commits + the target branch + squashed commit message + + + + A garbage collector for git + FileRepository + . Instances of this class + are not thread-safe. Don't use the same instance from multiple threads. + This class started as a copy of DfsGarbageCollector from Shawn O. Pearce + adapted to FileRepositories. + + + + + the refs which existed during the last call to + Repack() + . This is + needed during + Prune(System.Collections.Generic.ICollection<E>) + + where we can optimize by looking at the + difference between the current refs and the refs which existed during + last + Repack() + . + + + + Holds the starting time of the last repack() execution. + + Holds the starting time of the last repack() execution. This is needed in + prune() to inspect only those reflog entries which have been added since + last repack(). + + + + Creates a new garbage collector with default values. + + Creates a new garbage collector with default values. An expirationTime of + two weeks and null as progress monitor will be used. + + the repo to work on + + + + Runs a garbage collector on a + FileRepository + . It will +
    +
  • pack loose references into packed-refs
  • +
  • repack all reachable objects into new pack files and delete the old + pack files
  • +
  • prune all loose objects which are now reachable by packs
  • +
+
+ + the collection of + PackFile + 's which are newly created + + System.IO.IOException + + If the configuration parameter "gc.pruneexpire" couldn't be + parsed + +
+ + Delete old pack files. + + Delete old pack files. What is 'old' is defined by specifying a set of + old pack files and a set of new pack files. Each pack file contained in + old pack files but not contained in new pack files will be deleted. + + + + + true if we should ignore the fact that a certain + pack files or index files couldn't be deleted. + false if an exception should be thrown in such + cases + + + if a pack file couldn't be deleted and + ignoreErrors is set to false + + + + + Like "git prune-packed" this method tries to prune all loose objects + which can be found in packs. + + + Like "git prune-packed" this method tries to prune all loose objects + which can be found in packs. If certain objects can't be pruned (e.g. + because the filesystem delete operation fails) this is silently ignored. + + System.IO.IOException + + + + Like "git prune" this method tries to prune all loose objects which are + unreferenced. + + + Like "git prune" this method tries to prune all loose objects which are + unreferenced. If certain objects can't be pruned (e.g. because the + filesystem delete operation fails) this is silently ignored. + + a set of objects which should explicitly not be pruned + + System.IO.IOException + + If the configuration parameter "gc.pruneexpire" couldn't be + parsed + + + + + Remove all entries from a map which key is the id of an object referenced + by the given ObjectWalk + + + + NGit.Errors.MissingObjectException + + NGit.Errors.IncorrectObjectTypeException + + System.IO.IOException + + + Packs all non-symbolic, loose refs into packed-refs. + Packs all non-symbolic, loose refs into packed-refs. + System.IO.IOException + + + + Packs all objects which reachable from any of the heads into one pack + file. + + + Packs all objects which reachable from any of the heads into one pack + file. Additionally all objects which are not reachable from any head but + which are reachable from any of the other refs (e.g. tags), special refs + (e.g. 
FETCH_HEAD) or index are packed into a separate pack file. Objects + included in pack files which have a .keep file associated are never + repacked. All old pack files which existed before are deleted. + + a collection of the newly created pack files + + when during reading of refs, index, packfiles, objects, + reflog-entries or during writing to the packfiles + System.IO.IOException + occurs + + + + the ref which log should be inspected + only reflog entries not older then this time are processed + + the + NGit.ObjectId + s contained in the reflog + + System.IO.IOException + + + Returns a map of all refs and additional refs (e.g. + + Returns a map of all refs and additional refs (e.g. FETCH_HEAD, + MERGE_HEAD, ...) + + a map where names of refs point to ref objects + System.IO.IOException + + + + Return a list of those objects in the index which differ from whats in + HEAD + + a set of ObjectIds of changed objects in the index + System.IO.IOException + NGit.Errors.CorruptObjectException + + NGit.Errors.NoWorkTreeException + + + + + + + Returns the number of objects stored in pack files. + + Returns the number of objects stored in pack files. If an object is + contained in multiple pack files it is counted as often as it occurs. + + the number of objects stored in pack files + System.IO.IOException + + + Set the progress monitor used for garbage collection methods. + Set the progress monitor used for garbage collection methods. + + this + + + + During gc() or prune() each unreferenced, loose object which has been + created or modified in the last expireAgeMillis milliseconds + will not be pruned. + + + During gc() or prune() each unreferenced, loose object which has been + created or modified in the last expireAgeMillis milliseconds + will not be pruned. Only older objects may be pruned. If set to 0 then + every object is a candidate for pruning. + + minimal age of objects to be pruned in milliseconds. 
+ + + + + During gc() or prune() each unreferenced, loose object which has been + created or modified after or at expire will not be pruned. + + + During gc() or prune() each unreferenced, loose object which has been + created or modified after or at expire will not be pruned. + Only older objects may be pruned. If set to null then every object is a + candidate for pruning. + + + instant in time which defines object expiration + objects with modification time before this instant are expired + objects with modification time newer or equal to this instant + are not expired + + + + + A class holding statistical data for a FileRepository regarding how many + objects are stored as loose or packed objects + + + + The number of objects stored in pack files. + + The number of objects stored in pack files. If the same object is + stored in multiple pack files then it is counted as often as it + occurs in pack files. + + + + The number of pack files + + + The number of objects stored as loose objects. + The number of objects stored as loose objects. + + + The sum of the sizes of all files used to persist loose objects. + The sum of the sizes of all files used to persist loose objects. + + + The sum of the sizes of all pack files. + The sum of the sizes of all pack files. + + + The number of loose refs. + The number of loose refs. + + + The number of refs stored in pack files. + The number of refs stored in pack files. + + + + Parses strings with time and date specifications into + System.DateTime + . + When git needs to parse strings specified by the user this parser can be + used. One example is the parsing of the config parameter gc.pruneexpire. The + parser can handle only subset of what native gits approxidate parser + understands. + + + + The Date representing never. + + The Date representing never. Though this is a concrete value, most + callers are adviced to avoid depending on the actual value. + + + + + Parses a string into a + System.DateTime + . 
Since this parser also supports + relative formats (e.g. "yesterday") the caller can specify the reference + date. These types of strings can be parsed: +
    +
  • "never"
  • +
  • "now"
  • +
  • "yesterday"
  • +
  • "(x) years|months|weeks|days|hours|minutes|seconds ago"
    + Multiple specs can be combined like in "2 weeks 3 days ago". Instead of + ' ' one can use '.' to seperate the words
  • +
  • "yyyy-MM-dd HH:mm:ss Z" (ISO)
  • +
  • "EEE, dd MMM yyyy HH:mm:ss Z" (RFC)
  • +
  • "yyyy-MM-dd"
  • +
  • "yyyy.MM.dd"
  • +
  • "MM/dd/yyyy",
  • +
  • "dd.MM.yyyy"
  • +
  • "EEE MMM dd HH:mm:ss yyyy Z" (DEFAULT)
  • +
  • "EEE MMM dd HH:mm:ss yyyy" (LOCAL)
  • +
+
+ the string to be parsed + + the base date which is used for the calculation of relative + formats. E.g. if baseDate is "25.8.2012" then parsing of the + string "1 week ago" would result in a date corresponding to + "18.8.2012". This is used when a JGit command calls this + parser often but wants a consistent starting point for calls.
+ If set to null then the current time will be used + instead. + + + the parsed + System.DateTime + + if the given dateStr was not recognized +
+ + + + + + A convenient base class which provides empty method bodies for all + ProgressMonitor methods. + + + A convenient base class which provides empty method bodies for all + ProgressMonitor methods. +

+ Could be used in scenarios when only some of the progress notifications are + important and others can be ignored. + + + + diff --git a/Libraries/NGit/NSch.dll b/Libraries/NGit/NSch.dll new file mode 100644 index 0000000..4bb0dd2 Binary files /dev/null and b/Libraries/NGit/NSch.dll differ diff --git a/Libraries/NGit/NSch.xml b/Libraries/NGit/NSch.xml new file mode 100644 index 0000000..29c0627 --- /dev/null +++ b/Libraries/NGit/NSch.xml @@ -0,0 +1,1204 @@ + + + + NSch + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Enable the agent forwarding. + Enable the agent forwarding. + +
+ + Enable the X11 forwarding. + Enable the X11 forwarding. + + 6.3.1. Requesting X11 Forwarding + + + SetEnv(string, string) + SetEnv(byte[], byte[]) + + + Set the environment variable. + + Set the environment variable. + If name and value are needed to be passed + to the remote in your faivorite encoding,use + SetEnv(byte[], byte[]) + . + + A name for environment variable. + A value for environment variable. + 6.4 Environment Variable Passing + + + Set the environment variable. + Set the environment variable. + A name of environment variable. + A value of environment variable. + SetEnv(string, string) + 6.4 Environment Variable Passing + + + Allocate a Pseudo-Terminal. + Allocate a Pseudo-Terminal. + + 6.2. Requesting a Pseudo-Terminal + + + Set the terminal mode. + Set the terminal mode. + + + + Change the window dimension interactively. + Change the window dimension interactively. + terminal width, columns + terminal height, rows + terminal width, pixels + terminal height, pixels + 6.7. Window Dimension Change Message + + + Set the terminal type. + + Set the terminal type. + This method is not effective after Channel#connect(). + + terminal type(for example, "vt100") + SetPtyType(string, int, int, int, int) + + + + Set the terminal type. + + Set the terminal type. + This method is not effective after Channel#connect(). 
+ + terminal type(for example, "vt100") + terminal width, columns + terminal height, rows + terminal width, pixels + terminal height, pixels + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + This method will check if the given string can be expanded to the + unique string. + + + This method will check if the given string can be expanded to the + unique string. If it can be expanded to mutiple files, SftpException + will be thrown. + + the returned string is unquoted. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Returns the total number of bytes input so far. + Returns the total number of bytes input so far. + + + Returns the total number of bytes output so far. + Returns the total number of bytes output so far. + + + + + + + + + + + + + + + + + + Returns the total number of bytes input so far. + Returns the total number of bytes input so far. + + + Returns the total number of bytes output so far. + Returns the total number of bytes output so far. + + + + + + + + + + +
+
diff --git a/Libraries/NGit/Sharpen.dll b/Libraries/NGit/Sharpen.dll new file mode 100644 index 0000000..ec3b6d7 Binary files /dev/null and b/Libraries/NGit/Sharpen.dll differ diff --git a/Vss2Git/AbstractVcsWrapper.cs b/Vss2Git/AbstractVcsWrapper.cs index 6816daa..ad6bafa 100644 --- a/Vss2Git/AbstractVcsWrapper.cs +++ b/Vss2Git/AbstractVcsWrapper.cs @@ -259,17 +259,21 @@ public int Execute(ProcessStartInfo startInfo, out string stdout, out string std } public virtual string QuoteRelativePath(string path) + { + return Quote(RelativePath(path)); + } + + public virtual string RelativePath(string path) { if (path.StartsWith(outputDirectory)) { path = path.Substring(outputDirectory.Length); if (path.StartsWith("\\") || path.StartsWith("/")) - { path = path.Substring(1); - } } - return Quote(path); + return path; } + /// /// Puts quotes around a command-line argument if it includes whitespace /// or quotes. @@ -367,7 +371,7 @@ public virtual bool NeedsCommit() return needsCommit; } - public void SetNeedsCommit() + public virtual void SetNeedsCommit() { needsCommit = true; } diff --git a/Vss2Git/GitWrapper.cs b/Vss2Git/GitWrapper.cs index 03f286c..0d25036 100755 --- a/Vss2Git/GitWrapper.cs +++ b/Vss2Git/GitWrapper.cs @@ -18,9 +18,13 @@ using System.ComponentModel; using System.Diagnostics; using System.IO; +using System.Linq; using System.Text; using System.Text.RegularExpressions; using System.Threading; +using NGit; +using NGit.Api; +using NGit.Dircache; namespace Hpdi.Vss2Git { @@ -31,11 +35,11 @@ namespace Hpdi.Vss2Git class GitWrapper : AbstractVcsWrapper { public static readonly string gitMetaDir = ".git"; - public static readonly string gitExecutable = "git"; private List addQueue = new List(); private List deleteQueue = new List(); private List dirDeleteQueue = new List(); + private Git git; private Encoding commitEncoding = Encoding.UTF8; @@ -54,15 +58,15 @@ public bool ForceAnnotatedTags public GitWrapper(string outputDirectory, Logger logger, Encoding 
commitEncoding, bool forceAnnotatedTags) - : base(outputDirectory, logger, gitExecutable, gitMetaDir) + : base(outputDirectory, logger, null, gitMetaDir) { this.commitEncoding = commitEncoding; this.forceAnnotatedTags = forceAnnotatedTags; } - public override string QuoteRelativePath(string path) + public override string RelativePath(string path) { - return base.QuoteRelativePath(path).Replace('\\', '/'); // cygwin git compatibility + return base.RelativePath(path).Replace('\\', '/'); // cygwin git compatibility } public override void Init(bool resetRepo) @@ -72,7 +76,7 @@ public override void Init(bool resetRepo) DeleteDirectory(GetOutputDirectory()); Thread.Sleep(0); Directory.CreateDirectory(GetOutputDirectory()); - VcsExec("init"); + git = Git.Init().SetDirectory(GetOutputDirectory()).Call(); } } @@ -91,33 +95,20 @@ public override bool Add(string path) return true; } - private bool DoAdd(string paths) - { - var startInfo = GetStartInfo("add --" + paths); - - // add fails if there are no files (directories don't count) - bool result = ExecuteUnless(startInfo, "did not match any files"); - if (result) SetNeedsCommit(); - return result; - } - private bool DoAdds() { - bool rc = false; - string paths = ""; + if (addQueue.Count == 0) + return false; + + var add = git.Add(); foreach (string path in addQueue) { - if (paths.Length > 8000) - { - rc |= DoAdd(paths); - paths = ""; - } - paths += " " + QuoteRelativePath(path); + add.AddFilepattern(RelativePath(path)); } addQueue.Clear(); - if (paths.Length > 1) - rc |= DoAdd(paths); - return rc; + add.Call(); + base.SetNeedsCommit(); + return true; } public override bool AddDir(string path) @@ -134,42 +125,33 @@ public override bool NeedsCommit() public override bool AddAll() { - var startInfo = GetStartInfo("add -A"); - - // add fails if there are no files (directories don't count) - bool result = ExecuteUnless(startInfo, "did not match any files"); - if (result) SetNeedsCommit(); - return result; + // 
git.Add().AddFilepattern(".").Call(); + // base.SetNeedsCommit(); + return true; } public override void RemoveFile(string path) { deleteQueue.Add(path); - SetNeedsCommit(); } - private void DoDelete(string paths) + private bool DoDeletes() { - VcsExec("rm -r -f --" + paths); // is always recursive - } + if (deleteQueue.Count == 0) + return false; - private void DoDeletes() - { - string paths = ""; + var delete = git.Rm(); foreach (string path in deleteQueue) { - if (paths.Length > 8000) - { - DoDelete(paths); - paths = ""; - } - paths += " " + QuoteRelativePath(path); + delete.AddFilepattern(RelativePath(path)); } deleteQueue.Clear(); - if (paths.Length > 1) - DoDelete(paths); + delete.Call(); CleanupEmptyDirs(); + base.SetNeedsCommit(); + return true; } + private void CleanupEmptyDirs() { foreach (string dir in dirDeleteQueue) @@ -184,7 +166,6 @@ public override void RemoveDir(string path, bool recursive) { deleteQueue.Add(path); // is always recursive dirDeleteQueue.Add(path); - SetNeedsCommit(); } public override void RemoveEmptyDir(string path) @@ -194,8 +175,9 @@ public override void RemoveEmptyDir(string path) public override void Move(string sourcePath, string destPath) { - VcsExec("mv -- " + QuoteRelativePath(sourcePath) + " " + QuoteRelativePath(destPath)); - SetNeedsCommit(); + git.Rm().AddFilepattern(RelativePath(sourcePath)).Call(); + git.Add().AddFilepattern(RelativePath(destPath)).Call(); + base.SetNeedsCommit(); } public override void MoveEmptyDir(string sourcePath, string destPath) @@ -204,125 +186,65 @@ public override void MoveEmptyDir(string sourcePath, string destPath) Directory.Move(sourcePath, destPath); } - public override bool DoCommit(string authorName, string authorEmail, string comment, DateTime localTime) + public override void SetNeedsCommit() { - TempFile commentFile; - - var args = "commit"; - AddComment(comment, ref args, out commentFile); - - using (commentFile) - { - var startInfo = GetStartInfo(args); - 
startInfo.EnvironmentVariables["GIT_AUTHOR_NAME"] = authorName; - startInfo.EnvironmentVariables["GIT_AUTHOR_EMAIL"] = authorEmail; - startInfo.EnvironmentVariables["GIT_AUTHOR_DATE"] = GetUtcTimeString(localTime); - - // also setting the committer is supposedly useful for converting to Mercurial - startInfo.EnvironmentVariables["GIT_COMMITTER_NAME"] = authorName; - startInfo.EnvironmentVariables["GIT_COMMITTER_EMAIL"] = authorEmail; - startInfo.EnvironmentVariables["GIT_COMMITTER_DATE"] = GetUtcTimeString(localTime); - - // ignore empty commits, since they are non-trivial to detect - // (e.g. when renaming a directory) - return ExecuteUnless(startInfo, "nothing to commit"); - } + // Suppress explicit calls. } - public override void Tag(string name, string taggerName, string taggerEmail, string comment, DateTime localTime) + public override bool DoCommit(string authorName, string authorEmail, string comment, DateTime localTime) { - TempFile commentFile; +#if false + // enable this when you find empty commits or uncommitted changes; this will throw on that commit - var args = "tag"; - // tools like Mercurial's git converter only import annotated tags - // remark: annotated tags are created with the git -a option, - // see e.g. 
http://learn.github.com/p/tagging.html - if (forceAnnotatedTags) - { - args += " -a"; - } - AddComment(comment, ref args, out commentFile); + var status = git.Status().Call(); - // tag names are not quoted because they cannot contain whitespace or quotes - args += " -- " + name; + if (status.IsClean()) + throw new InvalidOperationException("Expected changes"); - using (commentFile) - { - var startInfo = GetStartInfo(args); - startInfo.EnvironmentVariables["GIT_COMMITTER_NAME"] = taggerName; - startInfo.EnvironmentVariables["GIT_COMMITTER_EMAIL"] = taggerEmail; - startInfo.EnvironmentVariables["GIT_COMMITTER_DATE"] = GetUtcTimeString(localTime); + if (status.GetModified().Count > 0 || status.GetMissing().Count > 0 || status.GetUntracked().Count > 0 || status.GetConflicting().Count > 0) + throw new InvalidOperationException("Have modified, missing, untracked or conflicting files"); +#endif - ExecuteUnless(startInfo, null); - } - } + var person = new PersonIdent(authorName, authorEmail, localTime, TimeZoneInfo.Local); - private void SetConfig(string name, string value) - { - VcsExec("config " + name + " " + Quote(value)); + git.Commit() + .SetMessage(comment) + .SetAuthor(person) + .SetCommitter(person) + .Call(); + + return true; } - private void AddComment(string comment, ref string args, out TempFile tempFile) + public override void Tag(string name, string taggerName, string taggerEmail, string comment, DateTime localTime) { - tempFile = null; - if (!string.IsNullOrEmpty(comment)) - { - // need to use a temporary file to specify the comment when not - // using the system default code page or it contains newlines - if (commitEncoding.CodePage != Encoding.Default.CodePage || comment.IndexOf('\n') >= 0) - { - Logger.WriteLine("Generating temp file for comment: {0}", comment); - tempFile = new TempFile(); - tempFile.Write(comment, commitEncoding); - - // temporary path might contain spaces (e.g. 
"Documents and Settings") - args += " -F " + Quote(tempFile.Name); - } - else - { - args += " -m " + Quote(comment); - } - } - else - { - args += " --allow-empty-message --no-edit -m \"\""; - } + git.Tag() + .SetMessage(comment) + .SetTagger(new PersonIdent(taggerName, taggerEmail, localTime, TimeZoneInfo.Local)) + .SetName(name) + .Call(); } - private static string GetUtcTimeString(DateTime localTime) + private void SetConfig(string name, string value) { - // convert local time to UTC based on whether DST was in effect at the time - var utcTime = TimeZoneInfo.ConvertTimeToUtc(localTime); - - // format time according to ISO 8601 (avoiding locale-dependent month/day names) - return utcTime.ToString("yyyy'-'MM'-'dd HH':'mm':'ss +0000"); + int pos = name.IndexOf('.'); + string section = name.Substring(0, pos); + name = name.Substring(pos + 1); + git.GetRepository().GetConfig().SetString(section, null, name, value); } - private static Regex lastCommitTimestampRegex = new Regex("^Date:\\s*(\\S+)", RegexOptions.Multiline); - public override DateTime? 
GetLastCommit() { - if (Directory.Exists(Path.Combine(GetOutputDirectory(), gitMetaDir)) && FindExecutable()) + if (git == null) + return null; + + foreach (var commit in git.Log().SetMaxCount(1).Call()) { - try { - var startInfo = GetStartInfo("log -n 1 --date=raw"); - string stdout, stderr; - int exitCode = Execute(startInfo, out stdout, out stderr); - if (exitCode == 0) - { - var m = lastCommitTimestampRegex.Match(stdout); - if (m.Success) - { - long unixTimeStamp = long.Parse(m.Groups[1].Value); - DateTime dt = new DateTime(1970, 1, 1, 0, 0, 0, 0, System.DateTimeKind.Utc); - dt = dt.AddSeconds(unixTimeStamp).ToLocalTime(); - return dt; - } - } - } catch (Exception ) - { - } + long unixTimeStamp = commit.CommitTime; + DateTime dt = new DateTime(1970, 1, 1, 0, 0, 0, 0, System.DateTimeKind.Utc); + return dt.AddSeconds(unixTimeStamp).ToLocalTime(); } + return null; } diff --git a/Vss2Git/MainForm.Designer.cs b/Vss2Git/MainForm.Designer.cs index c092b4e..b253c7f 100755 --- a/Vss2Git/MainForm.Designer.cs +++ b/Vss2Git/MainForm.Designer.cs @@ -129,6 +129,7 @@ private void InitializeComponent() // // vssDirButton // + this.vssDirButton.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Right))); this.vssDirButton.Image = ((System.Drawing.Image)(resources.GetObject("vssDirButton.Image"))); this.vssDirButton.Location = new System.Drawing.Point(531, 17); this.vssDirButton.Name = "vssDirButton"; @@ -308,6 +309,7 @@ private void InitializeComponent() // // outDirButton // + this.outDirButton.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Right))); this.outDirButton.Image = ((System.Drawing.Image)(resources.GetObject("outDirButton.Image"))); this.outDirButton.Location = new System.Drawing.Point(531, 17); this.outDirButton.Name = "outDirButton"; diff --git a/Vss2Git/MainForm.resx b/Vss2Git/MainForm.resx index 191e9bb..fd64d16 100755 
--- a/Vss2Git/MainForm.resx +++ b/Vss2Git/MainForm.resx @@ -112,12 +112,12 @@ 2.0 - System.Resources.ResXResourceReader, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 + System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - System.Resources.ResXResourceWriter, System.Windows.Forms, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 + System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - + iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABGdBTUEAALGPC/xhBQAAAYRJREFUOE+t @@ -130,10 +130,10 @@ 9vvBYB0GAON8RJ2AjZv6ozVYhvEF1jp/zGyOXvoAAAAASUVORK5CYII= - + 17, 17 - + 132, 17 @@ -160,19 +160,19 @@ 9vvBYB0GAON8RJ2AjZv6ozVYhvEF1jp/zGyOXvoAAAAASUVORK5CYII= - + 241, 17 - + 404, 17 - + 569, 17 - + 746, 17 - + 924, 17 diff --git a/Vss2Git/Properties/Resources.Designer.cs b/Vss2Git/Properties/Resources.Designer.cs index 9bf49ab..0f0e5d2 100755 --- a/Vss2Git/Properties/Resources.Designer.cs +++ b/Vss2Git/Properties/Resources.Designer.cs @@ -1,7 +1,7 @@ //------------------------------------------------------------------------------ // // This code was generated by a tool. -// Runtime Version:4.0.30319.225 +// Runtime Version:4.0.30319.42000 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. diff --git a/Vss2Git/VcsExporter.cs b/Vss2Git/VcsExporter.cs index e5520e4..6aa89fc 100644 --- a/Vss2Git/VcsExporter.cs +++ b/Vss2Git/VcsExporter.cs @@ -99,7 +99,7 @@ public VcsExporter(WorkQueue workQueue, Logger logger, public void ExportToVcs(string repoPath, DateTime? continueAfter) { this.continueAfter = continueAfter; - workQueue.AddLast(delegate(object work) + workQueue.AddLast(delegate (object work) { var stopwatch = Stopwatch.StartNew(); @@ -118,16 +118,19 @@ public void ExportToVcs(string repoPath, DateTime? 
continueAfter) string vcs = vcsWrapper.GetVcs(); - while (!vcsWrapper.FindExecutable()) + if (vcs != null) { - var button = MessageBox.Show(vcs + " not found in PATH. " + - "If you need to modify your PATH variable, please " + - "restart the program for the changes to take effect.", - "Error", MessageBoxButtons.RetryCancel, MessageBoxIcon.Error); - if (button == DialogResult.Cancel) + while (!vcsWrapper.FindExecutable()) { - workQueue.Abort(); - return; + var button = MessageBox.Show(vcs + " not found in PATH. " + + "If you need to modify your PATH variable, please " + + "restart the program for the changes to take effect.", + "Error", MessageBoxButtons.RetryCancel, MessageBoxIcon.Error); + if (button == DialogResult.Cancel) + { + workQueue.Abort(); + return; + } } } @@ -926,7 +929,8 @@ private bool WriteRevisionTo(string physical, int version, string destPath) // propagate exceptions here (e.g. disk full) to abort/retry/ignore using (contents) { - WriteStream(contents, destPath); + if (!WriteStream(contents, destPath)) + return false; } // try to use the first revision (for this branch) as the create time, @@ -947,15 +951,53 @@ private bool WriteRevisionTo(string physical, int version, string destPath) return true; } - private void WriteStream(Stream inputStream, string path) + private bool WriteStream(Stream inputStream, string path) { Directory.CreateDirectory(Path.GetDirectoryName(path)); + // check whether the new contents actually differs from what's on disk + + if (File.Exists(path) && inputStream.Length == new FileInfo(path).Length) + { + using (var outputStream = File.OpenRead(path)) + { + byte[] inBuffer = new byte[4096]; + byte[] outBuffer = new byte[4096]; + + bool differs = false; + + while (!differs) + { + int read = inputStream.Read(inBuffer, 0, inBuffer.Length); + if (read <= 0) + break; + + outputStream.Read(outBuffer, 0, outBuffer.Length); + + for (int i = 0; i < read; i++) + { + if (inBuffer[i] != outBuffer[i]) + { + differs = true; + break; + } 
+ } + } + + if (!differs) + return false; + + inputStream.Seek(0, SeekOrigin.Begin); + } + } + using (var outputStream = new FileStream( path, FileMode.Create, FileAccess.Write, FileShare.None)) { streamCopier.Copy(inputStream, outputStream); } + + return true; } private delegate void RenameDelegate(string sourcePath, string destPath); diff --git a/Vss2Git/Vss2Git.csproj b/Vss2Git/Vss2Git.csproj index f0505f4..a5dcd12 100755 --- a/Vss2Git/Vss2Git.csproj +++ b/Vss2Git/Vss2Git.csproj @@ -10,7 +10,7 @@ Properties Hpdi.Vss2Git Vss2Git - v3.5 + v4.0 512 Vss2Git.ico @@ -32,6 +32,7 @@ false false true + true @@ -53,6 +54,21 @@ AllRules.ruleset + + ..\Libraries\NGit\ICSharpCode.SharpZipLib.dll + + + ..\Libraries\NGit\Mono.Security.dll + + + ..\Libraries\NGit\NGit.dll + + + ..\Libraries\NGit\NSch.dll + + + ..\Libraries\NGit\Sharpen.dll + 3.5 diff --git a/Vss2Git/app.config b/Vss2Git/app.config index 9aeb387..4508274 100755 --- a/Vss2Git/app.config +++ b/Vss2Git/app.config @@ -1,23 +1,23 @@ - + - -
+ +
- + $ - + - + localhost @@ -41,13 +41,13 @@ git - + - + - + True @@ -65,14 +65,14 @@ True - + - + False - + diff --git a/VssPhysicalLib/RecordException.cs b/VssPhysicalLib/RecordException.cs index a30690e..a901607 100755 --- a/VssPhysicalLib/RecordException.cs +++ b/VssPhysicalLib/RecordException.cs @@ -11,30 +11,30 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - */ - -using System; - -namespace Hpdi.VssPhysicalLib -{ - /// - /// Base class for exceptions thrown when an invalid record is read. - /// - /// Trevor Robinson - public class RecordException : Exception - { - public RecordException() - { - } - - public RecordException(string message) - : base(message) - { - } - - public RecordException(string message, Exception innerException) - : base(message, innerException) - { - } - } -} + */ + +using System; + +namespace Hpdi.VssPhysicalLib +{ + /// + /// Base class for exceptions thrown when an invalid record is read. + /// + /// Trevor Robinson + public class RecordException : Exception + { + public RecordException() + { + } + + public RecordException(string message) + : base(message) + { + } + + public RecordException(string message, Exception innerException) + : base(message, innerException) + { + } + } +} diff --git a/VssPhysicalLib/RecordTruncatedException.cs b/VssPhysicalLib/RecordTruncatedException.cs index 3f30bef..5b3ca36 100755 --- a/VssPhysicalLib/RecordTruncatedException.cs +++ b/VssPhysicalLib/RecordTruncatedException.cs @@ -1,35 +1,35 @@ -/* Copyright 2009 HPDI, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -using System; - -namespace Hpdi.VssPhysicalLib -{ - /// - /// Exception thrown when a truncated record is found. - /// - /// Trevor Robinson - public class RecordTruncatedException : RecordException - { - public RecordTruncatedException() - { - } - - public RecordTruncatedException(string message) - : base(message) - { - } - } -} +/* Copyright 2009 HPDI, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; + +namespace Hpdi.VssPhysicalLib +{ + /// + /// Exception thrown when a truncated record is found. + /// + /// Trevor Robinson + public class RecordTruncatedException : RecordException + { + public RecordTruncatedException() + { + } + + public RecordTruncatedException(string message) + : base(message) + { + } + } +}