Merge branch 'master' into jc/web
authorJunio C Hamano <junkio@cox.net>
Tue, 24 Oct 2006 03:53:38 +0000 (20:53 -0700)
committerJunio C Hamano <junkio@cox.net>
Tue, 24 Oct 2006 03:53:38 +0000 (20:53 -0700)
* master: (114 commits)
gitweb: Fix setting $/ in parse_commit()
daemon: do not die on older clients.
xdiff/xemit.c (xdl_find_func): Elide trailing white space in a context header.
git-clone: honor --quiet
Documentation for the [remote] config
prune-packed: Fix uninitialized variable.
ignore-errors requires cl
git-send-email: do not pass custom Date: header
Use column indexes in git-cvsserver where necessary.
gitweb: Add '..' (up directory) to tree view if applicable
gitweb: Improve git_print_page_path
pager: default to LESS=FRSX not LESS=FRS
Make prune also run prune-packed
git-vc: better installation instructions
gitweb: Do not esc_html $basedir argument to git_print_tree_entry
gitweb: Whitespace cleanup - tabs are for indent, spaces are for align (2)
Fix usagestring for git-branch
git-merge: show usage if run without arguments
add the capability for index-pack to read from a stream
git-clone: define die() and use it.
...

83 files changed:
Documentation/config.txt
Documentation/diff-options.txt
Documentation/git-cherry-pick.txt
Documentation/git-grep.txt
Documentation/git-http-push.txt
Documentation/git-pack-objects.txt
Documentation/git-rebase.txt
Documentation/git-repack.txt
Documentation/git-rev-parse.txt
Documentation/git-send-pack.txt
Documentation/git-shortlog.txt
Documentation/git-svn.txt
Documentation/git.txt
Documentation/glossary.txt
Documentation/urls.txt
Makefile
archive-zip.c
builtin-apply.c
builtin-archive.c
builtin-grep.c
builtin-pack-objects.c
builtin-prune-packed.c
builtin-prune.c
builtin-unpack-objects.c
builtin.h
cache-tree.c
cache.h
combine-diff.c
commit.c
contrib/emacs/git.el
contrib/emacs/vc-git.el
daemon.c
diff-delta.c
diff.c
diff.h
fetch-clone.c
fetch-pack.c
git-bisect.sh
git-branch.sh
git-clone.sh
git-commit.sh
git-cvsserver.perl
git-fetch.sh
git-merge.sh
git-pull.sh
git-rebase.sh
git-repack.sh
git-resolve.sh
git-revert.sh
git-send-email.perl
git-shortlog.perl
git-svn.perl
git-svnimport.perl
git.c
git.spec.in
gitweb/README
gitweb/gitweb.css
gitweb/gitweb.perl
grep.c
grep.h
http-fetch.c
imap-send.c
index-pack.c
merge-recursive.c
pack.h
pager.c
revision.c
sha1_file.c
sha1_name.c
show-index.c
sideband.c
sideband.h
t/t4013/diff.diff-tree_--pretty_--root_--summary_initial
t/t4015-diff-whitespace.sh [new file with mode: 0755]
t/t4118-apply-empty-context.sh [new file with mode: 0755]
t/t5000-tar-tree.sh
t/t5600-clone-fail-cleanup.sh
t/test-lib.sh
trace.c
upload-pack.c
xdiff/xemit.c
xdiff/xmacros.h
xdiff/xutils.c
index 84e38911eeecd3f4e5f195ada61cb9bc831a4627..026d4cf9ade90cd108d351e372ed862378cde100 100644 (file)
@@ -230,6 +230,22 @@ pull.octopus::
 pull.twohead::
        The default merge strategy to use when pulling a single branch.
 
+remote.<name>.url::
+       The URL of a remote repository.  See gitlink:git-fetch[1] or
+       gitlink:git-push[1].
+
+remote.<name>.fetch::
+       The default set of "refspec" for gitlink:git-fetch[1]. See
+       gitlink:git-fetch[1].
+
+remote.<name>.push::
+       The default set of "refspec" for gitlink:git-push[1]. See
+       gitlink:git-push[1].
+
+repack.usedeltabaseoffset::
+       Allow gitlink:git-repack[1] to create packs that use
+       delta-base offsets.  Defaults to false.
+
 show.difftree::
        The default gitlink:git-diff-tree[1] arguments to be used
        for gitlink:git-show[1].
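
As a small, hypothetical illustration of the remote.<name>.* variables documented above (the remote name, URL and refspec are examples only), the equivalent of a $GIT_DIR/remotes file can be set up from the command line and then used by git-fetch:

        # configure a remote named "origin"
        git repo-config remote.origin.url git://git.example.com/project.git
        git repo-config remote.origin.fetch +refs/heads/master:refs/remotes/origin/master
        # fetch using the configured remote
        git fetch origin
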
index 7b7b9e8ce92db7fbb2072b75a613b073b0c5a4e6..e112172ca57da75ce6c2dd447cc97c6aa2b6499e 100644 (file)
        The width of the filename part can be controlled by
        giving another width to it separated by a comma.
 
+--numstat::
+       Similar to \--stat, but shows the number of added and
+       deleted lines in decimal notation and the pathname without
+       abbreviation, to make it more machine friendly.
+
 --summary::
        Output a condensed summary of extended header information
        such as creations, renames and mode changes.
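
For illustration, --numstat output is intended for scripts; a minimal sketch (the revision range is arbitrary):

        # per-file counts of added and deleted lines, one file per line,
        # followed by the unabbreviated pathname
        git diff --numstat HEAD^ HEAD
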
index bfa950ca19c701bd3837a874bbeeb18b440ed44f..875edb6b9f5ddc6d19b2de29cebbad5ebb7d37a0 100644 (file)
@@ -7,7 +7,7 @@ git-cherry-pick - Apply the change introduced by an existing commit
 
 SYNOPSIS
 --------
-'git-cherry-pick' [--edit] [-n] [-r] <commit>
+'git-cherry-pick' [--edit] [-n] [-x] <commit>
 
 DESCRIPTION
 -----------
@@ -24,13 +24,22 @@ OPTIONS
        With this option, `git-cherry-pick` will let you edit the commit
        message prior committing.
 
--r|--replay::
-       Usually the command appends which commit was
+-x::
+       Cause the command to append which commit was
        cherry-picked after the original commit message when
-       making a commit.  This option, '--replay', causes it to
-       use the original commit message intact.  This is useful
-       when you are reordering the patches in your private tree
-       before publishing.
+       making a commit.  Do not use this option if you are
+       cherry-picking from your private branch because the
+       information is useless to the recipient.  If on the
+       other hand you are cherry-picking between two publicly
+       visible branches (e.g. backporting a fix to a
+       maintenance branch for an older release from a
+       development branch), adding this information can be
+       useful.
+
+-r|--replay::
+       The command used to default to the `-x` behaviour
+       described above, and `-r` was the way to disable it.  Now
+       the default is not to do `-x`, so this option is a no-op.
 
 -n|--no-commit::
        Usually the command automatically creates a commit with
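
A short sketch of the backporting use case described for `-x` above (the branch name is an example only):

        # on a maintenance branch, pick a fix from the development branch
        # and record which commit it was cherry-picked from
        git checkout maint
        git cherry-pick -x <commit>
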
index d8af4d961b83e362b83f25b21d40f9d54ed4bc11..bfbece9864a3c6d8b38dd526e2483fea0fb0b58f 100644 (file)
@@ -14,7 +14,7 @@ SYNOPSIS
           [-v | --invert-match] [-h|-H] [--full-name]
           [-E | --extended-regexp] [-G | --basic-regexp] [-F | --fixed-strings]
           [-n] [-l | --files-with-matches] [-L | --files-without-match]
-          [-c | --count]
+          [-c | --count] [--all-match]
           [-A <post-context>] [-B <pre-context>] [-C <context>]
           [-f <file>] [-e] <pattern> [--and|--or|--not|(|)|-e <pattern>...]
           [<tree>...]
@@ -96,6 +96,11 @@ OPTIONS
        higher precedence than `--or`.  `-e` has to be used for all
        patterns.
 
+--all-match::
+       When multiple pattern expressions are combined with `--or`,
+       this flag limits the match to files that have lines
+       matching all of them.
+
 `<tree>...`::
        Search blobs in the trees for specified patterns.
 
@@ -111,6 +116,10 @@ git grep -e \'#define\' --and \( -e MAX_PATH -e PATH_MAX \)::
        Looks for a line that has `#define` and either `MAX_PATH` or
        `PATH_MAX`.
 
+git grep --all-match -e NODE -e Unexpected::
+       Looks for a line that has `NODE` or `Unexpected` in
+       files that have lines that match both.
+
 Author
 ------
 Originally written by Linus Torvalds <torvalds@osdl.org>, later
index 7e1f894a92f396e3354c94c9ce3339edc37b59df..c2485c6e9cdc2c6ec8c6fd876ed19758d6c5bb84 100644 (file)
@@ -34,7 +34,7 @@ OPTIONS
        Report the list of objects being walked locally and the
        list of objects successfully sent to the remote repository.
 
-<ref>...:
+<ref>...::
        The remote refs to update.
 
 
index d4661ddc2f84c1392b32ad3fdf5f4ebe5b465e00..a1e55054bd544848605eec8bfaa2cc29ac94f3d0 100644 (file)
@@ -9,7 +9,7 @@ git-pack-objects - Create a packed archive of objects
 SYNOPSIS
 --------
 [verse]
-'git-pack-objects' [-q] [--no-reuse-delta] [--non-empty]
+'git-pack-objects' [-q] [--no-reuse-delta] [--delta-base-offset] [--non-empty]
        [--local] [--incremental] [--window=N] [--depth=N]
        [--revs [--unpacked | --all]*] [--stdout | base-name] < object-list
 
@@ -71,11 +71,11 @@ base-name::
 --all::
        This implies `--revs`.  In addition to the list of
        revision arguments read from the standard input, pretend
-       as if all refs under `$GIT_DIR/refs` are specifed to be
+       as if all refs under `$GIT_DIR/refs` are specified to be
        included.
 
---window and --depth::
-       These two options affects how the objects contained in
+--window=[N], --depth=[N]::
+       These two options affect how the objects contained in
        the pack are stored using delta compression.  The
        objects are first internally sorted by type, size and
        optionally names and compared against the other objects
@@ -84,6 +84,7 @@ base-name::
        it too deep affects the performance on the unpacker
        side, because delta data needs to be applied that many
        times to get to the necessary object.
+       The default value for both --window and --depth is 10.
 
 --incremental::
        This flag causes an object already in a pack ignored
@@ -110,6 +111,17 @@ base-name::
        This flag tells the command not to reuse existing deltas
        but compute them from scratch.
 
+--delta-base-offset::
+       A packed archive can express the base object of a delta as
+       either a 20-byte object name or as an offset in the
+       stream, but older versions of git do not understand the
+       latter.  By default, git-pack-objects only uses the
+       former format for better compatibility.  This option
+       allows the command to use the latter format for
+       compactness.  Depending on the average delta chain
+       length, this option typically shrinks the resulting
+       packfile by 3-5 percent.
+
 
 Author
 ------
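
One possible invocation of the option documented above, using the object-list form from the synopsis (the base name "pack" is arbitrary):

        # pack all reachable objects, allowing delta-base-offset encoding;
        # the result is unreadable by git v1.4.3 and older
        git-rev-list --objects --all | git-pack-objects --delta-base-offset pack
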
index 9d7bcaa38cc5c13e29c00ddd18c64202e98cb5f8..10f2924f4df1eb29c1baf4484d2837377b6cfcb3 100644 (file)
@@ -7,7 +7,7 @@ git-rebase - Rebase local commits to a new head
 
 SYNOPSIS
 --------
-'git-rebase' [--merge] [--onto <newbase>] <upstream> [<branch>]
+'git-rebase' [-v] [--merge] [--onto <newbase>] <upstream> [<branch>]
 
 'git-rebase' --continue | --skip | --abort
 
@@ -121,6 +121,9 @@ OPTIONS
        is used instead (`git-merge-recursive` when merging a single
        head, `git-merge-octopus` otherwise).  This implies --merge.
 
+-v, \--verbose::
+       Display a diffstat of what changed upstream since the last rebase.
+
 include::merge-strategies.txt[]
 
 NOTES
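
A minimal example of the new -v option (the upstream name "origin" is just an example):

        # rebase the current branch and show a diffstat of what changed
        # upstream since the last rebase
        git rebase -v origin
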
index 49f7e0a4a446b8e393b76f39136f28c74c085dff..0fa47e3b018815ffa9de23725737fe2b599eaead 100644 (file)
@@ -57,13 +57,28 @@ OPTIONS
         `git update-server-info`.
 
 --window=[N], --depth=[N]::
-       These two options affects how the objects contained in the pack are
+       These two options affect how the objects contained in the pack are
        stored using delta compression. The objects are first internally
        sorted by type, size and optionally names and compared against the
        other objects within `--window` to see if using delta compression saves
        space. `--depth` limits the maximum delta depth; making it too deep
        affects the performance on the unpacker side, because delta data needs
        to be applied that many times to get to the necessary object.
+       The default value for both --window and --depth is 10.
+
+
+Configuration
+-------------
+
+When the configuration variable `repack.UseDeltaBaseOffset` is set
+for the repository, the command passes the `--delta-base-offset`
+option to `git-pack-objects`; this typically results in slightly
+smaller packs, but the generated packs are incompatible with
+versions of git older than (and including) v1.4.3; do not set
+the variable in a repository that older versions of git need to
+be able to read (this includes repositories from which packs can
+be copied out over http or rsync, because people who obtained packs
+that way may try to read them with an older git).
 
 
 Author
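
As a sketch of the configuration described above (assuming a repository that will never be read by git v1.4.3 or older):

        # opt in to delta-base-offset packs for this repository
        git repo-config repack.usedeltabaseoffset true
        # repack everything into a single pack and drop redundant packs
        git repack -a -d
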
index 2f1306c1d95654802cbe417a19de6047777b91bb..5d4257062d1776ee18900b84e805197a9028e2f0 100644 (file)
@@ -111,7 +111,9 @@ SPECIFYING REVISIONS
 
 A revision parameter typically, but not necessarily, names a
 commit object.  They use what is called an 'extended SHA1'
-syntax.
+syntax.  Here are various ways to spell object names.  The
+ones listed near the end of this list are to name trees and
+blobs contained in a commit.
 
 * The full SHA1 object name (40-byte hexadecimal string), or
   a substring of such that is unique within the repository.
@@ -119,6 +121,9 @@ syntax.
   name the same commit object if there are no other object in
   your repository whose object name starts with dae86e.
 
+* An output from `git-describe`; i.e. the closest tag, followed by a
+  dash, a 'g', and an abbreviated object name.
+
 * A symbolic ref name.  E.g. 'master' typically means the commit
   object referenced by $GIT_DIR/refs/heads/master.  If you
   happen to have both heads/master and tags/master, you can
@@ -156,6 +161,15 @@ syntax.
   and dereference the tag recursively until a non-tag object is
   found.
 
+* A suffix ':' followed by a path; this names the blob or tree
+  at the given path in the tree-ish object named by the part
+  before the colon.
+
+* A colon, optionally followed by a stage number (0 to 3) and a
+  colon, followed by a path; this names a blob object in the
+  index at the given path.  A missing stage number (and the colon
+  that follows it) names a stage 0 entry.
+
 Here is an illustration, by Jon Loeliger.  Both node B and C are
 a commit parents of commit node A.  Parent commits are ordered
 left-to-right.
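
A couple of examples of the new suffix syntaxes described above (the path "Makefile" is arbitrary):

        # the blob or tree at path "Makefile" in the commit named by HEAD
        git rev-parse HEAD:Makefile
        # the stage 0 index entry at the same path
        git rev-parse :0:Makefile
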
index 9e67f1730261a20ffa8c22b682c9207856901534..5376f685486c6f19cbe6ea3b8391cac5b50c593b 100644 (file)
@@ -43,7 +43,7 @@ OPTIONS
 <directory>::
        The repository to update.
 
-<ref>...:
+<ref>...::
        The remote refs to update.
 
 
index 7486ebe785733a6d4f4e1a3a2ea8f5c37b3f0a51..d54fc3e5c6d7ba3be31fa2c1238e2d7039c92e74 100644 (file)
@@ -7,16 +7,29 @@ git-shortlog - Summarize 'git log' output
 
 SYNOPSIS
 --------
-git-log --pretty=short | 'git-shortlog'
+git-log --pretty=short | 'git-shortlog' [-h] [-n] [-s]
 
 DESCRIPTION
 -----------
 Summarizes 'git log' output in a format suitable for inclusion
-in release announcements. Each commit will be grouped by author
+in release announcements. Each commit will be grouped by author and
 the first line of the commit message will be shown.
 
 Additionally, "[PATCH]" will be stripped from the commit description.
 
+OPTIONS
+-------
+
+-h::
+       Print a short usage message and exit.
+
+-n::
+       Sort output according to the number of commits per author instead
+       of alphabetical order by author.
+
+-s::
+       Suppress the commit description and provide a commit count summary only.
+
 FILES
 -----
 '.mailmap'::
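
For illustration, the new options can be combined to get a commit count per author (a minimal sketch):

        # count commits per author, most active first, suppressing descriptions
        git-log --pretty=short | git-shortlog -n -s
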
index 1cfa3e342cfdc074b0a9a113a59bcebee8869d07..450ff1f85b5f6ed0cca552bebb0ca1a279788ae5 100644 (file)
@@ -37,7 +37,9 @@ COMMANDS
 'init'::
        Creates an empty git repository with additional metadata
        directories for git-svn.  The Subversion URL must be specified
-       as a command-line argument.
+       as a command-line argument.  Optionally, the target directory
+       to operate on can be specified as a second argument.  Normally
+       this command initializes the current directory.
 
 'fetch'::
 
@@ -63,7 +65,30 @@ manually joining branches on commit.
        This is advantageous over 'commit' (below) because it produces
        cleaner, more linear history.
 
+'log'::
+       This should make it easy to look up svn log messages when svn
+       users refer to -r/--revision numbers.
+
+       The following features from `svn log' are supported:
+
+       --revision=<n>[:<n>] - is supported, non-numeric args are not:
+                              HEAD, NEXT, BASE, PREV, etc ...
+       -v/--verbose         - it's not completely compatible with
+                              the --verbose output in svn log, but
+                              reasonably close.
+       --limit=<n>          - is NOT the same as --max-count,
+                              doesn't count merged/excluded commits
+       --incremental        - supported
+
+       New features:
+
+       --show-commit        - shows the git commit sha1, as well
+       --oneline            - our version of --pretty=oneline
+
+       Any other arguments are passed directly to `git log'
+
 'commit'::
+       You should consider using 'dcommit' instead of this command.
        Commit specified commit or tree objects to SVN.  This relies on
        your imported fetch data being up-to-date.  This makes
        absolutely no attempts to do patching when committing to SVN, it
@@ -86,12 +111,49 @@ manually joining branches on commit.
        directories.  The output is suitable for appending to
        the $GIT_DIR/info/exclude file.
 
+'commit-diff'::
+       Commits the diff of two tree-ish arguments from the
+       command-line.  This command is intended for interoperability with
+       git-svnimport and does not rely on being inside a git-svn
+       init-ed repository.  This command takes three arguments, (a) the
+       original tree to diff against, (b) the new tree result, (c) the
+       URL of the target Subversion repository.  The final argument
+       (URL) may be omitted if you are working from a git-svn-aware
+       repository (that has been init-ed with git-svn).
+
+'graft-branches'::
+       This command attempts to detect merges/branches from already
+       imported history.  Techniques used currently include regexes,
+       file copies, and tree-matches.  This command generates (or
+       modifies) the $GIT_DIR/info/grafts file.  This command is
+       considered experimental, and inherently flawed because
+       merge-tracking in SVN is inherently flawed and inconsistent
+       across different repositories.
+
+'multi-init'::
+       This command supports git-svnimport-like command-line syntax for
+       importing repositories that are laid out as recommended by the
+       SVN folks.  This is a bit more tolerant than the git-svnimport
+       command-line syntax and doesn't require the user to figure out
+       where the repository URL ends and where the repository path
+       begins.
+
+'multi-fetch'::
+       This runs fetch on all known SVN branches we're tracking.  This
+       will NOT discover new branches (unlike git-svnimport), so
+       multi-init will need to be re-run (it's idempotent).
+
 --
 
 OPTIONS
 -------
 --
 
+--shared::
+--template=<template_directory>::
+       Only used with the 'init' command.
+       These are passed directly to gitlink:git-init-db[1].
+
 -r <ARG>::
 --revision <ARG>::
 
@@ -115,7 +177,7 @@ git-rev-list --pretty=oneline output can be used.
 
 --rmdir::
 
-Only used with the 'commit' command.
+Only used with the 'dcommit', 'commit' and 'commit-diff' commands.
 
 Remove directories from the SVN tree if there are no files left
 behind.  SVN can version empty directories, and they are not
@@ -128,7 +190,7 @@ repo-config key: svn.rmdir
 -e::
 --edit::
 
-Only used with the 'commit' command.
+Only used with the 'dcommit', 'commit' and 'commit-diff' commands.
 
 Edit the commit message before committing to SVN.  This is off by
 default for objects that are commits, and forced on when committing
@@ -139,7 +201,7 @@ repo-config key: svn.edit
 -l<num>::
 --find-copies-harder::
 
-Both of these are only used with the 'commit' command.
+Only used with the 'dcommit', 'commit' and 'commit-diff' commands.
 
 They are both passed directly to git-diff-tree see
 gitlink:git-diff-tree[1] for more information.
@@ -164,7 +226,26 @@ will abort operation. The user will then have to add the
 appropriate entry.  Re-running the previous git-svn command
 after the authors-file is modified should continue operation.
 
-repo-config key: svn.authors-file
+repo-config key: svn.authorsfile
+
+-q::
+--quiet::
+       Make git-svn less verbose.  This only affects git-svn if you
+       have the SVN::* libraries installed and are using them.
+
+--repack[=<n>]::
+--repack-flags=<flags>::
+       These should help keep disk usage sane for large fetches
+       with many revisions.
+
+       --repack takes an optional argument for the number of revisions
+       to fetch before repacking.  This defaults to repacking every
+       1000 commits fetched if no argument is specified.
+
+       --repack-flags are passed directly to gitlink:git-repack[1].
+
+repo-config key: svn.repack
+repo-config key: svn.repackflags
 
 -m::
 --merge::
@@ -215,6 +296,28 @@ section on
 '<<tracking-multiple-repos,Tracking Multiple Repositories or Branches>>'
 for more information on using GIT_SVN_ID.
 
+--follow-parent::
+       This is especially helpful when we're tracking a directory
+       that has been moved around within the repository, or if we
+       started tracking a branch and never tracked the trunk it was
+       descended from.
+
+       This relies on the SVN::* libraries to work.
+
+repo-config key: svn.followparent
+
+--no-metadata::
+       This gets rid of the git-svn-id: lines at the end of every commit.
+
+       With this, you lose the ability to use the rebuild command.  If
+       you ever lose your .git/svn/git-svn/.rev_db file, you won't be
+       able to fetch again, either.  This is fine for one-shot imports.
+
+       The 'git-svn log' command will not work on repositories using this,
+       either.
+
+repo-config key: svn.nometadata
+
 --
 
 COMPATIBILITY OPTIONS
@@ -231,6 +334,9 @@ for tracking the remote.
 --no-ignore-externals::
 Only used with the 'fetch' and 'rebuild' command.
 
+This option has no effect when you are using the SVN::*
+libraries with git; svn:externals are always avoided.
+
 By default, git-svn passes --ignore-externals to svn to avoid
 fetching svn:external trees into git.  Pass this flag to enable
 externals tracking directly via git.
@@ -264,7 +370,7 @@ Basic Examples
 Tracking and contributing to an Subversion managed-project:
 
 ------------------------------------------------------------------------
-# Initialize a tree (like git init-db):
+# Initialize a repo (like git init-db):
        git-svn init http://svn.foo.org/project/trunk
 # Fetch remote revisions:
        git-svn fetch
@@ -312,8 +418,8 @@ branches or directories in a Subversion repository, git-svn has a simple
 hack to allow it to track an arbitrary number of related _or_ unrelated
 SVN repositories via one git repository.  Simply set the GIT_SVN_ID
 environment variable to a name other other than "git-svn" (the default)
-and git-svn will ignore the contents of the $GIT_DIR/git-svn directory
-and instead do all of its work in $GIT_DIR/$GIT_SVN_ID for that
+and git-svn will ignore the contents of the $GIT_DIR/svn/git-svn directory
+and instead do all of its work in $GIT_DIR/svn/$GIT_SVN_ID for that
 invocation.  The interface branch will be remotes/$GIT_SVN_ID, instead of
 remotes/git-svn.  Any remotes/$GIT_SVN_ID branch should never be modified
 by the user outside of git-svn commands.
@@ -341,6 +447,9 @@ This allows you to tie unfetched SVN revision 375 to your current HEAD:
 
 Advanced Example: Tracking a Reorganized Repository
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Note: this example is now obsolete if you have SVN::* libraries
+installed.  Simply use --follow-parent when fetching.
+
 If you're tracking a directory that has moved, or otherwise been
 branched or tagged off of another directory in the repository and you
 care about the full history of the project, then you can read this
@@ -371,20 +480,18 @@ he needed to continue tracking /ufoai/trunk where /trunk left off.
 
 BUGS
 ----
-If somebody commits a conflicting changeset to SVN at a bad moment
-(right before you commit) causing a conflict and your commit to fail,
-your svn working tree ($GIT_DIR/git-svn/tree) may be dirtied.  The
-easiest thing to do is probably just to rm -rf $GIT_DIR/git-svn/tree and
-run 'rebuild'.
+
+If you are not using the SVN::* Perl libraries and somebody commits a
+conflicting changeset to SVN at a bad moment (right before you commit)
+causing a conflict and your commit to fail, your svn working tree
+($GIT_DIR/git-svn/tree) may be dirtied.  The easiest thing to do is
+probably just to rm -rf $GIT_DIR/git-svn/tree and run 'rebuild'.
 
 We ignore all SVN properties except svn:executable.  Too difficult to
 map them since we rely heavily on git write-tree being _exactly_ the
 same on both the SVN and git working trees and I prefer not to clutter
 working trees with metadata files.
 
-svn:keywords can't be ignored in Subversion (at least I don't know of
-a way to ignore them).
-
 Renamed and copied directories are not detected by git and hence not
 tracked when committing to SVN.  I do not plan on adding support for
 this as it's quite difficult and time-consuming to get working for all
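
A small example of the new 'log' command described above (the revision range is made up):

        # svn-style log for the tracked branch, one line per commit,
        # also showing the corresponding git commit
        git-svn log --oneline --show-commit --revision=100:200
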
index 2135b65516b372587a9a4fa13daf21df05a5d1ce..3af6fc63e2b5fa4d21e54a76a2a18bf7c47ab1c2 100644 (file)
@@ -243,6 +243,9 @@ gitlink:git-update-server-info[1]::
        Updates auxiliary information on a dumb server to help
        clients discover references and packs on it.
 
+gitlink:git-upload-archive[1]::
+       Invoked by 'git-archive' to send a generated archive.
+
 gitlink:git-upload-pack[1]::
        Invoked by 'git-fetch-pack' to push
        what are asked for.
index 14449ca8baeb7c2c979d0fd3f61ce683fd8e08ce..7e560b0eea83ae7b32e17095f766d84c0b67671f 100644 (file)
@@ -179,7 +179,7 @@ object name::
        character hexadecimal encoding of the hash of the object (possibly
        followed by a white space).
 
-object type:
+object type::
        One of the identifiers "commit","tree","tag" and "blob" describing
        the type of an object.
 
@@ -324,7 +324,7 @@ tag::
        A tag is most typically used to mark a particular point in the
        commit ancestry chain.
 
-unmerged index:
+unmerged index::
        An index which contains unmerged index entries.
 
 working tree::
index 26ecba53fbfad4af4da46680330f998bf92f1b3f..670827c323fe276124ba1a84d59386771db868f1 100644 (file)
@@ -51,6 +51,14 @@ lines are used for `git-push` and `git-fetch`/`git-pull`,
 respectively.  Multiple `Push:` and `Pull:` lines may
 be specified for additional branch mappings.
 
+Or, equivalently, in the `$GIT_DIR/config` (note the use
+of `fetch` instead of `Pull:`):
+
+[remote "<remote>"]
+       url = <url>
+       push = <refspec>
+       fetch = <refspec>
+
 The name of a file in `$GIT_DIR/branches` directory can be
 specified as an older notation short-hand; the named
 file should contain a single line, a URL in one of the
index 09f60bb2c218576c342c2194262a460dda7160a9..9517ce7639d2bd2f1e90eca8b2b184b1baff4d29 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -764,6 +764,8 @@ $(LIB_FILE): $(LIB_OBJS)
        rm -f $@ && $(AR) rcs $@ $(LIB_OBJS)
 
 XDIFF_OBJS=xdiff/xdiffi.o xdiff/xprepare.o xdiff/xutils.o xdiff/xemit.o
+$(XDIFF_OBJS): xdiff/xinclude.h xdiff/xmacros.h xdiff/xdiff.h xdiff/xtypes.h \
+       xdiff/xutils.h xdiff/xprepare.h xdiff/xdiffi.h xdiff/xemit.h
 
 $(XDIFF_LIB): $(XDIFF_OBJS)
        rm -f $@ && $(AR) rcs $@ $(XDIFF_OBJS)
@@ -860,8 +862,9 @@ git.spec: git.spec.in
        mv $@+ $@
 
 GIT_TARNAME=git-$(GIT_VERSION)
-dist: git.spec git-tar-tree
-       ./git-tar-tree HEAD^{tree} $(GIT_TARNAME) > $(GIT_TARNAME).tar
+dist: git.spec git-archive
+       ./git-archive --format=tar \
+               --prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar
        @mkdir -p $(GIT_TARNAME)
        @cp git.spec $(GIT_TARNAME)
        @echo $(GIT_VERSION) > $(GIT_TARNAME)/version
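
The new dist rule above boils down to a single git-archive invocation; for an arbitrary prefix it looks like this (the version string is an example):

        # create a tarball with every path prefixed by git-1.4.x/
        git-archive --format=tar --prefix=git-1.4.x/ HEAD^{tree} > git-1.4.x.tar
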
index 3ffdad68d130136312028b389da73af1a789232e..28e7352e98eaa3bcbc3c86ce772bae35a605ceb2 100644 (file)
@@ -145,6 +145,7 @@ static int write_zip_entry(const unsigned char *sha1,
 {
        struct zip_local_header header;
        struct zip_dir_header dirent;
+       unsigned long attr2;
        unsigned long compressed_size;
        unsigned long uncompressed_size;
        unsigned long crc;
@@ -172,12 +173,16 @@ static int write_zip_entry(const unsigned char *sha1,
 
        if (S_ISDIR(mode)) {
                method = 0;
+               attr2 = 16;
                result = READ_TREE_RECURSIVE;
                out = NULL;
                uncompressed_size = 0;
                compressed_size = 0;
-       } else if (S_ISREG(mode)) {
-               method = zlib_compression_level == 0 ? 0 : 8;
+       } else if (S_ISREG(mode) || S_ISLNK(mode)) {
+               method = 0;
+               attr2 = S_ISLNK(mode) ? ((mode | 0777) << 16) : 0;
+               if (S_ISREG(mode) && zlib_compression_level != 0)
+                       method = 8;
                result = 0;
                buffer = read_sha1_file(sha1, type, &size);
                if (!buffer)
@@ -213,8 +218,8 @@ static int write_zip_entry(const unsigned char *sha1,
        }
 
        copy_le32(dirent.magic, 0x02014b50);
-       copy_le16(dirent.creator_version, 0);
-       copy_le16(dirent.version, 20);
+       copy_le16(dirent.creator_version, S_ISLNK(mode) ? 0x0317 : 0);
+       copy_le16(dirent.version, 10);
        copy_le16(dirent.flags, 0);
        copy_le16(dirent.compression_method, method);
        copy_le16(dirent.mtime, zip_time);
@@ -227,7 +232,7 @@ static int write_zip_entry(const unsigned char *sha1,
        copy_le16(dirent.comment_length, 0);
        copy_le16(dirent.disk, 0);
        copy_le16(dirent.attr1, 0);
-       copy_le32(dirent.attr2, 0);
+       copy_le32(dirent.attr2, attr2);
        copy_le32(dirent.offset, zip_offset);
        memcpy(zip_dir + zip_dir_offset, &dirent, sizeof(struct zip_dir_header));
        zip_dir_offset += sizeof(struct zip_dir_header);
@@ -236,7 +241,7 @@ static int write_zip_entry(const unsigned char *sha1,
        zip_dir_entries++;
 
        copy_le32(header.magic, 0x04034b50);
-       copy_le16(header.version, 20);
+       copy_le16(header.version, 10);
        copy_le16(header.flags, 0);
        copy_le16(header.compression_method, method);
        copy_le16(header.mtime, zip_time);
index de5f855266f6fcf3274c9648b02ab960a267bb90..11397f5504f98ccff47a90b228abc71b30327fb9 100644 (file)
@@ -360,7 +360,7 @@ static int gitdiff_hdrend(const char *line, struct patch *patch)
 static char *gitdiff_verify_name(const char *line, int isnull, char *orig_name, const char *oldnew)
 {
        if (!orig_name && !isnull)
-               return find_name(line, NULL, 1, 0);
+               return find_name(line, NULL, 1, TERM_TAB);
 
        if (orig_name) {
                int len;
@@ -370,7 +370,7 @@ static char *gitdiff_verify_name(const char *line, int isnull, char *orig_name,
                len = strlen(name);
                if (isnull)
                        die("git-apply: bad git-diff - expected /dev/null, got %s on line %d", name, linenr);
-               another = find_name(line, NULL, 1, 0);
+               another = find_name(line, NULL, 1, TERM_TAB);
                if (!another || memcmp(another, name, len))
                        die("git-apply: bad git-diff - inconsistent %s filename on line %d", oldnew, linenr);
                free(another);
@@ -934,6 +934,7 @@ static int parse_fragment(char *line, unsigned long size, struct patch *patch, s
                switch (*line) {
                default:
                        return -1;
+               case '\n': /* newer GNU diff, an empty context line */
                case ' ':
                        oldlines--;
                        newlines--;
@@ -1623,6 +1624,14 @@ static int apply_one_fragment(struct buffer_desc *desc, struct fragment *frag, i
                                first = '-';
                }
                switch (first) {
+               case '\n':
+                       /* Newer GNU diff, empty context line */
+                       if (plen < 0)
+                               /* ... followed by '\No newline'; nothing */
+                               break;
+                       old[oldsize++] = '\n';
+                       new[newsize++] = '\n';
+                       break;
                case ' ':
                case '-':
                        memcpy(old + oldsize, patch + 1, plen);
@@ -1783,8 +1792,6 @@ static int apply_binary(struct buffer_desc *desc, struct patch *patch)
 {
        const char *name = patch->old_name ? patch->old_name : patch->new_name;
        unsigned char sha1[20];
-       unsigned char hdr[50];
-       int hdrlen;
 
        /* For safety, we require patch index line to contain
         * full 40-byte textual SHA1 for old and new, at least for now.
@@ -1800,8 +1807,7 @@ static int apply_binary(struct buffer_desc *desc, struct patch *patch)
                /* See if the old one matches what the patch
                 * applies to.
                 */
-               write_sha1_file_prepare(desc->buffer, desc->size,
-                                       blob_type, sha1, hdr, &hdrlen);
+               hash_sha1_file(desc->buffer, desc->size, blob_type, sha1);
                if (strcmp(sha1_to_hex(sha1), patch->old_sha1_prefix))
                        return error("the patch applies to '%s' (%s), "
                                     "which does not match the "
@@ -1846,8 +1852,7 @@ static int apply_binary(struct buffer_desc *desc, struct patch *patch)
                                     name);
 
                /* verify that the result matches */
-               write_sha1_file_prepare(desc->buffer, desc->size, blob_type,
-                                       sha1, hdr, &hdrlen);
+               hash_sha1_file(desc->buffer, desc->size, blob_type, sha1);
                if (strcmp(sha1_to_hex(sha1), patch->new_sha1_prefix))
                        return error("binary patch to '%s' creates incorrect result (expecting %s, got %s)", name, patch->new_sha1_prefix, sha1_to_hex(sha1));
        }
@@ -2112,7 +2117,7 @@ static void numstat_patch_list(struct patch *patch)
                        quote_c_style(name, NULL, stdout, 0);
                else
                        fputs(name, stdout);
-               putchar('\n');
+               putchar(line_termination);
        }
 }
 
index 6dabdee2019d15d7deadfc094d0c1e5f101d09ca..9177379122ea29152e1213cc533f7cd7c569f8d3 100644 (file)
@@ -75,7 +75,7 @@ static int run_remote_archiver(const char *remote, int argc,
                die("git-archive: expected a flush");
 
        /* Now, start reading from fd[0] and spit it out to stdout */
-       rv = recv_sideband("archive", fd[0], 1, 2, buf, sizeof(buf));
+       rv = recv_sideband("archive", fd[0], 1, 2);
        close(fd[0]);
        rv |= finish_connect(pid);
 
index 4205e5d38dea6dee2d815a7434f87eacd089e4a9..ad7dc00cde4e8e08ef35b313525781c135357df3 100644 (file)
@@ -596,6 +596,10 @@ int cmd_grep(int argc, const char **argv, const char *prefix)
                                            GREP_CLOSE_PAREN);
                        continue;
                }
+               if (!strcmp("--all-match", arg)) {
+                       opt.all_match = 1;
+                       continue;
+               }
                if (!strcmp("-e", arg)) {
                        if (1 < argc) {
                                append_grep_pattern(&opt, argv[1],
index 96c069a81da643b7ee3515ca8c89734881fa3b77..41e1e74533d64e999ec1e564e15b8dea29e4c9c6 100644 (file)
@@ -15,7 +15,7 @@
 #include <sys/time.h>
 #include <signal.h>
 
-static const char pack_usage[] = "git-pack-objects [-q] [--no-reuse-delta] [--non-empty] [--local] [--incremental] [--window=N] [--depth=N] [--revs [--unpacked | --all]*] [--stdout | base-name] <ref-list | <object-list]";
+static const char pack_usage[] = "git-pack-objects [-q] [--no-reuse-delta] [--delta-base-offset] [--non-empty] [--local] [--incremental] [--window=N] [--depth=N] [--revs [--unpacked | --all]*] [--stdout | base-name] <ref-list | <object-list]";
 
 struct object_entry {
        unsigned char sha1[20];
@@ -29,6 +29,7 @@ struct object_entry {
        enum object_type type;
        enum object_type in_pack_type;  /* could be delta */
        unsigned long delta_size;       /* delta data size (uncompressed) */
+#define in_pack_header_size delta_size /* only when reusing pack data */
        struct object_entry *delta;     /* delta base object */
        struct packed_git *in_pack;     /* already in pack */
        unsigned int in_pack_offset;
@@ -60,6 +61,8 @@ static int non_empty;
 static int no_reuse_delta;
 static int local;
 static int incremental;
+static int allow_ofs_delta;
+
 static struct object_entry **sorted_by_sha, **sorted_by_type;
 static struct object_entry *objects;
 static int nr_objects, nr_alloc, nr_result;
@@ -84,17 +87,25 @@ static int object_ix_hashsz;
  * Pack index for existing packs give us easy access to the offsets into
  * corresponding pack file where each object's data starts, but the entries
  * do not store the size of the compressed representation (uncompressed
- * size is easily available by examining the pack entry header).  We build
- * a hashtable of existing packs (pack_revindex), and keep reverse index
- * here -- pack index file is sorted by object name mapping to offset; this
- * pack_revindex[].revindex array is an ordered list of offsets, so if you
- * know the offset of an object, next offset is where its packed
- * representation ends.
+ * size is easily available by examining the pack entry header).  It is
+ * also rather expensive to find the sha1 for an object given its offset.
+ *
+ * We build a hashtable of existing packs (pack_revindex), and keep reverse
+ * index here -- pack index file is sorted by object name mapping to offset;
+ * this pack_revindex[].revindex array is a list of offset/index_nr pairs
+ * ordered by offset, so if you know the offset of an object, next offset
+ * is where its packed representation ends and the index_nr can be used to
+ * get the object sha1 from the main index.
  */
+struct revindex_entry {
+       unsigned int offset;
+       unsigned int nr;
+};
 struct pack_revindex {
        struct packed_git *p;
-       unsigned long *revindex;
-} *pack_revindex = NULL;
+       struct revindex_entry *revindex;
+};
+static struct  pack_revindex *pack_revindex;
 static int pack_revindex_hashsz;
 
 /*
@@ -141,14 +152,9 @@ static void prepare_pack_ix(void)
 
 static int cmp_offset(const void *a_, const void *b_)
 {
-       unsigned long a = *(unsigned long *) a_;
-       unsigned long b = *(unsigned long *) b_;
-       if (a < b)
-               return -1;
-       else if (a == b)
-               return 0;
-       else
-               return 1;
+       const struct revindex_entry *a = a_;
+       const struct revindex_entry *b = b_;
+       return (a->offset < b->offset) ? -1 : (a->offset > b->offset) ? 1 : 0;
 }
 
 /*
@@ -161,25 +167,27 @@ static void prepare_pack_revindex(struct pack_revindex *rix)
        int i;
        void *index = p->index_base + 256;
 
-       rix->revindex = xmalloc(sizeof(unsigned long) * (num_ent + 1));
+       rix->revindex = xmalloc(sizeof(*rix->revindex) * (num_ent + 1));
        for (i = 0; i < num_ent; i++) {
                unsigned int hl = *((unsigned int *)((char *) index + 24*i));
-               rix->revindex[i] = ntohl(hl);
+               rix->revindex[i].offset = ntohl(hl);
+               rix->revindex[i].nr = i;
        }
        /* This knows the pack format -- the 20-byte trailer
         * follows immediately after the last object data.
         */
-       rix->revindex[num_ent] = p->pack_size - 20;
-       qsort(rix->revindex, num_ent, sizeof(unsigned long), cmp_offset);
+       rix->revindex[num_ent].offset = p->pack_size - 20;
+       rix->revindex[num_ent].nr = -1;
+       qsort(rix->revindex, num_ent, sizeof(*rix->revindex), cmp_offset);
 }
 
-static unsigned long find_packed_object_size(struct packed_git *p,
-                                            unsigned long ofs)
+static struct revindex_entry * find_packed_object(struct packed_git *p,
+                                                 unsigned int ofs)
 {
        int num;
        int lo, hi;
        struct pack_revindex *rix;
-       unsigned long *revindex;
+       struct revindex_entry *revindex;
        num = pack_revindex_ix(p);
        if (num < 0)
                die("internal error: pack revindex uninitialized");
@@ -191,10 +199,10 @@ static unsigned long find_packed_object_size(struct packed_git *p,
        hi = num_packed_objects(p) + 1;
        do {
                int mi = (lo + hi) / 2;
-               if (revindex[mi] == ofs) {
-                       return revindex[mi+1] - ofs;
+               if (revindex[mi].offset == ofs) {
+                       return revindex + mi;
                }
-               else if (ofs < revindex[mi])
+               else if (ofs < revindex[mi].offset)
                        hi = mi;
                else
                        lo = mi + 1;
@@ -202,6 +210,20 @@ static unsigned long find_packed_object_size(struct packed_git *p,
        die("internal error: pack revindex corrupt");
 }
 
+static unsigned long find_packed_object_size(struct packed_git *p,
+                                            unsigned long ofs)
+{
+       struct revindex_entry *entry = find_packed_object(p, ofs);
+       return entry[1].offset - ofs;
+}
+
+static unsigned char *find_packed_object_name(struct packed_git *p,
+                                             unsigned long ofs)
+{
+       struct revindex_entry *entry = find_packed_object(p, ofs);
+       return (unsigned char *)(p->index_base + 256) + 24 * entry->nr + 4;
+}
+
 static void *delta_against(void *buf, unsigned long size, struct object_entry *entry)
 {
        unsigned long othersize, delta_size;
@@ -232,7 +254,7 @@ static int encode_header(enum object_type type, unsigned long size, unsigned cha
        int n = 1;
        unsigned char c;
 
-       if (type < OBJ_COMMIT || type > OBJ_DELTA)
+       if (type < OBJ_COMMIT || type > OBJ_REF_DELTA)
                die("bad type %d", type);
 
        c = (type << 4) | (size & 15);
@@ -247,6 +269,10 @@ static int encode_header(enum object_type type, unsigned long size, unsigned cha
        return n;
 }
 
+/*
+ * we are going to reuse the existing object data as is.  make
+ * sure it is not corrupt.
+ */
 static int check_inflate(unsigned char *data, unsigned long len, unsigned long expect)
 {
        z_stream stream;
@@ -278,32 +304,6 @@ static int check_inflate(unsigned char *data, unsigned long len, unsigned long e
        return st;
 }
 
-/*
- * we are going to reuse the existing pack entry data.  make
- * sure it is not corrupt.
- */
-static int revalidate_pack_entry(struct object_entry *entry, unsigned char *data, unsigned long len)
-{
-       enum object_type type;
-       unsigned long size, used;
-
-       if (pack_to_stdout)
-               return 0;
-
-       /* the caller has already called use_packed_git() for us,
-        * so it is safe to access the pack data from mmapped location.
-        * make sure the entry inflates correctly.
-        */
-       used = unpack_object_header_gently(data, len, &type, &size);
-       if (!used)
-               return -1;
-       if (type == OBJ_DELTA)
-               used += 20; /* skip base object name */
-       data += used;
-       len -= used;
-       return check_inflate(data, len, entry->size);
-}
-
 static int revalidate_loose_object(struct object_entry *entry,
                                   unsigned char *map,
                                   unsigned long mapsize)
@@ -334,13 +334,10 @@ static unsigned long write_object(struct sha1file *f,
        enum object_type obj_type;
        int to_reuse = 0;
 
-       if (entry->preferred_base)
-               return 0;
-
        obj_type = entry->type;
        if (! entry->in_pack)
                to_reuse = 0;   /* can't reuse what we don't have */
-       else if (obj_type == OBJ_DELTA)
+       else if (obj_type == OBJ_REF_DELTA || obj_type == OBJ_OFS_DELTA)
                to_reuse = 1;   /* check_object() decided it for us */
        else if (obj_type != entry->in_pack_type)
                to_reuse = 0;   /* pack has delta which is unusable */
@@ -380,18 +377,35 @@ static unsigned long write_object(struct sha1file *f,
                if (entry->delta) {
                        buf = delta_against(buf, size, entry);
                        size = entry->delta_size;
-                       obj_type = OBJ_DELTA;
+                       obj_type = (allow_ofs_delta && entry->delta->offset) ?
+                               OBJ_OFS_DELTA : OBJ_REF_DELTA;
                }
                /*
                 * The object header is a byte of 'type' followed by zero or
-                * more bytes of length.  For deltas, the 20 bytes of delta
-                * sha1 follows that.
+                * more bytes of length.
                 */
                hdrlen = encode_header(obj_type, size, header);
                sha1write(f, header, hdrlen);
 
-               if (entry->delta) {
-                       sha1write(f, entry->delta, 20);
+               if (obj_type == OBJ_OFS_DELTA) {
+                       /*
+                        * Deltas with relative base contain an additional
+                        * encoding of the relative offset for the delta
+                        * base from this object's position in the pack.
+                        */
+                       unsigned long ofs = entry->offset - entry->delta->offset;
+                       unsigned pos = sizeof(header) - 1;
+                       header[pos] = ofs & 127;
+                       while (ofs >>= 7)
+                               header[--pos] = 128 | (--ofs & 127);
+                       sha1write(f, header + pos, sizeof(header) - pos);
+                       hdrlen += sizeof(header) - pos;
+               } else if (obj_type == OBJ_REF_DELTA) {
+                       /*
+                        * Deltas with a base reference contain
+                        * an additional 20 bytes for the base sha1.
+                        */
+                       sha1write(f, entry->delta->sha1, 20);
                        hdrlen += 20;
                }
                datalen = sha1write_compressed(f, buf, size);
@@ -399,21 +413,40 @@ static unsigned long write_object(struct sha1file *f,
        }
        else {
                struct packed_git *p = entry->in_pack;
-               use_packed_git(p);
 
-               datalen = find_packed_object_size(p, entry->in_pack_offset);
-               buf = (char *) p->pack_base + entry->in_pack_offset;
+               if (entry->delta) {
+                       obj_type = (allow_ofs_delta && entry->delta->offset) ?
+                               OBJ_OFS_DELTA : OBJ_REF_DELTA;
+                       reused_delta++;
+               }
+               hdrlen = encode_header(obj_type, entry->size, header);
+               sha1write(f, header, hdrlen);
+               if (obj_type == OBJ_OFS_DELTA) {
+                       unsigned long ofs = entry->offset - entry->delta->offset;
+                       unsigned pos = sizeof(header) - 1;
+                       header[pos] = ofs & 127;
+                       while (ofs >>= 7)
+                               header[--pos] = 128 | (--ofs & 127);
+                       sha1write(f, header + pos, sizeof(header) - pos);
+                       hdrlen += sizeof(header) - pos;
+               } else if (obj_type == OBJ_REF_DELTA) {
+                       sha1write(f, entry->delta->sha1, 20);
+                       hdrlen += 20;
+               }
 
-               if (revalidate_pack_entry(entry, buf, datalen))
+               use_packed_git(p);
+               buf = (char *) p->pack_base
+                       + entry->in_pack_offset
+                       + entry->in_pack_header_size;
+               datalen = find_packed_object_size(p, entry->in_pack_offset)
+                               - entry->in_pack_header_size;
+               if (!pack_to_stdout && check_inflate(buf, datalen, entry->size))
                        die("corrupt delta in pack %s", sha1_to_hex(entry->sha1));
                sha1write(f, buf, datalen);
                unuse_packed_git(p);
-               hdrlen = 0; /* not really */
-               if (obj_type == OBJ_DELTA)
-                       reused_delta++;
                reused++;
        }
-       if (obj_type == OBJ_DELTA)
+       if (entry->delta)
                written_delta++;
        written++;
        return hdrlen + datalen;
@@ -423,17 +456,16 @@ static unsigned long write_one(struct sha1file *f,
                               struct object_entry *e,
                               unsigned long offset)
 {
-       if (e->offset)
+       if (e->offset || e->preferred_base)
                /* offset starts from header size and cannot be zero
                 * if it is written already.
                 */
                return offset;
-       e->offset = offset;
-       offset += write_object(f, e);
-       /* if we are deltified, write out its base object. */
+       /* if we are deltified, write out its base object first. */
        if (e->delta)
                offset = write_one(f, e->delta, offset);
-       return offset;
+       e->offset = offset;
+       return offset + write_object(f, e);
 }
 
 static void write_pack_file(void)
@@ -899,26 +931,64 @@ static void check_object(struct object_entry *entry)
        char type[20];
 
        if (entry->in_pack && !entry->preferred_base) {
-               unsigned char base[20];
-               unsigned long size;
-               struct object_entry *base_entry;
+               struct packed_git *p = entry->in_pack;
+               unsigned long left = p->pack_size - entry->in_pack_offset;
+               unsigned long size, used;
+               unsigned char *buf;
+               struct object_entry *base_entry = NULL;
+
+               use_packed_git(p);
+               buf = p->pack_base;
+               buf += entry->in_pack_offset;
 
                /* We want in_pack_type even if we do not reuse delta.
                 * There is no point not reusing non-delta representations.
                 */
-               check_reuse_pack_delta(entry->in_pack,
-                                      entry->in_pack_offset,
-                                      base, &size,
-                                      &entry->in_pack_type);
+               used = unpack_object_header_gently(buf, left,
+                                                  &entry->in_pack_type, &size);
+               if (!used || left - used <= 20)
+                       die("corrupt pack for %s", sha1_to_hex(entry->sha1));
 
                /* Check if it is delta, and the base is also an object
                 * we are going to pack.  If so we will reuse the existing
                 * delta.
                 */
-               if (!no_reuse_delta &&
-                   entry->in_pack_type == OBJ_DELTA &&
-                   (base_entry = locate_object_entry(base)) &&
-                   (!base_entry->preferred_base)) {
+               if (!no_reuse_delta) {
+                       unsigned char c, *base_name;
+                       unsigned long ofs;
+                       /* there are at least 20 bytes left in the pack */
+                       switch (entry->in_pack_type) {
+                       case OBJ_REF_DELTA:
+                               base_name = buf + used;
+                               used += 20;
+                               break;
+                       case OBJ_OFS_DELTA:
+                               c = buf[used++];
+                               ofs = c & 127;
+                               while (c & 128) {
+                                       ofs += 1;
+                                       if (!ofs || ofs & ~(~0UL >> 7))
+                                               die("delta base offset overflow in pack for %s",
+                                                   sha1_to_hex(entry->sha1));
+                                       c = buf[used++];
+                                       ofs = (ofs << 7) + (c & 127);
+                               }
+                               if (ofs >= entry->in_pack_offset)
+                                       die("delta base offset out of bound for %s",
+                                           sha1_to_hex(entry->sha1));
+                               ofs = entry->in_pack_offset - ofs;
+                               base_name = find_packed_object_name(p, ofs);
+                               break;
+                       default:
+                               base_name = NULL;
+                       }
+                       if (base_name)
+                               base_entry = locate_object_entry(base_name);
+               }
+               unuse_packed_git(p);
+               entry->in_pack_header_size = used;
+
+               if (base_entry) {
 
                        /* Depth value does not matter - find_deltas()
                         * will never consider reused delta as the
@@ -927,9 +997,9 @@ static void check_object(struct object_entry *entry)
                         */
 
                        /* uncompressed size of the delta data */
-                       entry->size = entry->delta_size = size;
+                       entry->size = size;
                        entry->delta = base_entry;
-                       entry->type = OBJ_DELTA;
+                       entry->type = entry->in_pack_type;
 
                        entry->delta_sibling = base_entry->delta_child;
                        base_entry->delta_child = entry;
@@ -1484,6 +1554,10 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
                        no_reuse_delta = 1;
                        continue;
                }
+               if (!strcmp("--delta-base-offset", arg)) {
+                       allow_ofs_delta = 1;
+                       continue;
+               }
                if (!strcmp("--stdout", arg)) {
                        pack_to_stdout = 1;
                        continue;
index 960db49859a286bf5f3515ee7c2bfe25fe44d09e..24e3b0a8c21b43e0e82e5e505353213ca79f01b0 100644 (file)
@@ -4,9 +4,7 @@
 static const char prune_packed_usage[] =
 "git-prune-packed [-n]";
 
-static int dryrun;
-
-static void prune_dir(int i, DIR *dir, char *pathname, int len)
+static void prune_dir(int i, DIR *dir, char *pathname, int len, int dryrun)
 {
        struct dirent *de;
        char hex[40];
@@ -31,7 +29,7 @@ static void prune_dir(int i, DIR *dir, char *pathname, int len)
        rmdir(pathname);
 }
 
-static void prune_packed_objects(void)
+void prune_packed_objects(int dryrun)
 {
        int i;
        static char pathname[PATH_MAX];
@@ -50,7 +48,7 @@ static void prune_packed_objects(void)
                d = opendir(pathname);
                if (!d)
                        continue;
-               prune_dir(i, d, pathname, len + 3);
+               prune_dir(i, d, pathname, len + 3, dryrun);
                closedir(d);
        }
 }
@@ -58,6 +56,7 @@ static void prune_packed_objects(void)
 int cmd_prune_packed(int argc, const char **argv, const char *prefix)
 {
        int i;
+       int dryrun = 0;
 
        for (i = 1; i < argc; i++) {
                const char *arg = argv[i];
@@ -73,6 +72,6 @@ int cmd_prune_packed(int argc, const char **argv, const char *prefix)
                usage(prune_packed_usage);
        }
        sync();
-       prune_packed_objects();
+       prune_packed_objects(dryrun);
        return 0;
 }
index 6228c7907b183fb686c9f4cc54347c3dc16f3ec4..7290e6d9aa9e26cc8256a34ed22028e80936c010 100644 (file)
@@ -255,5 +255,7 @@ int cmd_prune(int argc, const char **argv, const char *prefix)
 
        prune_object_dir(get_object_directory());
 
+       sync();
+       prune_packed_objects(show_only);
        return 0;
 }
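
With the change above, git-prune now also removes loose objects that are already present in packs; a minimal usage sketch:

        # show what would be pruned (including packed duplicates), then do it
        git prune -n
        git prune
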
index 4f96bcae32afd22093b9a76d5fe5b39ba8268583..e70a71163d18dff1dd182c0d0b02967044693884 100644 (file)
@@ -15,7 +15,7 @@ static const char unpack_usage[] = "git-unpack-objects [-n] [-q] [-r] < pack-fil
 
 /* We always read in 4kB chunks. */
 static unsigned char buffer[4096];
-static unsigned long offset, len;
+static unsigned long offset, len, consumed_bytes;
 static SHA_CTX ctx;
 
 /*
@@ -51,6 +51,7 @@ static void use(int bytes)
                die("used more bytes than were available");
        len -= bytes;
        offset += bytes;
+       consumed_bytes += bytes;
 }
 
 static void *get_data(unsigned long size)
@@ -89,35 +90,49 @@ static void *get_data(unsigned long size)
 
 struct delta_info {
        unsigned char base_sha1[20];
+       unsigned long base_offset;
        unsigned long size;
        void *delta;
+       unsigned nr;
        struct delta_info *next;
 };
 
 static struct delta_info *delta_list;
 
-static void add_delta_to_list(unsigned char *base_sha1, void *delta, unsigned long size)
+static void add_delta_to_list(unsigned nr, unsigned const char *base_sha1,
+                             unsigned long base_offset,
+                             void *delta, unsigned long size)
 {
        struct delta_info *info = xmalloc(sizeof(*info));
 
        hashcpy(info->base_sha1, base_sha1);
+       info->base_offset = base_offset;
        info->size = size;
        info->delta = delta;
+       info->nr = nr;
        info->next = delta_list;
        delta_list = info;
 }
 
-static void added_object(unsigned char *sha1, const char *type, void *data, unsigned long size);
+struct obj_info {
+       unsigned long offset;
+       unsigned char sha1[20];
+};
+
+static struct obj_info *obj_list;
 
-static void write_object(void *buf, unsigned long size, const char *type)
+static void added_object(unsigned nr, const char *type, void *data,
+                        unsigned long size);
+
+static void write_object(unsigned nr, void *buf, unsigned long size,
+                        const char *type)
 {
-       unsigned char sha1[20];
-       if (write_sha1_file(buf, size, type, sha1) < 0)
+       if (write_sha1_file(buf, size, type, obj_list[nr].sha1) < 0)
                die("failed to write object");
-       added_object(sha1, type, buf, size);
+       added_object(nr, type, buf, size);
 }
 
-static void resolve_delta(const char *type,
+static void resolve_delta(unsigned nr, const char *type,
                          void *base, unsigned long base_size,
                          void *delta, unsigned long delta_size)
 {
@@ -130,20 +145,23 @@ static void resolve_delta(const char *type,
        if (!result)
                die("failed to apply delta");
        free(delta);
-       write_object(result, result_size, type);
+       write_object(nr, result, result_size, type);
        free(result);
 }
 
-static void added_object(unsigned char *sha1, const char *type, void *data, unsigned long size)
+static void added_object(unsigned nr, const char *type, void *data,
+                        unsigned long size)
 {
        struct delta_info **p = &delta_list;
        struct delta_info *info;
 
        while ((info = *p) != NULL) {
-               if (!hashcmp(info->base_sha1, sha1)) {
+               if (!hashcmp(info->base_sha1, obj_list[nr].sha1) ||
+                   info->base_offset == obj_list[nr].offset) {
                        *p = info->next;
                        p = &delta_list;
-                       resolve_delta(type, data, size, info->delta, info->size);
+                       resolve_delta(info->nr, type, data, size,
+                                     info->delta, info->size);
                        free(info);
                        continue;
                }
@@ -151,7 +169,8 @@ static void added_object(unsigned char *sha1, const char *type, void *data, unsi
        }
 }
 
-static void unpack_non_delta_entry(enum object_type kind, unsigned long size)
+static void unpack_non_delta_entry(enum object_type kind, unsigned long size,
+                                  unsigned nr)
 {
        void *buf = get_data(size);
        const char *type;
@@ -164,30 +183,80 @@ static void unpack_non_delta_entry(enum object_type kind, unsigned long size)
        default: die("bad type %d", kind);
        }
        if (!dry_run && buf)
-               write_object(buf, size, type);
+               write_object(nr, buf, size, type);
        free(buf);
 }
 
-static void unpack_delta_entry(unsigned long delta_size)
+static void unpack_delta_entry(enum object_type kind, unsigned long delta_size,
+                              unsigned nr)
 {
        void *delta_data, *base;
        unsigned long base_size;
        char type[20];
        unsigned char base_sha1[20];
 
-       hashcpy(base_sha1, fill(20));
-       use(20);
+       if (kind == OBJ_REF_DELTA) {
+               hashcpy(base_sha1, fill(20));
+               use(20);
+               delta_data = get_data(delta_size);
+               if (dry_run || !delta_data) {
+                       free(delta_data);
+                       return;
+               }
+               if (!has_sha1_file(base_sha1)) {
+                       hashcpy(obj_list[nr].sha1, null_sha1);
+                       add_delta_to_list(nr, base_sha1, 0, delta_data, delta_size);
+                       return;
+               }
+       } else {
+               unsigned base_found = 0;
+               unsigned char *pack, c;
+               unsigned long base_offset;
+               unsigned lo, mid, hi;
 
-       delta_data = get_data(delta_size);
-       if (dry_run || !delta_data) {
-               free(delta_data);
-               return;
-       }
+               pack = fill(1);
+               c = *pack;
+               use(1);
+               base_offset = c & 127;
+               while (c & 128) {
+                       base_offset += 1;
+                       if (!base_offset || base_offset & ~(~0UL >> 7))
+                               die("offset value overflow for delta base object");
+                       pack = fill(1);
+                       c = *pack;
+                       use(1);
+                       base_offset = (base_offset << 7) + (c & 127);
+               }
+               base_offset = obj_list[nr].offset - base_offset;
 
-       if (!has_sha1_file(base_sha1)) {
-               add_delta_to_list(base_sha1, delta_data, delta_size);
-               return;
+               delta_data = get_data(delta_size);
+               if (dry_run || !delta_data) {
+                       free(delta_data);
+                       return;
+               }
+               lo = 0;
+               hi = nr;
+               while (lo < hi) {
+                       mid = (lo + hi)/2;
+                       if (base_offset < obj_list[mid].offset) {
+                               hi = mid;
+                       } else if (base_offset > obj_list[mid].offset) {
+                               lo = mid + 1;
+                       } else {
+                               hashcpy(base_sha1, obj_list[mid].sha1);
+                               base_found = !is_null_sha1(base_sha1);
+                               break;
+                       }
+               }
+               if (!base_found) {
+                       /* The delta base object is itself a delta that
+                          has not been resolved yet. */
+                       hashcpy(obj_list[nr].sha1, null_sha1);
+                       add_delta_to_list(nr, null_sha1, base_offset, delta_data, delta_size);
+                       return;
+               }
        }
+
        base = read_sha1_file(base_sha1, type, &base_size);
        if (!base) {
                error("failed to read delta-pack base object %s",
@@ -197,7 +266,7 @@ static void unpack_delta_entry(unsigned long delta_size)
                has_errors = 1;
                return;
        }
-       resolve_delta(type, base, base_size, delta_data, delta_size);
+       resolve_delta(nr, type, base, base_size, delta_data, delta_size);
        free(base);
 }
 
@@ -208,6 +277,8 @@ static void unpack_one(unsigned nr, unsigned total)
        unsigned long size;
        enum object_type type;
 
+       obj_list[nr].offset = consumed_bytes;
+
        pack = fill(1);
        c = *pack;
        use(1);
@@ -216,7 +287,7 @@ static void unpack_one(unsigned nr, unsigned total)
        shift = 4;
        while (c & 0x80) {
                pack = fill(1);
-               c = *pack++;
+               c = *pack;
                use(1);
                size += (c & 0x7f) << shift;
                shift += 7;
@@ -225,13 +296,14 @@ static void unpack_one(unsigned nr, unsigned total)
                static unsigned long last_sec;
                static unsigned last_percent;
                struct timeval now;
-               unsigned percentage = (nr * 100) / total;
+               unsigned percentage = ((nr+1) * 100) / total;
 
                gettimeofday(&now, NULL);
                if (percentage != last_percent || now.tv_sec != last_sec) {
                        last_sec = now.tv_sec;
                        last_percent = percentage;
-                       fprintf(stderr, "%4u%% (%u/%u) done\r", percentage, nr, total);
+                       fprintf(stderr, "%4u%% (%u/%u) done\r",
+                                       percentage, (nr+1), total);
                }
        }
        switch (type) {
@@ -239,10 +311,11 @@ static void unpack_one(unsigned nr, unsigned total)
        case OBJ_TREE:
        case OBJ_BLOB:
        case OBJ_TAG:
-               unpack_non_delta_entry(type, size);
+               unpack_non_delta_entry(type, size, nr);
                return;
-       case OBJ_DELTA:
-               unpack_delta_entry(size);
+       case OBJ_REF_DELTA:
+       case OBJ_OFS_DELTA:
+               unpack_delta_entry(type, size, nr);
                return;
        default:
                error("bad object type %d", type);
@@ -265,9 +338,10 @@ static void unpack_all(void)
                die("unknown pack file version %d", ntohl(hdr->hdr_version));
        fprintf(stderr, "Unpacking %d objects\n", nr_objects);
 
+       obj_list = xmalloc(nr_objects * sizeof(*obj_list));
        use(sizeof(struct pack_header));
        for (i = 0; i < nr_objects; i++)
-               unpack_one(i+1, nr_objects);
+               unpack_one(i, nr_objects);
        if (delta_list)
                die("unresolved deltas left after unpacking");
 }
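
To make the new OBJ_OFS_DELTA path concrete, here is a standalone sketch of the same base-offset decoding shown in unpack_delta_entry() above, with a worked example; the function and variable names are made up for illustration, and the overflow check from the real loop is omitted. Once the distance is known it is subtracted from the entry's own offset, and the binary search over obj_list (which unpack_one() fills in pack order, so it is already sorted by offset) maps that absolute position back to the base object's SHA-1, or to null_sha1 if the base is itself a still-unresolved delta.

#include <stdio.h>

/* Mirror of the offset-decoding loop above.  Example: the bytes
 * 0x91 0x2e decode to ((0x11 + 1) << 7) + 0x2e = 2350, meaning the
 * delta base starts 2350 bytes before this entry in the pack. */
static unsigned long decode_base_offset(const unsigned char *buf, unsigned *used)
{
        unsigned i = 0;
        unsigned char c = buf[i++];
        unsigned long offset = c & 127;

        while (c & 128) {
                offset += 1;
                c = buf[i++];
                offset = (offset << 7) + (c & 127);
        }
        *used = i;
        return offset;
}

int main(void)
{
        unsigned char sample[] = { 0x91, 0x2e };
        unsigned used;

        printf("%lu (%u header bytes)\n", decode_base_offset(sample, &used), used);
        return 0;   /* prints: 2350 (2 header bytes) */
}
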
index f9fa9ff1d245e81630438d2b321cfe04c7905905..f71b9629b9c8671dd17296fabac7e1952b3fb9e9 100644 (file)
--- a/builtin.h
+++ b/builtin.h
@@ -11,6 +11,7 @@ extern int mailinfo(FILE *in, FILE *out, int ks, const char *encoding, const cha
 extern int split_mbox(const char **mbox, const char *dir, int allow_bare, int nr_prec, int skip);
 extern void stripspace(FILE *in, FILE *out);
 extern int write_tree(unsigned char *sha1, int missing_ok, const char *prefix);
+extern void prune_packed_objects(int);
 
 extern int cmd_add(int argc, const char **argv, const char *prefix);
 extern int cmd_apply(int argc, const char **argv, const char *prefix);
index 323c68a6709f30312e0dfb0fd60fcd7e69cd710b..d388848dd25db917e9bac5a8fd95cd89d214f5d8 100644 (file)
@@ -344,12 +344,8 @@ static int update_one(struct cache_tree *it,
 #endif
        }
 
-       if (dryrun) {
-               unsigned char hdr[200];
-               int hdrlen;
-               write_sha1_file_prepare(buffer, offset, tree_type, it->sha1,
-                                       hdr, &hdrlen);
-       }
+       if (dryrun)
+               hash_sha1_file(buffer, offset, tree_type, it->sha1);
        else
                write_sha1_file(buffer, offset, tree_type, it->sha1);
        free(buffer);
diff --git a/cache.h b/cache.h
index 97debd03c51c03c6df9a96e3f7de99bf4b4313e1..d0a1657292f5b47b7e345a87877d9b8894a80860 100644 (file)
--- a/cache.h
+++ b/cache.h
@@ -245,13 +245,8 @@ char *enter_repo(char *path, int strict);
 extern int sha1_object_info(const unsigned char *, char *, unsigned long *);
 extern void * unpack_sha1_file(void *map, unsigned long mapsize, char *type, unsigned long *size);
 extern void * read_sha1_file(const unsigned char *sha1, char *type, unsigned long *size);
+extern int hash_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *sha1);
 extern int write_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
-extern char *write_sha1_file_prepare(void *buf,
-                                    unsigned long len,
-                                    const char *type,
-                                    unsigned char *sha1,
-                                    unsigned char *hdr,
-                                    int *hdrlen);
 
 extern int check_sha1_signature(const unsigned char *sha1, void *buf, unsigned long size, const char *type);
 
@@ -274,8 +269,9 @@ enum object_type {
        OBJ_TREE = 2,
        OBJ_BLOB = 3,
        OBJ_TAG = 4,
-       /* 5/6 for future expansion */
-       OBJ_DELTA = 7,
+       /* 5 for future expansion */
+       OBJ_OFS_DELTA = 6,
+       OBJ_REF_DELTA = 7,
        OBJ_BAD,
 };
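
OBJ_OFS_DELTA takes value 6 rather than another reserved slot because the in-pack object type is stored in a 3-bit field of the first header byte of each entry, so only values 1 through 7 exist. A minimal sketch of how that first byte splits, assuming the usual pack entry header layout read at the top of unpack_one() in the unpack-objects hunks earlier (the helper is hypothetical):

/* First header byte of a pack entry: bit 7 is the size-continuation
 * flag, bits 6-4 hold the type, bits 3-0 the low bits of the size. */
static void parse_entry_header(unsigned char c, unsigned *type, unsigned long *size)
{
        *type = (c >> 4) & 7;   /* OBJ_COMMIT .. OBJ_REF_DELTA */
        *size = c & 15;         /* more 7-bit size groups follow while bit 7 is set */
}
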
 
index 46d9121baf2ebb024f6b19993a9b75fa3b67951a..65c786807b3cca8408d5119d03a8e4e268bd3e76 100644 (file)
@@ -856,8 +856,10 @@ void diff_tree_combined(const unsigned char *sha1,
                /* show stat against the first parent even
                 * when doing combined diff.
                 */
-               if (i == 0 && opt->output_format & DIFF_FORMAT_DIFFSTAT)
-                       diffopts.output_format = DIFF_FORMAT_DIFFSTAT;
+               int stat_opt = (opt->output_format &
+                               (DIFF_FORMAT_NUMSTAT|DIFF_FORMAT_DIFFSTAT));
+               if (i == 0 && stat_opt)
+                       diffopts.output_format = stat_opt;
                else
                        diffopts.output_format = DIFF_FORMAT_NO_OUTPUT;
                diff_tree_sha1(parent[i], sha1, "", &diffopts);
@@ -887,7 +889,8 @@ void diff_tree_combined(const unsigned char *sha1,
                        }
                        needsep = 1;
                }
-               else if (opt->output_format & DIFF_FORMAT_DIFFSTAT)
+               else if (opt->output_format &
+                        (DIFF_FORMAT_NUMSTAT|DIFF_FORMAT_DIFFSTAT))
                        needsep = 1;
                if (opt->output_format & DIFF_FORMAT_PATCH) {
                        if (needsep)
index 5b6e082c85f203cf27ac5b50f2d06a18b36fdc70..a6d543eee7831cd6a479d4cf8b2ba81a1438b298 100644 (file)
--- a/commit.c
+++ b/commit.c
@@ -548,10 +548,13 @@ static int add_merge_info(enum cmit_fmt fmt, char *buf, const struct commit *com
 
        while (parent) {
                struct commit *p = parent->item;
-               const char *hex = abbrev
-                       ? find_unique_abbrev(p->object.sha1, abbrev)
-                       : sha1_to_hex(p->object.sha1);
-               const char *dots = (abbrev && strlen(hex) != 40) ? "..." : "";
+               const char *hex = NULL;
+               const char *dots;
+               if (abbrev)
+                       hex = find_unique_abbrev(p->object.sha1, abbrev);
+               if (!hex)
+                       hex = sha1_to_hex(p->object.sha1);
+               dots = (abbrev && strlen(hex) != 40) ?  "..." : "";
                parent = parent->next;
 
                offset += sprintf(buf + offset, " %s%s", hex, dots);
index 68de9be0c7cca8645275a042900766b6ea2712c7..5354cd67b3dfa05d0018c03f15d0dcaf4e3df4a7 100644 (file)
@@ -422,8 +422,8 @@ and returns the process output as a string."
         (propertize
          (concat "   ("
                  (if (eq state 'copy) "copied from "
-                   (if (eq (git-fileinfo->state info) 'added) "renamed to "
-                     "renamed from "))
+                   (if (eq (git-fileinfo->state info) 'added) "renamed from "
+                     "renamed to "))
                  (git-escape-file-name (git-fileinfo->orig-name info))
                  ")") 'face 'git-status-face)
       "")))
index 4a8f79092d1217ea6aff51a9cbefef83a4a656ff..8b6361922fd6e6a2fcd9acb20fd54f5b645b36f0 100644 (file)
 ;; system.
 ;;
 ;; To install: put this file on the load-path and add GIT to the list
-;; of supported backends in `vc-handled-backends'.
+;; of supported backends in `vc-handled-backends'; the following line,
+;; placed in your ~/.emacs, will accomplish this:
+;;
+;;     (add-to-list 'vc-handled-backends 'GIT)
 ;;
 ;; TODO
 ;;  - changelog generation
 ;;  - working with revisions other than HEAD
 ;;
 
+(eval-when-compile (require 'cl))
+
 (defvar git-commits-coding-system 'utf-8
   "Default coding system for git commits.")
 
 (defun vc-git-annotate-command (file buf &optional rev)
   ; FIXME: rev is ignored
   (let ((name (file-relative-name file)))
-    (call-process "git" nil buf nil "annotate" name)))
+    (call-process "git" nil buf nil "blame" name)))
 
 (defun vc-git-annotate-time ()
-  (and (re-search-forward "[0-9a-f]+\t(.*\t\\([0-9]+\\)-\\([0-9]+\\)-\\([0-9]+\\) \\([0-9]+\\):\\([0-9]+\\):\\([0-9]+\\) \\([-+0-9]+\\)\t[0-9]+)" nil t)
+  (and (re-search-forward "[0-9a-f]+ (.* \\([0-9]+\\)-\\([0-9]+\\)-\\([0-9]+\\) \\([0-9]+\\):\\([0-9]+\\):\\([0-9]+\\) \\([-+0-9]+\\) +[0-9]+)" nil t)
        (vc-annotate-convert-time
         (apply #'encode-time (mapcar (lambda (match) (string-to-number (match-string match))) '(6 5 4 3 2 1 7))))))
 
index ad8492873ea1db63d56c24bec6174c4682105433..e66bb802da9faed6e01229fffbe56e3d7cca60c6 100644 (file)
--- a/daemon.c
+++ b/daemon.c
@@ -450,6 +450,8 @@ void fill_in_extra_table_entries(struct interp *itable)
         * Replace literal host with lowercase-ized hostname.
         */
        hp = interp_table[INTERP_SLOT_HOST].value;
+       if (!hp)
+               return;
        for ( ; *hp; hp++)
                *hp = tolower(*hp);
 
@@ -544,8 +546,10 @@ static int execute(struct sockaddr *addr)
                loginfo("Extended attributes (%d bytes) exist <%.*s>",
                        (int) pktlen - len,
                        (int) pktlen - len, line + len + 1);
-       if (len && line[len-1] == '\n')
+       if (len && line[len-1] == '\n') {
                line[--len] = 0;
+               pktlen--;
+       }
 
        /*
         * Initialize the path interpolation table for this connection.
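
For context, the request payload being cleaned up here is a service name and path, optionally followed by NUL-separated extra attributes such as the host the client asked for; an illustrative payload (the host name is a placeholder) might look like:

git-upload-pack /project.git\0host=git.example.com\0

Stripping a trailing newline now also shortens pktlen, so the extended-attribute length computation stays consistent, and fill_in_extra_table_entries() returns early when no host attribute was supplied instead of lowercasing a NULL pointer.
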
index 51df4608a8186e519bcb3b4e67d421c18efb696a..fa16d06c8d1e85a458428c673cb2f589857f5424 100644 (file)
@@ -308,8 +308,8 @@ create_delta(const struct delta_index *index,
                                continue;
                        if (ref_size > top - src)
                                ref_size = top - src;
-                       if (ref_size > 0xffffff)
-                               ref_size = 0xffffff;
+                       if (ref_size > 0x10000)
+                               ref_size = 0x10000;
                        if (ref_size <= msize)
                                break;
                        while (ref_size-- && *src++ == *ref)
@@ -318,8 +318,6 @@ create_delta(const struct delta_index *index,
                                /* this is our best match so far */
                                msize = ref - entry->ptr;
                                moff = entry->ptr - ref_data;
-                               if (msize >= 0x10000)
-                                       break;  /* this is good enough */
                        }
                }
 
@@ -383,8 +381,6 @@ create_delta(const struct delta_index *index,
                        if (msize & 0xff) { out[outpos++] = msize; i |= 0x10; }
                        msize >>= 8;
                        if (msize & 0xff) { out[outpos++] = msize; i |= 0x20; }
-                       msize >>= 8;
-                       if (msize & 0xff) { out[outpos++] = msize; i |= 0x40; }
 
                        *op = i;
                }
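
The two hunks above cap a single delta "copy" instruction at 0x10000 bytes and stop emitting the third length byte (the 0x40 flag bit), presumably so the produced deltas stay within what existing consumers of the format handle. For reference, a sketch of how such a copy instruction is read on the other side; this decoder is not part of the patch, and by convention an encoded size of zero means 0x10000.

/* Decode one "copy" instruction of a pack delta.  The caller has
 * already seen that the command byte has its high bit set; the low
 * bits say which offset and size bytes follow (little-endian). */
static const unsigned char *decode_copy(const unsigned char *p,
                                        unsigned long *off, unsigned long *size)
{
        unsigned char cmd = *p++;

        *off = *size = 0;
        if (cmd & 0x01) *off  = *p++;
        if (cmd & 0x02) *off |= (unsigned long)*p++ << 8;
        if (cmd & 0x04) *off |= (unsigned long)*p++ << 16;
        if (cmd & 0x08) *off |= (unsigned long)*p++ << 24;
        if (cmd & 0x10) *size  = *p++;
        if (cmd & 0x20) *size |= (unsigned long)*p++ << 8;
        if (cmd & 0x40) *size |= (unsigned long)*p++ << 16;
        if (!*size)
                *size = 0x10000;
        return p;
}
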
diff --git a/diff.c b/diff.c
index fb8243261cb3cc9165dbe990586d3865fad4ee61..33153787b8117396cf906e69e656849ac04f3257 100644 (file)
--- a/diff.c
+++ b/diff.c
@@ -795,6 +795,23 @@ static void show_stats(struct diffstat_t* data, struct diff_options *options)
               set, total_files, adds, dels, reset);
 }
 
+static void show_numstat(struct diffstat_t* data, struct diff_options *options)
+{
+       int i;
+
+       for (i = 0; i < data->nr; i++) {
+               struct diffstat_file *file = data->files[i];
+
+               printf("%d\t%d\t", file->added, file->deleted);
+               if (options->line_termination &&
+                   quote_c_style(file->name, NULL, NULL, 0))
+                       quote_c_style(file->name, NULL, stdout, 0);
+               else
+                       fputs(file->name, stdout);
+               putchar(options->line_termination);
+       }
+}
+
 struct checkdiff_t {
        struct xdiff_emit_state xm;
        const char *filename;
@@ -1731,6 +1748,7 @@ int diff_setup_done(struct diff_options *options)
                                      DIFF_FORMAT_CHECKDIFF |
                                      DIFF_FORMAT_NO_OUTPUT))
                options->output_format &= ~(DIFF_FORMAT_RAW |
+                                           DIFF_FORMAT_NUMSTAT |
                                            DIFF_FORMAT_DIFFSTAT |
                                            DIFF_FORMAT_SUMMARY |
                                            DIFF_FORMAT_PATCH);
@@ -1740,7 +1758,9 @@ int diff_setup_done(struct diff_options *options)
         * recursive bits for other formats here.
         */
        if (options->output_format & (DIFF_FORMAT_PATCH |
+                                     DIFF_FORMAT_NUMSTAT |
                                      DIFF_FORMAT_DIFFSTAT |
+                                     DIFF_FORMAT_SUMMARY |
                                      DIFF_FORMAT_CHECKDIFF))
                options->recursive = 1;
        /*
@@ -1828,6 +1848,9 @@ int diff_opt_parse(struct diff_options *options, const char **av, int ac)
        else if (!strcmp(arg, "--patch-with-raw")) {
                options->output_format |= DIFF_FORMAT_PATCH | DIFF_FORMAT_RAW;
        }
+       else if (!strcmp(arg, "--numstat")) {
+               options->output_format |= DIFF_FORMAT_NUMSTAT;
+       }
        else if (!strncmp(arg, "--stat", 6)) {
                char *end;
                int width = options->stat_width;
@@ -2602,7 +2625,7 @@ void diff_flush(struct diff_options *options)
                separator++;
        }
 
-       if (output_format & DIFF_FORMAT_DIFFSTAT) {
+       if (output_format & (DIFF_FORMAT_DIFFSTAT|DIFF_FORMAT_NUMSTAT)) {
                struct diffstat_t diffstat;
 
                memset(&diffstat, 0, sizeof(struct diffstat_t));
@@ -2612,7 +2635,10 @@ void diff_flush(struct diff_options *options)
                        if (check_pair_status(p))
                                diff_flush_stat(p, options, &diffstat);
                }
-               show_stats(&diffstat, options);
+               if (output_format & DIFF_FORMAT_NUMSTAT)
+                       show_numstat(&diffstat, options);
+               if (output_format & DIFF_FORMAT_DIFFSTAT)
+                       show_stats(&diffstat, options);
                separator++;
        }
 
diff --git a/diff.h b/diff.h
index b48c9914e7e3802d17870bbc0fd68c454fded61c..ce3058e437d5f0142be0746a3e50a3c32045eecb 100644 (file)
--- a/diff.h
+++ b/diff.h
@@ -26,20 +26,21 @@ typedef void (*diff_format_fn_t)(struct diff_queue_struct *q,
 
 #define DIFF_FORMAT_RAW                0x0001
 #define DIFF_FORMAT_DIFFSTAT   0x0002
-#define DIFF_FORMAT_SUMMARY    0x0004
-#define DIFF_FORMAT_PATCH      0x0008
+#define DIFF_FORMAT_NUMSTAT    0x0004
+#define DIFF_FORMAT_SUMMARY    0x0008
+#define DIFF_FORMAT_PATCH      0x0010
 
 /* These override all above */
-#define DIFF_FORMAT_NAME       0x0010
-#define DIFF_FORMAT_NAME_STATUS        0x0020
-#define DIFF_FORMAT_CHECKDIFF  0x0040
+#define DIFF_FORMAT_NAME       0x0100
+#define DIFF_FORMAT_NAME_STATUS        0x0200
+#define DIFF_FORMAT_CHECKDIFF  0x0400
 
 /* Same as output_format = 0 but we know that -s flag was given
  * and we should not give default value to output_format.
  */
-#define DIFF_FORMAT_NO_OUTPUT  0x0080
+#define DIFF_FORMAT_NO_OUTPUT  0x0800
 
-#define DIFF_FORMAT_CALLBACK   0x0100
+#define DIFF_FORMAT_CALLBACK   0x1000
 
 struct diff_options {
        const char *filter;
@@ -170,6 +171,7 @@ extern void diffcore_std_no_resolve(struct diff_options *);
 "  --patch-with-raw\n" \
 "                output both a patch and the diff-raw format.\n" \
 "  --stat        show diffstat instead of patch.\n" \
+"  --numstat     show numeric diffstat instead of patch.\n" \
 "  --patch-with-stat\n" \
 "                output a patch and prepend its diffstat.\n" \
 "  --name-only   show only names of changed files.\n" \
index b632ca0438b378944b37a7d64fd508ea3f7f470f..76b99afcdb8556fec284a621bbdc76223845877e 100644 (file)
@@ -115,12 +115,10 @@ static pid_t setup_sideband(int sideband, const char *me, int fd[2], int xd[2])
                die("%s: unable to fork off sideband demultiplexer", me);
        if (!side_pid) {
                /* subprocess */
-               char buf[LARGE_PACKET_MAX];
-
                close(fd[0]);
                if (xd[0] != xd[1])
                        close(xd[1]);
-               if (recv_sideband(me, xd[0], fd[1], 2, buf, sizeof(buf)))
+               if (recv_sideband(me, xd[0], fd[1], 2))
                        exit(1);
                exit(0);
        }
index e8708aa802b8e09d8044bb99dbccb0fecdb14481..474d54520eae356a8987349b1e36186a30914966 100644 (file)
@@ -166,12 +166,13 @@ static int find_common(int fd[2], unsigned char *result_sha1,
                }
 
                if (!fetching)
-                       packet_write(fd[1], "want %s%s%s%s%s\n",
+                       packet_write(fd[1], "want %s%s%s%s%s%s\n",
                                     sha1_to_hex(remote),
                                     (multi_ack ? " multi_ack" : ""),
                                     (use_sideband == 2 ? " side-band-64k" : ""),
                                     (use_sideband == 1 ? " side-band" : ""),
-                                    (use_thin_pack ? " thin-pack" : ""));
+                                    (use_thin_pack ? " thin-pack" : ""),
+                                    " ofs-delta");
                else
                        packet_write(fd[1], "want %s\n", sha1_to_hex(remote));
                fetching++;
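
The extra format directive makes the first want line also advertise ofs-delta, telling upload-pack that this client can accept offset-encoded delta bases. Schematically, the packet payload might read as follows (object name elided; the side-band variant depends on the negotiated options):

want <sha1> multi_ack side-band-64k thin-pack ofs-delta
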
index 06a8d26945a679b06438308ceb96c69cd76c43db..6da31e87a01af9b883958b3b0bae4304bfa94520 100755 (executable)
@@ -179,11 +179,12 @@ bisect_reset() {
         *)
            usage ;;
        esac
-       git checkout "$branch" &&
-       rm -fr "$GIT_DIR/refs/bisect"
-       rm -f "$GIT_DIR/refs/heads/bisect" "$GIT_DIR/head-name"
-       rm -f "$GIT_DIR/BISECT_LOG"
-       rm -f "$GIT_DIR/BISECT_NAMES"
+       if git checkout "$branch"; then
+               rm -fr "$GIT_DIR/refs/bisect"
+               rm -f "$GIT_DIR/refs/heads/bisect" "$GIT_DIR/head-name"
+               rm -f "$GIT_DIR/BISECT_LOG"
+               rm -f "$GIT_DIR/BISECT_NAMES"
+       fi
 }
 
 bisect_replay () {
index 4f31903d63662db7d49d3d257e328004ca2958f7..f823c788fd92cc2038a7c9700c5acf254cc0d5b6 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/sh
 
-USAGE='[-l] [(-d | -D) <branchname>] | [[-f] <branchname> [<start-point>]] | -r'
+USAGE='[-l] [-f] <branchname> [<start-point>] | (-d | -D) <branchname> | [-r]'
 LONG_USAGE='If no arguments, show available branches and mark current branch with a star.
 If one argument, create a new branch <branchname> based off of current HEAD.
 If two arguments, create a new branch <branchname> based off of <start-point>.'
index 3998c55cef3658eda18817d513a5bd5003c30e89..3f006d1a778636e5906ad9e8671adb6561fa0d98 100755 (executable)
@@ -8,11 +8,15 @@
 # See git-sh-setup why.
 unset CDPATH
 
-usage() {
-       echo >&2 "Usage: $0 [--template=<template_directory>] [--use-separate-remote] [--reference <reference-repo>] [--bare] [-l [-s]] [-q] [-u <upload-pack>] [--origin <name>] [-n] <repo> [<dir>]"
+die() {
+       echo >&2 "$@"
        exit 1
 }
 
+usage() {
+       die "Usage: $0 [--template=<template_directory>] [--use-separate-remote] [--reference <reference-repo>] [--bare] [-l [-s]] [-q] [-u <upload-pack>] [--origin <name>] [-n] <repo> [<dir>]"
+}
+
 get_repo_base() {
        (cd "$1" && (cd .git ; pwd)) 2> /dev/null
 }
@@ -35,11 +39,9 @@ clone_dumb_http () {
                "`git-repo-config --bool http.noEPSV`" = true ]; then
                curl_extra_args="${curl_extra_args} --disable-epsv"
        fi
-       http_fetch "$1/info/refs" "$clone_tmp/refs" || {
-               echo >&2 "Cannot get remote repository information.
+       http_fetch "$1/info/refs" "$clone_tmp/refs" ||
+               die "Cannot get remote repository information.
 Perhaps git-update-server-info needs to be run there?"
-               exit 1;
-       }
        while read sha1 refname
        do
                name=`expr "z$refname" : 'zrefs/\(.*\)'` &&
@@ -143,17 +145,12 @@ while
                '')
                    usage ;;
                */*)
-                   echo >&2 "'$2' is not suitable for an origin name"
-                   exit 1
+                   die "'$2' is not suitable for an origin name"
                esac
-               git-check-ref-format "heads/$2" || {
-                   echo >&2 "'$2' is not suitable for a branch name"
-                   exit 1
-               }
-               test -z "$origin_override" || {
-                   echo >&2 "Do not give more than one --origin options."
-                   exit 1
-               }
+               git-check-ref-format "heads/$2" ||
+                   die "'$2' is not suitable for a branch name"
+               test -z "$origin_override" ||
+                   die "Do not give more than one --origin options."
                origin_override=yes
                origin="$2"; shift
                ;;
@@ -169,24 +166,19 @@ do
 done
 
 repo="$1"
-if test -z "$repo"
-then
-    echo >&2 'you must specify a repository to clone.'
-    exit 1
-fi
+test -n "$repo" ||
+    die 'you must specify a repository to clone.'
 
 # --bare implies --no-checkout
 if test yes = "$bare"
 then
        if test yes = "$origin_override"
        then
-               echo >&2 '--bare and --origin $origin options are incompatible.'
-               exit 1
+               die '--bare and --origin $origin options are incompatible.'
        fi
        if test t = "$use_separate_remote"
        then
-               echo >&2 '--bare and --use-separate-remote options are incompatible.'
-               exit 1
+               die '--bare and --use-separate-remote options are incompatible.'
        fi
        no_checkout=yes
 fi
@@ -206,7 +198,7 @@ fi
 dir="$2"
 # Try using "humanish" part of source repo if user didn't specify one
 [ -z "$dir" ] && dir=$(echo "$repo" | sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
-[ -e "$dir" ] && echo "$dir already exists." && usage
+[ -e "$dir" ] && die "destination directory '$dir' already exists."
 mkdir -p "$dir" &&
 D=$(cd "$dir" && pwd) &&
 trap 'err=$?; cd ..; rm -rf "$D"; exit $err' 0
@@ -233,7 +225,7 @@ then
                 cd reference-tmp &&
                 tar xf -)
        else
-               echo >&2 "$reference: not a local directory." && usage
+               die "reference repository '$reference' is not a local directory."
        fi
 fi
 
@@ -242,10 +234,8 @@ rm -f "$GIT_DIR/CLONE_HEAD"
 # We do local magic only when the user tells us to.
 case "$local,$use_local" in
 yes,yes)
-       ( cd "$repo/objects" ) || {
-               echo >&2 "-l flag seen but $repo is not local."
-               exit 1
-       }
+       ( cd "$repo/objects" ) ||
+               die "-l flag seen but repository '$repo' is not local."
 
        case "$local_shared" in
        no)
@@ -307,18 +297,15 @@ yes,yes)
                then
                        clone_dumb_http "$repo" "$D"
                else
-                       echo >&2 "http transport not supported, rebuild Git with curl support"
-                       exit 1
+                       die "http transport not supported, rebuild Git with curl support"
                fi
                ;;
        *)
-               cd "$D" && case "$upload_pack" in
+               case "$upload_pack" in
                '') git-fetch-pack --all -k $quiet "$repo" ;;
                *) git-fetch-pack --all -k $quiet "$upload_pack" "$repo" ;;
-               esac >"$GIT_DIR/CLONE_HEAD" || {
-                       echo >&2 "fetch-pack from '$repo' failed."
-                       exit 1
-               }
+               esac >"$GIT_DIR/CLONE_HEAD" ||
+                       die "fetch-pack from '$repo' failed."
                ;;
        esac
        ;;
@@ -414,7 +401,8 @@ Pull: refs/heads/$head_points_at:$origin_track" &&
 
        case "$no_checkout" in
        '')
-               git-read-tree -m -u -v HEAD HEAD
+               test "z$quiet" = z && v=-v || v=
+               git-read-tree -m -u $v HEAD HEAD
        esac
 fi
 rm -f "$GIT_DIR/CLONE_HEAD" "$GIT_DIR/REMOTE_HEAD"
index 5a4c659b6fb5f0b23c6a10b0e3bd69893d9c380a..5b1cf85825b0c69f2515445842812a0caf48ef10 100755 (executable)
@@ -32,33 +32,6 @@ save_index () {
        cp -p "$THIS_INDEX" "$NEXT_INDEX"
 }
 
-report () {
-  header="#
-# $1:
-#   ($2)
-#
-"
-  trailer=""
-  while read status name newname
-  do
-    printf '%s' "$header"
-    header=""
-    trailer="#
-"
-    case "$status" in
-    M ) echo "#        modified: $name";;
-    D*) echo "#        deleted:  $name";;
-    T ) echo "#        typechange: $name";;
-    C*) echo "#        copied: $name -> $newname";;
-    R*) echo "#        renamed: $name -> $newname";;
-    A*) echo "#        new file: $name";;
-    U ) echo "#        unmerged: $name";;
-    esac
-  done
-  printf '%s' "$trailer"
-  [ "$header" ]
-}
-
 run_status () {
        # If TMP_INDEX is defined, that means we are doing
        # "--only" partial commit, and that index file is used
@@ -68,21 +41,21 @@ run_status () {
        # so the regular index file is what we use to compare.
        if test '' != "$TMP_INDEX"
        then
-           GIT_INDEX_FILE="$TMP_INDEX"
-           export GIT_INDEX_FILE
+               GIT_INDEX_FILE="$TMP_INDEX"
+               export GIT_INDEX_FILE
        elif test -f "$NEXT_INDEX"
        then
-           GIT_INDEX_FILE="$NEXT_INDEX"
-           export GIT_INDEX_FILE
+               GIT_INDEX_FILE="$NEXT_INDEX"
+               export GIT_INDEX_FILE
        fi
 
-  case "$status_only" in
-    t) color= ;;
-    *) color=--nocolor ;;
-  esac
-  git-runstatus ${color} \
-                ${verbose:+--verbose} \
-                ${amend:+--amend} \
+       case "$status_only" in
+       t) color= ;;
+       *) color=--nocolor ;;
+       esac
+       git-runstatus ${color} \
+               ${verbose:+--verbose} \
+               ${amend:+--amend} \
                ${untracked_files:+--untracked}
 }
 
@@ -114,179 +87,181 @@ only_include_assumed=
 untracked_files=
 while case "$#" in 0) break;; esac
 do
-  case "$1" in
-  -F|--F|-f|--f|--fi|--fil|--file)
-      case "$#" in 1) usage ;; esac
-      shift
-      no_edit=t
-      log_given=t$log_given
-      logfile="$1"
-      shift
-      ;;
-  -F*|-f*)
-      no_edit=t
-      log_given=t$log_given
-      logfile=`expr "z$1" : 'z-[Ff]\(.*\)'`
-      shift
-      ;;
-  --F=*|--f=*|--fi=*|--fil=*|--file=*)
-      no_edit=t
-      log_given=t$log_given
-      logfile=`expr "z$1" : 'z-[^=]*=\(.*\)'`
-      shift
-      ;;
-  -a|--a|--al|--all)
-      all=t
-      shift
-      ;;
-  --au=*|--aut=*|--auth=*|--autho=*|--author=*)
-      force_author=`expr "z$1" : 'z-[^=]*=\(.*\)'`
-      shift
-      ;;
-  --au|--aut|--auth|--autho|--author)
-      case "$#" in 1) usage ;; esac
-      shift
-      force_author="$1"
-      shift
-      ;;
-  -e|--e|--ed|--edi|--edit)
-      edit_flag=t
-      shift
-      ;;
-  -i|--i|--in|--inc|--incl|--inclu|--includ|--include)
-      also=t
-      shift
-      ;;
-  -o|--o|--on|--onl|--only)
-      only=t
-      shift
-      ;;
-  -m|--m|--me|--mes|--mess|--messa|--messag|--message)
-      case "$#" in 1) usage ;; esac
-      shift
-      log_given=m$log_given
-      if test "$log_message" = ''
-      then
-          log_message="$1"
-      else
-          log_message="$log_message
+       case "$1" in
+       -F|--F|-f|--f|--fi|--fil|--file)
+               case "$#" in 1) usage ;; esac
+               shift
+               no_edit=t
+               log_given=t$log_given
+               logfile="$1"
+               shift
+               ;;
+       -F*|-f*)
+               no_edit=t
+               log_given=t$log_given
+               logfile=`expr "z$1" : 'z-[Ff]\(.*\)'`
+               shift
+               ;;
+       --F=*|--f=*|--fi=*|--fil=*|--file=*)
+               no_edit=t
+               log_given=t$log_given
+               logfile=`expr "z$1" : 'z-[^=]*=\(.*\)'`
+               shift
+               ;;
+       -a|--a|--al|--all)
+               all=t
+               shift
+               ;;
+       --au=*|--aut=*|--auth=*|--autho=*|--author=*)
+               force_author=`expr "z$1" : 'z-[^=]*=\(.*\)'`
+               shift
+               ;;
+       --au|--aut|--auth|--autho|--author)
+               case "$#" in 1) usage ;; esac
+               shift
+               force_author="$1"
+               shift
+               ;;
+       -e|--e|--ed|--edi|--edit)
+               edit_flag=t
+               shift
+               ;;
+       -i|--i|--in|--inc|--incl|--inclu|--includ|--include)
+               also=t
+               shift
+               ;;
+       -o|--o|--on|--onl|--only)
+               only=t
+               shift
+               ;;
+       -m|--m|--me|--mes|--mess|--messa|--messag|--message)
+               case "$#" in 1) usage ;; esac
+               shift
+               log_given=m$log_given
+               if test "$log_message" = ''
+               then
+                   log_message="$1"
+               else
+                   log_message="$log_message
 
 $1"
-      fi
-      no_edit=t
-      shift
-      ;;
-  -m*)
-      log_given=m$log_given
-      if test "$log_message" = ''
-      then
-          log_message=`expr "z$1" : 'z-m\(.*\)'`
-      else
-          log_message="$log_message
+               fi
+               no_edit=t
+               shift
+               ;;
+       -m*)
+               log_given=m$log_given
+               if test "$log_message" = ''
+               then
+                   log_message=`expr "z$1" : 'z-m\(.*\)'`
+               else
+                   log_message="$log_message
 
 `expr "z$1" : 'z-m\(.*\)'`"
-      fi
-      no_edit=t
-      shift
-      ;;
-  --m=*|--me=*|--mes=*|--mess=*|--messa=*|--messag=*|--message=*)
-      log_given=m$log_given
-      if test "$log_message" = ''
-      then
-          log_message=`expr "z$1" : 'z-[^=]*=\(.*\)'`
-      else
-          log_message="$log_message
+               fi
+               no_edit=t
+               shift
+               ;;
+       --m=*|--me=*|--mes=*|--mess=*|--messa=*|--messag=*|--message=*)
+               log_given=m$log_given
+               if test "$log_message" = ''
+               then
+                   log_message=`expr "z$1" : 'z-[^=]*=\(.*\)'`
+               else
+                   log_message="$log_message
 
 `expr "z$1" : 'zq-[^=]*=\(.*\)'`"
-      fi
-      no_edit=t
-      shift
-      ;;
-  -n|--n|--no|--no-|--no-v|--no-ve|--no-ver|--no-veri|--no-verif|--no-verify)
-      verify=
-      shift
-      ;;
-  --a|--am|--ame|--amen|--amend)
-      amend=t
-      log_given=t$log_given
-      use_commit=HEAD
-      shift
-      ;;
-  -c)
-      case "$#" in 1) usage ;; esac
-      shift
-      log_given=t$log_given
-      use_commit="$1"
-      no_edit=
-      shift
-      ;;
-  --ree=*|--reed=*|--reedi=*|--reedit=*|--reedit-=*|--reedit-m=*|\
-  --reedit-me=*|--reedit-mes=*|--reedit-mess=*|--reedit-messa=*|\
-  --reedit-messag=*|--reedit-message=*)
-      log_given=t$log_given
-      use_commit=`expr "z$1" : 'z-[^=]*=\(.*\)'`
-      no_edit=
-      shift
-      ;;
-  --ree|--reed|--reedi|--reedit|--reedit-|--reedit-m|--reedit-me|\
-  --reedit-mes|--reedit-mess|--reedit-messa|--reedit-messag|--reedit-message)
-      case "$#" in 1) usage ;; esac
-      shift
-      log_given=t$log_given
-      use_commit="$1"
-      no_edit=
-      shift
-      ;;
-  -C)
-      case "$#" in 1) usage ;; esac
-      shift
-      log_given=t$log_given
-      use_commit="$1"
-      no_edit=t
-      shift
-      ;;
-  --reu=*|--reus=*|--reuse=*|--reuse-=*|--reuse-m=*|--reuse-me=*|\
-  --reuse-mes=*|--reuse-mess=*|--reuse-messa=*|--reuse-messag=*|\
-  --reuse-message=*)
-      log_given=t$log_given
-      use_commit=`expr "z$1" : 'z-[^=]*=\(.*\)'`
-      no_edit=t
-      shift
-      ;;
-  --reu|--reus|--reuse|--reuse-|--reuse-m|--reuse-me|--reuse-mes|\
-  --reuse-mess|--reuse-messa|--reuse-messag|--reuse-message)
-      case "$#" in 1) usage ;; esac
-      shift
-      log_given=t$log_given
-      use_commit="$1"
-      no_edit=t
-      shift
-      ;;
-  -s|--s|--si|--sig|--sign|--signo|--signof|--signoff)
-      signoff=t
-      shift
-      ;;
-  -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
-      verbose=t
-      shift
-      ;;
-  -u|--u|--un|--unt|--untr|--untra|--untrac|--untrack|--untracke|--untracked|\
-  --untracked-|--untracked-f|--untracked-fi|--untracked-fil|--untracked-file|\
-  --untracked-files)
-      untracked_files=t
-      shift
-      ;;
-  --)
-      shift
-      break
-      ;;
-  -*)
-      usage
-      ;;
-  *)
-      break
-      ;;
-  esac
+               fi
+               no_edit=t
+               shift
+               ;;
+       -n|--n|--no|--no-|--no-v|--no-ve|--no-ver|--no-veri|--no-verif|\
+       --no-verify)
+               verify=
+               shift
+               ;;
+       --a|--am|--ame|--amen|--amend)
+               amend=t
+               log_given=t$log_given
+               use_commit=HEAD
+               shift
+               ;;
+       -c)
+               case "$#" in 1) usage ;; esac
+               shift
+               log_given=t$log_given
+               use_commit="$1"
+               no_edit=
+               shift
+               ;;
+       --ree=*|--reed=*|--reedi=*|--reedit=*|--reedit-=*|--reedit-m=*|\
+       --reedit-me=*|--reedit-mes=*|--reedit-mess=*|--reedit-messa=*|\
+       --reedit-messag=*|--reedit-message=*)
+               log_given=t$log_given
+               use_commit=`expr "z$1" : 'z-[^=]*=\(.*\)'`
+               no_edit=
+               shift
+               ;;
+       --ree|--reed|--reedi|--reedit|--reedit-|--reedit-m|--reedit-me|\
+       --reedit-mes|--reedit-mess|--reedit-messa|--reedit-messag|\
+       --reedit-message)
+               case "$#" in 1) usage ;; esac
+               shift
+               log_given=t$log_given
+               use_commit="$1"
+               no_edit=
+               shift
+               ;;
+       -C)
+               case "$#" in 1) usage ;; esac
+               shift
+               log_given=t$log_given
+               use_commit="$1"
+               no_edit=t
+               shift
+               ;;
+       --reu=*|--reus=*|--reuse=*|--reuse-=*|--reuse-m=*|--reuse-me=*|\
+       --reuse-mes=*|--reuse-mess=*|--reuse-messa=*|--reuse-messag=*|\
+       --reuse-message=*)
+               log_given=t$log_given
+               use_commit=`expr "z$1" : 'z-[^=]*=\(.*\)'`
+               no_edit=t
+               shift
+               ;;
+       --reu|--reus|--reuse|--reuse-|--reuse-m|--reuse-me|--reuse-mes|\
+       --reuse-mess|--reuse-messa|--reuse-messag|--reuse-message)
+               case "$#" in 1) usage ;; esac
+               shift
+               log_given=t$log_given
+               use_commit="$1"
+               no_edit=t
+               shift
+               ;;
+       -s|--s|--si|--sig|--sign|--signo|--signof|--signoff)
+               signoff=t
+               shift
+               ;;
+       -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
+               verbose=t
+               shift
+               ;;
+       -u|--u|--un|--unt|--untr|--untra|--untrac|--untrack|--untracke|\
+       --untracked|--untracked-|--untracked-f|--untracked-fi|--untracked-fil|\
+       --untracked-file|--untracked-files)
+               untracked_files=t
+               shift
+               ;;
+       --)
+               shift
+               break
+               ;;
+       -*)
+               usage
+               ;;
+       *)
+               break
+               ;;
+       esac
 done
 case "$edit_flag" in t) no_edit= ;; esac
 
@@ -295,33 +270,33 @@ case "$edit_flag" in t) no_edit= ;; esac
 
 case "$amend,$initial_commit" in
 t,t)
-  die "You do not have anything to amend." ;;
+       die "You do not have anything to amend." ;;
 t,)
-  if [ -f "$GIT_DIR/MERGE_HEAD" ]; then
-    die "You are in the middle of a merge -- cannot amend."
-  fi ;;
+       if [ -f "$GIT_DIR/MERGE_HEAD" ]; then
+               die "You are in the middle of a merge -- cannot amend."
+       fi ;;
 esac
 
 case "$log_given" in
 tt*)
-  die "Only one of -c/-C/-F can be used." ;;
+       die "Only one of -c/-C/-F can be used." ;;
 *tm*|*mt*)
-  die "Option -m cannot be combined with -c/-C/-F." ;;
+       die "Option -m cannot be combined with -c/-C/-F." ;;
 esac
 
 case "$#,$also,$only,$amend" in
 *,t,t,*)
-  die "Only one of --include/--only can be used." ;;
+       die "Only one of --include/--only can be used." ;;
 0,t,,* | 0,,t,)
-  die "No paths with --include/--only does not make sense." ;;
+       die "No paths with --include/--only does not make sense." ;;
 0,,t,t)
-  only_include_assumed="# Clever... amending the last one with dirty index." ;;
+       only_include_assumed="# Clever... amending the last one with dirty index." ;;
 0,,,*)
-  ;;
+       ;;
 *,,,*)
-  only_include_assumed="# Explicit paths specified without -i nor -o; assuming --only paths..."
-  also=
-  ;;
+       only_include_assumed="# Explicit paths specified without -i nor -o; assuming --only paths..."
+       also=
+       ;;
 esac
 unset only
 case "$all,$also,$#" in
@@ -368,47 +343,47 @@ t,)
 ,)
        case "$#" in
        0)
-           ;; # commit as-is
+               ;; # commit as-is
        *)
-           if test -f "$GIT_DIR/MERGE_HEAD"
-           then
-               refuse_partial "Cannot do a partial commit during a merge."
-           fi
-           TMP_INDEX="$GIT_DIR/tmp-index$$"
-           if test -z "$initial_commit"
-           then
-               # make sure index is clean at the specified paths, or
-               # they are additions.
-               dirty_in_index=`git-diff-index --cached --name-status \
-                       --diff-filter=DMTU HEAD -- "$@"`
-               test -z "$dirty_in_index" ||
-               refuse_partial "Different in index and the last commit:
+               if test -f "$GIT_DIR/MERGE_HEAD"
+               then
+                       refuse_partial "Cannot do a partial commit during a merge."
+               fi
+               TMP_INDEX="$GIT_DIR/tmp-index$$"
+               if test -z "$initial_commit"
+               then
+                       # make sure index is clean at the specified paths, or
+                       # they are additions.
+                       dirty_in_index=`git-diff-index --cached --name-status \
+                               --diff-filter=DMTU HEAD -- "$@"`
+                       test -z "$dirty_in_index" ||
+                       refuse_partial "Different in index and the last commit:
 $dirty_in_index"
-           fi
-           commit_only=`git-ls-files --error-unmatch -- "$@"` || exit
-
-           # Build the temporary index and update the real index
-           # the same way.
-           if test -z "$initial_commit"
-           then
-               cp "$THIS_INDEX" "$TMP_INDEX"
-               GIT_INDEX_FILE="$TMP_INDEX" git-read-tree -m HEAD
-           else
-                   rm -f "$TMP_INDEX"
-           fi || exit
-
-           echo "$commit_only" |
-           GIT_INDEX_FILE="$TMP_INDEX" \
-           git-update-index --add --remove --stdin &&
-
-           save_index &&
-           echo "$commit_only" |
-           (
-               GIT_INDEX_FILE="$NEXT_INDEX"
-               export GIT_INDEX_FILE
-               git-update-index --remove --stdin
-           ) || exit
-           ;;
+               fi
+               commit_only=`git-ls-files --error-unmatch -- "$@"` || exit
+
+               # Build the temporary index and update the real index
+               # the same way.
+               if test -z "$initial_commit"
+               then
+                       cp "$THIS_INDEX" "$TMP_INDEX"
+                       GIT_INDEX_FILE="$TMP_INDEX" git-read-tree -m HEAD
+               else
+                       rm -f "$TMP_INDEX"
+               fi || exit
+
+               echo "$commit_only" |
+               GIT_INDEX_FILE="$TMP_INDEX" \
+               git-update-index --add --remove --stdin &&
+
+               save_index &&
+               echo "$commit_only" |
+               (
+                       GIT_INDEX_FILE="$NEXT_INDEX"
+                       export GIT_INDEX_FILE
+                       git-update-index --remove --stdin
+               ) || exit
+               ;;
        esac
        ;;
 esac
@@ -426,7 +401,7 @@ else
 fi
 
 GIT_INDEX_FILE="$USE_INDEX" \
-    git-update-index -q $unmerged_ok_if_status --refresh || exit
+       git-update-index -q $unmerged_ok_if_status --refresh || exit
 
 ################################################################
 # If the request is status, just show it and exit.
index 2130d57020de2dae570b7be0d9a516389bd1829f..8817f8bb4f7c6b69d186c70d83cfb49b1fcea80e 100755 (executable)
@@ -275,7 +275,7 @@ sub req_Directory
     $state->{directory} = "" if ( $state->{directory} eq "." );
     $state->{directory} .= "/" if ( $state->{directory} =~ /\S/ );
 
-    if ( not defined($state->{prependdir}) and $state->{localdir} eq "." and $state->{path} =~ /\S/ )
+    if ( (not defined($state->{prependdir}) or $state->{prependdir} eq '') and $state->{localdir} eq "." and $state->{path} =~ /\S/ )
     {
         $log->info("Setting prepend to '$state->{path}'");
         $state->{prependdir} = $state->{path};
@@ -805,7 +805,14 @@ sub req_update
             $meta = $updater->getmeta($filename);
         }
 
-        next unless ( $meta->{revision} );
+       if ( ! defined $meta )
+       {
+           $meta = {
+               name => $filename,
+               revision => 0,
+               filehash => 'added'
+           };
+       }
 
         my $oldmeta = $meta;
 
@@ -835,7 +842,7 @@ sub req_update
              and not exists ( $state->{opt}{C} ) )
         {
             $log->info("Tell the client the file is modified");
-            print "MT text U\n";
+            print "MT text \n";
             print "MT fname $filename\n";
             print "MT newline\n";
             next;
@@ -855,15 +862,36 @@ sub req_update
            }
         }
         elsif ( not defined ( $state->{entries}{$filename}{modified_hash} )
-               or $state->{entries}{$filename}{modified_hash} eq $oldmeta->{filehash} )
+               or $state->{entries}{$filename}{modified_hash} eq $oldmeta->{filehash}
+               or $meta->{filehash} eq 'added' )
         {
-            $log->info("Updating '$filename'");
-            # normal update, just send the new revision (either U=Update, or A=Add, or R=Remove)
-            print "MT +updated\n";
-            print "MT text U\n";
-            print "MT fname $filename\n";
-            print "MT newline\n";
-            print "MT -updated\n";
+            # normal update, just send the new revision (either U=Update,
+            # or A=Add, or R=Remove)
+           if ( defined($wrev) && $wrev < 0 )
+           {
+               $log->info("Tell the client the file is scheduled for removal");
+               print "MT text R \n";
+                print "MT fname $filename\n";
+                print "MT newline\n";
+               next;
+           }
+           elsif ( !defined($wrev) || $wrev == 0 )
+           {
+               $log->info("Tell the client the file will be added");
+               print "MT text A \n";
+                print "MT fname $filename\n";
+                print "MT newline\n";
+               next;
+
+           }
+           else {
+                $log->info("Updating '$filename' $wrev");
+                print "MT +updated\n";
+                print "MT text U \n";
+                print "MT fname $filename\n";
+                print "MT newline\n";
+               print "MT -updated\n";
+           }
 
             my ( $filepart, $dirpart ) = filenamesplit($filename,1);
 
@@ -1709,6 +1737,17 @@ sub argsfromdir
 
     return if ( scalar ( @{$state->{args}} ) > 1 );
 
+    my @gethead = @{$updater->gethead};
+
+    # push added files
+    foreach my $file (keys %{$state->{entries}}) {
+       if ( exists $state->{entries}{$file}{revision} &&
+               $state->{entries}{$file}{revision} == 0 )
+       {
+           push @gethead, { name => $file, filehash => 'added' };
+       }
+    }
+
     if ( scalar(@{$state->{args}}) == 1 )
     {
         my $arg = $state->{args}[0];
@@ -1716,7 +1755,7 @@ sub argsfromdir
 
         $log->info("Only one arg specified, checking for directory expansion on '$arg'");
 
-        foreach my $file ( @{$updater->gethead} )
+        foreach my $file ( @gethead )
         {
             next if ( $file->{filehash} eq "deleted" and not defined ( $state->{entries}{$file->{name}} ) );
             next unless ( $file->{name} =~ /^$arg\// or $file->{name} eq $arg  );
@@ -1729,7 +1768,7 @@ sub argsfromdir
 
         $state->{args} = [];
 
-        foreach my $file ( @{$updater->gethead} )
+        foreach my $file ( @gethead )
         {
             next if ( $file->{filehash} eq "deleted" and not defined ( $state->{entries}{$file->{name}} ) );
             next unless ( $file->{name} =~ s/^$state->{prependdir}// );
@@ -2079,9 +2118,17 @@ sub new
                 mode       TEXT NOT NULL
             )
         ");
+        $self->{dbh}->do("
+            CREATE INDEX revision_ix1
+            ON revision (name,revision)
+        ");
+        $self->{dbh}->do("
+            CREATE INDEX revision_ix2
+            ON revision (name,commithash)
+        ");
     }
 
-    # Construct the revision table if required
+    # Construct the head table if required
     unless ( $self->{tables}{head} )
     {
         $self->{dbh}->do("
@@ -2095,6 +2142,10 @@ sub new
                 mode       TEXT NOT NULL
             )
         ");
+        $self->{dbh}->do("
+            CREATE INDEX head_ix1
+            ON head (name)
+        ");
     }
 
     # Construct the properties table if required
index f1522bd49a2fc1c3106c6b2ed0a38af0a9ff72de..b15fc2b389f35e2b30b17eada4bcc711a12d2a11 100755 (executable)
@@ -129,22 +129,25 @@ append_fetch_head () {
     then
        headc_=$(git-rev-parse --verify "$head_^0") || exit
        echo "$headc_   $not_for_merge_ $note_" >>"$GIT_DIR/FETCH_HEAD"
-       [ "$verbose" ] && echo >&2 "* committish: $head_"
-       [ "$verbose" ] && echo >&2 "  $note_"
     else
        echo "$head_    not-for-merge   $note_" >>"$GIT_DIR/FETCH_HEAD"
-       [ "$verbose" ] && echo >&2 "* non-commit: $head_"
-       [ "$verbose" ] && echo >&2 "  $note_"
-    fi
-    if test "$local_name_" != ""
-    then
-       # We are storing the head locally.  Make sure that it is
-       # a fast forward (aka "reverse push").
-       fast_forward_local "$local_name_" "$head_" "$note_"
     fi
+
+    update_local_ref "$local_name_" "$head_" "$note_"
 }
 
-fast_forward_local () {
+update_local_ref () {
+    # If we are storing the head locally make sure that it is
+    # a fast forward (aka "reverse push").
+
+    label_=$(git-cat-file -t $2)
+    newshort_=$(git-rev-parse --short $2)
+    if test -z "$1" ; then
+       [ "$verbose" ] && echo >&2 "* fetched $3"
+       [ "$verbose" ] && echo >&2 "  $label_: $newshort_"
+       return 0
+    fi
+    oldshort_=$(git-rev-parse --short "$1" 2>/dev/null)
     mkdir -p "$(dirname "$GIT_DIR/$1")"
     case "$1" in
     refs/tags/*)
@@ -154,13 +157,16 @@ fast_forward_local () {
        then
                if now_=$(cat "$GIT_DIR/$1") && test "$now_" = "$2"
                then
-                       [ "$verbose" ] && echo >&2 "* $1: same as $3" ||:
+                       [ "$verbose" ] && echo >&2 "* $1: same as $3"
+                       [ "$verbose" ] && echo >&2 "  $label_: $newshort_" ||:
                else
                        echo >&2 "* $1: updating with $3"
+                       echo >&2 "  $label_: $newshort_"
                        git-update-ref -m "$rloga: updating tag" "$1" "$2"
                fi
        else
                echo >&2 "* $1: storing $3"
+               echo >&2 "  $label_: $newshort_"
                git-update-ref -m "$rloga: storing tag" "$1" "$2"
        fi
        ;;
@@ -178,31 +184,34 @@ fast_forward_local () {
                if test -n "$verbose"
                then
                        echo >&2 "* $1: same as $3"
+                       echo >&2 "  $label_: $newshort_"
                fi
                ;;
            *,$local)
                echo >&2 "* $1: fast forward to $3"
-               echo >&2 "  from $local to $2"
+               echo >&2 "  old..new: $oldshort_..$newshort_"
                git-update-ref -m "$rloga: fast-forward" "$1" "$2" "$local"
                ;;
            *)
                false
                ;;
            esac || {
-               echo >&2 "* $1: does not fast forward to $3;"
                case ",$force,$single_force," in
                *,t,*)
-                       echo >&2 "  forcing update."
+                       echo >&2 "* $1: forcing update to non-fast forward $3"
+                       echo >&2 "  old...new: $oldshort_...$newshort_"
                        git-update-ref -m "$rloga: forced-update" "$1" "$2" "$local"
                        ;;
                *)
-                       echo >&2 "  not updating."
+                       echo >&2 "* $1: not updating to non-fast forward $3"
+                       echo >&2 "  old...new: $oldshort_...$newshort_"
                        exit 1
                        ;;
                esac
            }
        else
            echo >&2 "* $1: storing $3"
+           echo >&2 "  $label_: $newshort_"
            git-update-ref -m "$rloga: storing head" "$1" "$2"
        fi
        ;;
@@ -436,10 +445,10 @@ esac
 
 # If the original head was empty (i.e. no "master" yet), or
 # if we were told not to worry, we do not have to check.
-case ",$update_head_ok,$orig_head," in
-*,, | t,* )
+case "$orig_head" in
+'')
        ;;
-*)
+?*)
        curr_head=$(git-rev-parse --verify HEAD 2>/dev/null)
        if test "$curr_head" != "$orig_head"
        then
index 5b34b4de99c33a99dfb841795baced8889f74a88..cb094388bb5c830d37a790f5e7bbf13e8c61352a 100755 (executable)
@@ -93,6 +93,8 @@ finish () {
        esac
 }
 
+case "$#" in 0) usage ;; esac
+
 rloga=
 while case "$#" in 0) break ;; esac
 do
@@ -197,7 +199,7 @@ f,*)
        ;;
 ?,1,"$head",*)
        # Again the most common case of merging one remote.
-       echo "Updating from $head to $1"
+       echo "Updating $(git-rev-parse --short $head)..$(git-rev-parse --short $1)"
        git-update-index --refresh 2>/dev/null
        new_head=$(git-rev-parse --verify "$1^0") &&
        git-read-tree -u -v -m $head "$new_head" &&
index f380437997f053d15a177a941f3b8a1543c56a18..ed04e7d8d8753cc2cdc7d2737228cb58934534f4 100755 (executable)
@@ -58,7 +58,7 @@ then
 
        echo >&2 "Warning: fetch updated the current branch head."
        echo >&2 "Warning: fast forwarding your working tree from"
-       echo >&2 "Warning: $orig_head commit."
+       echo >&2 "Warning: commit $orig_head."
        git-update-index --refresh 2>/dev/null
        git-read-tree -u -m "$orig_head" "$curr_head" ||
                die 'Cannot fast-forward your working tree.
index a7373c0532fad447263e7199d0b8ec2908c683c9..546fa446fc3c6c63488b9ef38cb5bdc960953cb1 100755 (executable)
@@ -3,7 +3,7 @@
 # Copyright (c) 2005 Junio C Hamano.
 #
 
-USAGE='[--onto <newbase>] <upstream> [<branch>]'
+USAGE='[-v] [--onto <newbase>] <upstream> [<branch>]'
 LONG_USAGE='git-rebase replaces <branch> with a new branch of the
 same name.  When the --onto option is provided the new branch starts
 out with a HEAD equal to <newbase>, otherwise it is equal to <upstream>
@@ -39,6 +39,7 @@ strategy=recursive
 do_merge=
 dotest=$GIT_DIR/.dotest-merge
 prec=4
+verbose=
 
 continue_merge () {
        test -n "$prev_head" || die "prev_head must be defined"
@@ -190,6 +191,9 @@ do
                esac
                do_merge=t
                ;;
+       -v|--verbose)
+               verbose=t
+               ;;
        -*)
                usage
                ;;
@@ -273,6 +277,12 @@ then
        exit 0
 fi
 
+if test -n "$verbose"
+then
+       echo "Changes from $mb to $onto:"
+       git-diff-tree --stat --summary "$mb" "$onto"
+fi
+
 # Rewind the head to "$onto"; this saves our current head in ORIG_HEAD.
 git-reset --hard "$onto"
 
@@ -286,7 +296,7 @@ fi
 
 if test -z "$do_merge"
 then
-       git-format-patch -k --stdout --full-index "$upstream"..ORIG_HEAD |
+       git-format-patch -k --stdout --full-index --ignore-if-in-upstream "$upstream"..ORIG_HEAD |
        git am --binary -3 -k --resolvemsg="$RESOLVEMSG" \
                --reflog-action=rebase
        exit $?
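
A minimal usage sketch of the new -v option (the "origin" branch name is
only an assumption):

    $ git rebase -v origin

With -v, git-rebase first prints "Changes from <mergebase> to <onto>:"
followed by a git-diff-tree --stat --summary of that range, then rewinds
and replays the commits as before.
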
index f2c9071d1109e014832f0efd8a1fd67dca44c8af..17e24526c279467891389295a55f8f257ca0d01b 100755 (executable)
@@ -3,7 +3,7 @@
 # Copyright (c) 2005 Linus Torvalds
 #
 
-USAGE='[-a] [-d] [-f] [-l] [-n] [-q]'
+USAGE='[-a] [-d] [-f] [-l] [-n] [-q] [--window=N] [--depth=N]'
 SUBDIRECTORY_OK='Yes'
 . git-sh-setup
 
@@ -25,6 +25,15 @@ do
        shift
 done
 
+# Later we will default repack.UseDeltaBaseOffset to true
+default_dbo=false
+
+case "`git repo-config --bool repack.usedeltabaseoffset ||
+       echo $default_dbo`" in
+true)
+       extra="$extra --delta-base-offset" ;;
+esac
+
 PACKDIR="$GIT_OBJECT_DIRECTORY/pack"
 PACKTMP="$GIT_DIR/.tmp-$$-pack"
 rm -f "$PACKTMP"-*
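
A short sketch of enabling the repack.usedeltabaseoffset knob read above
(per repository; the shipped default stays false for now):

    $ git repo-config repack.usedeltabaseoffset true
    $ git repack -a -d

When the setting is true, git-repack adds --delta-base-offset to its
git-pack-objects invocation; packs built that way cannot be read by older
versions of git, which is why the default is still false.
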
index 729ec65dc9e0ddebfe81fb4d837a5f9c83b537ee..36b90e38494eb79b4859cf59301b5a4e6ccccea1 100755 (executable)
@@ -46,7 +46,7 @@ case "$common" in
        exit 0
        ;;
 "$head")
-       echo "Updating from $head to $merge"
+       echo "Updating $(git-rev-parse --short $head)..$(git-rev-parse --short $merge)"
        git-read-tree -u -m $head $merge || exit 1
        git-update-ref -m "resolve $merge_name: Fast forward" \
                HEAD "$merge" "$head"
index 2bf35d116c2141a1750a0ca0bd8f7297e0c237b6..4fd81b6ed60e5877df85c1052057b0ad5410f4fe 100755 (executable)
@@ -7,18 +7,20 @@
 case "$0" in
 *-revert* )
        test -t 0 && edit=-e
+       replay=
        me=revert
        USAGE='[--edit | --no-edit] [-n] <commit-ish>' ;;
 *-cherry-pick* )
+       replay=t
        edit=
        me=cherry-pick
-       USAGE='[--edit] [-n] [-r] <commit-ish>'  ;;
+       USAGE='[--edit] [-n] [-r] [-x] <commit-ish>'  ;;
 * )
        die "What are you talking about?" ;;
 esac
 . git-sh-setup
 
-no_commit= replay=
+no_commit=
 while case "$#" in 0) break ;; esac
 do
        case "$1" in
@@ -32,8 +34,10 @@ do
        --n|--no|--no-|--no-e|--no-ed|--no-edi|--no-edit)
                edit=
                ;;
-       -r|--r|--re|--rep|--repl|--repla|--replay)
-               replay=t
+       -r)
+               : no-op ;;
+       -x|--i-really-want-to-expose-my-private-commit-object-name)
+               replay=
                ;;
        -*)
                usage
@@ -121,7 +125,7 @@ cherry-pick)
        git-cat-file commit $commit | sed -e '1,/^$/d'
        case "$replay" in
        '')
-               echo "(cherry picked from $commit commit)"
+               echo "(cherry picked from commit $commit)"
                test "$rev" = "$commit" ||
                echo "(original 'git cherry-pick' arguments: $@)"
                ;;
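
A usage sketch of the new -x switch (the commit name is a placeholder):

    $ git cherry-pick -x <commit>

With -x the resulting message gains a "(cherry picked from commit <sha1>)"
line; a plain git-cherry-pick no longer appends it, and -r is now accepted
only as a no-op.
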
index 4a20310841b69280a21ac4c79d95ce9ce3ea7df4..c42dc3bc943edb2e8f5d9e11a309f6e6d86848ab 100755 (executable)
@@ -83,11 +83,12 @@ sub format_2822_time {
 my $compose_filename = ".msg.$$";
 
 # Variables we fill in automatically, or via prompting:
-my (@to,@cc,@initial_cc,@bcclist,
+my (@to,@cc,@initial_cc,@bcclist,@xh,
        $initial_reply_to,$initial_subject,@files,$from,$compose,$time);
 
 # Behavior modification variables
-my ($chain_reply_to, $quiet, $suppress_from, $no_signed_off_cc) = (1, 0, 0, 0);
+my ($chain_reply_to, $quiet, $suppress_from, $no_signed_off_cc,
+       $dry_run) = (1, 0, 0, 0, 0);
 my $smtp_server;
 
 # Example reply to:
@@ -116,6 +117,7 @@ sub format_2822_time {
                    "quiet" => \$quiet,
                    "suppress-from" => \$suppress_from,
                    "no-signed-off-cc|no-signed-off-by-cc" => \$no_signed_off_cc,
+                   "dry-run" => \$dry_run,
         );
 
 # Verify the user input
@@ -409,6 +411,11 @@ sub send_message
            $gitversion = Git::version();
        }
 
+       my ($author_name) = ($from =~ /^(.*?)\s+</);
+       if ($author_name && $author_name =~ /\./ && $author_name !~ /^".*"$/) {
+               my ($name, $addr) = ($from =~ /^(.*?)(\s+<.*)/);
+               $from = "\"$name\"$addr";
+       }
        my $header = "From: $from
 To: $to
 Cc: $cc
@@ -422,8 +429,13 @@ sub send_message
                $header .= "In-Reply-To: $reply_to\n";
                $header .= "References: $references\n";
        }
+       if (@xh) {
+               $header .= join("\n", @xh) . "\n";
+       }
 
-       if ($smtp_server =~ m#^/#) {
+       if ($dry_run) {
+               # We don't want to send the email.
+       } elsif ($smtp_server =~ m#^/#) {
                my $pid = open my $sm, '|-';
                defined $pid or die $!;
                if (!$pid) {
@@ -472,15 +484,22 @@ sub send_message
 
        my $author_not_sender = undef;
        @cc = @initial_cc;
-       my $found_mbox = 0;
+       @xh = ();
+       my $input_format = undef;
        my $header_done = 0;
        $message = "";
        while(<F>) {
                if (!$header_done) {
-                       $found_mbox = 1, next if (/^From /);
+                       if (/^From /) {
+                               $input_format = 'mbox';
+                               next;
+                       }
                        chomp;
+                       if (!defined $input_format && /^[-A-Za-z]+:\s/) {
+                               $input_format = 'mbox';
+                       }
 
-                       if ($found_mbox) {
+                       if (defined $input_format && $input_format eq 'mbox') {
                                if (/^Subject:\s+(.*)$/) {
                                        $subject = $1;
 
@@ -495,6 +514,9 @@ sub send_message
                                                $2, $_) unless $quiet;
                                        push @cc, $2;
                                }
+                               elsif (!/^Date:\s/ && /^[-A-Za-z]+:\s+\S/) {
+                                       push @xh, $_;
+                               }
 
                        } else {
                                # In the traditional
@@ -502,6 +524,7 @@ sub send_message
                                # line 1 = cc
                                # line 2 = subject
                                # So let's support that, too.
+                               $input_format = 'lots';
                                if (@cc == 0) {
                                        printf("(non-mbox) Adding cc: %s from line '%s'\n",
                                                $_, $_) unless $quiet;
@@ -538,7 +561,7 @@ sub send_message
        send_message();
 
        # set up for the next message
-       if ($chain_reply_to || length($reply_to) == 0) {
+       if ($chain_reply_to || !defined $reply_to || length($reply_to) == 0) {
                $reply_to = $message_id;
                if (length $references > 0) {
                        $references .= " $message_id";
index 0b14f833ee97a0e5b589098914e52c49b0be50d0..334fec7477e6e4af646808c54ad7af1357fc5f8b 100755 (executable)
@@ -1,6 +1,18 @@
 #!/usr/bin/perl -w
 
 use strict;
+use Getopt::Std;
+use File::Basename qw(basename dirname);
+
+our ($opt_h, $opt_n, $opt_s);
+getopts('hns');
+
+$opt_h && usage();
+
+sub usage {
+       print STDERR "Usage: ${\basename $0} [-h] [-n] [-s] < <log_data>\n";
+       exit(1);
+}
 
 my (%mailmap);
 my (%email);
@@ -38,16 +50,38 @@ ($$)
 
        uc($a) cmp uc($b);
 }
+sub by_nbentries($$) {
+       my ($a, $b) = @_;
+       my $a_entries = $map{$a};
+       my $b_entries = $map{$b};
+
+       @$b_entries - @$a_entries || by_name $a, $b;
+}
+
+my $sort_method = $opt_n ? \&by_nbentries : \&by_name;
+
+sub summary_output {
+       my ($obj, $num, $key);
+
+       foreach $key (sort $sort_method keys %map) {
+               $obj = $map{$key};
+               $num = @$obj;
+               printf "%s: %u\n", $key, $num;
+               $n_output += $num;
+       }
+}
 
 sub shortlog_output {
-       my ($obj, $key, $desc);
+       my ($obj, $num, $key, $desc);
+
+       foreach $key (sort $sort_method keys %map) {
+               $obj = $map{$key};
+               $num = @$obj;
 
-       foreach $key (sort by_name keys %map) {
                # output author
-               printf "%s:\n", $key;
+               printf "%s (%u):\n", $key, $num;
 
                # output author's 1-line summaries
-               $obj = $map{$key};
                foreach $desc (reverse @$obj) {
                        print "  $desc\n";
                        $n_output++;
@@ -152,7 +186,7 @@ sub finalize {
 
 &setup_mailmap;
 &changelog_input;
-&shortlog_output;
+$opt_s ? &summary_output : &shortlog_output;
 &finalize;
 exit(0);
 
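
A usage sketch of the new sorting and summary switches, reading log data
from stdin as the usage string above describes:

    $ git log --pretty=short | git shortlog -n -s

-n sorts authors by number of commits instead of by name, and -s prints
only the per-author commit counts instead of the one-line summaries.
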
index f5c7d46341016a5ca77a52ad16fbcd8c3830678f..54d23569337f680f7936390c8a83d2e7f2868c38 100755 (executable)
 memoize('get_commit_time');
 
 my ($SVN_PATH, $SVN, $SVN_LOG, $_use_lib);
+
+sub nag_lib {
+       print STDERR <<EOF;
+! Please consider installing the SVN Perl libraries (version 1.1.0 or
+! newer).  You will generally get better performance and fewer bugs,
+! especially if you:
+! 1) have a case-insensitive filesystem
+! 2) replace symlinks with files (and vice-versa) in commits
+
+EOF
+}
+
 $_use_lib = 1 unless $ENV{GIT_SVN_NO_LIB};
 libsvn_load();
+nag_lib() unless $_use_lib;
+
 my $_optimize_commits = 1 unless $ENV{GIT_SVN_NO_OPTIMIZE_COMMITS};
 my $sha1 = qr/[a-f\d]{40}/;
 my $sha1_short = qr/[a-f\d]{4,40}/;
@@ -52,7 +66,7 @@
        $_template, $_shared, $_no_default_regex, $_no_graft_copy,
        $_limit, $_verbose, $_incremental, $_oneline, $_l_fmt, $_show_commit,
        $_version, $_upgrade, $_authors, $_branch_all_refs, @_opt_m,
-       $_merge, $_strategy, $_dry_run, $_ignore_nodate);
+       $_merge, $_strategy, $_dry_run, $_ignore_nodate, $_non_recursive);
 my (@_branch_from, %tree_map, %users, %rusers, %equiv);
 my ($_svn_co_url_revs, $_svn_pg_peg_revs);
 my @repo_path_split_cache;
                          'incremental' => \$_incremental,
                          'oneline' => \$_oneline,
                          'show-commit' => \$_show_commit,
+                         'non-recursive' => \$_non_recursive,
                          'authors-file|A=s' => \$_authors,
                        } ],
        'commit-diff' => [ \&commit_diff, 'Commit a diff between two trees',
@@ -168,11 +183,11 @@ sub usage {
 
        foreach (sort keys %cmd) {
                next if $cmd && $cmd ne $_;
-               print $fd '  ',pack('A13',$_),$cmd{$_}->[1],"\n";
+               print $fd '  ',pack('A17',$_),$cmd{$_}->[1],"\n";
                foreach (keys %{$cmd{$_}->[2]}) {
                        # prints out arguments as they should be passed:
                        my $x = s#[:=]s$## ? '<arg>' : s#[:=]i$## ? '<num>' : '';
-                       print $fd ' ' x 17, join(', ', map { length $_ > 1 ?
+                       print $fd ' ' x 21, join(', ', map { length $_ > 1 ?
                                                        "--$_" : "-$_" }
                                                split /\|/,$_)," $x\n";
                }
@@ -521,7 +536,7 @@ sub commit_lib {
                        $SVN = libsvn_connect($repo);
                        my $ed = SVN::Git::Editor->new(
                                        {       r => $r_last,
-                                               ra => $SVN,
+                                               ra => $SVN_LOG,
                                                c => $c,
                                                svn_path => $SVN_PATH
                                        },
@@ -682,12 +697,17 @@ sub multi_init {
                }
                $_trunk = $url . $_trunk;
        }
+       my $ch_id;
        if ($GIT_SVN eq 'git-svn') {
-               print "GIT_SVN_ID set to 'trunk' for $_trunk\n";
+               $ch_id = 1;
                $GIT_SVN = $ENV{GIT_SVN_ID} = 'trunk';
        }
        init_vars();
-       init($_trunk);
+       unless (-d $GIT_SVN_DIR) {
+               print "GIT_SVN_ID set to 'trunk' for $_trunk\n" if $ch_id;
+               init($_trunk);
+               sys('git-repo-config', 'svn.trunk', $_trunk);
+       }
        complete_url_ls_init($url, $_branches, '--branches/-b', '');
        complete_url_ls_init($url, $_tags, '--tags/-t', 'tags/');
 }
@@ -747,13 +767,18 @@ sub show_log {
                        # ignore
                } elsif (/^:\d{6} \d{6} $sha1_short/o) {
                        push @{$c->{raw}}, $_;
+               } elsif (/^[ACRMDT]\t/) {
+                       # we could add $SVN_PATH here, but that requires
+                       # remote access at the moment (repo_path_split)...
+                       s#^([ACRMDT])\t#   $1 #;
+                       push @{$c->{changed}}, $_;
                } elsif (/^diff /) {
                        $d = 1;
                        push @{$c->{diff}}, $_;
                } elsif ($d) {
                        push @{$c->{diff}}, $_;
                } elsif (/^    (git-svn-id:.+)$/) {
-                       (undef, $c->{r}, undef) = extract_metadata($1);
+                       ($c->{url}, $c->{r}, undef) = extract_metadata($1);
                } elsif (s/^    //) {
                        push @{$c->{l}}, $_;
                }
@@ -807,7 +832,7 @@ sub commit_diff {
        $SVN ||= libsvn_connect($repo);
        my @lock = $SVN::Core::VERSION ge '1.2.0' ? (undef, 0) : ();
        my $ed = SVN::Git::Editor->new({        r => $SVN->get_latest_revnum,
-                                               ra => $SVN, c => $tb,
+                                               ra => $SVN_LOG, c => $tb,
                                                svn_path => $SVN_PATH
                                        },
                                $SVN->get_commit_editor($_message,
@@ -845,7 +870,8 @@ sub git_svn_log_cmd {
        my ($r_min, $r_max) = @_;
        my @cmd = (qw/git-log --abbrev-commit --pretty=raw
                        --default/, "refs/remotes/$GIT_SVN");
-       push @cmd, '--summary' if $_verbose;
+       push @cmd, '-r' unless $_non_recursive;
+       push @cmd, qw/--raw --name-status/ if $_verbose;
        return @cmd unless defined $r_max;
        if ($r_max == $r_min) {
                push @cmd, '--max-count=1';
@@ -856,7 +882,7 @@ sub git_svn_log_cmd {
                my ($c_min, $c_max);
                $c_max = revdb_get($REVDB, $r_max);
                $c_min = revdb_get($REVDB, $r_min);
-               if ($c_min && $c_max) {
+               if (defined $c_min && defined $c_max) {
                        if ($r_max > $r_max) {
                                push @cmd, "$c_min..$c_max";
                        } else {
@@ -937,16 +963,21 @@ sub complete_url_ls_init {
                                print STDERR "W: Unrecognized URL: $u\n";
                                die "This should never happen\n";
                        }
+                       # don't try to init already existing refs
                        my $id = $pfx.$1;
-                       print "init $u => $id\n";
                        $GIT_SVN = $ENV{GIT_SVN_ID} = $id;
                        init_vars();
-                       init($u);
+                       unless (-d $GIT_SVN_DIR) {
+                               print "init $u => $id\n";
+                               init($u);
+                       }
                }
                exit 0;
        }
        waitpid $pid, 0;
        croak $? if $?;
+       my ($n) = ($switch =~ /^--(\w+)/);
+       sys('git-repo-config', "svn.$n", $var);
 }
 
 sub common_prefix {
@@ -2551,6 +2582,12 @@ sub show_commit {
        }
 }
 
+sub show_commit_changed_paths {
+       my ($c) = @_;
+       return unless $c->{changed};
+       print "Changed paths:\n", @{$c->{changed}};
+}
+
 sub show_commit_normal {
        my ($c) = @_;
        print '-' x72, "\nr$c->{r} | ";
@@ -2560,7 +2597,8 @@ sub show_commit_normal {
        my $nr_line = 0;
 
        if (my $l = $c->{l}) {
-               while ($l->[$#$l] eq "\n" && $l->[($#$l - 1)] eq "\n") {
+               while ($l->[$#$l] eq "\n" && $#$l > 0
+                                         && $l->[($#$l - 1)] eq "\n") {
                        pop @$l;
                }
                $nr_line = scalar @$l;
@@ -2572,11 +2610,15 @@ sub show_commit_normal {
                        } else {
                                $nr_line .= ' lines';
                        }
-                       print $nr_line, "\n\n";
+                       print $nr_line, "\n";
+                       show_commit_changed_paths($c);
+                       print "\n";
                        print $_ foreach @$l;
                }
        } else {
-               print "1 line\n\n";
+               print "1 line\n";
+               show_commit_changed_paths($c);
+               print "\n";
 
        }
        foreach my $x (qw/raw diff/) {
@@ -3312,9 +3354,11 @@ sub chg_file {
        seek $fh, 0, 0 or croak $!;
 
        my $exp = $md5->hexdigest;
-       my $atd = $self->apply_textdelta($fbat, undef, $self->{pool});
-       my $got = SVN::TxDelta::send_stream($fh, @$atd, $self->{pool});
+       my $pool = SVN::Pool->new;
+       my $atd = $self->apply_textdelta($fbat, undef, $pool);
+       my $got = SVN::TxDelta::send_stream($fh, @$atd, $pool);
        die "Checksum mismatch\nexpected: $exp\ngot: $got\n" if ($got ne $exp);
+       $pool->clear;
 
        close $fh or croak $!;
 }
index 988514e293fcde4c536cc18d5cd8c9fb77b890d3..f6eff8e32adc92a072b33de9040e2154efd13a8a 100755 (executable)
@@ -193,6 +193,13 @@ sub ignore {
        }
 }
 
+sub dir_list {
+       my($self,$path,$rev) = @_;
+       my ($dirents,undef,$properties)
+           = $self->{'svn'}->get_dir($path,$rev,undef);
+       return $dirents;
+}
+
 package main;
 use URI;
 
@@ -342,35 +349,16 @@ ($$)
 
 open BRANCHES,">>", "$git_dir/svn2git";
 
-sub node_kind($$$) {
-       my ($branch, $path, $revision) = @_;
+sub node_kind($$) {
+       my ($svnpath, $revision) = @_;
        my $pool=SVN::Pool->new;
-       my $kind = $svn->{'svn'}->check_path(revert_split_path($branch,$path),$revision,$pool);
+       my $kind = $svn->{'svn'}->check_path($svnpath,$revision,$pool);
        $pool->clear;
        return $kind;
 }
 
-sub revert_split_path($$) {
-       my($branch,$path) = @_;
-
-       my $svnpath;
-       $path = "" if $path eq "/"; # this should not happen, but ...
-       if($branch eq "/") {
-               $svnpath = "$trunk_name/$path";
-       } elsif($branch =~ m#^/#) {
-               $svnpath = "$tag_name$branch/$path";
-       } else {
-               $svnpath = "$branch_name/$branch/$path";
-       }
-
-       $svnpath =~ s#/+$##;
-       return $svnpath;
-}
-
 sub get_file($$$) {
-       my($rev,$branch,$path) = @_;
-
-       my $svnpath = revert_split_path($branch,$path);
+       my($svnpath,$rev,$path) = @_;
 
        # now get it
        my ($name,$mode);
@@ -413,10 +401,9 @@ ($$$)
 }
 
 sub get_ignore($$$$$) {
-       my($new,$old,$rev,$branch,$path) = @_;
+       my($new,$old,$rev,$path,$svnpath) = @_;
 
        return unless $opt_I;
-       my $svnpath = revert_split_path($branch,$path);
        my $name = $svn->ignore("$svnpath",$rev);
        if ($path eq '/') {
                $path = $opt_I;
@@ -435,7 +422,7 @@ ($$$$$)
                close $F;
                unlink $name;
                push(@$new,['0644',$sha,$path]);
-       } else {
+       } elsif (defined $old) {
                push(@$old,$path);
        }
 }
@@ -480,6 +467,27 @@ ($$)
        return $therev;
 }
 
+sub expand_svndir($$$);
+
+sub expand_svndir($$$)
+{
+       my ($svnpath, $rev, $path) = @_;
+       my @list;
+       get_ignore(\@list, undef, $rev, $path, $svnpath);
+       my $dirents = $svn->dir_list($svnpath, $rev);
+       foreach my $p(keys %$dirents) {
+               my $kind = node_kind($svnpath.'/'.$p, $rev);
+               if ($kind eq $SVN::Node::file) {
+                       my $f = get_file($svnpath.'/'.$p, $rev, $path.'/'.$p);
+                       push(@list, $f) if $f;
+               } elsif ($kind eq $SVN::Node::dir) {
+                       push(@list,
+                            expand_svndir($svnpath.'/'.$p, $rev, $path.'/'.$p));
+               }
+       }
+       return @list;
+}
+
 sub copy_path($$$$$$$$) {
        # Somebody copied a whole subdirectory.
        # We need to find the index entries from the old version which the
@@ -488,8 +496,11 @@ ($$$$$$$$)
        my($newrev,$newbranch,$path,$oldpath,$rev,$node_kind,$new,$parents) = @_;
 
        my($srcbranch,$srcpath) = split_path($rev,$oldpath);
-       unless(defined $srcbranch) {
-               print "Path not found when copying from $oldpath @ $rev\n";
+       unless(defined $srcbranch && defined $srcpath) {
+               print "Path not found when copying from $oldpath @ $rev.\n".
+                       "Will try to copy from original SVN location...\n"
+                       if $opt_v;
+               push (@$new, expand_svndir($oldpath, $rev, $path));
                return;
        }
        my $therev = branch_rev($srcbranch, $rev);
@@ -503,7 +514,7 @@ ($$$$$$$$)
        }
        print "$newrev:$newbranch:$path: copying from $srcbranch:$srcpath @ $rev\n" if $opt_v;
        if ($node_kind eq $SVN::Node::dir) {
-                       $srcpath =~ s#/*$#/#;
+               $srcpath =~ s#/*$#/#;
        }
        
        my $pid = open my $f,'-|';
@@ -582,10 +593,12 @@ sub commit {
                if(defined $oldpath) {
                        my $p;
                        ($parent,$p) = split_path($revision,$oldpath);
-                       if($parent eq "/") {
-                               $parent = $opt_o;
-                       } else {
-                               $parent =~ s#^/##; # if it's a tag
+                       if(defined $parent) {
+                               if($parent eq "/") {
+                                       $parent = $opt_o;
+                               } else {
+                                       $parent =~ s#^/##; # if it's a tag
+                               }
                        }
                } else {
                        $parent = undef;
@@ -651,9 +664,10 @@ sub commit {
                                push(@old,$path); # remove any old stuff
                        }
                        if(($action->[0] eq "A") || ($action->[0] eq "R")) {
-                               my $node_kind = node_kind($branch,$path,$revision);
+                               my $node_kind = node_kind($action->[3], $revision);
                                if ($node_kind eq $SVN::Node::file) {
-                                       my $f = get_file($revision,$branch,$path);
+                                       my $f = get_file($action->[3],
+                                                        $revision, $path);
                                        if ($f) {
                                                push(@new,$f) if $f;
                                        } else {
@@ -668,19 +682,20 @@ sub commit {
                                                          \@new, \@parents);
                                        } else {
                                                get_ignore(\@new, \@old, $revision,
-                                                          $branch, $path);
+                                                          $path, $action->[3]);
                                        }
                                }
                        } elsif ($action->[0] eq "D") {
                                push(@old,$path);
                        } elsif ($action->[0] eq "M") {
-                               my $node_kind = node_kind($branch,$path,$revision);
+                               my $node_kind = node_kind($action->[3], $revision);
                                if ($node_kind eq $SVN::Node::file) {
-                                       my $f = get_file($revision,$branch,$path);
+                                       my $f = get_file($action->[3],
+                                                        $revision, $path);
                                        push(@new,$f) if $f;
                                } elsif ($node_kind eq $SVN::Node::dir) {
                                        get_ignore(\@new, \@old, $revision,
-                                                  $branch,$path);
+                                                  $path, $action->[3]);
                                }
                        } else {
                                die "$revision: unknown action '".$action->[0]."' for $path\n";
@@ -838,7 +853,7 @@ sub commit {
                print $out ("object $cid\n".
                    "type commit\n".
                    "tag $dest\n".
-                   "tagger $committer_name <$committer_email>\n") and
+                   "tagger $committer_name <$committer_email> 0 +0000\n") and
                close($out)
                    or die "Cannot create tag object $dest: $!\n";
 
diff --git a/git.c b/git.c
index ae80e78456007e8ccb025949af82279f84cdb8fa..e089b53571cd8776ab208fe14b87af53b7057739 100644 (file)
--- a/git.c
+++ b/git.c
@@ -16,7 +16,7 @@
 #include "builtin.h"
 
 const char git_usage_string[] =
-       "git [--version] [--exec-path[=GIT_EXEC_PATH]] [--help] COMMAND [ ARGS ]";
+       "git [--version] [--exec-path[=GIT_EXEC_PATH]] [-p|--paginate] [--bare] [--git-dir=GIT_DIR] [--help] COMMAND [ARGS]";
 
 static void prepend_to_path(const char *dir, int len)
 {
@@ -226,7 +226,7 @@ static void handle_internal_command(int argc, const char **argv, char **envp)
                { "check-ref-format", cmd_check_ref_format },
                { "commit-tree", cmd_commit_tree, RUN_SETUP },
                { "count-objects", cmd_count_objects, RUN_SETUP },
-               { "diff", cmd_diff, RUN_SETUP },
+               { "diff", cmd_diff, RUN_SETUP | USE_PAGER },
                { "diff-files", cmd_diff_files, RUN_SETUP },
                { "diff-index", cmd_diff_index, RUN_SETUP },
                { "diff-stages", cmd_diff_stages, RUN_SETUP },
@@ -258,7 +258,7 @@ static void handle_internal_command(int argc, const char **argv, char **envp)
                { "show", cmd_show, RUN_SETUP | USE_PAGER },
                { "stripspace", cmd_stripspace },
                { "symbolic-ref", cmd_symbolic_ref, RUN_SETUP },
-               { "tar-tree", cmd_tar_tree, RUN_SETUP },
+               { "tar-tree", cmd_tar_tree },
                { "unpack-objects", cmd_unpack_objects, RUN_SETUP },
                { "update-index", cmd_update_index, RUN_SETUP },
                { "update-ref", cmd_update_ref, RUN_SETUP },
index 6d900342e35e4948888dd3a8ac2045a504435259..9b1217ac3f2a39f352e0b4046ceac9d438e4ac95 100644 (file)
@@ -97,7 +97,7 @@ find $RPM_BUILD_ROOT -type f -name '*.bs' -empty -exec rm -f {} ';'
 find $RPM_BUILD_ROOT -type f -name perllocal.pod -exec rm -f {} ';'
 
 (find $RPM_BUILD_ROOT%{_bindir} -type f | grep -vE "arch|svn|cvs|email|gitk" | sed -e s@^$RPM_BUILD_ROOT@@)               > bin-man-doc-files
-(find $RPM_BUILD_ROOT%{perl_vendorarch} -type f | sed -e s@^$RPM_BUILD_ROOT@@) >> perl-files
+(find $RPM_BUILD_ROOT%{perl_vendorlib} -type f | sed -e s@^$RPM_BUILD_ROOT@@) >> perl-files
 %if %{!?_without_docs:1}0
 (find $RPM_BUILD_ROOT%{_mandir} $RPM_BUILD_ROOT/Documentation -type f | grep -vE "arch|svn|git-cvs|email|gitk" | sed -e s@^$RPM_BUILD_ROOT@@ -e 's/$/*/' ) >> bin-man-doc-files
 %else
index 61c7ab5d4bbb770e50e89e2e5d0619ffda7e8577..e02e90f0429be0d2a69b76571101f20b8f75530f 100644 (file)
@@ -26,12 +26,26 @@ You can specify the following configuration variables when building GIT:
  * GITWEB_LOGO
    Points to the location where you put git-logo.png on your web server.
  * GITWEB_CONFIG
-   This file will be loaded using 'require'.  If the environment
+   This file will be loaded using 'require' and can be used to override any
+   of the options above, as well as some other options; see the top of
+   'gitweb.cgi' for the full list and descriptions.  If the environment
    $GITWEB_CONFIG is set when gitweb.cgi is executed the file in the
    environment variable will be loaded instead of the file
    specified when gitweb.cgi was created.
 
 
+Runtime gitweb configuration
+----------------------------
+
+You can adjust gitweb behaviour using the file specified in `GITWEB_CONFIG`
+(it defaults to 'gitweb_config.perl' in the same directory as the CGI).
+See the top of 'gitweb.cgi' for the list of variables and their descriptions.
+The most notable things that are not configurable at compile time are the
+optional features, stored in the '%feature' variable.  See the comments in
+'gitweb.cgi' for how to change the default feature settings in your
+`GITWEB_CONFIG`, or per project in `project.git/config`.
+
+
 Webserver configuration
 -----------------------
 
@@ -43,6 +57,7 @@ repositories, you can configure apache like this:
     DocumentRoot /pub/git
     RewriteEngine on
     RewriteRule ^/(.*\.git/(?!/?(info|objects|refs)).*)?$ /cgi-bin/gitweb.cgi%{REQUEST_URI}  [L,PT]
+    SetEnv     GITWEB_CONFIG   /etc/gitweb.conf
 </VirtualHost>
 
 The above configuration expects your public repositories to live under
@@ -51,6 +66,12 @@ both as cloneable GIT URL and as browseable gitweb interface.
 If you then start your git-daemon with --base-path=/pub/git --export-all
 then you can even use the git:// URL with exactly the same path.
 
+Setting the environment variable GITWEB_CONFIG tells gitweb to use the
+named file (in this example /etc/gitweb.conf) as the configuration
+file for gitweb.  Perl variables defined in it will override the
+defaults given at the head of gitweb.perl (or gitweb.cgi).  Look at
+the comments in that file for information on which variables may be
+set and what they mean.
 
 
 Originally written by:
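
A sketch of a runtime configuration along the lines described above (the
/etc/gitweb.conf path matches the Apache example; the feature settings are
the ones documented in the comments of gitweb.cgi):

    # /etc/gitweb.conf -- loaded via the GITWEB_CONFIG environment variable
    $feature{'blame'}{'default'} = [1];     # enable 'blame' system wide
    $feature{'blame'}{'override'} = 1;      # let projects override it

and, per project, inside project.git:

    $ git repo-config gitweb.blame false
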
index eb9fc3804b1445207a9afff688d9c8ba954255df..3f62b6d752a82d33de2b3ad2a67a384819dbca2a 100644 (file)
@@ -16,6 +16,11 @@ a:hover, a:visited, a:active {
        color: #880000;
 }
 
+img.logo {
+       float: right;
+       border-width: 0px;
+}
+
 div.page_header {
        height: 25px;
        padding: 8px;
@@ -173,6 +178,12 @@ table.blame {
        border-collapse: collapse;
 }
 
+table.blame td {
+       padding: 0px 5px;
+       font-size: 12px;
+       vertical-align: top;
+}
+
 th {
        padding: 2px 5px;
        font-size: 12px;
index e119e334231318f2d9b5a0edb0d3ce71848b8739..65d0a145e4b19688a4175aad935f28eaffb02985 100755 (executable)
 # default is not to define style sheet, but it can be overwritten later
 undef $stylesheet;
 
-# URI of GIT logo
+# URI of default stylesheet
+our $stylesheet = "++GITWEB_CSS++";
+# URI of GIT logo (72x27 size)
 our $logo = "++GITWEB_LOGO++";
 # URI of GIT favicon, assumed to be image/png type
 our $favicon = "++GITWEB_FAVICON++";
 
+# URI and label (title) of GIT logo link
+#our $logo_url = "http://www.kernel.org/pub/software/scm/git/docs/";
+#our $logo_label = "git documentation";
+our $logo_url = "http://git.or.cz/";
+our $logo_label = "git homepage";
+
 # source of projects list
 our $projects_list = "++GITWEB_LIST++";
 
        #
        # use gitweb_check_feature(<feature>) to check if <feature> is enabled
 
+       # Enable the 'blame' blob view, showing the last commit that modified
+       # each line in the file. This can be very CPU-intensive.
+
+       # To enable system wide have in $GITWEB_CONFIG
+       # $feature{'blame'}{'default'} = [1];
+       # To have project specific config enable override in $GITWEB_CONFIG
+       # $feature{'blame'}{'override'} = 1;
+       # and in project config gitweb.blame = 0|1;
        'blame' => {
                'sub' => \&feature_blame,
                'override' => 0,
                'default' => [0]},
 
+       # Enable the 'snapshot' link, providing a compressed tarball of any
+       # tree. This can potentially generate high traffic if you have a
+       # large project.
+
+       # To disable system wide have in $GITWEB_CONFIG
+       # $feature{'snapshot'}{'default'} = [undef];
+       # To have project specific config enable override in $GITWEB_CONFIG
+       # $feature{'snapshot'}{'override'} = 1;
+       # and in project config gitweb.snapshot = none|gzip|bzip2;
        'snapshot' => {
                'sub' => \&feature_snapshot,
                'override' => 0,
                #         => [content-encoding, suffix, program]
                'default' => ['x-gzip', 'gz', 'gzip']},
 
+       # Enable the pickaxe search, which will list the commits that modified
+       # a given string in a file. This can be a practical and much faster
+       # alternative to 'blame', but it is still potentially CPU-intensive.
+
+       # To enable system wide have in $GITWEB_CONFIG
+       # $feature{'pickaxe'}{'default'} = [1];
+       # To have project specific config enable override in $GITWEB_CONFIG
+       # $feature{'pickaxe'}{'override'} = 1;
+       # and in project config gitweb.pickaxe = 0|1;
        'pickaxe' => {
                'sub' => \&feature_pickaxe,
                'override' => 0,
                'default' => [1]},
+
+       # Make gitweb use an alternative format of the URLs which can be
+       # more readable and natural-looking: project name is embedded
+       # directly in the path and the query string contains other
+       # auxiliary information. All gitweb installations recognize
+       # URL in either format; this configures in which formats gitweb
+       # generates links.
+
+       # To enable system wide have in $GITWEB_CONFIG
+       # $feature{'pathinfo'}{'default'} = [1];
+       # Project specific override is not supported.
+
+       # Note that you will need to change the default location of CSS,
+       # favicon, logo and possibly other files to an absolute URL. Also,
+       # if gitweb.cgi serves as your indexfile, you will need to force
+       # $my_uri to contain the script name in your $GITWEB_CONFIG.
+       'pathinfo' => {
+               'override' => 0,
+               'default' => [0]},
 );
 
 sub gitweb_check_feature {
@@ -120,15 +173,13 @@ sub gitweb_check_feature {
                $feature{$name}{'override'},
                @{$feature{$name}{'default'}});
        if (!$override) { return @defaults; }
+       if (!defined $sub) {
+               warn "feature $name is not overrideable";
+               return @defaults;
+       }
        return $sub->(@defaults);
 }
 
-# To enable system wide have in $GITWEB_CONFIG
-# $feature{'blame'}{'default'} = [1];
-# To have project specific config enable override in $GITWEB_CONFIG
-# $feature{'blame'}{'override'} = 1;
-# and in project config gitweb.blame = 0|1;
-
 sub feature_blame {
        my ($val) = git_get_project_config('blame', '--bool');
 
@@ -141,12 +192,6 @@ sub feature_blame {
        return $_[0];
 }
 
-# To disable system wide have in $GITWEB_CONFIG
-# $feature{'snapshot'}{'default'} = [undef];
-# To have project specific config enable override in $GITWEB_CONFIG
-# $feature{'blame'}{'override'} = 1;
-# and in project config  gitweb.snapshot = none|gzip|bzip2
-
 sub feature_snapshot {
        my ($ctype, $suffix, $command) = @_;
 
@@ -170,12 +215,6 @@ sub gitweb_have_snapshot {
        return $have_snapshot;
 }
 
-# To enable system wide have in $GITWEB_CONFIG
-# $feature{'pickaxe'}{'default'} = [1];
-# To have project specific config enable override in $GITWEB_CONFIG
-# $feature{'pickaxe'}{'override'} = 1;
-# and in project config gitweb.pickaxe = 0|1;
-
 sub feature_pickaxe {
        my ($val) = git_get_project_config('pickaxe', '--bool');
 
@@ -399,6 +438,10 @@ sub evaluate_path_info {
 
 sub href(%) {
        my %params = @_;
+       my $href = $my_uri;
+
+       # XXX: Warning: If you touch this, check the search form for updating,
+       # too.
 
        my @mapping = (
                project => "p",
@@ -417,6 +460,19 @@ (%)
 
        $params{'project'} = $project unless exists $params{'project'};
 
+       my ($use_pathinfo) = gitweb_check_feature('pathinfo');
+       if ($use_pathinfo) {
+               # use PATH_INFO for project name
+               $href .= "/$params{'project'}" if defined $params{'project'};
+               delete $params{'project'};
+
+               # Summary just uses the project path URL
+               if (defined $params{'action'} && $params{'action'} eq 'summary') {
+                       delete $params{'action'};
+               }
+       }
+
+       # now encode the parameters explicitly
        my @result = ();
        for (my $i = 0; $i < @mapping; $i += 2) {
                my ($name, $symbol) = ($mapping[$i], $mapping[$i+1]);
@@ -424,7 +480,9 @@ (%)
                        push @result, $symbol . "=" . esc_param($params{$name});
                }
        }
-       return "$my_uri?" . join(';', @result);
+       $href .= "?" . join(';', @result) if scalar @result;
+
+       return $href;
 }
 
 
@@ -464,6 +522,12 @@ sub validate_refname {
        return $input;
 }
 
+# very thin wrapper for decode("utf8", $str, Encode::FB_DEFAULT);
+sub to_utf8 {
+       my $str = shift;
+       return decode("utf8", $str, Encode::FB_DEFAULT);
+}
+
 # quote unsafe chars, but keep the slash, even when it's not
 # correct, but quoted slashes look too horrible in bookmarks
 sub esc_param {
@@ -486,7 +550,7 @@ sub esc_url {
 # replace invalid utf8 character with SUBSTITUTION sequence
 sub esc_html {
        my $str = shift;
-       $str = decode("utf8", $str, Encode::FB_DEFAULT);
+       $str = to_utf8($str);
        $str = escapeHTML($str);
        $str =~ s/\014/^L/g; # escape FORM FEED (FF) character (e.g. in COPYING file)
        $str =~ s/\033/^[/g; # "escape" ESCAPE (\e) character (e.g. commit 20a3847d8a5032ce41f90dcc68abfb36e6fee9b1)
@@ -689,7 +753,7 @@ sub format_subject_html {
 
        if (length($short) < length($long)) {
                return $cgi->a({-href => $href, -class => "list subject",
-                               -title => decode("utf8", $long, Encode::FB_DEFAULT)},
+                               -title => to_utf8($long)},
                       esc_html($short) . $extra);
        } else {
                return $cgi->a({-href => $href, -class => "list subject"},
@@ -864,7 +928,7 @@ sub git_get_projects_list {
                        if (check_export_ok("$projectroot/$path")) {
                                my $pr = {
                                        path => $path,
-                                       owner => decode("utf8", $owner, Encode::FB_DEFAULT),
+                                       owner => to_utf8($owner),
                                };
                                push @list, $pr
                        }
@@ -893,7 +957,7 @@ sub git_get_project_owner {
                        $pr = unescape($pr);
                        $ow = unescape($ow);
                        if ($pr eq $project) {
-                               $owner = decode("utf8", $ow, Encode::FB_DEFAULT);
+                               $owner = to_utf8($ow);
                                last;
                        }
                }
@@ -1037,12 +1101,11 @@ sub parse_commit {
        if (defined $commit_text) {
                @commit_lines = @$commit_text;
        } else {
-               $/ = "\0";
+               local $/ = "\0";
                open my $fd, "-|", git_cmd(), "rev-list", "--header", "--parents", "--max-count=1", $commit_id
                        or return;
                @commit_lines = split '\n', <$fd>;
                close $fd or return;
-               $/ = "\n";
                pop @commit_lines;
        }
        my $header = shift @commit_lines;
@@ -1102,6 +1165,9 @@ sub parse_commit {
                        last;
                }
        }
+       if ($co{'title'} eq "") {
+               $co{'title'} = $co{'title_short'} = '(no commit message)';
+       }
        # remove added spaces
        foreach my $line (@commit_lines) {
                $line =~ s/^    //;
@@ -1273,7 +1339,7 @@ sub get_file_owner {
        }
        my $owner = $gcos;
        $owner =~ s/[,;].*$//;
-       return decode("utf8", $owner, Encode::FB_DEFAULT);
+       return to_utf8($owner);
 }
 
 ## ......................................................................
@@ -1429,9 +1495,9 @@ sub git_header_html {
        }
 
        print "<div class=\"page_header\">\n" .
-             "<a href=\"http://www.kernel.org/pub/software/scm/git/docs/\" title=\"git documentation\">" .
-             "<img src=\"$logo\" width=\"72\" height=\"27\" alt=\"git\" style=\"float:right; border-width:0px;\"/>" .
-             "</a>\n";
+             $cgi->a({-href => esc_url($logo_url),
+                      -title => $logo_label},
+                     qq(<img src="$logo" width="72" height="27" alt="git" class="logo"/>));
        print $cgi->a({-href => esc_url($home_link)}, $home_link_str) . " / ";
        if (defined $project) {
                print $cgi->a({-href => href(action=>"summary")}, esc_html($project));
@@ -1452,6 +1518,7 @@ sub git_header_html {
                }
                $cgi->param("a", "search");
                $cgi->param("h", $search_hash);
+               $cgi->param("p", $project);
                print $cgi->startform(-method => "get", -action => $my_uri) .
                      "<div class=\"search\">\n" .
                      $cgi->hidden(-name => "p") . "\n" .
@@ -1612,17 +1679,16 @@ sub git_print_page_path {
        my $type = shift;
        my $hb = shift;
 
-       if (!defined $name) {
-               print "<div class=\"page_path\">/</div>\n";
-       } else {
+
+       print "<div class=\"page_path\">";
+       print $cgi->a({-href => href(action=>"tree", hash_base=>$hb),
+                     -title => 'tree root'}, "[$project]");
+       print " / ";
+       if (defined $name) {
                my @dirname = split '/', $name;
                my $basename = pop @dirname;
                my $fullname = '';
 
-               print "<div class=\"page_path\">";
-               print $cgi->a({-href => href(action=>"tree", hash_base=>$hb),
-                             -title => 'tree root'}, "[$project]");
-               print " / ";
                foreach my $dir (@dirname) {
                        $fullname .= ($fullname ? '/' : '') . $dir;
                        print $cgi->a({-href => href(action=>"tree", file_name=>$fullname,
@@ -1638,11 +1704,12 @@ sub git_print_page_path {
                        print $cgi->a({-href => href(action=>"tree", file_name=>$file_name,
                                                     hash_base=>$hb),
                                      -title => $name}, esc_html($basename));
+                       print " / ";
                } else {
                        print esc_html($basename);
                }
-               print "<br/></div>\n";
        }
+       print "<br/></div>\n";
 }
 
 # sub git_print_log (\@;%) {
@@ -1719,13 +1786,13 @@ sub git_print_tree_entry {
        if ($t->{'type'} eq "blob") {
                print "<td class=\"list\">" .
                        $cgi->a({-href => href(action=>"blob", hash=>$t->{'hash'},
-                                              file_name=>"$basedir$t->{'name'}", %base_key),
-                                -class => "list"}, esc_html($t->{'name'})) . "</td>\n";
+                                              file_name=>"$basedir$t->{'name'}", %base_key),
+                               -class => "list"}, esc_html($t->{'name'})) . "</td>\n";
                print "<td class=\"link\">";
                if ($have_blame) {
                        print $cgi->a({-href => href(action=>"blame", hash=>$t->{'hash'},
-                                                    file_name=>"$basedir$t->{'name'}", %base_key)},
-                                     "blame");
+                                                          file_name=>"$basedir$t->{'name'}", %base_key)},
+                                           "blame");
                }
                if (defined $hash_base) {
                        if ($have_blame) {
@@ -1737,8 +1804,8 @@ sub git_print_tree_entry {
                }
                print " | " .
                        $cgi->a({-href => href(action=>"blob_plain", hash_base=>$hash_base,
-                                              file_name=>"$basedir$t->{'name'}")},
-                               "raw");
+                                              file_name=>"$basedir$t->{'name'}")},
+                               "raw");
                print "</td>\n";
 
        } elsif ($t->{'type'} eq "tree") {
@@ -1806,7 +1873,7 @@ sub git_difftree_body {
                        print "<td>";
                        print $cgi->a({-href => href(action=>"blob", hash=>$diff{'to_id'},
                                                     hash_base=>$hash, file_name=>$diff{'file'}),
-                                      -class => "list"}, esc_html($diff{'file'}));
+                                     -class => "list"}, esc_html($diff{'file'}));
                        print "</td>\n";
                        print "<td>$mode_chng</td>\n";
                        print "<td class=\"link\">";
@@ -1833,11 +1900,11 @@ sub git_difftree_body {
                                print " | ";
                        }
                        print $cgi->a({-href => href(action=>"blame", hash_base=>$parent,
-                                                    file_name=>$diff{'file'})},
-                                     "blame") . " | ";
+                                                    file_name=>$diff{'file'})},
+                                     "blame") . " | ";
                        print $cgi->a({-href => href(action=>"history", hash_base=>$parent,
-                                                    file_name=>$diff{'file'})},
-                                     "history");
+                                                    file_name=>$diff{'file'})},
+                                     "history");
                        print "</td>\n";
 
                } elsif ($diff{'status'} eq "M" || $diff{'status'} eq "T") { # modified, or type changed
@@ -1858,8 +1925,8 @@ sub git_difftree_body {
                        }
                        print "<td>";
                        print $cgi->a({-href => href(action=>"blob", hash=>$diff{'to_id'},
-                                                    hash_base=>$hash, file_name=>$diff{'file'}),
-                                      -class => "list"}, esc_html($diff{'file'}));
+                                                    hash_base=>$hash, file_name=>$diff{'file'}),
+                                     -class => "list"}, esc_html($diff{'file'}));
                        print "</td>\n";
                        print "<td>$mode_chnge</td>\n";
                        print "<td class=\"link\">";
@@ -1870,19 +1937,19 @@ sub git_difftree_body {
                                        print $cgi->a({-href => "#patch$patchno"}, "patch");
                                } else {
                                        print $cgi->a({-href => href(action=>"blobdiff",
-                                                                    hash=>$diff{'to_id'}, hash_parent=>$diff{'from_id'},
-                                                                    hash_base=>$hash, hash_parent_base=>$parent,
-                                                                    file_name=>$diff{'file'})},
-                                                     "diff");
+                                                                    hash=>$diff{'to_id'}, hash_parent=>$diff{'from_id'},
+                                                                    hash_base=>$hash, hash_parent_base=>$parent,
+                                                                    file_name=>$diff{'file'})},
+                                                     "diff");
                                }
                                print " | ";
                        }
                        print $cgi->a({-href => href(action=>"blame", hash_base=>$hash,
-                                                    file_name=>$diff{'file'})},
-                                     "blame") . " | ";
+                                                    file_name=>$diff{'file'})},
+                                     "blame") . " | ";
                        print $cgi->a({-href => href(action=>"history", hash_base=>$hash,
-                                                    file_name=>$diff{'file'})},
-                                     "history");
+                                                    file_name=>$diff{'file'})},
+                                     "history");
                        print "</td>\n";
 
                } elsif ($diff{'status'} eq "R" || $diff{'status'} eq "C") { # renamed or copied
@@ -1910,19 +1977,19 @@ sub git_difftree_body {
                                        print $cgi->a({-href => "#patch$patchno"}, "patch");
                                } else {
                                        print $cgi->a({-href => href(action=>"blobdiff",
-                                                                    hash=>$diff{'to_id'}, hash_parent=>$diff{'from_id'},
-                                                                    hash_base=>$hash, hash_parent_base=>$parent,
-                                                                    file_name=>$diff{'to_file'}, file_parent=>$diff{'from_file'})},
-                                                     "diff");
+                                                                    hash=>$diff{'to_id'}, hash_parent=>$diff{'from_id'},
+                                                                    hash_base=>$hash, hash_parent_base=>$parent,
+                                                                    file_name=>$diff{'to_file'}, file_parent=>$diff{'from_file'})},
+                                                     "diff");
                                }
                                print " | ";
                        }
                        print $cgi->a({-href => href(action=>"blame", hash_base=>$parent,
-                                                    file_name=>$diff{'from_file'})},
-                                     "blame") . " | ";
+                                                    file_name=>$diff{'from_file'})},
+                                     "blame") . " | ";
                        print $cgi->a({-href => href(action=>"history", hash_base=>$parent,
-                                                    file_name=>$diff{'from_file'})},
-                                     "history");
+                                                   file_name=>$diff{'from_file'})},
+                                     "history");
                        print "</td>\n";
 
                } # we should not encounter Unmerged (U) or Unknown (X) status
@@ -1974,14 +2041,14 @@ sub git_patchset_body {
                                print "<div class=\"diff_info\">" . file_type($diffinfo->{'to_mode'}) . ":" .
                                      $cgi->a({-href => href(action=>"blob", hash_base=>$hash,
                                                             hash=>$diffinfo->{'to_id'}, file_name=>$diffinfo->{'file'})},
-                                             $diffinfo->{'to_id'}) . "(new)" .
+                                             $diffinfo->{'to_id'}) . " (new)" .
                                      "</div>\n"; # class="diff_info"
 
                        } elsif ($diffinfo->{'status'} eq "D") { # deleted
                                print "<div class=\"diff_info\">" . file_type($diffinfo->{'from_mode'}) . ":" .
                                      $cgi->a({-href => href(action=>"blob", hash_base=>$hash_parent,
                                                             hash=>$diffinfo->{'from_id'}, file_name=>$diffinfo->{'file'})},
-                                             $diffinfo->{'from_id'}) . "(deleted)" .
+                                             $diffinfo->{'from_id'}) . " (deleted)" .
                                      "</div>\n"; # class="diff_info"
 
                        } elsif ($diffinfo->{'status'} eq "R" || # renamed
@@ -2085,8 +2152,10 @@ sub git_shortlog_body {
                print "</td>\n" .
                      "<td class=\"link\">" .
                      $cgi->a({-href => href(action=>"commitdiff", hash=>$commit)}, "commitdiff") . " | " .
-                     $cgi->a({-href => href(action=>"tree", hash=>$commit, hash_base=>$commit)}, "tree") . " | " .
-                     $cgi->a({-href => href(action=>"snapshot", hash=>$commit)}, "snapshot");
+                     $cgi->a({-href => href(action=>"tree", hash=>$commit, hash_base=>$commit)}, "tree");
+               if (gitweb_have_snapshot()) {
+                       print " | " . $cgi->a({-href => href(action=>"snapshot", hash=>$commit)}, "snapshot");
+               }
                print "</td>\n" .
                      "</tr>\n";
        }
@@ -2832,7 +2901,7 @@ sub git_tree {
        my $refs = git_get_references();
        my $ref = format_ref_marker($refs, $hash_base);
        git_header_html();
-       my $base = "";
+       my $basedir = '';
        my ($have_blame) = gitweb_check_feature('blame');
        if (defined $hash_base && (my %co = parse_commit($hash_base))) {
                my @views_nav = ();
@@ -2849,7 +2918,7 @@ sub git_tree {
                        # FIXME: Should be available when we have no hash base as well.
                        push @views_nav,
                                $cgi->a({-href => href(action=>"snapshot", hash=>$hash)},
-                                       "snapshot");
+                                       "snapshot");
                }
                git_print_page_nav('tree','', $hash_base, undef, undef, join(' | ', @views_nav));
                git_print_header_div('commit', esc_html($co{'title'}) . $ref, $hash_base);
@@ -2860,12 +2929,39 @@ sub git_tree {
                print "<div class=\"title\">$hash</div>\n";
        }
        if (defined $file_name) {
-               $base = esc_html("$file_name/");
+               $basedir = $file_name;
+               if ($basedir ne '' && substr($basedir, -1) ne '/') {
+                       $basedir .= '/';
+               }
        }
        git_print_page_path($file_name, 'tree', $hash_base);
        print "<div class=\"page_body\">\n";
        print "<table cellspacing=\"0\">\n";
        my $alternate = 1;
+       # '..' (up directory) link if possible
+       if (defined $hash_base &&
+           defined $file_name && $file_name =~ m![^/]+$!) {
+               if ($alternate) {
+                       print "<tr class=\"dark\">\n";
+               } else {
+                       print "<tr class=\"light\">\n";
+               }
+               $alternate ^= 1;
+
+               my $up = $file_name;
+               $up =~ s!/?[^/]+$!!;
+               undef $up unless $up;
+               # based on git_print_tree_entry
+               print '<td class="mode">' . mode_str('040000') . "</td>\n";
+               print '<td class="list">';
+               print $cgi->a({-href => href(action=>"tree", hash_base=>$hash_base,
+                                            file_name=>$up)},
+                             "..");
+               print "</td>\n";
+               print "<td class=\"link\"></td>\n";
+
+               print "</tr>\n";
+       }
        foreach my $line (@entries) {
                my %t = parse_ls_tree_line($line, -z => 1);
 
@@ -2876,7 +2972,7 @@ sub git_tree {
                }
                $alternate ^= 1;
 
-               git_print_tree_entry(\%t, $base, $hash_base, $have_blame);
+               git_print_tree_entry(\%t, $basedir, $hash_base, $have_blame);
 
                print "</tr>\n";
        }
@@ -2904,9 +3000,12 @@ sub git_snapshot {
                -content_disposition => 'inline; filename="' . "$filename" . '"',
                -status => '200 OK');
 
-       my $git_command = git_cmd_str();
-       open my $fd, "-|", "$git_command tar-tree $hash \'$project\' | $command" or
-               die_error(undef, "Execute git-tar-tree failed.");
+       my $git = git_cmd_str();
+       my $name = $project;
+       $name =~ s/\047/\047\\\047\047/g;
+       open my $fd, "-|",
+       "$git archive --format=tar --prefix=\'$name\'/ $hash | $command"
+               or die_error(undef, "Execute git-archive failed.");
        binmode STDOUT, ':raw';
        print <$fd>;
        binmode STDOUT, ':utf8'; # as set at the beginning of gitweb.cgi
@@ -3003,13 +3102,8 @@ sub git_commit {
                        $cgi->a({-href => href(action=>"blame", hash_parent=>$parent, file_name=>$file_name)},
                                "blame");
        }
-       if (defined $co{'parent'}) {
-               push @views_nav,
-                       $cgi->a({-href => href(action=>"shortlog", hash=>$hash)}, "shortlog"),
-                       $cgi->a({-href => href(action=>"log", hash=>$hash)}, "log");
-       }
        git_header_html(undef, $expires);
-       git_print_page_nav('commit', defined $co{'parent'} ? '' : 'commitdiff',
+       git_print_page_nav('commit', '',
                           $hash, $co{'tree'}, $hash,
                           join (' | ', @views_nav));
 
@@ -3652,7 +3746,7 @@ sub git_rss {
                      "<![CDATA[\n";
                my $comment = $co{'comment'};
                foreach my $line (@$comment) {
-                       $line = decode("utf8", $line, Encode::FB_DEFAULT);
+                       $line = to_utf8($line);
                        print "$line<br/>\n";
                }
                print "<br/>\n";
@@ -3661,7 +3755,7 @@ sub git_rss {
                                next;
                        }
                        my $file = esc_html(unquote($7));
-                       $file = decode("utf8", $file, Encode::FB_DEFAULT);
+                       $file = to_utf8($file);
                        print "$file<br/>\n";
                }
                print "]]>\n" .
diff --git a/grep.c b/grep.c
index c411ddd4d52e2aa68edd979733dd0b15f51d5f32..0fc078ec0ac42e39125c8e5a8f05a3b6ecae4fa3 100644 (file)
--- a/grep.c
+++ b/grep.c
@@ -34,7 +34,7 @@ static void compile_regexp(struct grep_pat *p, struct grep_opt *opt)
        }
 }
 
-static struct grep_expr *compile_pattern_expr(struct grep_pat **);
+static struct grep_expr *compile_pattern_or(struct grep_pat **);
 static struct grep_expr *compile_pattern_atom(struct grep_pat **list)
 {
        struct grep_pat *p;
@@ -52,7 +52,7 @@ static struct grep_expr *compile_pattern_atom(struct grep_pat **list)
                return x;
        case GREP_OPEN_PAREN:
                *list = p->next;
-               x = compile_pattern_expr(list);
+               x = compile_pattern_or(list);
                if (!x)
                        return NULL;
                if (!*list || (*list)->token != GREP_CLOSE_PAREN)
@@ -138,6 +138,9 @@ void compile_grep_patterns(struct grep_opt *opt)
 {
        struct grep_pat *p;
 
+       if (opt->all_match)
+               opt->extended = 1;
+
        for (p = opt->pattern_list; p; p = p->next) {
                switch (p->token) {
                case GREP_PATTERN: /* atom */
@@ -309,40 +312,63 @@ static int match_one_pattern(struct grep_opt *opt, struct grep_pat *p, char *bol
        return hit;
 }
 
-static int match_expr_eval(struct grep_opt *opt,
+static int match_expr_eval(struct grep_opt *o,
                           struct grep_expr *x,
                           char *bol, char *eol,
-                          enum grep_context ctx)
+                          enum grep_context ctx,
+                          int collect_hits)
 {
+       int h = 0;
+
        switch (x->node) {
        case GREP_NODE_ATOM:
-               return match_one_pattern(opt, x->u.atom, bol, eol, ctx);
+               h = match_one_pattern(o, x->u.atom, bol, eol, ctx);
                break;
        case GREP_NODE_NOT:
-               return !match_expr_eval(opt, x->u.unary, bol, eol, ctx);
+               h = !match_expr_eval(o, x->u.unary, bol, eol, ctx, 0);
+               break;
        case GREP_NODE_AND:
-               return (match_expr_eval(opt, x->u.binary.left, bol, eol, ctx) &&
-                       match_expr_eval(opt, x->u.binary.right, bol, eol, ctx));
+               if (!collect_hits)
+                       return (match_expr_eval(o, x->u.binary.left,
+                                               bol, eol, ctx, 0) &&
+                               match_expr_eval(o, x->u.binary.right,
+                                               bol, eol, ctx, 0));
+               h = match_expr_eval(o, x->u.binary.left, bol, eol, ctx, 0);
+               h &= match_expr_eval(o, x->u.binary.right, bol, eol, ctx, 0);
+               break;
        case GREP_NODE_OR:
-               return (match_expr_eval(opt, x->u.binary.left, bol, eol, ctx) ||
-                       match_expr_eval(opt, x->u.binary.right, bol, eol, ctx));
+               if (!collect_hits)
+                       return (match_expr_eval(o, x->u.binary.left,
+                                               bol, eol, ctx, 0) ||
+                               match_expr_eval(o, x->u.binary.right,
+                                               bol, eol, ctx, 0));
+               h = match_expr_eval(o, x->u.binary.left, bol, eol, ctx, 0);
+               x->u.binary.left->hit |= h;
+               h |= match_expr_eval(o, x->u.binary.right, bol, eol, ctx, 1);
+               break;
+       default:
+               die("Unexpected node type (internal error) %d\n", x->node);
        }
-       die("Unexpected node type (internal error) %d\n", x->node);
+       if (collect_hits)
+               x->hit |= h;
+       return h;
 }
 
 static int match_expr(struct grep_opt *opt, char *bol, char *eol,
-                     enum grep_context ctx)
+                     enum grep_context ctx, int collect_hits)
 {
        struct grep_expr *x = opt->pattern_expression;
-       return match_expr_eval(opt, x, bol, eol, ctx);
+       return match_expr_eval(opt, x, bol, eol, ctx, collect_hits);
 }
 
 static int match_line(struct grep_opt *opt, char *bol, char *eol,
-                     enum grep_context ctx)
+                     enum grep_context ctx, int collect_hits)
 {
        struct grep_pat *p;
        if (opt->extended)
-               return match_expr(opt, bol, eol, ctx);
+               return match_expr(opt, bol, eol, ctx, collect_hits);
+
+       /* we are never called with collect_hits unless opt->extended is set */
        for (p = opt->pattern_list; p; p = p->next) {
                if (match_one_pattern(opt, p, bol, eol, ctx))
                        return 1;
@@ -350,7 +376,8 @@ static int match_line(struct grep_opt *opt, char *bol, char *eol,
        return 0;
 }
 
-int grep_buffer(struct grep_opt *opt, const char *name, char *buf, unsigned long size)
+static int grep_buffer_1(struct grep_opt *opt, const char *name,
+                        char *buf, unsigned long size, int collect_hits)
 {
        char *bol = buf;
        unsigned long left = size;
@@ -386,7 +413,7 @@ int grep_buffer(struct grep_opt *opt, const char *name, char *buf, unsigned long
 
        while (left) {
                char *eol, ch;
-               int hit = 0;
+               int hit;
 
                eol = end_of_line(bol, &left);
                ch = *eol;
@@ -395,9 +422,12 @@ int grep_buffer(struct grep_opt *opt, const char *name, char *buf, unsigned long
                if ((ctx == GREP_CONTEXT_HEAD) && (eol == bol))
                        ctx = GREP_CONTEXT_BODY;
 
-               hit = match_line(opt, bol, eol, ctx);
+               hit = match_line(opt, bol, eol, ctx, collect_hits);
                *eol = ch;
 
+               if (collect_hits)
+                       goto next_line;
+
                /* "grep -v -e foo -e bla" should list lines
                 * that do not have either, so inversion should
                 * be done outside.
@@ -477,6 +507,8 @@ int grep_buffer(struct grep_opt *opt, const char *name, char *buf, unsigned long
        }
 
        free(prev);
+       if (collect_hits)
+               return 0;
 
        if (opt->status_only)
                return 0;
@@ -496,3 +528,49 @@ int grep_buffer(struct grep_opt *opt, const char *name, char *buf, unsigned long
        return !!last_hit;
 }
 
+static void clr_hit_marker(struct grep_expr *x)
+{
+       /* All-hit markers are meaningful only at the very top level
+        * OR node.
+        */
+       while (1) {
+               x->hit = 0;
+               if (x->node != GREP_NODE_OR)
+                       return;
+               x->u.binary.left->hit = 0;
+               x = x->u.binary.right;
+       }
+}
+
+static int chk_hit_marker(struct grep_expr *x)
+{
+       /* Top level nodes have hit markers.  See if they all are hits */
+       while (1) {
+               if (x->node != GREP_NODE_OR)
+                       return x->hit;
+               if (!x->u.binary.left->hit)
+                       return 0;
+               x = x->u.binary.right;
+       }
+}
+
+int grep_buffer(struct grep_opt *opt, const char *name, char *buf, unsigned long size)
+{
+       /*
+        * we do not have to do the two-pass grep when we do not check
+        * buffer-wide "all-match".
+        */
+       if (!opt->all_match)
+               return grep_buffer_1(opt, name, buf, size, 0);
+
+       /* Otherwise we need to know which top-level "or" terms hit.
+        * We first clear hit markers from them.
+        */
+       clr_hit_marker(opt->pattern_expression);
+       grep_buffer_1(opt, name, buf, size, 1);
+
+       if (!chk_hit_marker(opt->pattern_expression))
+               return 0;
+
+       return grep_buffer_1(opt, name, buf, size, 0);
+}
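
The new grep_buffer() above runs a first pass with collect_hits set purely to mark which top-level "or" terms matched anywhere in the buffer, and only runs the normal reporting pass if every term got a hit. A standalone sketch of that buffer-wide "all-match" idea, using plain substring tests instead of the compiled expression tree (hypothetical demo, not the grep.c code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *terms[] = { "foo", "bar" };          /* stand-ins for -e patterns */
	const char *lines[] = { "foo one", "bar two", "neither" };
	int nterms = 2, nlines = 3, hit[2] = { 0, 0 }, i, j;

	/* pass 1: record buffer-wide hits per top-level term */
	for (i = 0; i < nlines; i++)
		for (j = 0; j < nterms; j++)
			if (strstr(lines[i], terms[j]))
				hit[j] = 1;

	/* "all-match": bail out unless every term hit somewhere */
	for (j = 0; j < nterms; j++)
		if (!hit[j])
			return 0;

	/* pass 2: normal per-line reporting */
	for (i = 0; i < nlines; i++)
		for (j = 0; j < nterms; j++)
			if (strstr(lines[i], terms[j])) {
				printf("%s\n", lines[i]);
				break;
			}
	return 0;
}
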
diff --git a/grep.h b/grep.h
index af9098cfe8699680aeaa11790dad3e8d556ed395..d252dd25f81526d9b8663b4d3c9585d69a901397 100644 (file)
--- a/grep.h
+++ b/grep.h
@@ -35,6 +35,7 @@ enum grep_expr_node {
 
 struct grep_expr {
        enum grep_expr_node node;
+       unsigned hit;
        union {
                struct grep_pat *atom;
                struct grep_expr *unary;
@@ -59,6 +60,7 @@ struct grep_opt {
        unsigned count:1;
        unsigned word_regexp:1;
        unsigned fixed:1;
+       unsigned all_match:1;
 #define GREP_BINARY_DEFAULT    0
 #define GREP_BINARY_NOMATCH    1
 #define GREP_BINARY_TEXT       2
index bc74f30f76fe0200f6c7ecc215ee1cd9211670f4..396552da022a1dca9c43738be4d87c664f31a06c 100644 (file)
@@ -4,35 +4,6 @@
 #include "fetch.h"
 #include "http.h"
 
-#ifndef NO_EXPAT
-#include <expat.h>
-
-/* Definitions for DAV requests */
-#define DAV_PROPFIND "PROPFIND"
-#define DAV_PROPFIND_RESP ".multistatus.response"
-#define DAV_PROPFIND_NAME ".multistatus.response.href"
-#define DAV_PROPFIND_COLLECTION ".multistatus.response.propstat.prop.resourcetype.collection"
-#define PROPFIND_ALL_REQUEST "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<D:propfind xmlns:D=\"DAV:\">\n<D:allprop/>\n</D:propfind>"
-
-/* Definitions for processing XML DAV responses */
-#ifndef XML_STATUS_OK
-enum XML_Status {
-  XML_STATUS_OK = 1,
-  XML_STATUS_ERROR = 0
-};
-#define XML_STATUS_OK    1
-#define XML_STATUS_ERROR 0
-#endif
-
-/* Flags that control remote_ls processing */
-#define PROCESS_FILES (1u << 0)
-#define PROCESS_DIRS  (1u << 1)
-#define RECURSIVE     (1u << 2)
-
-/* Flags that remote_ls passes to callback functions */
-#define IS_DIR (1u << 0)
-#endif
-
 #define PREV_BUF_SIZE 4096
 #define RANGE_HEADER_SIZE 30
 
@@ -90,30 +61,6 @@ struct alternates_request {
        int http_specific;
 };
 
-#ifndef NO_EXPAT
-struct xml_ctx
-{
-       char *name;
-       int len;
-       char *cdata;
-       void (*userFunc)(struct xml_ctx *ctx, int tag_closed);
-       void *userData;
-};
-
-struct remote_ls_ctx
-{
-       struct alt_base *repo;
-       char *path;
-       void (*userFunc)(struct remote_ls_ctx *ls);
-       void *userData;
-       int flags;
-       char *dentry_name;
-       int dentry_flags;
-       int rc;
-       struct remote_ls_ctx *parent;
-};
-#endif
-
 static struct object_request *object_queue_head;
 
 static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb,
@@ -714,204 +661,6 @@ static void fetch_alternates(const char *base)
        free(url);
 }
 
-#ifndef NO_EXPAT
-static void
-xml_start_tag(void *userData, const char *name, const char **atts)
-{
-       struct xml_ctx *ctx = (struct xml_ctx *)userData;
-       const char *c = strchr(name, ':');
-       int new_len;
-
-       if (c == NULL)
-               c = name;
-       else
-               c++;
-
-       new_len = strlen(ctx->name) + strlen(c) + 2;
-
-       if (new_len > ctx->len) {
-               ctx->name = xrealloc(ctx->name, new_len);
-               ctx->len = new_len;
-       }
-       strcat(ctx->name, ".");
-       strcat(ctx->name, c);
-
-       free(ctx->cdata);
-       ctx->cdata = NULL;
-
-       ctx->userFunc(ctx, 0);
-}
-
-static void
-xml_end_tag(void *userData, const char *name)
-{
-       struct xml_ctx *ctx = (struct xml_ctx *)userData;
-       const char *c = strchr(name, ':');
-       char *ep;
-
-       ctx->userFunc(ctx, 1);
-
-       if (c == NULL)
-               c = name;
-       else
-               c++;
-
-       ep = ctx->name + strlen(ctx->name) - strlen(c) - 1;
-       *ep = 0;
-}
-
-static void
-xml_cdata(void *userData, const XML_Char *s, int len)
-{
-       struct xml_ctx *ctx = (struct xml_ctx *)userData;
-       free(ctx->cdata);
-       ctx->cdata = xmalloc(len + 1);
-       strlcpy(ctx->cdata, s, len + 1);
-}
-
-static int remote_ls(struct alt_base *repo, const char *path, int flags,
-                    void (*userFunc)(struct remote_ls_ctx *ls),
-                    void *userData);
-
-static void handle_remote_ls_ctx(struct xml_ctx *ctx, int tag_closed)
-{
-       struct remote_ls_ctx *ls = (struct remote_ls_ctx *)ctx->userData;
-
-       if (tag_closed) {
-               if (!strcmp(ctx->name, DAV_PROPFIND_RESP) && ls->dentry_name) {
-                       if (ls->dentry_flags & IS_DIR) {
-                               if (ls->flags & PROCESS_DIRS) {
-                                       ls->userFunc(ls);
-                               }
-                               if (strcmp(ls->dentry_name, ls->path) &&
-                                   ls->flags & RECURSIVE) {
-                                       ls->rc = remote_ls(ls->repo,
-                                                          ls->dentry_name,
-                                                          ls->flags,
-                                                          ls->userFunc,
-                                                          ls->userData);
-                               }
-                       } else if (ls->flags & PROCESS_FILES) {
-                               ls->userFunc(ls);
-                       }
-               } else if (!strcmp(ctx->name, DAV_PROPFIND_NAME) && ctx->cdata) {
-                       ls->dentry_name = xmalloc(strlen(ctx->cdata) -
-                                                 ls->repo->path_len + 1);
-                       strcpy(ls->dentry_name, ctx->cdata + ls->repo->path_len);
-               } else if (!strcmp(ctx->name, DAV_PROPFIND_COLLECTION)) {
-                       ls->dentry_flags |= IS_DIR;
-               }
-       } else if (!strcmp(ctx->name, DAV_PROPFIND_RESP)) {
-               free(ls->dentry_name);
-               ls->dentry_name = NULL;
-               ls->dentry_flags = 0;
-       }
-}
-
-static int remote_ls(struct alt_base *repo, const char *path, int flags,
-                    void (*userFunc)(struct remote_ls_ctx *ls),
-                    void *userData)
-{
-       char *url = xmalloc(strlen(repo->base) + strlen(path) + 1);
-       struct active_request_slot *slot;
-       struct slot_results results;
-       struct buffer in_buffer;
-       struct buffer out_buffer;
-       char *in_data;
-       char *out_data;
-       XML_Parser parser = XML_ParserCreate(NULL);
-       enum XML_Status result;
-       struct curl_slist *dav_headers = NULL;
-       struct xml_ctx ctx;
-       struct remote_ls_ctx ls;
-
-       ls.flags = flags;
-       ls.repo = repo;
-       ls.path = xstrdup(path);
-       ls.dentry_name = NULL;
-       ls.dentry_flags = 0;
-       ls.userData = userData;
-       ls.userFunc = userFunc;
-       ls.rc = 0;
-
-       sprintf(url, "%s%s", repo->base, path);
-
-       out_buffer.size = strlen(PROPFIND_ALL_REQUEST);
-       out_data = xmalloc(out_buffer.size + 1);
-       snprintf(out_data, out_buffer.size + 1, PROPFIND_ALL_REQUEST);
-       out_buffer.posn = 0;
-       out_buffer.buffer = out_data;
-
-       in_buffer.size = 4096;
-       in_data = xmalloc(in_buffer.size);
-       in_buffer.posn = 0;
-       in_buffer.buffer = in_data;
-
-       dav_headers = curl_slist_append(dav_headers, "Depth: 1");
-       dav_headers = curl_slist_append(dav_headers, "Content-Type: text/xml");
-
-       slot = get_active_slot();
-       slot->results = &results;
-       curl_easy_setopt(slot->curl, CURLOPT_INFILE, &out_buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_INFILESIZE, out_buffer.size);
-       curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, fread_buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_FILE, &in_buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
-       curl_easy_setopt(slot->curl, CURLOPT_UPLOAD, 1);
-       curl_easy_setopt(slot->curl, CURLOPT_CUSTOMREQUEST, DAV_PROPFIND);
-       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, dav_headers);
-
-       if (start_active_slot(slot)) {
-               run_active_slot(slot);
-               if (results.curl_result == CURLE_OK) {
-                       ctx.name = xcalloc(10, 1);
-                       ctx.len = 0;
-                       ctx.cdata = NULL;
-                       ctx.userFunc = handle_remote_ls_ctx;
-                       ctx.userData = &ls;
-                       XML_SetUserData(parser, &ctx);
-                       XML_SetElementHandler(parser, xml_start_tag,
-                                             xml_end_tag);
-                       XML_SetCharacterDataHandler(parser, xml_cdata);
-                       result = XML_Parse(parser, in_buffer.buffer,
-                                          in_buffer.posn, 1);
-                       free(ctx.name);
-
-                       if (result != XML_STATUS_OK) {
-                               ls.rc = error("XML error: %s",
-                                             XML_ErrorString(
-                                                     XML_GetErrorCode(parser)));
-                       }
-               } else {
-                       ls.rc = -1;
-               }
-       } else {
-               ls.rc = error("Unable to start PROPFIND request");
-       }
-
-       free(ls.path);
-       free(url);
-       free(out_data);
-       free(in_buffer.buffer);
-       curl_slist_free_all(dav_headers);
-
-       return ls.rc;
-}
-
-static void process_ls_pack(struct remote_ls_ctx *ls)
-{
-       unsigned char sha1[20];
-
-       if (strlen(ls->dentry_name) == 63 &&
-           !strncmp(ls->dentry_name, "objects/pack/pack-", 18) &&
-           has_extension(ls->dentry_name, ".pack")) {
-               get_sha1_hex(ls->dentry_name + 18, sha1);
-               setup_index(ls->repo, sha1);
-       }
-}
-#endif
-
 static int fetch_indices(struct alt_base *repo)
 {
        unsigned char sha1[20];
@@ -934,12 +683,6 @@ static int fetch_indices(struct alt_base *repo)
        if (get_verbosely)
                fprintf(stderr, "Getting pack list for %s\n", repo->base);
 
-#ifndef NO_EXPAT
-       if (remote_ls(repo, "objects/pack/", PROCESS_FILES,
-                     process_ls_pack, NULL) == 0)
-               return 0;
-#endif
-
        url = xmalloc(strlen(repo->base) + 21);
        sprintf(url, "%s/objects/info/packs", repo->base);
 
index 362e4743740435dc0e5b08933c0e05c53b7b62af..16804ab286a42c0dc2733dccf816ab2173e465af 100644 (file)
@@ -1226,6 +1226,14 @@ split_msg( msg_data_t *all_msgs, msg_data_t *msg, int *ofs )
        if (msg->len < 5 || strncmp( data, "From ", 5 ))
                return 0;
 
+       p = strchr( data, '\n' );
+       if (p) {
+               p = &p[1];
+               msg->len -= p-data;
+               *ofs += p-data;
+               data = p;
+       }
+
        p = strstr( data, "\nFrom " );
        if (p)
                msg->len = &p[1] - data;
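
The split_msg() change above first skips the current message's own "From " separator line, then looks for the next "\nFrom " to find where the message ends. A standalone sketch of that splitting step (hypothetical demo, not the imap-send code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char mbox[] = "From a\nbody one\nFrom b\nbody two\n";
	char *data = mbox, *p;

	p = strchr(data, '\n');            /* skip our own "From " line */
	if (p)
		data = p + 1;
	p = strstr(data, "\nFrom ");       /* the next message starts here */
	if (p)
		p[1] = '\0';
	printf("first message body: %s", data);   /* "body one\n" */
	return 0;
}
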
index 80bc6cb45b1a91c31dbdf18b48399b7a7a6b2737..e33f60524f240f288df7dc6d398486d1d6e7e5ee 100644 (file)
@@ -13,63 +13,93 @@ static const char index_pack_usage[] =
 struct object_entry
 {
        unsigned long offset;
+       unsigned long size;
+       unsigned int hdr_size;
        enum object_type type;
        enum object_type real_type;
        unsigned char sha1[20];
 };
 
+union delta_base {
+       unsigned char sha1[20];
+       unsigned long offset;
+};
+
+/*
+ * Even if sizeof(union delta_base) == 24 on 64-bit archs, we really want
+ * to memcmp() only the first 20 bytes.
+ */
+#define UNION_BASE_SZ  20
+
 struct delta_entry
 {
        struct object_entry *obj;
-       unsigned char base_sha1[20];
+       union delta_base base;
 };
 
 static const char *pack_name;
-static unsigned char *pack_base;
-static unsigned long pack_size;
 static struct object_entry *objects;
 static struct delta_entry *deltas;
 static int nr_objects;
 static int nr_deltas;
 
-static void open_pack_file(void)
+/* We always read in 4kB chunks. */
+static unsigned char input_buffer[4096];
+static unsigned long input_offset, input_len, consumed_bytes;
+static SHA_CTX input_ctx;
+static int input_fd;
+
+/*
+ * Make sure at least "min" bytes are available in the buffer, and
+ * return the pointer to the buffer.
+ */
+static void * fill(int min)
 {
-       int fd;
-       struct stat st;
+       if (min <= input_len)
+               return input_buffer + input_offset;
+       if (min > sizeof(input_buffer))
+               die("cannot fill %d bytes", min);
+       if (input_offset) {
+               SHA1_Update(&input_ctx, input_buffer, input_offset);
+               memcpy(input_buffer, input_buffer + input_offset, input_len);
+               input_offset = 0;
+       }
+       do {
+               int ret = xread(input_fd, input_buffer + input_len,
+                               sizeof(input_buffer) - input_len);
+               if (ret <= 0) {
+                       if (!ret)
+                               die("early EOF");
+                       die("read error on input: %s", strerror(errno));
+               }
+               input_len += ret;
+       } while (input_len < min);
+       return input_buffer;
+}
+
+static void use(int bytes)
+{
+       if (bytes > input_len)
+               die("used more bytes than were available");
+       input_len -= bytes;
+       input_offset += bytes;
+       consumed_bytes += bytes;
+}
 
-       fd = open(pack_name, O_RDONLY);
-       if (fd < 0)
+static void open_pack_file(void)
+{
+       input_fd = open(pack_name, O_RDONLY);
+       if (input_fd < 0)
                die("cannot open packfile '%s': %s", pack_name,
                    strerror(errno));
-       if (fstat(fd, &st)) {
-               int err = errno;
-               close(fd);
-               die("cannot fstat packfile '%s': %s", pack_name,
-                   strerror(err));
-       }
-       pack_size = st.st_size;
-       pack_base = mmap(NULL, pack_size, PROT_READ, MAP_PRIVATE, fd, 0);
-       if (pack_base == MAP_FAILED) {
-               int err = errno;
-               close(fd);
-               die("cannot mmap packfile '%s': %s", pack_name,
-                   strerror(err));
-       }
-       close(fd);
+       SHA1_Init(&input_ctx);
 }
 
 static void parse_pack_header(void)
 {
-       const struct pack_header *hdr;
-       unsigned char sha1[20];
-       SHA_CTX ctx;
-
-       /* Ensure there are enough bytes for the header and final SHA1 */
-       if (pack_size < sizeof(struct pack_header) + 20)
-               die("packfile '%s' is too small", pack_name);
+       struct pack_header *hdr = fill(sizeof(struct pack_header));
 
        /* Header consistency check */
-       hdr = (void *)pack_base;
        if (hdr->hdr_signature != htonl(PACK_SIGNATURE))
                die("packfile '%s' signature mismatch", pack_name);
        if (!pack_version_ok(hdr->hdr_version))
@@ -77,13 +107,8 @@ static void parse_pack_header(void)
                    pack_name, ntohl(hdr->hdr_version));
 
        nr_objects = ntohl(hdr->hdr_entries);
-
-       /* Check packfile integrity */
-       SHA1_Init(&ctx);
-       SHA1_Update(&ctx, pack_base, pack_size - 20);
-       SHA1_Final(sha1, &ctx);
-       if (hashcmp(sha1, pack_base + pack_size - 20))
-               die("packfile '%s' SHA1 mismatch", pack_name);
+       use(sizeof(struct pack_header));
+       /*fprintf(stderr, "Indexing %d objects\n", nr_objects);*/
 }
 
 static void bad_object(unsigned long offset, const char *format,
@@ -101,86 +126,121 @@ static void bad_object(unsigned long offset, const char *format, ...)
            pack_name, offset, buf);
 }
 
-static void *unpack_entry_data(unsigned long offset,
-                              unsigned long *current_pos, unsigned long size)
+static void *unpack_entry_data(unsigned long offset, unsigned long size)
 {
-       unsigned long pack_limit = pack_size - 20;
-       unsigned long pos = *current_pos;
        z_stream stream;
        void *buf = xmalloc(size);
 
        memset(&stream, 0, sizeof(stream));
        stream.next_out = buf;
        stream.avail_out = size;
-       stream.next_in = pack_base + pos;
-       stream.avail_in = pack_limit - pos;
+       stream.next_in = fill(1);
+       stream.avail_in = input_len;
        inflateInit(&stream);
 
        for (;;) {
                int ret = inflate(&stream, 0);
-               if (ret == Z_STREAM_END)
+               use(input_len - stream.avail_in);
+               if (stream.total_out == size && ret == Z_STREAM_END)
                        break;
                if (ret != Z_OK)
                        bad_object(offset, "inflate returned %d", ret);
+               stream.next_in = fill(1);
+               stream.avail_in = input_len;
        }
        inflateEnd(&stream);
-       if (stream.total_out != size)
-               bad_object(offset, "size mismatch (expected %lu, got %lu)",
-                          size, stream.total_out);
-       *current_pos = pack_limit - stream.avail_in;
        return buf;
 }
 
-static void *unpack_raw_entry(unsigned long offset,
-                             enum object_type *obj_type,
-                             unsigned long *obj_size,
-                             unsigned char *delta_base,
-                             unsigned long *next_obj_offset)
+static void *unpack_raw_entry(struct object_entry *obj, union delta_base *delta_base)
 {
-       unsigned long pack_limit = pack_size - 20;
-       unsigned long pos = offset;
-       unsigned char c;
-       unsigned long size;
+       unsigned char *p, c;
+       unsigned long size, base_offset;
        unsigned shift;
-       enum object_type type;
-       void *data;
 
-       c = pack_base[pos++];
-       type = (c >> 4) & 7;
+       obj->offset = consumed_bytes;
+
+       p = fill(1);
+       c = *p;
+       use(1);
+       obj->type = (c >> 4) & 7;
        size = (c & 15);
        shift = 4;
        while (c & 0x80) {
-               if (pos >= pack_limit)
-                       bad_object(offset, "object extends past end of pack");
-               c = pack_base[pos++];
+               p = fill(1);
+               c = *p;
+               use(1);
                size += (c & 0x7fUL) << shift;
                shift += 7;
        }
+       obj->size = size;
 
-       switch (type) {
-       case OBJ_DELTA:
-               if (pos + 20 >= pack_limit)
-                       bad_object(offset, "object extends past end of pack");
-               hashcpy(delta_base, pack_base + pos);
-               pos += 20;
-               /* fallthru */
+       switch (obj->type) {
+       case OBJ_REF_DELTA:
+               hashcpy(delta_base->sha1, fill(20));
+               use(20);
+               break;
+       case OBJ_OFS_DELTA:
+               memset(delta_base, 0, sizeof(*delta_base));
+               p = fill(1);
+               c = *p;
+               use(1);
+               base_offset = c & 127;
+               while (c & 128) {
+                       base_offset += 1;
+                       if (!base_offset || base_offset & ~(~0UL >> 7))
+                               bad_object(obj->offset, "offset value overflow for delta base object");
+                       p = fill(1);
+                       c = *p;
+                       use(1);
+                       base_offset = (base_offset << 7) + (c & 127);
+               }
+               delta_base->offset = obj->offset - base_offset;
+               if (delta_base->offset >= obj->offset)
+                       bad_object(obj->offset, "delta base offset is out of bound");
+               break;
        case OBJ_COMMIT:
        case OBJ_TREE:
        case OBJ_BLOB:
        case OBJ_TAG:
-               data = unpack_entry_data(offset, &pos, size);
                break;
        default:
-               bad_object(offset, "bad object type %d", type);
+               bad_object(obj->offset, "bad object type %d", obj->type);
        }
+       obj->hdr_size = consumed_bytes - obj->offset;
 
-       *obj_type = type;
-       *obj_size = size;
-       *next_obj_offset = pos;
+       return unpack_entry_data(obj->offset, obj->size);
+}
+
+static void * get_data_from_pack(struct object_entry *obj)
+{
+       unsigned long from = obj[0].offset + obj[0].hdr_size;
+       unsigned long len = obj[1].offset - from;
+       unsigned pg_offset = from % getpagesize();
+       unsigned char *map, *data;
+       z_stream stream;
+       int st;
+
+       map = mmap(NULL, len + pg_offset, PROT_READ, MAP_PRIVATE,
+                  input_fd, from - pg_offset);
+       if (map == MAP_FAILED)
+               die("cannot mmap packfile '%s': %s", pack_name, strerror(errno));
+       data = xmalloc(obj->size);
+       memset(&stream, 0, sizeof(stream));
+       stream.next_out = data;
+       stream.avail_out = obj->size;
+       stream.next_in = map + pg_offset;
+       stream.avail_in = len;
+       inflateInit(&stream);
+       while ((st = inflate(&stream, Z_FINISH)) == Z_OK);
+       inflateEnd(&stream);
+       if (st != Z_STREAM_END || stream.total_out != obj->size)
+               die("serious inflate inconsistency");
+       munmap(map, len + pg_offset);
        return data;
 }
 
-static int find_delta(const unsigned char *base_sha1)
+static int find_delta(const union delta_base *base)
 {
        int first = 0, last = nr_deltas;
 
@@ -189,7 +249,7 @@ static int find_delta(const unsigned char *base_sha1)
                 struct delta_entry *delta = &deltas[next];
                 int cmp;
 
-                cmp = hashcmp(base_sha1, delta->base_sha1);
+                cmp = memcmp(base, &delta->base, UNION_BASE_SZ);
                 if (!cmp)
                         return next;
                 if (cmp < 0) {
@@ -201,18 +261,18 @@ static int find_delta(const unsigned char *base_sha1)
         return -first-1;
 }
 
-static int find_deltas_based_on_sha1(const unsigned char *base_sha1,
-                                    int *first_index, int *last_index)
+static int find_delta_childs(const union delta_base *base,
+                            int *first_index, int *last_index)
 {
-       int first = find_delta(base_sha1);
+       int first = find_delta(base);
        int last = first;
        int end = nr_deltas - 1;
 
        if (first < 0)
                return -1;
-       while (first > 0 && !hashcmp(deltas[first - 1].base_sha1, base_sha1))
+       while (first > 0 && !memcmp(&deltas[first - 1].base, base, UNION_BASE_SZ))
                --first;
-       while (last < end && !hashcmp(deltas[last + 1].base_sha1, base_sha1))
+       while (last < end && !memcmp(&deltas[last + 1].base, base, UNION_BASE_SZ))
                ++last;
        *first_index = first;
        *last_index = last;
@@ -252,25 +312,34 @@ static void resolve_delta(struct delta_entry *delta, void *base_data,
        unsigned long delta_size;
        void *result;
        unsigned long result_size;
-       enum object_type delta_type;
-       unsigned char base_sha1[20];
-       unsigned long next_obj_offset;
+       union delta_base delta_base;
        int j, first, last;
 
        obj->real_type = type;
-       delta_data = unpack_raw_entry(obj->offset, &delta_type,
-                                     &delta_size, base_sha1,
-                                     &next_obj_offset);
+       delta_data = get_data_from_pack(obj);
+       delta_size = obj->size;
        result = patch_delta(base_data, base_size, delta_data, delta_size,
                             &result_size);
        free(delta_data);
        if (!result)
                bad_object(obj->offset, "failed to apply delta");
        sha1_object(result, result_size, type, obj->sha1);
-       if (!find_deltas_based_on_sha1(obj->sha1, &first, &last)) {
+
+       hashcpy(delta_base.sha1, obj->sha1);
+       if (!find_delta_childs(&delta_base, &first, &last)) {
+               for (j = first; j <= last; j++)
+                       if (deltas[j].obj->type == OBJ_REF_DELTA)
+                               resolve_delta(&deltas[j], result, result_size, type);
+       }
+
+       memset(&delta_base, 0, sizeof(delta_base));
+       delta_base.offset = obj->offset;
+       if (!find_delta_childs(&delta_base, &first, &last)) {
                for (j = first; j <= last; j++)
-                       resolve_delta(&deltas[j], result, result_size, type);
+                       if (deltas[j].obj->type == OBJ_OFS_DELTA)
+                               resolve_delta(&deltas[j], result, result_size, type);
        }
+
        free(result);
 }
 
@@ -278,16 +347,16 @@ static int compare_delta_entry(const void *a, const void *b)
 {
        const struct delta_entry *delta_a = a;
        const struct delta_entry *delta_b = b;
-       return hashcmp(delta_a->base_sha1, delta_b->base_sha1);
+       return memcmp(&delta_a->base, &delta_b->base, UNION_BASE_SZ);
 }
 
-static void parse_pack_objects(void)
+/* Parse all objects and return the pack content SHA1 hash */
+static void parse_pack_objects(unsigned char *sha1)
 {
        int i;
-       unsigned long offset = sizeof(struct pack_header);
-       unsigned char base_sha1[20];
+       struct delta_entry *delta = deltas;
        void *data;
-       unsigned long data_size;
+       struct stat st;
 
        /*
         * First pass:
@@ -297,22 +366,32 @@ static void parse_pack_objects(void)
         */
        for (i = 0; i < nr_objects; i++) {
                struct object_entry *obj = &objects[i];
-               obj->offset = offset;
-               data = unpack_raw_entry(offset, &obj->type, &data_size,
-                                       base_sha1, &offset);
+               data = unpack_raw_entry(obj, &delta->base);
                obj->real_type = obj->type;
-               if (obj->type == OBJ_DELTA) {
-                       struct delta_entry *delta = &deltas[nr_deltas++];
+               if (obj->type == OBJ_REF_DELTA || obj->type == OBJ_OFS_DELTA) {
+                       nr_deltas++;
                        delta->obj = obj;
-                       hashcpy(delta->base_sha1, base_sha1);
+                       delta++;
                } else
-                       sha1_object(data, data_size, obj->type, obj->sha1);
+                       sha1_object(data, obj->size, obj->type, obj->sha1);
                free(data);
        }
-       if (offset != pack_size - 20)
+       objects[i].offset = consumed_bytes;
+
+       /* Check pack integrity */
+       SHA1_Update(&input_ctx, input_buffer, input_offset);
+       SHA1_Final(sha1, &input_ctx);
+       if (hashcmp(fill(20), sha1))
+               die("packfile '%s' SHA1 mismatch", pack_name);
+       use(20);
+
+       /* If input_fd is a file, we should have reached its end now. */
+       if (fstat(input_fd, &st))
+               die("cannot fstat packfile '%s': %s", pack_name, strerror(errno));
+       if (S_ISREG(st.st_mode) && st.st_size != consumed_bytes)
                die("packfile '%s' has junk at the end", pack_name);
 
-       /* Sort deltas by base SHA1 for fast searching */
+       /* Sort deltas by base SHA1/offset for fast searching */
        qsort(deltas, nr_deltas, sizeof(struct delta_entry),
              compare_delta_entry);
 
@@ -326,22 +405,36 @@ static void parse_pack_objects(void)
         */
        for (i = 0; i < nr_objects; i++) {
                struct object_entry *obj = &objects[i];
-               int j, first, last;
+               union delta_base base;
+               int j, ref, ref_first, ref_last, ofs, ofs_first, ofs_last;
 
-               if (obj->type == OBJ_DELTA)
+               if (obj->type == OBJ_REF_DELTA || obj->type == OBJ_OFS_DELTA)
                        continue;
-               if (find_deltas_based_on_sha1(obj->sha1, &first, &last))
+               hashcpy(base.sha1, obj->sha1);
+               ref = !find_delta_childs(&base, &ref_first, &ref_last);
+               memset(&base, 0, sizeof(base));
+               base.offset = obj->offset;
+               ofs = !find_delta_childs(&base, &ofs_first, &ofs_last);
+               if (!ref && !ofs)
                        continue;
-               data = unpack_raw_entry(obj->offset, &obj->type, &data_size,
-                                       base_sha1, &offset);
-               for (j = first; j <= last; j++)
-                       resolve_delta(&deltas[j], data, data_size, obj->type);
+               data = get_data_from_pack(obj);
+               if (ref)
+                       for (j = ref_first; j <= ref_last; j++)
+                               if (deltas[j].obj->type == OBJ_REF_DELTA)
+                                       resolve_delta(&deltas[j], data,
+                                                     obj->size, obj->type);
+               if (ofs)
+                       for (j = ofs_first; j <= ofs_last; j++)
+                               if (deltas[j].obj->type == OBJ_OFS_DELTA)
+                                       resolve_delta(&deltas[j], data,
+                                                     obj->size, obj->type);
                free(data);
        }
 
        /* Check for unresolved deltas */
        for (i = 0; i < nr_deltas; i++) {
-               if (deltas[i].obj->real_type == OBJ_DELTA)
+               if (deltas[i].obj->real_type == OBJ_REF_DELTA ||
+                   deltas[i].obj->real_type == OBJ_OFS_DELTA)
                        die("packfile '%s' has unresolved deltas",  pack_name);
        }
 }
@@ -353,6 +446,10 @@ static int sha1_compare(const void *_a, const void *_b)
        return hashcmp(a->sha1, b->sha1);
 }
 
+/*
+ * On entry *sha1 contains the pack content SHA1 hash, on exit it is
+ * the SHA1 hash of sorted object names.
+ */
 static void write_index_file(const char *index_name, unsigned char *sha1)
 {
        struct sha1file *f;
@@ -412,7 +509,7 @@ static void write_index_file(const char *index_name, unsigned char *sha1)
                sha1write(f, obj->sha1, 20);
                SHA1_Update(&ctx, obj->sha1, 20);
        }
-       sha1write(f, pack_base + pack_size - 20, 20);
+       sha1write(f, sha1, 20);
        sha1close(f, NULL, 1);
        free(sorted_by_sha);
        SHA1_Final(sha1, &ctx);
@@ -458,9 +555,9 @@ int main(int argc, char **argv)
 
        open_pack_file();
        parse_pack_header();
-       objects = xcalloc(nr_objects, sizeof(struct object_entry));
+       objects = xcalloc(nr_objects + 1, sizeof(struct object_entry));
        deltas = xcalloc(nr_objects, sizeof(struct delta_entry));
-       parse_pack_objects();
+       parse_pack_objects(sha1);
        free(deltas);
        write_index_file(index_name, sha1);
        free(objects);
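
The OBJ_OFS_DELTA handling in unpack_raw_entry() above (and in sha1_file.c's get_delta_base() further down) decodes the delta base offset as a big-endian base-128 quantity where each continuation step also adds one, so no short encoding overlaps a longer one. A standalone round-trip sketch of that encoding; the encoder half mirrors what a pack writer is expected to emit and is a hypothetical demo, not code from this patch:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long n = 1000000, value;
	unsigned char buf[10], c;
	int pos = sizeof(buf) - 1, i;

	/* encode: low 7 bits go last; every higher 7-bit group is written
	 * with the continuation bit set and decremented by one first */
	buf[pos] = n & 127;
	while ((n >>= 7))
		buf[--pos] = 128 | (--n & 127);

	/* decode: the same loop as unpack_raw_entry()/get_delta_base() */
	i = pos;
	c = buf[i++];
	value = c & 127;
	while (c & 128) {
		value += 1;
		c = buf[i++];
		value = (value << 7) + (c & 127);
	}

	assert(value == 1000000);
	printf("1000000 encoded in %d bytes, decoded back to %lu\n",
	       (int)(sizeof(buf) - pos), value);
	return 0;
}
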
index 611cd95cf597b0ccf06cf9b4318eb34a86a4c05a..2ba43ae84b20f993ba175f728297cf5360066872 100644 (file)
@@ -1235,13 +1235,10 @@ int merge(struct commit *h1,
        if (merged_common_ancestors == NULL) {
                /* if there is no common ancestor, make an empty tree */
                struct tree *tree = xcalloc(1, sizeof(struct tree));
-               unsigned char hdr[40];
-               int hdrlen;
 
                tree->object.parsed = 1;
                tree->object.type = OBJ_TREE;
-               write_sha1_file_prepare(NULL, 0, tree_type, tree->object.sha1,
-                                       hdr, &hdrlen);
+               hash_sha1_file(NULL, 0, tree_type, tree->object.sha1);
                merged_common_ancestors = make_virtual_commit(tree, "ancestor");
        }
 
diff --git a/pack.h b/pack.h
index 05557da1528e3185cf4d7d89a6577beb8f9e95ad..4814800f2806a245a675ea9832f894dc95b27b89 100644 (file)
--- a/pack.h
+++ b/pack.h
@@ -7,7 +7,7 @@
  * Packed object header
  */
 #define PACK_SIGNATURE 0x5041434b      /* "PACK" */
-#define PACK_VERSION 3
+#define PACK_VERSION 2
 #define pack_version_ok(v) ((v) == htonl(2) || (v) == htonl(3))
 struct pack_header {
        unsigned int hdr_signature;
@@ -16,7 +16,4 @@ struct pack_header {
 };
 
 extern int verify_pack(struct packed_git *, int);
-extern int check_reuse_pack_delta(struct packed_git *, unsigned long,
-                                 unsigned char *, unsigned long *,
-                                 enum object_type *);
 #endif
diff --git a/pager.c b/pager.c
index dcb398da8e703de2999badb976dee7322eff470b..4587fbbdb51e50c1e3e6e4c3476cdccd4ac59cee 100644 (file)
--- a/pager.c
+++ b/pager.c
@@ -50,7 +50,7 @@ void setup_pager(void)
        close(fd[0]);
        close(fd[1]);
 
-       setenv("LESS", "-RS", 0);
+       setenv("LESS", "FRSX", 0);
        run_pager(pager);
        die("unable to execute pager '%s'", pager);
        exit(255);
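
For context on the pager.c change: with LESS=FRSX, less quits immediately when the output fits on one screen (F), passes ANSI color sequences through raw (R), chops long lines instead of wrapping them (S), and skips the termcap init/deinit sequences (X) so the output is not wiped from the terminal when less exits; without X, the F flag could make short output vanish before it was ever seen, which appears to be what this default change addresses.
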
index 93f25130a05ccca3e3e6c65b750f256246ae16be..f1e0caaae3d2a96de6cf9bad12e8641c19d788dd 100644 (file)
@@ -732,6 +732,7 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, const ch
        int i, flags, seen_dashdash, show_merge;
        const char **unrecognized = argv + 1;
        int left = 1;
+       int all_match = 0;
 
        /* First, search for "--" */
        seen_dashdash = 0;
@@ -967,6 +968,10 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, const ch
                                add_message_grep(revs, arg+7);
                                continue;
                        }
+                       if (!strcmp(arg, "--all-match")) {
+                               all_match = 1;
+                               continue;
+                       }
 
                        opts = diff_opt_parse(&revs->diffopt, argv+i, argc-i);
                        if (opts > 0) {
@@ -1028,8 +1033,10 @@ int setup_revisions(int argc, const char **argv, struct rev_info *revs, const ch
        if (diff_setup_done(&revs->diffopt) < 0)
                die("diff_setup_done failed");
 
-       if (revs->grep_filter)
+       if (revs->grep_filter) {
+               revs->grep_filter->all_match = all_match;
                compile_grep_patterns(revs->grep_filter);
+       }
 
        return left;
 }
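
With the new --all-match option wired through setup_revisions(), multiple --grep patterns are still ORed line by line as before, but the commit is reported only if every pattern matched somewhere in its message; an invocation along the lines of git log --all-match --grep=foo --grep=bar (illustrative example) would list only commits whose messages mention both.
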
index 27b1ebb720b1530d673f3dfb1c27862b4cf90dfd..e89d24c01595aa8ea6c6306928639b40f3313a50 100644 (file)
@@ -671,14 +671,8 @@ static void reprepare_packed_git(void)
 
 int check_sha1_signature(const unsigned char *sha1, void *map, unsigned long size, const char *type)
 {
-       char header[100];
        unsigned char real_sha1[20];
-       SHA_CTX c;
-
-       SHA1_Init(&c);
-       SHA1_Update(&c, header, 1+sprintf(header, "%s %lu", type, size));
-       SHA1_Update(&c, map, size);
-       SHA1_Final(real_sha1, &c);
+       hash_sha1_file(map, size, type, real_sha1);
        return hashcmp(sha1, real_sha1) ? -1 : 0;
 }
 
@@ -883,26 +877,61 @@ void * unpack_sha1_file(void *map, unsigned long mapsize, char *type, unsigned l
        return unpack_sha1_rest(&stream, hdr, *size);
 }
 
+static unsigned long get_delta_base(struct packed_git *p,
+                                   unsigned long offset,
+                                   enum object_type kind,
+                                   unsigned long delta_obj_offset,
+                                   unsigned long *base_obj_offset)
+{
+       unsigned char *base_info = (unsigned char *) p->pack_base + offset;
+       unsigned long base_offset;
+
+       /* there must be at least 20 bytes left regardless of delta type */
+       if (p->pack_size <= offset + 20)
+               die("truncated pack file");
+
+       if (kind == OBJ_OFS_DELTA) {
+               unsigned used = 0;
+               unsigned char c = base_info[used++];
+               base_offset = c & 127;
+               while (c & 128) {
+                       base_offset += 1;
+                       if (!base_offset || base_offset & ~(~0UL >> 7))
+                               die("offset value overflow for delta base object");
+                       c = base_info[used++];
+                       base_offset = (base_offset << 7) + (c & 127);
+               }
+               base_offset = delta_obj_offset - base_offset;
+               if (base_offset >= delta_obj_offset)
+                       die("delta base offset out of bound");
+               offset += used;
+       } else if (kind == OBJ_REF_DELTA) {
+               /* The base entry _must_ be in the same pack */
+               base_offset = find_pack_entry_one(base_info, p);
+               if (!base_offset)
+                       die("failed to find delta-pack base object %s",
+                               sha1_to_hex(base_info));
+               offset += 20;
+       } else
+               die("I am totally screwed");
+       *base_obj_offset = base_offset;
+       return offset;
+}
+
 /* forward declaration for a mutually recursive function */
 static int packed_object_info(struct packed_git *p, unsigned long offset,
                              char *type, unsigned long *sizep);
 
 static int packed_delta_info(struct packed_git *p,
                             unsigned long offset,
+                            enum object_type kind,
+                            unsigned long obj_offset,
                             char *type,
                             unsigned long *sizep)
 {
        unsigned long base_offset;
-       unsigned char *base_sha1 = (unsigned char *) p->pack_base + offset;
 
-       if (p->pack_size < offset + 20)
-               die("truncated pack file");
-       /* The base entry _must_ be in the same pack */
-       base_offset = find_pack_entry_one(base_sha1, p);
-       if (!base_offset)
-               die("failed to find delta-pack base object %s",
-                   sha1_to_hex(base_sha1));
-       offset += 20;
+       offset = get_delta_base(p, offset, kind, obj_offset, &base_offset);
 
        /* We choose to only get the type of the base object and
         * ignore potentially corrupt pack file that expects the delta
@@ -914,7 +943,7 @@ static int packed_delta_info(struct packed_git *p,
 
        if (sizep) {
                const unsigned char *data;
-               unsigned char delta_head[64];
+               unsigned char delta_head[20];
                unsigned long result_size;
                z_stream stream;
                int st;
@@ -965,25 +994,6 @@ static unsigned long unpack_object_header(struct packed_git *p, unsigned long of
        return offset + used;
 }
 
-int check_reuse_pack_delta(struct packed_git *p, unsigned long offset,
-                          unsigned char *base, unsigned long *sizep,
-                          enum object_type *kindp)
-{
-       unsigned long ptr;
-       int status = -1;
-
-       use_packed_git(p);
-       ptr = offset;
-       ptr = unpack_object_header(p, ptr, kindp, sizep);
-       if (*kindp != OBJ_DELTA)
-               goto done;
-       hashcpy(base, (unsigned char *) p->pack_base + ptr);
-       status = 0;
- done:
-       unuse_packed_git(p);
-       return status;
-}
-
 void packed_object_info_detail(struct packed_git *p,
                               unsigned long offset,
                               char *type,
@@ -992,11 +1002,12 @@ void packed_object_info_detail(struct packed_git *p,
                               unsigned int *delta_chain_length,
                               unsigned char *base_sha1)
 {
-       unsigned long val;
+       unsigned long obj_offset, val;
        unsigned char *next_sha1;
        enum object_type kind;
 
        *delta_chain_length = 0;
+       obj_offset = offset;
        offset = unpack_object_header(p, offset, &kind, size);
 
        for (;;) {
@@ -1011,7 +1022,13 @@ void packed_object_info_detail(struct packed_git *p,
                        strcpy(type, type_names[kind]);
                        *store_size = 0; /* notyet */
                        return;
-               case OBJ_DELTA:
+               case OBJ_OFS_DELTA:
+                       get_delta_base(p, offset, kind, obj_offset, &offset);
+                       if (*delta_chain_length == 0) {
+                               /* TODO: find base_sha1 as pointed by offset */
+                       }
+                       break;
+               case OBJ_REF_DELTA:
                        if (p->pack_size <= offset + 20)
                                die("pack file %s records an incomplete delta base",
                                    p->pack_name);
@@ -1021,6 +1038,7 @@ void packed_object_info_detail(struct packed_git *p,
                        offset = find_pack_entry_one(next_sha1, p);
                        break;
                }
+               obj_offset = offset;
                offset = unpack_object_header(p, offset, &kind, &val);
                (*delta_chain_length)++;
        }
@@ -1029,15 +1047,15 @@ void packed_object_info_detail(struct packed_git *p,
 static int packed_object_info(struct packed_git *p, unsigned long offset,
                              char *type, unsigned long *sizep)
 {
-       unsigned long size;
+       unsigned long size, obj_offset = offset;
        enum object_type kind;
 
        offset = unpack_object_header(p, offset, &kind, &size);
 
-       if (kind == OBJ_DELTA)
-               return packed_delta_info(p, offset, type, sizep);
-
        switch (kind) {
+       case OBJ_OFS_DELTA:
+       case OBJ_REF_DELTA:
+               return packed_delta_info(p, offset, kind, obj_offset, type, sizep);
        case OBJ_COMMIT:
        case OBJ_TREE:
        case OBJ_BLOB:
@@ -1083,23 +1101,15 @@ static void *unpack_compressed_entry(struct packed_git *p,
 static void *unpack_delta_entry(struct packed_git *p,
                                unsigned long offset,
                                unsigned long delta_size,
+                               enum object_type kind,
+                               unsigned long obj_offset,
                                char *type,
                                unsigned long *sizep)
 {
        void *delta_data, *result, *base;
        unsigned long result_size, base_size, base_offset;
-       unsigned char *base_sha1;
-
-       if (p->pack_size < offset + 20)
-               die("truncated pack file");
-       /* The base entry _must_ be in the same pack */
-       base_sha1 = (unsigned char*)p->pack_base + offset;
-       base_offset = find_pack_entry_one(base_sha1, p);
-       if (!base_offset)
-               die("failed to find delta-pack base object %s",
-                   sha1_to_hex(base_sha1));
-       offset += 20;
 
+       offset = get_delta_base(p, offset, kind, obj_offset, &base_offset);
        base = unpack_entry_gently(p, base_offset, type, &base_size);
        if (!base)
                die("failed to read delta base object at %lu from %s",
@@ -1136,13 +1146,14 @@ static void *unpack_entry(struct pack_entry *entry,
 void *unpack_entry_gently(struct packed_git *p, unsigned long offset,
                          char *type, unsigned long *sizep)
 {
-       unsigned long size;
+       unsigned long size, obj_offset = offset;
        enum object_type kind;
 
        offset = unpack_object_header(p, offset, &kind, &size);
        switch (kind) {
-       case OBJ_DELTA:
-               return unpack_delta_entry(p, offset, size, type, sizep);
+       case OBJ_OFS_DELTA:
+       case OBJ_REF_DELTA:
+               return unpack_delta_entry(p, offset, size, kind, obj_offset, type, sizep);
        case OBJ_COMMIT:
        case OBJ_TREE:
        case OBJ_BLOB:
@@ -1347,12 +1358,9 @@ void *read_object_with_reference(const unsigned char *sha1,
        }
 }
 
-char *write_sha1_file_prepare(void *buf,
-                             unsigned long len,
-                             const char *type,
-                             unsigned char *sha1,
-                             unsigned char *hdr,
-                             int *hdrlen)
+static void write_sha1_file_prepare(void *buf, unsigned long len,
+                                    const char *type, unsigned char *sha1,
+                                    unsigned char *hdr, int *hdrlen)
 {
        SHA_CTX c;
 
@@ -1364,8 +1372,6 @@ char *write_sha1_file_prepare(void *buf,
        SHA1_Update(&c, hdr, *hdrlen);
        SHA1_Update(&c, buf, len);
        SHA1_Final(sha1, &c);
-
-       return sha1_file_name(sha1);
 }
 
 /*
@@ -1501,6 +1507,15 @@ static void setup_object_header(z_stream *stream, const char *type, unsigned lon
        stream->avail_out -= hdr;
 }
 
+int hash_sha1_file(void *buf, unsigned long len, const char *type,
+                   unsigned char *sha1)
+{
+       unsigned char hdr[50];
+       int hdrlen;
+       write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+       return 0;
+}
+
 int write_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *returnsha1)
 {
        int size;
@@ -1515,7 +1530,8 @@ int write_sha1_file(void *buf, unsigned long len, const char *type, unsigned cha
        /* Normally if we have it in the pack then we do not bother writing
         * it out into .git/objects/??/?{38} file.
         */
-       filename = write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+       write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+       filename = sha1_file_name(sha1);
        if (returnsha1)
                hashcpy(returnsha1, sha1);
        if (has_sha1_file(sha1))
@@ -1784,8 +1800,6 @@ int index_pipe(unsigned char *sha1, int fd, const char *type, int write_object)
        unsigned long size = 4096;
        char *buf = xmalloc(size);
        int ret;
-       unsigned char hdr[50];
-       int hdrlen;
 
        if (read_pipe(fd, &buf, &size)) {
                free(buf);
@@ -1796,10 +1810,8 @@ int index_pipe(unsigned char *sha1, int fd, const char *type, int write_object)
                type = blob_type;
        if (write_object)
                ret = write_sha1_file(buf, size, type, sha1);
-       else {
-               write_sha1_file_prepare(buf, size, type, sha1, hdr, &hdrlen);
-               ret = 0;
-       }
+       else
+               ret = hash_sha1_file(buf, size, type, sha1);
        free(buf);
        return ret;
 }
@@ -1809,8 +1821,6 @@ int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object, con
        unsigned long size = st->st_size;
        void *buf;
        int ret;
-       unsigned char hdr[50];
-       int hdrlen;
 
        buf = "";
        if (size)
@@ -1823,10 +1833,8 @@ int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object, con
                type = blob_type;
        if (write_object)
                ret = write_sha1_file(buf, size, type, sha1);
-       else {
-               write_sha1_file_prepare(buf, size, type, sha1, hdr, &hdrlen);
-               ret = 0;
-       }
+       else
+               ret = hash_sha1_file(buf, size, type, sha1);
        if (size)
                munmap(buf, size);
        return ret;
@@ -1855,12 +1863,9 @@ int index_path(unsigned char *sha1, const char *path, struct stat *st, int write
                        return error("readlink(\"%s\"): %s", path,
                                     errstr);
                }
-               if (!write_object) {
-                       unsigned char hdr[50];
-                       int hdrlen;
-                       write_sha1_file_prepare(target, st->st_size, blob_type,
-                                               sha1, hdr, &hdrlen);
-               } else if (write_sha1_file(target, st->st_size, blob_type, sha1))
+               if (!write_object)
+                       hash_sha1_file(target, st->st_size, blob_type, sha1);
+               else if (write_sha1_file(target, st->st_size, blob_type, sha1))
                        return error("%s: failed to insert into database",
                                     path);
                free(target);
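
The new hash_sha1_file() above computes an object name without writing anything: the id is the SHA-1 of a "<type> <decimal length>\0" header followed by the contents, which is exactly what write_sha1_file_prepare() feeds into SHA1_Update(). A minimal stand-alone sketch of that naming scheme, using OpenSSL's SHA-1 directly; the hash_object() helper, the main() driver and the hard-coded "blob" example are illustrative only, not code from this patch:

    #include <stdio.h>
    #include <string.h>
    #include <openssl/sha.h>

    /* Hash "<type> <len>\0<contents>" the way loose objects are named. */
    static void hash_object(const void *buf, unsigned long len,
                            const char *type, unsigned char sha1[20])
    {
        char hdr[50];
        int hdrlen = sprintf(hdr, "%s %lu", type, len) + 1; /* keep the NUL */
        SHA_CTX c;

        SHA1_Init(&c);
        SHA1_Update(&c, hdr, hdrlen);
        SHA1_Update(&c, buf, len);
        SHA1_Final(sha1, &c);
    }

    int main(void)
    {
        static const char data[] = "hello\n";
        unsigned char sha1[20];
        int i;

        hash_object(data, strlen(data), "blob", sha1);
        for (i = 0; i < 20; i++)
            printf("%02x", sha1[i]);
        putchar('\n');  /* should match git-hash-object on a file containing "hello\n" */
        return 0;
    }
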
index 9b226e3579b68fe8b59c7105bd926e3a0a70b0ad..6ffee22081aee3fc7b27b9ccc93c4c721652ec4b 100644 (file)
@@ -157,7 +157,7 @@ static int get_short_sha1(const char *name, int len, unsigned char *sha1,
        char canonical[40];
        unsigned char res[20];
 
-       if (len < MINIMUM_ABBREV)
+       if (len < MINIMUM_ABBREV || len > 40)
                return -1;
        hashclr(res);
        memset(canonical, 'x', 40);
index c21d660b628a5b6d67b0f56b41fc76a921df340f..a30a2de5d13931c590169cf30bd7004e23f2df1b 100644 (file)
@@ -8,7 +8,7 @@ int main(int argc, char **argv)
        static unsigned int top_index[256];
 
        if (fread(top_index, sizeof(top_index), 1, stdin) != 1)
-               die("unable to read idex");
+               die("unable to read index");
        nr = 0;
        for (i = 0; i < 256; i++) {
                unsigned n = ntohl(top_index[i]);
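
For context on the fixed message above: show-index starts by reading the 256-entry fan-out table at the front of a pack .idx. Each slot is a cumulative object count in network byte order, so slot 255 holds the total number of objects. A small sketch that reads just that table, assuming the original (version 1) .idx layout with no header before the fan-out; the command-line path is whatever .idx file you point it at:

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(int argc, char **argv)
    {
        unsigned int top_index[256];   /* same 32-bit assumption as show-index.c */
        FILE *f;

        if (argc != 2 || !(f = fopen(argv[1], "rb")))
            return 1;
        if (fread(top_index, sizeof(top_index), 1, f) != 1)
            return 1;
        /* slot 255 is cumulative over all first-byte buckets */
        printf("%u objects\n", ntohl(top_index[255]));
        fclose(f);
        return 0;
    }
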
index 1b14ff88924668e6803229d4d2cef981823522fb..277fa3c10d19ee7997ee5b38c5f77a6cd04576f1 100644 (file)
  * stream, aka "verbose").  A message over band #3 is a signal that
  * the remote died unexpectedly.  A flush() concludes the stream.
  */
-int recv_sideband(const char *me, int in_stream, int out, int err, char *buf, int bufsz)
+int recv_sideband(const char *me, int in_stream, int out, int err)
 {
+       char buf[7 + LARGE_PACKET_MAX + 1];
+       strcpy(buf, "remote:");
        while (1) {
-               int len = packet_read_line(in_stream, buf, bufsz);
+               int band, len;
+               len     = packet_read_line(in_stream, buf+7, LARGE_PACKET_MAX);
                if (len == 0)
                        break;
                if (len < 1) {
@@ -22,25 +25,26 @@ int recv_sideband(const char *me, int in_stream, int out, int err, char *buf, in
                        safe_write(err, buf, len);
                        return SIDEBAND_PROTOCOL_ERROR;
                }
+               band = buf[7] & 0xff;
                len--;
-               switch (buf[0] & 0xFF) {
+               switch (band) {
                case 3:
-                       safe_write(err, "remote: ", 8);
-                       safe_write(err, buf+1, len);
-                       safe_write(err, "\n", 1);
+                       buf[7] = ' ';
+                       buf[8+len] = '\n';
+                       safe_write(err, buf, 8+len+1);
                        return SIDEBAND_REMOTE_ERROR;
                case 2:
-                       safe_write(err, "remote: ", 8);
-                       safe_write(err, buf+1, len);
+                       buf[7] = ' ';
+                       safe_write(err, buf, 8+len);
                        continue;
                case 1:
-                       safe_write(out, buf+1, len);
+                       safe_write(out, buf+8, len);
                        continue;
                default:
-                       len = sprintf(buf + 1,
+                       len = sprintf(buf,
                                      "%s: protocol error: bad band #%d\n",
-                                     me, buf[0] & 0xFF);
-                       safe_write(err, buf+1, len);
+                                     me, band);
+                       safe_write(err, buf, len);
                        return SIDEBAND_PROTOCOL_ERROR;
                }
        }
index 4872106fa0881a4ccb6fbaf6343d63bd31effd8c..a84b6917c7a17b5f8a922540801e98d46aa24431 100644 (file)
@@ -7,7 +7,7 @@
 #define DEFAULT_PACKET_MAX 1000
 #define LARGE_PACKET_MAX 65520
 
-int recv_sideband(const char *me, int in_stream, int out, int err, char *, int);
+int recv_sideband(const char *me, int in_stream, int out, int err);
 ssize_t send_sideband(int fd, int band, const char *data, ssize_t sz, int packet_max);
 
 #endif
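
recv_sideband() now owns its buffer and bakes a "remote:" prefix into its first seven bytes; each packet's payload is read at buf+7, and for bands #2 and #3 the band byte at buf[7] is overwritten with a space so the prefix and the message go out in one write(). A stand-alone sketch of just that buffer layout; the hard-coded payload stands in for what packet_read_line() would return and this is not the real packet reader:

    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[7 + 64];
        const char *payload = "\2Counting objects: 42\n"; /* band #2 + message */
        int len = (int)strlen(payload);
        int band;

        memcpy(buf, "remote:", 7);
        memcpy(buf + 7, payload, len);      /* what packet_read_line() would fill in */

        band = buf[7] & 0xff;
        len--;                              /* drop the band byte from the count */
        if (band == 2) {
            buf[7] = ' ';                   /* yields "remote: Counting objects: 42\n" */
            if (write(2, buf, 8 + len) < 0)
                return 1;
        }
        return 0;
    }
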
index ea48205537b60376b1fab346effb82548d48954e..58e5f74aeae880c69776bfa8e51bc9e5fe2463cf 100644 (file)
@@ -5,7 +5,7 @@ Date:   Mon Jun 26 00:00:00 2006 +0000
 
     Initial
 
- create mode 040000 dir
+ create mode 100644 dir/sub
  create mode 100644 file0
  create mode 100644 file2
 $
diff --git a/t/t4015-diff-whitespace.sh b/t/t4015-diff-whitespace.sh
new file mode 100755 (executable)
index 0000000..1bc5b7a
--- /dev/null
@@ -0,0 +1,122 @@
+#!/bin/sh
+#
+# Copyright (c) 2006 Johannes E. Schindelin
+#
+
+test_description='Test special whitespace in diff engine.
+
+'
+. ./test-lib.sh
+. ../diff-lib.sh
+
+# Ray Lehtiniemi's example
+
+cat << EOF > x
+do {
+   nothing;
+} while (0);
+EOF
+
+git-update-index --add x
+
+cat << EOF > x
+do
+{
+   nothing;
+}
+while (0);
+EOF
+
+cat << EOF > expect
+diff --git a/x b/x
+index adf3937..6edc172 100644
+--- a/x
++++ b/x
+@@ -1,3 +1,5 @@
+-do {
++do
++{
+    nothing;
+-} while (0);
++}
++while (0);
+EOF
+
+git-diff > out
+test_expect_success "Ray's example without options" 'diff -u expect out'
+
+git-diff -w > out
+test_expect_success "Ray's example with -w" 'diff -u expect out'
+
+git-diff -b > out
+test_expect_success "Ray's example with -b" 'diff -u expect out'
+
+tr 'Q' '\015' << EOF > x
+whitespace at beginning
+whitespace change
+whitespace in the middle
+whitespace at end
+unchanged line
+CR at endQ
+EOF
+
+git-update-index x
+
+cat << EOF > x
+       whitespace at beginning
+whitespace      change
+white space in the middle
+whitespace at end  
+unchanged line
+CR at end
+EOF
+
+tr 'Q' '\015' << EOF > expect
+diff --git a/x b/x
+index d99af23..8b32fb5 100644
+--- a/x
++++ b/x
+@@ -1,6 +1,6 @@
+-whitespace at beginning
+-whitespace change
+-whitespace in the middle
+-whitespace at end
++      whitespace at beginning
++whitespace     change
++white space in the middle
++whitespace at end  
+ unchanged line
+-CR at endQ
++CR at end
+EOF
+git-diff > out
+test_expect_success 'another test, without options' 'diff -u expect out'
+
+cat << EOF > expect
+diff --git a/x b/x
+index d99af23..8b32fb5 100644
+EOF
+git-diff -w > out
+test_expect_success 'another test, with -w' 'diff -u expect out'
+
+tr 'Q' '\015' << EOF > expect
+diff --git a/x b/x
+index d99af23..8b32fb5 100644
+--- a/x
++++ b/x
+@@ -1,6 +1,6 @@
+-whitespace at beginning
++      whitespace at beginning
+ whitespace change
+-whitespace in the middle
+-whitespace at end
++white space in the middle
++whitespace at end  
+ unchanged line
+-CR at endQ
++CR at end
+EOF
+git-diff -b > out
+test_expect_success 'another test, with -b' 'diff -u expect out'
+
+test_done
diff --git a/t/t4118-apply-empty-context.sh b/t/t4118-apply-empty-context.sh
new file mode 100755 (executable)
index 0000000..7309422
--- /dev/null
@@ -0,0 +1,55 @@
+#!/bin/sh
+#
+# Copyright (c) 2006 Junio C Hamano
+#
+
+test_description='git-apply with new style GNU diff with empty context
+
+'
+
+. ./test-lib.sh
+
+test_expect_success setup '
+       {
+               echo; echo;
+               echo A; echo B; echo C;
+               echo;
+       } >file1 &&
+       cat file1 >file1.orig &&
+       {
+               cat file1 &&
+               echo Q | tr -d "\\012"
+       } >file2 &&
+       cat file2 >file2.orig
+       git add file1 file2 &&
+       sed -e "/^B/d" <file1.orig >file1 &&
+       sed -e "/^B/d" <file2.orig >file2 &&
+       cat file1 >file1.mods &&
+       cat file2 >file2.mods &&
+       git diff |
+       sed -e "s/^ \$//" >diff.output
+'
+
+test_expect_success 'apply --numstat' '
+
+       git apply --numstat diff.output >actual &&
+       {
+               echo "0 1       file1" &&
+               echo "0 1       file2"
+       } >expect &&
+       diff -u expect actual
+
+'
+
+test_expect_success 'apply --apply' '
+
+       cat file1.orig >file1 &&
+       cat file2.orig >file2 &&
+       git update-index file1 file2 &&
+       git apply --index diff.output &&
+       diff -u file1.mods file1 &&
+       diff -u file2.mods file2
+'
+
+test_done
+
index 278eb6670116d0036413a81fc129615974458e5d..cf08e9279c1b2dc61cb53cf1e4940143cb3bf05c 100755 (executable)
@@ -26,6 +26,7 @@ commit id embedding:
 
 . ./test-lib.sh
 TAR=${TAR:-tar}
+UNZIP=${UNZIP:-unzip}
 
 test_expect_success \
     'populate workdir' \
@@ -95,4 +96,38 @@ test_expect_success \
     'validate file contents with prefix' \
     'diff -r a c/prefix/a'
 
+test_expect_success \
+    'git-archive --format=zip' \
+    'git-archive --format=zip HEAD >d.zip'
+
+test_expect_success \
+    'extract ZIP archive' \
+    '(mkdir d && cd d && $UNZIP ../d.zip)'
+
+test_expect_success \
+    'validate filenames' \
+    '(cd d/a && find .) | sort >d.lst &&
+     diff a.lst d.lst'
+
+test_expect_success \
+    'validate file contents' \
+    'diff -r a d/a'
+
+test_expect_success \
+    'git-archive --format=zip with prefix' \
+    'git-archive --format=zip --prefix=prefix/ HEAD >e.zip'
+
+test_expect_success \
+    'extract ZIP archive with prefix' \
+    '(mkdir e && cd e && $UNZIP ../e.zip)'
+
+test_expect_success \
+    'validate filenames with prefix' \
+    '(cd e/prefix/a && find .) | sort >e.lst &&
+     diff a.lst e.lst'
+
+test_expect_success \
+    'validate file contents with prefix' \
+    'diff -r a e/prefix/a'
+
 test_done
index 0c6a363be90f749515d5957ed78cb6f4e8f86dd3..041be04f5ceed683f2b4959b119cde4e055ed6e4 100755 (executable)
@@ -25,6 +25,12 @@ test_create_repo foo
 # clone doesn't like it if there is no HEAD. Is that a bug?
 (cd foo && touch file && git add file && git commit -m 'add file' >/dev/null 2>&1)
 
+# source repository given to git-clone should be relative to the
+# current path not to the target dir
+test_expect_failure \
+    'clone of non-existent (relative to $PWD) source should fail' \
+    'git-clone ../foo baz'
+
 test_expect_success \
     'clone should work now that source exists' \
     'git-clone foo bar'
index b523fef339da2ac21d7edd750cc6d5431a70fe52..2488e6eae1f515baa3adcaceca75000d15976612 100755 (executable)
@@ -135,6 +135,7 @@ test_expect_failure () {
        else
                test_failure_ "$@"
        fi
+       echo >&3 ""
 }
 
 test_expect_success () {
@@ -148,6 +149,7 @@ test_expect_success () {
        else
                test_failure_ "$@"
        fi
+       echo >&3 ""
 }
 
 test_expect_code () {
@@ -161,6 +163,7 @@ test_expect_code () {
        else
                test_failure_ "$@"
        fi
+       echo >&3 ""
 }
 
 # Most tests can use the created repository, but some may need to create more.

diff --git a/trace.c b/trace.c
index f9efc918b8a0aa907a36edd107a2fa2a14582e78..495e5ed92a68837a8abbd1569b2ccb59a3d50429 100644 (file)
--- a/trace.c
+++ b/trace.c
@@ -55,7 +55,8 @@ static int get_trace_fd(int *need_close)
 {
        char *trace = getenv("GIT_TRACE");
 
-       if (!trace || !strcmp(trace, "0") || !strcasecmp(trace, "false"))
+       if (!trace || !strcmp(trace, "") ||
+           !strcmp(trace, "0") || !strcasecmp(trace, "false"))
                return 0;
        if (!strcmp(trace, "1") || !strcasecmp(trace, "true"))
                return STDERR_FILENO;
index 189b239cc093ab1d585627a647c7c36bf65efe18..9ec3775049a44bf1ffdc145240e6d7cdea303b91 100644 (file)
@@ -16,7 +16,7 @@ static const char upload_pack_usage[] = "git-upload-pack [--strict] [--timeout=n
 #define OUR_REF (1U << 1)
 #define WANTED (1U << 2)
 static int multi_ack, nr_our_refs;
-static int use_thin_pack;
+static int use_thin_pack, use_ofs_delta;
 static struct object_array have_obj;
 static struct object_array want_obj;
 static unsigned int timeout;
@@ -137,7 +137,9 @@ static void create_pack_file(void)
                close(pu_pipe[1]);
                close(pe_pipe[0]);
                close(pe_pipe[1]);
-               execl_git_cmd("pack-objects", "--stdout", "--progress", NULL);
+               execl_git_cmd("pack-objects", "--stdout", "--progress",
+                             use_ofs_delta ? "--delta-base-offset" : NULL,
+                             NULL);
                kill(pid_rev_list, SIGKILL);
                die("git-upload-pack: unable to exec git-pack-objects");
        }
@@ -393,6 +395,8 @@ static void receive_needs(void)
                        multi_ack = 1;
                if (strstr(line+45, "thin-pack"))
                        use_thin_pack = 1;
+               if (strstr(line+45, "ofs-delta"))
+                       use_ofs_delta = 1;
                if (strstr(line+45, "side-band-64k"))
                        use_sideband = LARGE_PACKET_MAX;
                else if (strstr(line+45, "side-band"))
@@ -418,7 +422,7 @@ static void receive_needs(void)
 
 static int send_ref(const char *refname, const unsigned char *sha1)
 {
-       static const char *capabilities = "multi_ack thin-pack side-band side-band-64k";
+       static const char *capabilities = "multi_ack thin-pack side-band side-band-64k ofs-delta";
        struct object *o = parse_object(sha1);
 
        if (!o)
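
The conditional argument in the execl_git_cmd() call above leans on a property of NULL-terminated varargs lists: the list ends at the first NULL pointer, so when use_ofs_delta is zero the "--delta-base-offset" slot itself becomes the terminator and the flag is simply not passed. A stand-alone sketch of the same idiom, with /bin/echo standing in for git-pack-objects and a made-up use_extra_flag playing the role of use_ofs_delta:

    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        int use_extra_flag = (argc > 1);

        /* With no extra command-line argument the ternary yields NULL and
         * the list ends right after "--progress"; otherwise the optional
         * flag is included. */
        execl("/bin/echo", "echo", "--stdout", "--progress",
              use_extra_flag ? "--delta-base-offset" : (char *)NULL,
              (char *)NULL);
        perror("execl");
        return 1;
    }
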
index 714c563547462d962b4a6b69b524092e1bc2a036..154c26fdcdbef9095e254d7b469daf11c5653a04 100644 (file)
@@ -90,7 +90,7 @@ static void xdl_find_func(xdfile_t *xf, long i, char *buf, long sz, long *ll) {
                     *rec == '#')) {    /* #define? */
                        if (len > sz)
                                len = sz;
-                       if (len && rec[len - 1] == '\n')
+                       while (0 < len && isspace((unsigned char)rec[len - 1]))
                                len--;
                        memcpy(buf, rec, len);
                        *ll = len;
index 4c2fde80c143b3aaf086dc36c1a050851f83feaa..e2cd2023b31e794bc0c5ba28f64ae4ce4e128184 100644 (file)
 #define XMACROS_H
 
 
-#define GR_PRIME 0x9e370001UL
 
 
 #define XDL_MIN(a, b) ((a) < (b) ? (a): (b))
 #define XDL_MAX(a, b) ((a) > (b) ? (a): (b))
 #define XDL_ABS(v) ((v) >= 0 ? (v): -(v))
 #define XDL_ISDIGIT(c) ((c) >= '0' && (c) <= '9')
-#define XDL_HASHLONG(v, b) (((unsigned long)(v) * GR_PRIME) >> ((CHAR_BIT * sizeof(unsigned long)) - (b)))
+#define XDL_ADDBITS(v,b)       ((v) + ((v) >> (b)))
+#define XDL_MASKBITS(b)                ((1UL << (b)) - 1)
+#define XDL_HASHLONG(v,b)      (XDL_ADDBITS((unsigned long)(v), b) & XDL_MASKBITS(b))
 #define XDL_PTRFREE(p) do { if (p) { xdl_free(p); (p) = NULL; } } while (0)
 #define XDL_LE32_PUT(p, v) \
 do { \
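
The replacement XDL_HASHLONG above drops the GR_PRIME multiplicative hash: it folds the high bits down onto the low ones with a single shifted add, then masks to the requested bucket width. A stand-alone copy of the three new macros, hashing a few values into 8-bit buckets; the sample values and the main() driver are illustrative only:

    #include <stdio.h>

    #define XDL_ADDBITS(v, b)   ((v) + ((v) >> (b)))
    #define XDL_MASKBITS(b)     ((1UL << (b)) - 1)
    #define XDL_HASHLONG(v, b)  (XDL_ADDBITS((unsigned long)(v), b) & XDL_MASKBITS(b))

    int main(void)
    {
        unsigned long samples[] = { 0x12345678UL, 0xdeadbeefUL, 0xcafef00dUL };
        int i;

        for (i = 0; i < 3; i++)
            printf("%08lx -> bucket %lu\n",
                   samples[i], XDL_HASHLONG(samples[i], 8));
        return 0;
    }
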
index f7bdd395ad1f69067654a754b4d60fcc7c359275..9e4bb47ee97a0fca5120c41365a80531ddb849e4 100644 (file)
@@ -191,36 +191,30 @@ int xdl_recmatch(const char *l1, long s1, const char *l2, long s2, long flags)
        int i1, i2;
 
        if (flags & XDF_IGNORE_WHITESPACE) {
-               for (i1 = i2 = 0; i1 < s1 && i2 < s2; i1++, i2++) {
+               for (i1 = i2 = 0; i1 < s1 && i2 < s2; ) {
                        if (isspace(l1[i1]))
                                while (isspace(l1[i1]) && i1 < s1)
                                        i1++;
-                       else if (isspace(l2[i2]))
+                       if (isspace(l2[i2]))
                                while (isspace(l2[i2]) && i2 < s2)
                                        i2++;
-                       else if (l1[i1] != l2[i2])
-                               return l2[i2] - l1[i1];
+                       if (i1 < s1 && i2 < s2 && l1[i1++] != l2[i2++])
+                               return 0;
                }
-               if (i1 >= s1)
-                       return 1;
-               else if (i2 >= s2)
-                       return -1;
+               return (i1 >= s1 && i2 >= s2);
        } else if (flags & XDF_IGNORE_WHITESPACE_CHANGE) {
-               for (i1 = i2 = 0; i1 < s1 && i2 < s2; i1++, i2++) {
+               for (i1 = i2 = 0; i1 < s1 && i2 < s2; ) {
                        if (isspace(l1[i1])) {
                                if (!isspace(l2[i2]))
-                                       return -1;
+                                       return 0;
                                while (isspace(l1[i1]) && i1 < s1)
                                        i1++;
                                while (isspace(l2[i2]) && i2 < s2)
                                        i2++;
-                       } else if (l1[i1] != l2[i2])
-                               return l2[i2] - l1[i1];
+                       } else if (l1[i1++] != l2[i2++])
+                               return 0;
                }
-               if (i1 >= s1)
-                       return 1;
-               else if (i2 >= s2)
-                       return -1;
+               return (i1 >= s1 && i2 >= s2);
        } else
                return s1 == s2 && !memcmp(l1, l2, s1);
 
@@ -233,7 +227,8 @@ unsigned long xdl_hash_record(char const **data, char const *top, long flags) {
 
        for (; ptr < top && *ptr != '\n'; ptr++) {
                if (isspace(*ptr) && (flags & XDF_WHITESPACE_FLAGS)) {
-                       while (ptr < top && isspace(*ptr) && ptr[1] != '\n')
+                       while (ptr + 1 < top && isspace(ptr[1])
+                                       && ptr[1] != '\n')
                                ptr++;
                        if (flags & XDF_IGNORE_WHITESPACE_CHANGE) {
                                ha += (ha << 5);