#include "diff.h"
#include "revision.h"
#include "remote.h"
+#include "blob.h"
+#include "xdiff-interface.h"
+#include "ll-merge.h"
static const char * const checkout_usage[] = {
"git checkout [options] <branch>",
NULL,
};
+struct checkout_opts {
+ int quiet;
+ int merge;
+ int force;
+ int writeout_stage;
+ int writeout_error;
+
+ const char *new_branch;
+ int new_branch_log;
+ enum branch_track track;
+};
+
static int skip_same_name(struct cache_entry *ce, int pos)
{
	while (pos < active_nr &&
	       !strcmp(active_cache[pos]->name, ce->name))
		pos++;
	return pos;
}
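+/*
+ * In an unmerged index the entries for one path sit next to each other,
+ * carrying stage numbers: 1 for the common ancestor, 2 for "ours" and
+ * 3 for "theirs".  The helpers below walk such a run of entries.
+ */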
+static int check_stage(int stage, struct cache_entry *ce, int pos)
+{
+ while (pos < active_nr &&
+ !strcmp(active_cache[pos]->name, ce->name)) {
+ if (ce_stage(active_cache[pos]) == stage)
+ return 0;
+ pos++;
+ }
+ return error("path '%s' does not have %s version",
+ ce->name,
+ (stage == 2) ? "our" : "their");
+}
+
+static int check_all_stages(struct cache_entry *ce, int pos)
+{
+ if (ce_stage(ce) != 1 ||
+ active_nr <= pos + 2 ||
+ strcmp(active_cache[pos+1]->name, ce->name) ||
+ ce_stage(active_cache[pos+1]) != 2 ||
+ strcmp(active_cache[pos+2]->name, ce->name) ||
+ ce_stage(active_cache[pos+2]) != 3)
+ return error("path '%s' does not have all three versions",
+ ce->name);
+ return 0;
+}
+
+static int checkout_stage(int stage, struct cache_entry *ce, int pos,
+ struct checkout *state)
+{
+ while (pos < active_nr &&
+ !strcmp(active_cache[pos]->name, ce->name)) {
+ if (ce_stage(active_cache[pos]) == stage)
+ return checkout_entry(active_cache[pos], state, NULL);
+ pos++;
+ }
+ return error("path '%s' does not have %s version",
+ ce->name,
+ (stage == 2) ? "our" : "their");
+}
+
+/* NEEDSWORK: share with merge-recursive */
+static void fill_mm(const unsigned char *sha1, mmfile_t *mm)
+{
+ unsigned long size;
+ enum object_type type;
+
+ if (!hashcmp(sha1, null_sha1)) {
+ mm->ptr = xstrdup("");
+ mm->size = 0;
+ return;
+ }
+
+ mm->ptr = read_sha1_file(sha1, &type, &size);
+ if (!mm->ptr || type != OBJ_BLOB)
+ die("unable to read blob object %s", sha1_to_hex(sha1));
+ mm->size = size;
+}
+
+static int checkout_merged(int pos, struct checkout *state)
+{
+ struct cache_entry *ce = active_cache[pos];
+ const char *path = ce->name;
+ mmfile_t ancestor, ours, theirs;
+ int status;
+ unsigned char sha1[20];
+ mmbuffer_t result_buf;
+
+ if (ce_stage(ce) != 1 ||
+ active_nr <= pos + 2 ||
+ strcmp(active_cache[pos+1]->name, path) ||
+ ce_stage(active_cache[pos+1]) != 2 ||
+ strcmp(active_cache[pos+2]->name, path) ||
+ ce_stage(active_cache[pos+2]) != 3)
+ return error("path '%s' does not have all 3 versions", path);
+
+ fill_mm(active_cache[pos]->sha1, &ancestor);
+ fill_mm(active_cache[pos+1]->sha1, &ours);
+ fill_mm(active_cache[pos+2]->sha1, &theirs);
+
+ status = ll_merge(&result_buf, path, &ancestor,
+ &ours, "ours", &theirs, "theirs", 1);
+ free(ancestor.ptr);
+ free(ours.ptr);
+ free(theirs.ptr);
+ if (status < 0 || !result_buf.ptr) {
+ free(result_buf.ptr);
+ return error("path '%s': cannot merge", path);
+ }
+
+ /*
+ * NEEDSWORK:
+ * There is absolutely no reason to write this as a blob object
+ * and create a phoney cache entry just to leak. This hack is
+ * primarily to get to the write_entry() machinery that massages
+ * the contents to work-tree format and writes it out, but that
+ * machinery only accepts a cache entry. The code in write_entry() needs
+ * to be refactored to allow us to feed a <buffer, size, mode>
+ * instead of a cache entry. Such a refactoring would help
+ * merge_recursive as well (it also writes the merge result to the
+ * object database even when it may contain conflicts).
+ */
+ if (write_sha1_file(result_buf.ptr, result_buf.size,
+ blob_type, sha1))
+ die("Unable to add merge result for '%s'", path);
+ ce = make_cache_entry(create_ce_mode(active_cache[pos+1]->ce_mode),
+ sha1,
+ path, 2, 0);
+ if (!ce)
+ die("make_cache_entry failed for path '%s'", path);
+ status = checkout_entry(ce, state, NULL);
+ return status;
+}
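+/*
+ * With the above, "git checkout --ours -- <path>" writes out stage #2,
+ * "--theirs" writes stage #3, and "git checkout -m -- <path>" recreates
+ * the conflicted merge result for the path from the three stages.
+ */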
-static int checkout_paths(struct tree *source_tree, const char **pathspec)
+static int checkout_paths(struct tree *source_tree, const char **pathspec,
+ struct checkout_opts *opts)
{
int pos;
struct checkout state;
int flag;
struct commit *head;
int errs = 0;
-
+ int stage = opts->writeout_stage;
+ int merge = opts->merge;
int newfd;
struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
if (pathspec_match(pathspec, NULL, ce->name, 0)) {
if (!ce_stage(ce))
continue;
- errs = 1;
- error("path '%s' is unmerged", ce->name);
+ if (opts->force) {
+ warning("path '%s' is unmerged", ce->name);
+ } else if (stage) {
+ errs |= check_stage(stage, ce, pos);
+ } else if (opts->merge) {
+ errs |= check_all_stages(ce, pos);
+ } else {
+ errs = 1;
+ error("path '%s' is unmerged", ce->name);
+ }
pos = skip_same_name(ce, pos) - 1;
}
}
errs |= checkout_entry(ce, &state, NULL);
continue;
}
+ if (stage)
+ errs |= checkout_stage(stage, ce, pos, &state);
+ else if (merge)
+ errs |= checkout_merged(pos, &state);
pos = skip_same_name(ce, pos) - 1;
}
}
static void describe_detached_head(char *msg, struct commit *commit)
{
- struct strbuf sb;
- strbuf_init(&sb, 0);
+ struct strbuf sb = STRBUF_INIT;
parse_commit(commit);
pretty_print_commit(CMIT_FMT_ONELINE, commit, &sb, 0, NULL, NULL, 0, 0);
fprintf(stderr, "%s %s... %s\n", msg,
	find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV), sb.buf);
strbuf_release(&sb);
}
-struct checkout_opts {
- int quiet;
- int merge;
- int force;
- int writeout_error;
-
- char *new_branch;
- int new_branch_log;
- enum branch_track track;
-};
-
static int reset_tree(struct tree *tree, struct checkout_opts *o, int worktree)
{
struct unpack_trees_options opts;
static void setup_branch_path(struct branch_info *branch)
{
- struct strbuf buf;
- strbuf_init(&buf, 0);
+ struct strbuf buf = STRBUF_INIT;
strbuf_addstr(&buf, "refs/heads/");
strbuf_addstr(&buf, branch->name);
branch->path = strbuf_detach(&buf, NULL);
*/
struct tree *result;
struct tree *work;
+ struct merge_options o;
if (!opts->merge)
return 1;
parse_commit(old->commit);
*/
add_files_to_cache(NULL, NULL, 0);
- work = write_tree_from_memory();
+ init_merge_options(&o);
+ o.verbosity = 0;
+ work = write_tree_from_memory(&o);
ret = reset_tree(new->commit->tree, opts, 1);
if (ret)
return ret;
- merge_trees(new->commit->tree, work, old->commit->tree,
- new->name, "local", &result);
+ o.branch1 = new->name;
+ o.branch2 = "local";
+ merge_trees(&o, new->commit->tree, work,
+ old->commit->tree, &result);
ret = reset_tree(new->commit->tree, opts, 0);
if (ret)
return ret;
struct branch_info *old,
struct branch_info *new)
{
- struct strbuf msg;
+ struct strbuf msg = STRBUF_INIT;
const char *old_desc;
if (opts->new_branch) {
create_branch(old->name, opts->new_branch, new->name, 0,
	      opts->new_branch_log, opts->track);
new->name = opts->new_branch;
setup_branch_path(new);
}
- strbuf_init(&msg, 0);
old_desc = old->name;
if (!old_desc && old->commit)
old_desc = sha1_to_hex(old->commit->object.sha1);
if (!opts->quiet && !old.path && old.commit && new->commit != old.commit)
describe_detached_head("Previous HEAD position was", old.commit);
- if (!old.commit) {
+ if (!old.commit && !opts->force) {
if (!opts->quiet) {
fprintf(stderr, "warning: You appear to be on a branch yet to be born.\n");
fprintf(stderr, "warning: Forcing checkout of %s.\n", new->name);
return ret || opts->writeout_error;
}
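+/*
+ * git_xmerge_config() handles merge.conflictstyle and falls back to
+ * git_default_config() for everything else, so it can replace
+ * git_default_config() wholesale here.
+ */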
+static int git_checkout_config(const char *var, const char *value, void *cb)
+{
+ return git_xmerge_config(var, value, cb);
+}
+
int cmd_checkout(int argc, const char **argv, const char *prefix)
{
struct checkout_opts opts;
const char *arg;
struct branch_info new;
struct tree *source_tree = NULL;
+ char *conflict_style = NULL;
struct option options[] = {
OPT__QUIET(&opts.quiet),
OPT_STRING('b', NULL, &opts.new_branch, "new branch", "branch"),
OPT_BOOLEAN('l', NULL, &opts.new_branch_log, "log for new branch"),
OPT_SET_INT('t', "track", &opts.track, "track",
BRANCH_TRACK_EXPLICIT),
+ OPT_SET_INT('2', "ours", &opts.writeout_stage, "stage",
+ 2),
+ OPT_SET_INT('3', "theirs", &opts.writeout_stage, "stage",
+ 3),
OPT_BOOLEAN('f', NULL, &opts.force, "force"),
- OPT_BOOLEAN('m', NULL, &opts.merge, "merge"),
+ OPT_BOOLEAN('m', "merge", &opts.merge, "merge"),
+ OPT_STRING(0, "conflict", &conflict_style, "style",
+ "conflict style (merge or diff3)"),
OPT_END(),
};
int has_dash_dash;
memset(&opts, 0, sizeof(opts));
memset(&new, 0, sizeof(new));
- git_config(git_default_config, NULL);
+ git_config(git_checkout_config, NULL);
- opts.track = git_branch_track;
+ opts.track = BRANCH_TRACK_UNSPECIFIED;
argc = parse_options(argc, argv, options, checkout_usage,
PARSE_OPT_KEEP_DASHDASH);
- if (!opts.new_branch && (opts.track != git_branch_track))
- die("git checkout: --track and --no-track require -b");
+ /* --track without -b should DWIM */
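+ /* e.g. "--track origin/next" acts like "-b next --track origin/next" */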
+ if (0 < opts.track && !opts.new_branch) {
+ const char *argv0 = argv[0];
+ if (!argc || !strcmp(argv0, "--"))
+ die ("--track needs a branch name");
+ if (!prefixcmp(argv0, "refs/"))
+ argv0 += 5;
+ if (!prefixcmp(argv0, "remotes/"))
+ argv0 += 8;
+ argv0 = strchr(argv0, '/');
+ if (!argv0 || !argv0[1])
+ die ("Missing branch name; try -b");
+ opts.new_branch = argv0 + 1;
+ }
+
+ if (opts.track == BRANCH_TRACK_UNSPECIFIED)
+ opts.track = git_branch_track;
+ if (conflict_style) {
+ opts.merge = 1; /* implied */
+ git_xmerge_config("merge.conflictstyle", conflict_style, NULL);
+ }
if (opts.force && opts.merge)
die("git checkout: -f and -m are incompatible");
die("invalid path specification");
/* Checkout paths */
- if (opts.new_branch || opts.force || opts.merge) {
+ if (opts.new_branch) {
if (argc == 1) {
- die("git checkout: updating paths is incompatible with switching branches/forcing\nDid you intend to checkout '%s' which can not be resolved as commit?", argv[0]);
+ die("git checkout: updating paths is incompatible with switching branches.\nDid you intend to checkout '%s' which can not be resolved as commit?", argv[0]);
} else {
- die("git checkout: updating paths is incompatible with switching branches/forcing");
+ die("git checkout: updating paths is incompatible with switching branches.");
}
}
- return checkout_paths(source_tree, pathspec);
+ if (1 < !!opts.writeout_stage + !!opts.force + !!opts.merge)
+ die("git checkout: --ours/--theirs, --force and --merge are incompatible when\nchecking out of the index.");
+
+ return checkout_paths(source_tree, pathspec, &opts);
}
if (opts.new_branch) {
- struct strbuf buf;
- strbuf_init(&buf, 0);
+ struct strbuf buf = STRBUF_INIT;
strbuf_addstr(&buf, "refs/heads/");
strbuf_addstr(&buf, opts.new_branch);
if (!get_sha1(buf.buf, rev))
if (new.name && !new.commit) {
die("Cannot switch branch to a non-commit.");
}
+ if (opts.writeout_stage)
+ die("--ours/--theirs is incompatible with switching branches.");
return switch_branches(&opts, &new);
}
{
local cmd i is_hash=y dir="$(__gitdir "$1")"
if [ -d "$dir" ]; then
- for i in $(git --git-dir="$dir" \
- for-each-ref --format='%(refname)' \
- refs/heads ); do
- echo "${i#refs/heads/}"
- done
+ git --git-dir="$dir" for-each-ref --format='%(refname:short)' \
+ refs/heads
return
fi
for i in $(git ls-remote "$1" 2>/dev/null); do
{
local cmd i is_hash=y dir="$(__gitdir "$1")"
if [ -d "$dir" ]; then
- for i in $(git --git-dir="$dir" \
- for-each-ref --format='%(refname)' \
- refs/tags ); do
- echo "${i#refs/tags/}"
- done
+ git --git-dir="$dir" for-each-ref --format='%(refname:short)' \
+ refs/tags
return
fi
for i in $(git ls-remote "$1" 2>/dev/null); do
__git_refs ()
{
- local cmd i is_hash=y dir="$(__gitdir "$1")"
+ local i is_hash=y dir="$(__gitdir "$1")"
+ local cur="${COMP_WORDS[COMP_CWORD]}" format refs
if [ -d "$dir" ]; then
- if [ -e "$dir/HEAD" ]; then echo HEAD; fi
- for i in $(git --git-dir="$dir" \
- for-each-ref --format='%(refname)' \
- refs/tags refs/heads refs/remotes); do
- case "$i" in
- refs/tags/*) echo "${i#refs/tags/}" ;;
- refs/heads/*) echo "${i#refs/heads/}" ;;
- refs/remotes/*) echo "${i#refs/remotes/}" ;;
- *) echo "$i" ;;
- esac
- done
+ case "$cur" in
+ refs|refs/*)
+ format="refname"
+ refs="${cur%/*}"
+ ;;
+ *)
+ if [ -e "$dir/HEAD" ]; then echo HEAD; fi
+ format="refname:short"
+ refs="refs/tags refs/heads refs/remotes"
+ ;;
+ esac
+ git --git-dir="$dir" for-each-ref --format="%($format)" \
+ $refs
return
fi
for i in $(git ls-remote "$dir" 2>/dev/null); do
_git_bundle ()
{
- local mycword="$COMP_CWORD"
- case "${COMP_WORDS[0]}" in
- git)
- local cmd="${COMP_WORDS[2]}"
- mycword="$((mycword-1))"
- ;;
- git-bundle*)
- local cmd="${COMP_WORDS[1]}"
- ;;
- esac
- case "$mycword" in
- 1)
+ local cmd="${COMP_WORDS[2]}"
+ case "$COMP_CWORD" in
+ 2)
__gitcomp "create list-heads verify unbundle"
;;
- 2)
+ 3)
# looking for a file
;;
*)
__gitcomp "$(__git_refs)" "$pfx" "${cur#*:}"
;;
*)
- local remote
- case "${COMP_WORDS[0]}" in
- git-fetch) remote="${COMP_WORDS[1]}" ;;
- git) remote="${COMP_WORDS[2]}" ;;
- esac
- __gitcomp "$(__git_refs2 "$remote")"
+ __gitcomp "$(__git_refs2 "${COMP_WORDS[2]}")"
;;
esac
fi
attributes cli core-tutorial cvs-migration
diffcore gitk glossary hooks ignore modules
repository-layout tutorial tutorial-2
+ workflows
"
}
if [ "$COMP_CWORD" = 2 ]; then
__gitcomp "$(__git_remotes)"
else
- local remote
- case "${COMP_WORDS[0]}" in
- git-pull) remote="${COMP_WORDS[1]}" ;;
- git) remote="${COMP_WORDS[2]}" ;;
- esac
- __gitcomp "$(__git_refs "$remote")"
+ __gitcomp "$(__git_refs "${COMP_WORDS[2]}")"
fi
}
else
case "$cur" in
*:*)
- local remote
- case "${COMP_WORDS[0]}" in
- git-push) remote="${COMP_WORDS[1]}" ;;
- git) remote="${COMP_WORDS[2]}" ;;
- esac
-
local pfx=""
case "$COMP_WORDBREAKS" in
*:*) : great ;;
*) pfx="${cur%%:*}:" ;;
esac
- __gitcomp "$(__git_refs "$remote")" "$pfx" "${cur#*:}"
+ __gitcomp "$(__git_refs "${COMP_WORDS[2]}")" "$pfx" "${cur#*:}"
;;
+*)
__gitcomp "$(__git_refs)" + "${cur#+}"
--no-suppress-from --no-thread --quiet
--signed-off-by-cc --smtp-pass --smtp-server
--smtp-server-port --smtp-ssl --smtp-user --subject
- --suppress-cc --suppress-from --thread --to"
+ --suppress-cc --suppress-from --thread --to
+ --validate --no-validate"
return
;;
esac
return
;;
esac
- COMPREPLY=()
+ __gitcomp "$(__git_refs)"
}
_git_rm ()
{
__git_has_doubledash && return
- local subcommands="add status init update"
+ local subcommands="add status init update summary foreach sync"
if [ -z "$(__git_find_subcommand "$subcommands")" ]; then
local cur="${COMP_WORDS[COMP_CWORD]}"
case "$cur" in
-m|-F)
COMPREPLY=()
;;
- -*|tag|git-tag)
+ -*|tag)
if [ $f = 1 ]; then
__gitcomp "$(__git_tags)"
else
stderr=subprocess.PIPE, stdout=subprocess.PIPE);
return proc.wait() == 0;
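+# cache "git config" lookups so repeated queries do not fork a git
+# process each time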
+_gitConfig = {}
def gitConfig(key):
- return read_pipe("git config %s" % key, ignore_error=True).strip()
+     if not _gitConfig.has_key(key):
+         _gitConfig[key] = read_pipe("git config %s" % key, ignore_error=True).strip()
+     return _gitConfig[key]
def p4BranchesInGit(branchesAreInRemotes = True):
branches = {}
newdiff = newdiff.replace("\n", "\r\n")
tmpFile.write(submitTemplate + separatorLine + diff + newdiff)
tmpFile.close()
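+ # note the template's timestamp so we can tell below whether the user edited it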
+ mtime = os.stat(fileName).st_mtime
defaultEditor = "vi"
if platform.system() == "Windows":
defaultEditor = "notepad"
editor = os.environ.get("EDITOR", defaultEditor)
system(editor + " " + fileName)
- tmpFile = open(fileName, "rb")
- message = tmpFile.read()
- tmpFile.close()
- os.remove(fileName)
- submitTemplate = message[:message.index(separatorLine)]
- if self.isWindows:
- submitTemplate = submitTemplate.replace("\r\n", "\n")
- p4_write_pipe("submit -i", submitTemplate)
+ response = "y"
+ if os.stat(fileName).st_mtime <= mtime:
+ response = "x"
+ while response != "y" and response != "n":
+ response = raw_input("Submit template unchanged. Submit anyway? [y]es, [n]o (skip this patch) ")
+
+ if response == "y":
+ tmpFile = open(fileName, "rb")
+ message = tmpFile.read()
+ tmpFile.close()
+ submitTemplate = message[:message.index(separatorLine)]
+ if self.isWindows:
+ submitTemplate = submitTemplate.replace("\r\n", "\n")
+ p4_write_pipe("submit -i", submitTemplate)
+ else:
+ for f in editedFiles:
+ p4_system("revert \"%s\"" % f);
+ for f in filesToAdd:
+ p4_system("revert \"%s\"" % f);
+ system("rm %s" %f)
+
+ os.remove(fileName)
else:
fileName = "submit.txt"
file = open(fileName, "w+")
if includeFile:
filesForCommit.append(f)
- if f['action'] != 'delete':
+ if f['action'] not in ('delete', 'purge'):
filesToRead.append(f)
filedata = []
while j < len(filedata):
stat = filedata[j]
j += 1
- text = [];
+ text = ''
while j < len(filedata) and filedata[j]['code'] in ('text', 'unicode', 'binary'):
- text.append(filedata[j]['data'])
+ text += filedata[j]['data']
+ del filedata[j]['data']
j += 1
- text = ''.join(text)
if not stat.has_key('depotFile'):
sys.stderr.write("p4 print fails with: %s\n" % repr(stat))
if stat['type'] in ('text+ko', 'unicode+ko', 'binary+ko'):
text = re.sub(r'(?i)\$(Id|Header):[^$]*\$',r'$\1$', text)
elif stat['type'] in ('text+k', 'ktext', 'kxtext', 'unicode+k', 'binary+k'):
- text = re.sub(r'\$(Id|Header|Author|Date|DateTime|Change|File|Revision):[^$]*\$',r'$\1$', text)
+ text = re.sub(r'\$(Id|Header|Author|Date|DateTime|Change|File|Revision):[^$\n]*\$',r'$\1$', text)
contents[stat['depotFile']] = text
continue
relPath = self.stripRepoPath(file['path'], branchPrefixes)
- if file["action"] == "delete":
+ if file["action"] in ("delete", "purge"):
self.gitStream.write("D %s\n" % relPath)
else:
data = file['data']
cleanedFiles = {}
for info in files:
- if info["action"] == "delete":
+ if info["action"] in ("delete", "purge"):
continue
cleanedFiles[info["depotFile"]] = info["rev"]
if change > newestRevision:
newestRevision = change
- if info["action"] == "delete":
+ if info["action"] in ("delete", "purge"):
# don't increase the file cnt, otherwise details["depotFile123"] will have gaps!
#fileCnt = fileCnt + 1
continue
if not P4Sync.run(self, depotPaths):
return False
if self.branch != "master":
- if gitBranchExists("refs/remotes/p4/master"):
- system("git branch master refs/remotes/p4/master")
+ if self.importIntoRemotes:
+ masterbranch = "refs/remotes/p4/master"
+ else:
+ masterbranch = "refs/heads/p4/master"
+ if gitBranchExists(masterbranch):
+ system("git branch master %s" % masterbranch)
system("git checkout -f")
else:
print "Could not detect main branch. No checkout/master branch created."
pos = strchr(pos, '/');
if (!pos)
break;
- *pos = 0;
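+ /* tolerate duplicate and trailing slashes in the path */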
+ while (*++pos == '/')
+ ;
+ if (!*pos)
+ break;
+ *--pos = '\0';
if (!stat(path, &st)) {
/* path exists */
if (!S_ISDIR(st.st_mode)) {
*/
static int link_alt_odb_entry(const char * entry, int len, const char * relative_base, int depth)
{
- struct stat st;
const char *objdir = get_object_directory();
struct alternate_object_database *ent;
struct alternate_object_database *alt;
ent->base[pfxlen] = ent->base[entlen-1] = 0;
/* Detect cases where alternate disappeared */
- if (stat(ent->base, &st) || !S_ISDIR(st.st_mode)) {
+ if (!is_directory(ent->base)) {
error("object directory %s does not exist; "
"check .git/objects/info/alternates.",
ent->base);
link_alt_odb_entries(alt, alt + strlen(alt), '\n', NULL, 0);
}
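+/* call fn on each alternate; a non-zero return from fn stops the walk early */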
+void foreach_alt_odb(alt_odb_fn fn, void *cb)
+{
+ struct alternate_object_database *ent;
+
+ prepare_alt_odb();
+ for (ent = alt_odb_list; ent; ent = ent->next)
+ if (fn(ent, cb))
+ return;
+}
+
void prepare_alt_odb(void)
{
const char *alt;
read_info_alternates(get_object_directory(), 0);
}
-static int has_loose_object(const unsigned char *sha1)
+static int has_loose_object_local(const unsigned char *sha1)
{
char *name = sha1_file_name(sha1);
- struct alternate_object_database *alt;
+ return !access(name, F_OK);
+}
- if (!access(name, F_OK))
- return 1;
+int has_loose_object_nonlocal(const unsigned char *sha1)
+{
+ struct alternate_object_database *alt;
prepare_alt_odb();
for (alt = alt_odb_list; alt; alt = alt->next) {
- name = alt->name;
- fill_sha1_path(name, sha1);
+ fill_sha1_path(alt->name, sha1);
if (!access(alt->base, F_OK))
return 1;
}
return 0;
}
+static int has_loose_object(const unsigned char *sha1)
+{
+ return has_loose_object_local(sha1) ||
+ has_loose_object_nonlocal(sha1);
+}
+
static unsigned int pack_used_ctr;
static unsigned int pack_mmap_calls;
static unsigned int peak_pack_open_windows;
return NULL;
}
memcpy(p->pack_name, path, path_len);
+
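+ /* a ".keep" file alongside the pack marks it as precious */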
+ strcpy(p->pack_name + path_len, ".keep");
+ if (!access(p->pack_name, F_OK))
+ p->pack_keep = 1;
+
strcpy(p->pack_name + path_len, ".pack");
if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
free(p);
return 0;
}
-unsigned long unpack_object_header_gently(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep)
+unsigned long unpack_object_header_buffer(const unsigned char *buf,
+ unsigned long len, enum object_type *type, unsigned long *sizep)
{
unsigned shift;
unsigned char c;
size = c & 15;
shift = 4;
while (c & 0x80) {
- if (len <= used)
- return 0;
- if (sizeof(long) * 8 <= shift)
+ if (len <= used || sizeof(long) * 8 <= shift) {
+ error("bad object header");
return 0;
+ }
c = buf[used++];
size += (c & 0x7f) << shift;
shift += 7;
* really worth it and we don't write it any longer. But we
* can still read it.
*/
- used = unpack_object_header_gently(map, mapsize, &type, &size);
+ used = unpack_object_header_buffer(map, mapsize, &type, &size);
if (!used || !valid_loose_object_type[type])
return -1;
map += used;
} while ((st == Z_OK || st == Z_BUF_ERROR) &&
stream.total_out < sizeof(delta_head));
inflateEnd(&stream);
- if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head))
- die("delta data unpack-initial failed");
+ if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head)) {
+ error("delta data unpack-initial failed");
+ return 0;
+ }
/* Examine the initial part of the delta to figure out
* the result size.
base_offset = (base_offset << 7) + (c & 127);
}
base_offset = delta_obj_offset - base_offset;
- if (base_offset >= delta_obj_offset)
+ if (base_offset <= 0 || base_offset >= delta_obj_offset)
return 0; /* out of bound */
*curpos += used;
} else if (type == OBJ_REF_DELTA) {
off_t base_offset;
base_offset = get_delta_base(p, w_curs, &curpos, type, obj_offset);
+ if (!base_offset)
+ return OBJ_BAD;
type = packed_object_info(p, base_offset, NULL);
+ if (type <= OBJ_NONE) {
+ struct revindex_entry *revidx;
+ const unsigned char *base_sha1;
+ revidx = find_pack_revindex(p, base_offset);
+ if (!revidx)
+ return OBJ_BAD;
+ base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+ mark_bad_packed_object(p, base_sha1);
+ type = sha1_object_info(base_sha1, NULL);
+ if (type <= OBJ_NONE)
+ return OBJ_BAD;
+ }
/* We choose to only get the type of the base object and
* ignore potentially corrupt pack file that expects the delta
* based on a base with a wrong size. This saves tons of
* inflate() calls.
*/
- if (sizep)
+ if (sizep) {
*sizep = get_size_from_delta(p, w_curs, curpos);
+ if (*sizep == 0)
+ type = OBJ_BAD;
+ }
return type;
}
* insane, so we know won't exceed what we have been given.
*/
base = use_pack(p, w_curs, *curpos, &left);
- used = unpack_object_header_gently(base, left, &type, sizep);
- if (!used)
- die("object offset outside of pack file");
- *curpos += used;
+ used = unpack_object_header_buffer(base, left, &type, sizep);
+ if (!used) {
+ type = OBJ_BAD;
+ } else
+ *curpos += used;
return type;
}
*sizep = size;
break;
default:
- die("pack %s contains unknown object type %d",
- p->pack_name, type);
+ error("unknown object type %i at offset %"PRIuMAX" in %s",
+ type, (uintmax_t)obj_offset, p->pack_name);
+ type = OBJ_BAD;
}
unuse_pack(&w_curs);
return type;
struct delta_base_cache_entry *ent = delta_base_cache + hash;
ret = ent->data;
- if (ret && ent->p == p && ent->base_offset == base_offset)
- goto found_cache_entry;
- return unpack_entry(p, base_offset, type, base_size);
+ if (!ret || ent->p != p || ent->base_offset != base_offset)
+ return unpack_entry(p, base_offset, type, base_size);
-found_cache_entry:
if (!keep_cache) {
ent->data = NULL;
ent->lru.next->prev = ent->lru.prev;
* This is costly but should happen only in the presence
* of a corrupted pack, and is better than failing outright.
*/
- struct revindex_entry *revidx = find_pack_revindex(p, base_offset);
- const unsigned char *base_sha1 =
- nth_packed_object_sha1(p, revidx->nr);
+ struct revindex_entry *revidx;
+ const unsigned char *base_sha1;
+ revidx = find_pack_revindex(p, base_offset);
+ if (!revidx)
+ return NULL;
+ base_sha1 = nth_packed_object_sha1(p, revidx->nr);
error("failed to read delta base object %s"
" at offset %"PRIuMAX" from %s",
sha1_to_hex(base_sha1), (uintmax_t)base_offset,
return result;
}
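+/*
+ * When set (fsck does this), unpack_entry() verifies the CRC of each
+ * packed object against the version 2 pack index before unpacking it.
+ */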
+int do_check_packed_object_crc;
+
void *unpack_entry(struct packed_git *p, off_t obj_offset,
enum object_type *type, unsigned long *sizep)
{
off_t curpos = obj_offset;
void *data;
+ if (do_check_packed_object_crc && p->index_version > 1) {
+ struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
+ unsigned long len = revidx[1].offset - obj_offset;
+ if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
+ const unsigned char *sha1 =
+ nth_packed_object_sha1(p, revidx->nr);
+ error("bad packed object CRC for %s",
+ sha1_to_hex(sha1));
+ mark_bad_packed_object(p, sha1);
+ unuse_pack(&w_curs);
+ return NULL;
+ }
+ }
+
*type = unpack_object_header(p, &w_curs, &curpos, sizep);
switch (*type) {
case OBJ_OFS_DELTA:
if (!find_pack_entry(sha1, &e, NULL))
return status;
}
- return packed_object_info(e.p, e.offset, sizep);
+
+ status = packed_object_info(e.p, e.offset, sizep);
+ if (status < 0) {
+ mark_bad_packed_object(e.p, sha1);
+ status = sha1_object_info(sha1, sizep);
+ }
+
+ return status;
}
static void *read_packed_sha1(const unsigned char *sha1,
static int cached_object_nr, cached_object_alloc;
static struct cached_object empty_tree = {
- /* empty tree sha1: 4b825dc642cb6eb9a060e54bf8d69288fbee4904 */
- "\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60"
- "\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04",
+ EMPTY_TREE_SHA1_BIN,
OBJ_TREE,
"",
0
const char *type, unsigned char *sha1,
char *hdr, int *hdrlen)
{
- SHA_CTX c;
+ git_SHA_CTX c;
/* Generate the header */
*hdrlen = sprintf(hdr, "%s %lu", type, len)+1;
/* Sha1.. */
- SHA1_Init(&c);
- SHA1_Update(&c, hdr, *hdrlen);
- SHA1_Update(&c, buf, len);
- SHA1_Final(sha1, &c);
+ git_SHA1_Init(&c);
+ git_SHA1_Update(&c, hdr, *hdrlen);
+ git_SHA1_Update(&c, buf, len);
+ git_SHA1_Final(sha1, &c);
}
/*
filename = sha1_file_name(sha1);
fd = create_tmpfile(tmpfile, sizeof(tmpfile), filename);
if (fd < 0) {
- if (errno == EPERM)
+ if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s\n", get_object_directory());
else
return error("unable to create temporary sha1 filename %s: %s\n", tmpfile, strerror(errno));
return has_loose_object(sha1);
}
-int index_pipe(unsigned char *sha1, int fd, const char *type, int write_object)
-{
- struct strbuf buf;
- int ret;
-
- strbuf_init(&buf, 0);
- if (strbuf_read(&buf, fd, 4096) < 0) {
- strbuf_release(&buf);
- return -1;
- }
-
- if (!type)
- type = blob_type;
- if (write_object)
- ret = write_sha1_file(buf.buf, buf.len, type, sha1);
- else
- ret = hash_sha1_file(buf.buf, buf.len, type, sha1);
- strbuf_release(&buf);
-
- return ret;
-}
-
-int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
- enum object_type type, const char *path)
+static int index_mem(unsigned char *sha1, void *buf, size_t size,
+ int write_object, enum object_type type, const char *path)
{
- size_t size = xsize_t(st->st_size);
- void *buf = NULL;
int ret, re_allocated = 0;
- if (size)
- buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
- close(fd);
-
if (!type)
type = OBJ_BLOB;
/*
* Convert blobs to git internal format
*/
- if ((type == OBJ_BLOB) && S_ISREG(st->st_mode)) {
- struct strbuf nbuf;
- strbuf_init(&nbuf, 0);
+ if ((type == OBJ_BLOB) && path) {
+ struct strbuf nbuf = STRBUF_INIT;
if (convert_to_git(path, buf, size, &nbuf,
write_object ? safe_crlf : 0)) {
- munmap(buf, size);
buf = strbuf_detach(&nbuf, &size);
re_allocated = 1;
}
ret = write_sha1_file(buf, size, typename(type), sha1);
else
ret = hash_sha1_file(buf, size, typename(type), sha1);
- if (re_allocated) {
+ if (re_allocated)
free(buf);
- return ret;
- }
- if (size)
+ return ret;
+}
+
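+/*
+ * Read the object data via mmap(2) for regular files, or strbuf_read()
+ * for pipes and other non-regular files, then hand it to index_mem().
+ */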
+int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
+ enum object_type type, const char *path)
+{
+ int ret;
+ size_t size = xsize_t(st->st_size);
+
+ if (!S_ISREG(st->st_mode)) {
+ struct strbuf sbuf = STRBUF_INIT;
+ if (strbuf_read(&sbuf, fd, 4096) >= 0)
+ ret = index_mem(sha1, sbuf.buf, sbuf.len, write_object,
+ type, path);
+ else
+ ret = -1;
+ strbuf_release(&sbuf);
+ } else if (size) {
+ void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ ret = index_mem(sha1, buf, size, write_object, type, path);
munmap(buf, size);
+ } else
+ ret = index_mem(sha1, NULL, size, write_object, type, path);
+ close(fd);
return ret;
}
# Copyright (c) 2007 Johannes E. Schindelin
#
-test_description='git-fast-export'
+test_description='git fast-export'
. ./test-lib.sh
test_expect_success 'setup' '
git config i18n.commitencoding ISO-8859-1 &&
# use author and committer name in ISO-8859-1 to match it.
- . ../t3901-8859-1.txt &&
+ . "$TEST_DIRECTORY"/t3901-8859-1.txt &&
test_tick &&
echo rosten >file &&
git commit -s -m den file &&
'
+ test_expect_success 'fast-export | fast-import when master is tagged' '
+
+ git tag -m msg last &&
+ git fast-export -C -C --signed-tags=strip --all > output &&
+ test $(grep -c "^tag " output) = 3
+
+ '
+
test_done