SCRIPT_SH += git-am.sh
SCRIPT_SH += git-bisect.sh
- SCRIPT_SH += git-clone.sh
SCRIPT_SH += git-filter-branch.sh
SCRIPT_SH += git-lost-found.sh
SCRIPT_SH += git-merge-octopus.sh
LIB_H += dir.h
LIB_H += fsck.h
LIB_H += git-compat-util.h
+LIB_H += graph.h
LIB_H += grep.h
LIB_H += hash.h
LIB_H += list-objects.h
LIB_OBJS += environment.o
LIB_OBJS += exec_cmd.o
LIB_OBJS += fsck.o
+LIB_OBJS += graph.o
LIB_OBJS += grep.o
LIB_OBJS += hash.o
LIB_OBJS += help.o
LIB_OBJS += mailmap.o
LIB_OBJS += match-trees.o
LIB_OBJS += merge-file.o
+LIB_OBJS += name-hash.o
LIB_OBJS += object.o
LIB_OBJS += pack-check.o
LIB_OBJS += pack-revindex.o
BUILTIN_OBJS += builtin-checkout-index.o
BUILTIN_OBJS += builtin-checkout.o
BUILTIN_OBJS += builtin-clean.o
+ BUILTIN_OBJS += builtin-clone.o
BUILTIN_OBJS += builtin-commit-tree.o
BUILTIN_OBJS += builtin-commit.o
BUILTIN_OBJS += builtin-config.o
ifeq ($(uname_S),AIX)
NO_STRCASESTR=YesPlease
NO_MEMMEM = YesPlease
+ NO_MKDTEMP = YesPlease
NO_STRLCPY = YesPlease
+ FREAD_READS_DIRECTORIES = UnfortunatelyYes
+ INTERNAL_QSORT = UnfortunatelyYes
NEEDS_LIBICONV=YesPlease
+ BASIC_CFLAGS += -D_LARGE_FILES
endif
ifeq ($(uname_S),GNU)
# GNU/Hurd
/* Merge everything on the command line, but not --tags */
for (rm = ref_map; rm; rm = rm->next)
rm->merge = 1;
- if (tags == TAGS_SET) {
- struct refspec refspec;
- refspec.src = "refs/tags/";
- refspec.dst = "refs/tags/";
- refspec.pattern = 1;
- refspec.force = 0;
- get_fetch_map(remote_refs, &refspec, &tail, 0);
- }
+ if (tags == TAGS_SET)
+ get_fetch_map(remote_refs, tag_refspec, &tail, 0);
} else {
/* Use the defaults */
struct remote *remote = transport->remote;
if (ref)
update_local_ref(ref, what, verbose, note);
- else if (verbose)
+ else
sprintf(note, "* %-*s %-*s -> FETCH_HEAD",
SUMMARY_WIDTH, *kind ? kind : "branch",
REFCOL_WIDTH, *what ? what : "HEAD");
- else
- *note = '\0';
if (*note) {
if (!shown_url) {
fprintf(stderr, "From %.*s\n",
will_fetch(head, ref->old_sha1))) {
path_list_insert(ref_name, &new_refs);
- rm = alloc_ref(strlen(ref_name) + 1);
- strcpy(rm->name, ref_name);
- rm->peer_ref = alloc_ref(strlen(ref_name) + 1);
- strcpy(rm->peer_ref->name, ref_name);
+ rm = alloc_ref_from_str(ref_name);
+ rm->peer_ref = alloc_ref_from_str(ref_name);
hashcpy(rm->old_sha1, ref_sha1);
**tail = rm;
}
}
- static void copy_templates(const char *git_dir, int len, const char *template_dir)
+ static void copy_templates(const char *template_dir)
{
char path[PATH_MAX];
char template_path[PATH_MAX];
int template_len;
DIR *dir;
+ const char *git_dir = get_git_dir();
+ int len = strlen(git_dir);
if (!template_dir)
template_dir = getenv(TEMPLATE_DIR_ENVIRONMENT);
}
memcpy(path, git_dir, len);
+ if (len && path[len - 1] != '/')
+ path[len++] = '/';
path[len] = 0;
copy_templates_1(path, len,
template_path, template_len,
closedir(dir);
}
- static int create_default_files(const char *git_dir, const char *template_path)
+ static int create_default_files(const char *template_path)
{
+ const char *git_dir = get_git_dir();
unsigned len = strlen(git_dir);
static char path[PATH_MAX];
struct stat st1;
/*
* Create .git/refs/{heads,tags}
*/
- strcpy(path + len, "refs");
- safe_create_dir(path, 1);
- strcpy(path + len, "refs/heads");
- safe_create_dir(path, 1);
- strcpy(path + len, "refs/tags");
- safe_create_dir(path, 1);
+ safe_create_dir(git_path("refs"), 1);
+ safe_create_dir(git_path("refs/heads"), 1);
+ safe_create_dir(git_path("refs/tags"), 1);
/* First copy the templates -- we might have the default
* config file there, in which case we would want to read
* from it after installing.
*/
- path[len] = 0;
- copy_templates(path, len, template_path);
+ copy_templates(template_path);
git_config(git_default_config);
* shared-repository settings, we would need to fix them up.
*/
if (shared_repository) {
- path[len] = 0;
- adjust_shared_perm(path);
- strcpy(path + len, "refs");
- adjust_shared_perm(path);
- strcpy(path + len, "refs/heads");
- adjust_shared_perm(path);
- strcpy(path + len, "refs/tags");
- adjust_shared_perm(path);
+ adjust_shared_perm(get_git_dir());
+ adjust_shared_perm(git_path("refs"));
+ adjust_shared_perm(git_path("refs/heads"));
+ adjust_shared_perm(git_path("refs/tags"));
}
/*
/* allow template config file to override the default */
if (log_all_ref_updates == -1)
git_config_set("core.logallrefupdates", "true");
- if (work_tree != git_work_tree_cfg)
+ if (prefixcmp(git_dir, work_tree) ||
+ strcmp(git_dir + strlen(work_tree), "/.git")) {
git_config_set("core.worktree", work_tree);
+ }
}
- /* Check if symlink is supported in the work tree */
if (!reinit) {
+ /* Check if symlink is supported in the work tree */
path[len] = 0;
strcpy(path + len, "tXXXXXX");
if (!close(xmkstemp(path)) &&
unlink(path); /* good */
else
git_config_set("core.symlinks", "false");
+
+ /* Check if the filesystem is case-insensitive */
+ path[len] = 0;
+ strcpy(path + len, "CoNfIg");
+ if (!access(path, F_OK))
+ git_config_set("core.ignorecase", "true");
}
return reinit;
}
- static void guess_repository_type(const char *git_dir)
+ int init_db(const char *template_dir, unsigned int flags)
+ {
+ const char *sha1_dir;
+ char *path;
+ int len, reinit;
+
+ safe_create_dir(get_git_dir(), 0);
+
+ /* Check to see if the repository version is right.
+ * Note that a newly created repository does not have a
+ * config file, so this will not fail. What we are catching
+ * is an attempt to reinitialize a new repository with an old tool.
+ */
+ check_repository_format();
+
+ reinit = create_default_files(template_dir);
+
+ sha1_dir = get_object_directory();
+ len = strlen(sha1_dir);
+ path = xmalloc(len + 40);
+ memcpy(path, sha1_dir, len);
+
+ safe_create_dir(sha1_dir, 1);
+ strcpy(path+len, "/pack");
+ safe_create_dir(path, 1);
+ strcpy(path+len, "/info");
+ safe_create_dir(path, 1);
+
+ if (shared_repository) {
+ char buf[10];
+ /* We do not spell "group" and such, so that
+ * the configuration can be read by older versions
+ * of git. Note that we use octal numbers for new share modes,
+ * and compatibility values for PERM_GROUP and
+ * PERM_EVERYBODY.
+ */
+ if (shared_repository == PERM_GROUP)
+ sprintf(buf, "%d", OLD_PERM_GROUP);
+ else if (shared_repository == PERM_EVERYBODY)
+ sprintf(buf, "%d", OLD_PERM_EVERYBODY);
+ else
+ sprintf(buf, "0%o", shared_repository);
+ git_config_set("core.sharedrepository", buf);
+ git_config_set("receive.denyNonFastforwards", "true");
+ }
+
+ if (!(flags & INIT_DB_QUIET))
+ printf("%s%s Git repository in %s/\n",
+ reinit ? "Reinitialized existing" : "Initialized empty",
+ shared_repository ? " shared" : "",
+ get_git_dir());
+
+ return 0;
+ }
+
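The point of hoisting the body of cmd_init_db() into a public init_db() (its declaration is added to cache.h below) is to let other builtins, notably the new builtin-clone.o added to the Makefile above, create a repository without spawning git-init. A minimal sketch of such a caller, with made-up names, loosely along the lines of what a clone builtin would need; this is an illustration, not code from this patch:

    #include "cache.h"

    /* Hypothetical caller of the new init_db() API (names are illustrative). */
    static void create_target_repository(const char *git_dir, const char *work_tree,
                                          const char *option_template, int option_quiet)
    {
            set_git_dir(make_absolute_path(git_dir));
            if (work_tree)
                    set_git_work_tree(work_tree);   /* non-bare: fix the work tree first */
            init_db(option_template, option_quiet ? INIT_DB_QUIET : 0);
    }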
+ static int guess_repository_type(const char *git_dir)
{
char cwd[PATH_MAX];
const char *slash;
- if (0 <= is_bare_repository_cfg)
- return;
- if (!git_dir)
- return;
-
/*
* "GIT_DIR=. git init" is always bare.
* "GIT_DIR=`pwd` git init" too.
*/
if (!strcmp(".", git_dir))
- goto force_bare;
+ return 1;
if (!getcwd(cwd, sizeof(cwd)))
die("cannot tell cwd");
if (!strcmp(git_dir, cwd))
- goto force_bare;
+ return 1;
/*
* "GIT_DIR=.git or GIT_DIR=something/.git is usually not.
*/
if (!strcmp(git_dir, ".git"))
- return;
+ return 0;
slash = strrchr(git_dir, '/');
if (slash && !strcmp(slash, "/.git"))
- return;
+ return 0;
/*
* Otherwise it is often bare. At this point
* we are just guessing.
*/
- force_bare:
- is_bare_repository_cfg = 1;
- return;
+ return 1;
}
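For what the boolean return value means in practice, here is a throwaway self-check that could sit next to guess_repository_type() in the same file (the function is static); it is illustrative only and assumes the test is not run from one of the directories named:

    #include <assert.h>

    static void test_guess_repository_type(void)
    {
            /* "GIT_DIR=. git init" (and GIT_DIR=`pwd`) are treated as bare. */
            assert(guess_repository_type(".") == 1);
            /* The conventional <worktree>/.git layouts are not bare. */
            assert(guess_repository_type(".git") == 0);
            assert(guess_repository_type("sub/project/.git") == 0);
            /* Anything else is only a guess, and the guess is "bare". */
            assert(guess_repository_type("/srv/project.git") == 1);
    }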
static const char init_db_usage[] =
int cmd_init_db(int argc, const char **argv, const char *prefix)
{
const char *git_dir;
- const char *sha1_dir;
const char *template_dir = NULL;
- char *path;
- int len, i, reinit;
- int quiet = 0;
+ unsigned int flags = 0;
+ int i;
for (i = 1; i < argc; i++, argv++) {
const char *arg = argv[1];
else if (!prefixcmp(arg, "--shared="))
shared_repository = git_config_perm("arg", arg+9);
else if (!strcmp(arg, "-q") || !strcmp(arg, "--quiet"))
- quiet = 1;
+ flags |= INIT_DB_QUIET;
else
usage(init_db_usage);
}
GIT_WORK_TREE_ENVIRONMENT,
GIT_DIR_ENVIRONMENT);
- guess_repository_type(git_dir);
-
- if (is_bare_repository_cfg <= 0) {
- git_work_tree_cfg = xcalloc(PATH_MAX, 1);
- if (!getcwd(git_work_tree_cfg, PATH_MAX))
- die ("Cannot access current working directory.");
- if (access(get_git_work_tree(), X_OK))
- die ("Cannot access work tree '%s'",
- get_git_work_tree());
- }
-
/*
* Set up the default .git directory contents
*/
- git_dir = getenv(GIT_DIR_ENVIRONMENT);
if (!git_dir)
git_dir = DEFAULT_GIT_DIR_ENVIRONMENT;
- safe_create_dir(git_dir, 0);
-
- /* Check to see if the repository version is right.
- * Note that a newly created repository does not have
- * config file, so this will not fail. What we are catching
- * is an attempt to reinitialize new repository with an old tool.
- */
- check_repository_format();
-
- reinit = create_default_files(git_dir, template_dir);
-
- /*
- * And set up the object store.
- */
- sha1_dir = get_object_directory();
- len = strlen(sha1_dir);
- path = xmalloc(len + 40);
- memcpy(path, sha1_dir, len);
-
- safe_create_dir(sha1_dir, 1);
- strcpy(path+len, "/pack");
- safe_create_dir(path, 1);
- strcpy(path+len, "/info");
- safe_create_dir(path, 1);
- if (shared_repository) {
- char buf[10];
- /* We do not spell "group" and such, so that
- * the configuration can be read by older version
- * of git. Note, we use octal numbers for new share modes,
- * and compatibility values for PERM_GROUP and
- * PERM_EVERYBODY.
- */
- if (shared_repository == PERM_GROUP)
- sprintf(buf, "%d", OLD_PERM_GROUP);
- else if (shared_repository == PERM_EVERYBODY)
- sprintf(buf, "%d", OLD_PERM_EVERYBODY);
- else
- sprintf(buf, "0%o", shared_repository);
- git_config_set("core.sharedrepository", buf);
- git_config_set("receive.denyNonFastforwards", "true");
+ if (is_bare_repository_cfg < 0)
+ is_bare_repository_cfg = guess_repository_type(git_dir);
+
+ if (!is_bare_repository_cfg) {
+ if (git_dir) {
+ const char *git_dir_parent = strrchr(git_dir, '/');
+ if (git_dir_parent) {
+ char *rel = xstrndup(git_dir, git_dir_parent - git_dir);
+ git_work_tree_cfg = xstrdup(make_absolute_path(rel));
+ free(rel);
+ }
+ }
+ if (!git_work_tree_cfg) {
+ git_work_tree_cfg = xcalloc(PATH_MAX, 1);
+ if (!getcwd(git_work_tree_cfg, PATH_MAX))
+ die ("Cannot access current working directory.");
+ }
+ if (access(get_git_work_tree(), X_OK))
+ die ("Cannot access work tree '%s'",
+ get_git_work_tree());
}
- if (!quiet)
- printf("%s%s Git repository in %s/\n",
- reinit ? "Reinitialized existing" : "Initialized empty",
- shared_repository ? " shared" : "",
- git_dir);
+ set_git_dir(make_absolute_path(git_dir));
- return 0;
+ return init_db(template_dir, flags);
}
#define CE_UPDATE (0x10000)
#define CE_REMOVE (0x20000)
#define CE_UPTODATE (0x40000)
+#define CE_ADDED (0x80000)
#define CE_HASHED (0x100000)
#define CE_UNHASHED (0x200000)
dst->ce_flags = (dst->ce_flags & ~CE_STATE_MASK) | state;
}
-/*
- * We don't actually *remove* it, we can just mark it invalid so that
- * we won't find it in lookups.
- *
- * Not only would we have to search the lists (simple enough), but
- * we'd also have to rehash other hash buckets in case this makes the
- * hash bucket empty (common). So it's much better to just mark
- * it.
- */
-static inline void remove_index_entry(struct cache_entry *ce)
-{
- ce->ce_flags |= CE_UNHASHED;
-}
-
static inline unsigned create_ce_flags(size_t len, unsigned stage)
{
if (len >= CE_NAMEMASK)
extern struct index_state the_index;
+/* Name hashing */
+extern void add_name_hash(struct index_state *istate, struct cache_entry *ce);
+/*
+ * We don't actually *remove* it; we just mark it invalid so that
+ * it won't be found in lookups.
+ *
+ * Not only would we have to search the lists (simple enough), but
+ * we'd also have to rehash other hash buckets in case this makes the
+ * hash bucket empty (common). So it's much better to just mark
+ * it.
+ */
+static inline void remove_name_hash(struct cache_entry *ce)
+{
+ ce->ce_flags |= CE_UNHASHED;
+}
+
+
#ifndef NO_THE_INDEX_COMPATIBILITY_MACROS
#define active_cache (the_index.cache)
#define active_nr (the_index.cache_nr)
#define add_cache_entry(ce, option) add_index_entry(&the_index, (ce), (option))
#define remove_cache_entry_at(pos) remove_index_entry_at(&the_index, (pos))
#define remove_file_from_cache(path) remove_file_from_index(&the_index, (path))
+#define add_to_cache(path, st, verbose) add_to_index(&the_index, (path), (st), (verbose))
#define add_file_to_cache(path, verbose) add_file_to_index(&the_index, (path), (verbose))
#define refresh_cache(flags) refresh_index(&the_index, (flags), NULL, NULL)
#define ce_match_stat(ce, st, options) ie_match_stat(&the_index, (ce), (st), (options))
#define ce_modified(ce, st, options) ie_modified(&the_index, (ce), (st), (options))
-#define cache_name_exists(name, namelen) index_name_exists(&the_index, (name), (namelen))
+#define cache_name_exists(name, namelen, igncase) index_name_exists(&the_index, (name), (namelen), (igncase))
#endif
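index_name_exists() now hands back the matching cache entry and takes an igncase flag, pairing with the new ignore_case global (set from core.ignorecase, which init-db now detects above by probing for "CoNfIg"). A hedged sketch of how a caller might use the pair; the helper below is made up for illustration and is not part of the patch:

    #include "cache.h"

    /* Does an entry that differs from "path" only in case already exist? */
    static int collides_ignoring_case(const char *path)
    {
            struct cache_entry *ce;

            if (!ignore_case)
                    return 0;       /* case-sensitive setup: nothing to do */
            ce = index_name_exists(&the_index, path, strlen(path), 1);
            return ce && strcmp(ce->name, path);    /* found, but spelled differently */
    }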
enum object_type {
extern char *get_graft_file(void);
extern int set_git_dir(const char *path);
extern const char *get_git_work_tree(void);
+extern const char *read_gitfile_gently(const char *path);
+ extern void set_git_work_tree(const char *tree);
#define ALTERNATE_DB_ENVIRONMENT "GIT_ALTERNATE_OBJECT_DIRECTORIES"
extern void verify_filename(const char *prefix, const char *name);
extern void verify_non_filename(const char *prefix, const char *name);
+ #define INIT_DB_QUIET 0x0001
+
+ extern int init_db(const char *template_dir, unsigned int flags);
+
#define alloc_nr(x) (((x)+16)*3/2)
/*
extern int discard_index(struct index_state *);
extern int unmerged_index(const struct index_state *);
extern int verify_path(const char *path);
-extern int index_name_exists(struct index_state *istate, const char *name, int namelen);
+extern struct cache_entry *index_name_exists(struct index_state *istate, const char *name, int namelen, int igncase);
extern int index_name_pos(const struct index_state *, const char *name, int namelen);
#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
extern struct cache_entry *refresh_cache_entry(struct cache_entry *ce, int really);
extern int remove_index_entry_at(struct index_state *, int pos);
extern int remove_file_from_index(struct index_state *, const char *path);
+extern int add_to_index(struct index_state *, const char *path, struct stat *, int verbose);
extern int add_file_to_index(struct index_state *, const char *path, int verbose);
extern struct cache_entry *make_cache_entry(unsigned int mode, const unsigned char *sha1, const char *path, int stage, int refresh);
extern int ce_same_name(struct cache_entry *a, struct cache_entry *b);
#define REFRESH_UNMERGED 0x0002 /* allow unmerged */
#define REFRESH_QUIET 0x0004 /* be quiet about it */
#define REFRESH_IGNORE_MISSING 0x0008 /* ignore non-existent */
+#define REFRESH_IGNORE_SUBMODULES 0x0010 /* ignore submodules */
extern int refresh_index(struct index_state *, unsigned int flags, const char **pathspec, char *seen);
struct lock_file {
char filename[PATH_MAX];
};
extern int hold_lock_file_for_update(struct lock_file *, const char *path, int);
+ extern int hold_lock_file_for_append(struct lock_file *, const char *path, int);
extern int commit_lock_file(struct lock_file *);
extern int hold_locked_index(struct lock_file *, int);
extern int trust_executable_bit;
extern int quote_path_fully;
extern int has_symlinks;
+extern int ignore_case;
extern int assume_unchanged;
extern int prefer_symlink_refs;
extern int log_all_ref_updates;
BRANCH_TRACK_EXPLICIT,
};
+enum rebase_setup_type {
+ AUTOREBASE_NEVER = 0,
+ AUTOREBASE_LOCAL,
+ AUTOREBASE_REMOTE,
+ AUTOREBASE_ALWAYS,
+};
+
extern enum branch_track git_branch_track;
+extern enum rebase_setup_type autorebase;
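The rebase_setup_type enum backs the new autorebase setting (defaulting to AUTOREBASE_NEVER in environment.c below). The config key and its parsing are not part of this excerpt, so treat the key name branch.autosetuprebase and the value spellings as assumptions; a sketch of the mapping:

    #include "cache.h"

    /* Illustrative parser; returns -1 on an unrecognized value. */
    static int parse_autorebase_value(const char *value, enum rebase_setup_type *out)
    {
            if (!strcmp(value, "never"))
                    *out = AUTOREBASE_NEVER;
            else if (!strcmp(value, "local"))
                    *out = AUTOREBASE_LOCAL;
            else if (!strcmp(value, "remote"))
                    *out = AUTOREBASE_REMOTE;
            else if (!strcmp(value, "always"))
                    *out = AUTOREBASE_ALWAYS;
            else
                    return -1;
            return 0;
    }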
#define GIT_REPO_VERSION 0
extern int repository_format_version;
extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1);
extern int write_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
+extern int force_object_loose(const unsigned char *sha1, time_t mtime);
extern int check_sha1_signature(const unsigned char *sha1, void *buf, unsigned long size, const char *type);
};
extern int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath);
-extern int has_symlink_leading_path(const char *name, char *last_symlink);
+extern int has_symlink_leading_path(int len, const char *name);
extern struct alternate_object_database {
struct alternate_object_database *next;
char base[FLEX_ARRAY]; /* more */
} *alt_odb_list;
extern void prepare_alt_odb(void);
+ extern void add_to_alternates_file(const char *reference);
struct pack_window {
struct pack_window *next;
struct ref *next;
unsigned char old_sha1[20];
unsigned char new_sha1[20];
+ char *symref;
unsigned int force:1,
merge:1,
nonfastforward:1,
#define MAX_GITNAME (1000)
extern char git_default_email[MAX_GITNAME];
extern char git_default_name[MAX_GITNAME];
+extern int user_ident_explicitly_given;
extern const char *git_commit_encoding;
extern const char *git_log_output_encoding;
extern int convert_to_working_tree(const char *path, const char *src, size_t len, struct strbuf *dst);
/* add */
-void add_files_to_cache(int verbose, const char *prefix, const char **pathspec);
+#define ADD_FILES_VERBOSE 01
+#define ADD_FILES_IGNORE_ERRORS 02
+/*
+ * returns 0 on success, or 1 if adding a file failed and
+ * ADD_FILES_IGNORE_ERRORS was specified in flags
+ */
+int add_files_to_cache(const char *prefix, const char **pathspec, int flags);
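add_files_to_cache() trades its verbose argument for a flags word and now reports whether any addition was skipped. A sketch of a tolerant caller, in the spirit of an --ignore-errors mode; the wrapper itself is invented for illustration:

    #include "cache.h"

    static int add_working_tree_files(const char *prefix, const char **pathspec,
                                      int verbose, int ignore_errors)
    {
            int flags = (verbose ? ADD_FILES_VERBOSE : 0) |
                        (ignore_errors ? ADD_FILES_IGNORE_ERRORS : 0);

            /* Non-zero only when ADD_FILES_IGNORE_ERRORS swallowed a failure. */
            if (add_files_to_cache(prefix, pathspec, flags))
                    return error("some files could not be added");
            return 0;
    }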
/* diff.c */
extern int diff_auto_refresh_index;
--- /dev/null
- err=$?
+ #!/bin/sh
+ #
+ # Copyright (c) 2005, Linus Torvalds
+ # Copyright (c) 2005, Junio C Hamano
+ #
+ # Clone a repository into a different directory that does not yet exist.
+
+ # See git-sh-setup for why.
+ unset CDPATH
+
+ OPTIONS_SPEC="\
+ git-clone [options] [--] <repo> [<dir>]
+ --
+ n,no-checkout don't create a checkout
+ bare create a bare repository
+ naked create a bare repository
+ l,local to clone from a local repository
+ no-hardlinks don't use local hardlinks, always copy
+ s,shared setup as a shared repository
+ template= path to the template directory
+ q,quiet be quiet
+ reference= reference repository
+ o,origin= use <name> instead of 'origin' to track upstream
+ u,upload-pack= path to git-upload-pack on the remote
+ depth= create a shallow clone of that depth
+
+ use-separate-remote compatibility, do not use
+ no-separate-remote compatibility, do not use"
+
+ die() {
+ echo >&2 "$@"
+ exit 1
+ }
+
+ usage() {
+ exec "$0" -h
+ }
+
+ eval "$(echo "$OPTIONS_SPEC" | git rev-parse --parseopt -- "$@" || echo exit $?)"
+
+ get_repo_base() {
+ (
+ cd "`/bin/pwd`" &&
+ cd "$1" || cd "$1.git" &&
+ {
+ cd .git
+ pwd
+ }
+ ) 2>/dev/null
+ }
+
+ if [ -n "$GIT_SSL_NO_VERIFY" -o \
+ "`git config --bool http.sslVerify`" = false ]; then
+ curl_extra_args="-k"
+ fi
+
+ http_fetch () {
+ # $1 = Remote, $2 = Local
+ curl -nsfL $curl_extra_args "$1" >"$2"
+ curl_exit_status=$?
+ case $curl_exit_status in
+ 126|127) exit ;;
+ *) return $curl_exit_status ;;
+ esac
+ }
+
+ clone_dumb_http () {
+ # $1 - remote, $2 - local
+ cd "$2" &&
+ clone_tmp="$GIT_DIR/clone-tmp" &&
+ mkdir -p "$clone_tmp" || exit 1
+ if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
+ "`git config --bool http.noEPSV`" = true ]; then
+ curl_extra_args="${curl_extra_args} --disable-epsv"
+ fi
+ http_fetch "$1/info/refs" "$clone_tmp/refs" ||
+ die "Cannot get remote repository information.
+ Perhaps git-update-server-info needs to be run there?"
+ test "z$quiet" = z && v=-v || v=
+ while read sha1 refname
+ do
+ name=`expr "z$refname" : 'zrefs/\(.*\)'` &&
+ case "$name" in
+ *^*) continue;;
+ esac
+ case "$bare,$name" in
+ yes,* | ,heads/* | ,tags/*) ;;
+ *) continue ;;
+ esac
+ if test -n "$use_separate_remote" &&
+ branch_name=`expr "z$name" : 'zheads/\(.*\)'`
+ then
+ tname="remotes/$origin/$branch_name"
+ else
+ tname=$name
+ fi
+ git-http-fetch $v -a -w "$tname" "$sha1" "$1" || exit 1
+ done <"$clone_tmp/refs"
+ rm -fr "$clone_tmp"
+ http_fetch "$1/HEAD" "$GIT_DIR/REMOTE_HEAD" ||
+ rm -f "$GIT_DIR/REMOTE_HEAD"
+ if test -f "$GIT_DIR/REMOTE_HEAD"; then
+ head_sha1=`cat "$GIT_DIR/REMOTE_HEAD"`
+ case "$head_sha1" in
+ 'ref: refs/'*)
+ ;;
+ *)
+ git-http-fetch $v -a "$head_sha1" "$1" ||
+ rm -f "$GIT_DIR/REMOTE_HEAD"
+ ;;
+ esac
+ fi
+ }
+
+ quiet=
+ local=no
+ use_local_hardlink=yes
+ local_shared=no
+ unset template
+ no_checkout=
+ upload_pack=
+ bare=
+ reference=
+ origin=
+ origin_override=
+ use_separate_remote=t
+ depth=
+ no_progress=
+ local_explicitly_asked_for=
+ test -t 1 || no_progress=--no-progress
+
+ while test $# != 0
+ do
+ case "$1" in
+ -n|--no-checkout)
+ no_checkout=yes ;;
+ --naked|--bare)
+ bare=yes ;;
+ -l|--local)
+ local_explicitly_asked_for=yes
+ use_local_hardlink=yes
+ ;;
+ --no-hardlinks)
+ use_local_hardlink=no ;;
+ -s|--shared)
+ local_shared=yes ;;
+ --template)
+ shift; template="--template=$1" ;;
+ -q|--quiet)
+ quiet=-q ;;
+ --use-separate-remote|--no-separate-remote)
+ die "clones are always made with separate-remote layout" ;;
+ --reference)
+ shift; reference="$1" ;;
+ -o|--origin)
+ shift;
+ case "$1" in
+ '')
+ usage ;;
+ */*)
+ die "'$1' is not suitable for an origin name"
+ esac
+ git check-ref-format "heads/$1" ||
+ die "'$1' is not suitable for a branch name"
+ test -z "$origin_override" ||
+ die "Do not give more than one --origin options."
+ origin_override=yes
+ origin="$1"
+ ;;
+ -u|--upload-pack)
+ shift
+ upload_pack="--upload-pack=$1" ;;
+ --depth)
+ shift
+ depth="--depth=$1" ;;
+ --)
+ shift
+ break ;;
+ *)
+ usage ;;
+ esac
+ shift
+ done
+
+ repo="$1"
+ test -n "$repo" ||
+ die 'you must specify a repository to clone.'
+
+ # --bare implies --no-checkout and --no-separate-remote
+ if test yes = "$bare"
+ then
+ if test yes = "$origin_override"
+ then
+ die '--bare and --origin $origin options are incompatible.'
+ fi
+ no_checkout=yes
+ use_separate_remote=
+ fi
+
+ if test -z "$origin"
+ then
+ origin=origin
+ fi
+
+ # Turn the source into an absolute path if
+ # it is local
+ if base=$(get_repo_base "$repo"); then
+ repo="$base"
+ if test -z "$depth"
+ then
+ local=yes
+ fi
+ elif test -f "$repo"
+ then
+ case "$repo" in /*) ;; *) repo="$PWD/$repo" ;; esac
+ fi
+
+ # Decide the directory name of the new repository
+ if test -n "$2"
+ then
+ dir="$2"
+ test $# = 2 || die "excess parameter to git-clone"
+ else
+ # Derive one from the repository name
+ # Try using "humanish" part of source repo if user didn't specify one
+ if test -f "$repo"
+ then
+ # Cloning from a bundle
+ dir=$(echo "$repo" | sed -e 's|/*\.bundle$||' -e 's|.*/||g')
+ else
+ dir=$(echo "$repo" |
+ sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
+ fi
+ fi
+
+ [ -e "$dir" ] && die "destination directory '$dir' already exists."
+ [ yes = "$bare" ] && unset GIT_WORK_TREE
+ [ -n "$GIT_WORK_TREE" ] && [ -e "$GIT_WORK_TREE" ] &&
+ die "working tree '$GIT_WORK_TREE' already exists."
+ D=
+ W=
+ cleanup() {
-trap cleanup 0
+ test -z "$D" && rm -rf "$dir"
+ test -z "$W" && test -n "$GIT_WORK_TREE" && rm -rf "$GIT_WORK_TREE"
+ cd ..
+ test -n "$D" && rm -rf "$D"
+ test -n "$W" && rm -rf "$W"
+ exit $err
+ }
- find objects -depth -print | cpio $cpio_quiet_flag -pumd$l "$GIT_DIR/" || \
++trap 'err=$?; cleanup' 0
+ mkdir -p "$dir" && D=$(cd "$dir" && pwd) || usage
+ test -n "$GIT_WORK_TREE" && mkdir -p "$GIT_WORK_TREE" &&
+ W=$(cd "$GIT_WORK_TREE" && pwd) && GIT_WORK_TREE="$W" && export GIT_WORK_TREE
+ if test yes = "$bare" || test -n "$GIT_WORK_TREE"; then
+ GIT_DIR="$D"
+ else
+ GIT_DIR="$D/.git"
+ fi &&
+ export GIT_DIR &&
+ GIT_CONFIG="$GIT_DIR/config" git-init $quiet ${template+"$template"} || usage
+
+ if test -n "$bare"
+ then
+ GIT_CONFIG="$GIT_DIR/config" git config core.bare true
+ fi
+
+ if test -n "$reference"
+ then
+ ref_git=
+ if test -d "$reference"
+ then
+ if test -d "$reference/.git/objects"
+ then
+ ref_git="$reference/.git"
+ elif test -d "$reference/objects"
+ then
+ ref_git="$reference"
+ fi
+ fi
+ if test -n "$ref_git"
+ then
+ ref_git=$(cd "$ref_git" && pwd)
+ echo "$ref_git/objects" >"$GIT_DIR/objects/info/alternates"
+ (
+ GIT_DIR="$ref_git" git for-each-ref \
+ --format='%(objectname) %(*objectname)'
+ ) |
+ while read a b
+ do
+ test -z "$a" ||
+ git update-ref "refs/reference-tmp/$a" "$a"
+ test -z "$b" ||
+ git update-ref "refs/reference-tmp/$b" "$b"
+ done
+ else
+ die "reference repository '$reference' is not a local directory."
+ fi
+ fi
+
+ rm -f "$GIT_DIR/CLONE_HEAD"
+
+ # We do local magic only when the user tells us to.
+ case "$local" in
+ yes)
+ ( cd "$repo/objects" ) ||
+ die "cannot chdir to local '$repo/objects'."
+
+ if test "$local_shared" = yes
+ then
+ mkdir -p "$GIT_DIR/objects/info"
+ echo "$repo/objects" >>"$GIT_DIR/objects/info/alternates"
+ else
+ cpio_quiet_flag=""
+ cpio --help 2>&1 | grep -- --quiet >/dev/null && \
+ cpio_quiet_flag=--quiet
+ l= &&
+ if test "$use_local_hardlink" = yes
+ then
+ # See if we can hardlink and drop "l" if not.
+ sample_file=$(cd "$repo" && \
+ find objects -type f -print | sed -e 1q)
+ # objects directory should not be empty because
+ # we are cloning!
+ test -f "$repo/$sample_file" ||
+ die "fatal: cannot clone empty repository"
+ if ln "$repo/$sample_file" "$GIT_DIR/objects/sample" 2>/dev/null
+ then
+ rm -f "$GIT_DIR/objects/sample"
+ l=l
+ elif test -n "$local_explicitly_asked_for"
+ then
+ echo >&2 "Warning: -l asked but cannot hardlink to $repo"
+ fi
+ fi &&
+ cd "$repo" &&
++ # Create dirs at the destination with fresh, umask-governed permissions
++ find objects -type d -print | (cd "$GIT_DIR" && xargs mkdir -p) &&
++ # Copy existing 0444 permissions on content
++ find objects ! -type d -print | cpio $cpio_quiet_flag -pumd$l "$GIT_DIR/" || \
+ exit 1
+ fi
+ git-ls-remote "$repo" >"$GIT_DIR/CLONE_HEAD" || exit 1
+ ;;
+ *)
+ case "$repo" in
+ rsync://*)
+ case "$depth" in
+ "") ;;
+ *) die "shallow over rsync not supported" ;;
+ esac
+ rsync $quiet -av --ignore-existing \
+ --exclude info "$repo/objects/" "$GIT_DIR/objects/" ||
+ exit
+ # Look at objects/info/alternates for rsync -- http will
+ # support it natively and git native ones will do it on the
+ # remote end. Not having that file is not a crime.
+ rsync -q "$repo/objects/info/alternates" \
+ "$GIT_DIR/TMP_ALT" 2>/dev/null ||
+ rm -f "$GIT_DIR/TMP_ALT"
+ if test -f "$GIT_DIR/TMP_ALT"
+ then
+ ( cd "$D" &&
+ . git-parse-remote &&
+ resolve_alternates "$repo" <"$GIT_DIR/TMP_ALT" ) |
+ while read alt
+ do
+ case "$alt" in 'bad alternate: '*) die "$alt";; esac
+ case "$quiet" in
+ '') echo >&2 "Getting alternate: $alt" ;;
+ esac
+ rsync $quiet -av --ignore-existing \
+ --exclude info "$alt" "$GIT_DIR/objects" || exit
+ done
+ rm -f "$GIT_DIR/TMP_ALT"
+ fi
+ git-ls-remote "$repo" >"$GIT_DIR/CLONE_HEAD" || exit 1
+ ;;
+ https://*|http://*|ftp://*)
+ case "$depth" in
+ "") ;;
+ *) die "shallow over http or ftp not supported" ;;
+ esac
+ if test -z "@@NO_CURL@@"
+ then
+ clone_dumb_http "$repo" "$D"
+ else
+ die "http transport not supported, rebuild Git with curl support"
+ fi
+ ;;
+ *)
+ if [ -f "$repo" ] ; then
+ git bundle unbundle "$repo" > "$GIT_DIR/CLONE_HEAD" ||
+ die "unbundle from '$repo' failed."
+ else
+ case "$upload_pack" in
+ '') git-fetch-pack --all -k $quiet $depth $no_progress "$repo";;
+ *) git-fetch-pack --all -k \
+ $quiet "$upload_pack" $depth $no_progress "$repo" ;;
+ esac >"$GIT_DIR/CLONE_HEAD" ||
+ die "fetch-pack from '$repo' failed."
+ fi
+ ;;
+ esac
+ ;;
+ esac
+ test -d "$GIT_DIR/refs/reference-tmp" && rm -fr "$GIT_DIR/refs/reference-tmp"
+
+ if test -f "$GIT_DIR/CLONE_HEAD"
+ then
+ # Read git-fetch-pack -k output and store the remote branches.
+ if [ -n "$use_separate_remote" ]
+ then
+ branch_top="remotes/$origin"
+ else
+ branch_top="heads"
+ fi
+ tag_top="tags"
+ while read sha1 name
+ do
+ case "$name" in
+ *'^{}')
+ continue ;;
+ HEAD)
+ destname="REMOTE_HEAD" ;;
+ refs/heads/*)
+ destname="refs/$branch_top/${name#refs/heads/}" ;;
+ refs/tags/*)
+ destname="refs/$tag_top/${name#refs/tags/}" ;;
+ *)
+ continue ;;
+ esac
+ git update-ref -m "clone: from $repo" "$destname" "$sha1" ""
+ done < "$GIT_DIR/CLONE_HEAD"
+ fi
+
+ if test -n "$W"; then
+ cd "$W" || exit
+ else
+ cd "$D" || exit
+ fi
+
+ if test -z "$bare"
+ then
+ # a non-bare repository is always in separate-remote layout
+ remote_top="refs/remotes/$origin"
+ head_sha1=
+ test ! -r "$GIT_DIR/REMOTE_HEAD" || head_sha1=`cat "$GIT_DIR/REMOTE_HEAD"`
+ case "$head_sha1" in
+ 'ref: refs/'*)
+ # Uh-oh, the remote told us (http transport done against
+ # new style repository with a symref HEAD).
+ # Ideally we should skip the guesswork but for now
+ # opt for minimum change.
+ head_sha1=`expr "z$head_sha1" : 'zref: refs/heads/\(.*\)'`
+ head_sha1=`cat "$GIT_DIR/$remote_top/$head_sha1"`
+ ;;
+ esac
+
+ # The name under $remote_top the remote HEAD seems to point at.
+ head_points_at=$(
+ (
+ test -f "$GIT_DIR/$remote_top/master" && echo "master"
+ cd "$GIT_DIR/$remote_top" &&
+ find . -type f -print | sed -e 's/^\.\///'
+ ) | (
+ done=f
+ while read name
+ do
+ test t = $done && continue
+ branch_tip=`cat "$GIT_DIR/$remote_top/$name"`
+ if test "$head_sha1" = "$branch_tip"
+ then
+ echo "$name"
+ done=t
+ fi
+ done
+ )
+ )
+
+ # Upstream URL
+ git config remote."$origin".url "$repo" &&
+
+ # Set up the mappings to track the remote branches.
+ git config remote."$origin".fetch \
+ "+refs/heads/*:$remote_top/*" '^$' &&
+
+ # Write out remote.$origin config, and update our "$head_points_at".
+ case "$head_points_at" in
+ ?*)
+ # Local default branch
+ git symbolic-ref HEAD "refs/heads/$head_points_at" &&
+
+ # Tracking branch for the primary branch at the remote.
+ git update-ref HEAD "$head_sha1" &&
+
+ rm -f "refs/remotes/$origin/HEAD"
+ git symbolic-ref "refs/remotes/$origin/HEAD" \
+ "refs/remotes/$origin/$head_points_at" &&
+
+ git config branch."$head_points_at".remote "$origin" &&
+ git config branch."$head_points_at".merge "refs/heads/$head_points_at"
+ ;;
+ '')
+ if test -z "$head_sha1"
+ then
+ # Source had nonexistent ref in HEAD
+ echo >&2 "Warning: Remote HEAD refers to nonexistent ref, unable to checkout."
+ no_checkout=t
+ else
+ # Source had detached HEAD pointing nowhere
+ git update-ref --no-deref HEAD "$head_sha1" &&
+ rm -f "refs/remotes/$origin/HEAD"
+ fi
+ ;;
+ esac
+
+ case "$no_checkout" in
+ '')
+ test "z$quiet" = z -a "z$no_progress" = z && v=-v || v=
+ git read-tree -m -u $v HEAD HEAD
+ esac
+ fi
+ rm -f "$GIT_DIR/CLONE_HEAD" "$GIT_DIR/REMOTE_HEAD"
+
+ trap - 0
char git_default_email[MAX_GITNAME];
char git_default_name[MAX_GITNAME];
+int user_ident_explicitly_given;
int trust_executable_bit = 1;
int quote_path_fully = 1;
int has_symlinks = 1;
+int ignore_case;
int assume_unchanged;
int prefer_symlink_refs;
int is_bare_repository_cfg = -1; /* unspecified */
enum safe_crlf safe_crlf = SAFE_CRLF_WARN;
unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
+enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
/* This is set by setup_git_dir_gently() and/or git_default_config() */
char *git_work_tree_cfg;
- static const char *work_tree;
+ static char *work_tree;
static const char *git_dir;
static char *git_object_dir, *git_index_file, *git_refs_dir, *git_graft_file;
static void setup_git_env(void)
{
git_dir = getenv(GIT_DIR_ENVIRONMENT);
+ if (!git_dir)
+ git_dir = read_gitfile_gently(DEFAULT_GIT_DIR_ENVIRONMENT);
if (!git_dir)
git_dir = DEFAULT_GIT_DIR_ENVIRONMENT;
git_object_dir = getenv(DB_ENVIRONMENT);
return git_dir;
}
+ static int git_work_tree_initialized;
+
+ /*
+ * Note: this works only before the work tree has been used. It was added
+ * primarily to let git-clone work in a new repository it has just
+ * created, and is not meant to flip between different work trees.
+ */
+ void set_git_work_tree(const char *new_work_tree)
+ {
+ if (is_bare_repository_cfg >= 0)
+ die("cannot set work tree after initialization");
+ git_work_tree_initialized = 1;
+ free(work_tree);
+ work_tree = xstrdup(make_absolute_path(new_work_tree));
+ is_bare_repository_cfg = 0;
+ }
+
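The comment spells out the contract: set_git_work_tree() may be called once, before anything has looked at the work tree. A small illustration of the allowed and disallowed call orders (the path is hypothetical, and the snippet is not from the patch):

    #include "cache.h"

    static void work_tree_call_order(void)
    {
            /* Fine: nothing has queried or set the work tree yet. */
            set_git_work_tree("/tmp/newrepo");

            /*
             * Not fine: the first call above already settled
             * is_bare_repository_cfg, so this one die()s with
             * "cannot set work tree after initialization".
             */
            set_git_work_tree("/somewhere/else");
    }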
const char *get_git_work_tree(void)
{
- static int initialized = 0;
- if (!initialized) {
+ if (!git_work_tree_initialized) {
work_tree = getenv(GIT_WORK_TREE_ENVIRONMENT);
/* core.bare = true overrides implicit and config work tree */
if (!work_tree && is_bare_repository_cfg < 1) {
work_tree = xstrdup(make_absolute_path(git_path(work_tree)));
} else if (work_tree)
work_tree = xstrdup(make_absolute_path(work_tree));
- initialized = 1;
+ git_work_tree_initialized = 1;
if (work_tree)
is_bare_repository_cfg = 0;
}
static void remove_lock_file_on_signal(int signo)
{
remove_lock_file();
- signal(SIGINT, SIG_DFL);
+ signal(signo, SIG_DFL);
raise(signo);
}
return fd;
}
+ int hold_lock_file_for_append(struct lock_file *lk, const char *path, int die_on_error)
+ {
+ int fd, orig_fd;
+
+ fd = lock_file(lk, path);
+ if (fd < 0) {
+ if (die_on_error)
+ die("unable to create '%s.lock': %s", path, strerror(errno));
+ return fd;
+ }
+
+ orig_fd = open(path, O_RDONLY);
+ if (orig_fd < 0) {
+ if (errno != ENOENT) {
+ if (die_on_error)
+ die("cannot open '%s' for copying", path);
+ close(fd);
+ return error("cannot open '%s' for copying", path);
+ }
+ } else if (copy_fd(orig_fd, fd)) {
+ if (die_on_error)
+ exit(128);
+ close(fd);
+ return -1;
+ }
+ return fd;
+ }
+
int close_lock_file(struct lock_file *lk)
{
int fd = lk->fd;
} cached_refs;
static struct ref_list *current_ref;
+ static struct ref_list *extra_refs;
+
static void free_ref_list(struct ref_list *list)
{
struct ref_list *next;
cached_refs->packed = sort_ref_list(list);
}
+ void add_extra_ref(const char *name, const unsigned char *sha1, int flag)
+ {
+ extra_refs = add_ref(name, sha1, flag, extra_refs, NULL);
+ }
+
+ void clear_extra_refs(void)
+ {
+ free_ref_list(extra_refs);
+ extra_refs = NULL;
+ }
+
static struct ref_list *get_packed_refs(void)
{
if (!cached_refs.did_packed) {
{
int len = strlen(path), retval;
char *gitdir;
+ const char *tmp;
while (len && path[len-1] == '/')
len--;
return -1;
gitdir = xmalloc(len + MAXREFLEN + 8);
memcpy(gitdir, path, len);
- memcpy(gitdir + len, "/.git/", 7);
-
- retval = resolve_gitlink_ref_recursive(gitdir, len+6, refname, result, 0);
+ memcpy(gitdir + len, "/.git", 6);
+ len += 5;
+
+ tmp = read_gitfile_gently(gitdir);
+ if (tmp) {
+ free(gitdir);
+ len = strlen(tmp);
+ gitdir = xmalloc(len + MAXREFLEN + 3);
+ memcpy(gitdir, tmp, len);
+ }
+ gitdir[len] = '/';
+ gitdir[++len] = '\0';
+ retval = resolve_gitlink_ref_recursive(gitdir, len, refname, result, 0);
free(gitdir);
return retval;
}
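This hunk teaches resolve_gitlink_ref() to follow a submodule whose .git is a regular file pointing elsewhere, via read_gitfile_gently() (also newly declared in cache.h above). Assuming the usual one-line "gitdir: <path>" format for such a file, a minimal standalone reader sketch; the real implementation lives in setup.c and validates more:

    #include <stdio.h>
    #include <string.h>

    static const char *read_gitfile_sketch(const char *path)
    {
            static char buf[4096];
            size_t len;
            FILE *fp = fopen(path, "r");

            if (!fp)
                    return NULL;
            if (!fgets(buf, sizeof(buf), fp)) {
                    fclose(fp);
                    return NULL;
            }
            fclose(fp);
            len = strlen(buf);
            if (len && buf[len - 1] == '\n')
                    buf[--len] = '\0';
            if (strncmp(buf, "gitdir: ", 8))
                    return NULL;            /* not a gitfile */
            return buf + 8;                 /* the redirected $GIT_DIR */
    }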
struct ref_list *packed = get_packed_refs();
struct ref_list *loose = get_loose_refs();
+ struct ref_list *extra;
+
+ for (extra = extra_refs; extra; extra = extra->next)
+ retval = do_one_ref(base, fn, trim, cb_data, extra);
+
while (packed && loose) {
struct ref_list *entry;
int cmp = strcmp(packed->name, loose->name);
#include "remote.h"
#include "refs.h"
+ static struct refspec s_tag_refspec = {
+ 0,
+ 1,
++ 0,
+ "refs/tags/",
+ "refs/tags/"
+ };
+
+ const struct refspec *tag_refspec = &s_tag_refspec;
+
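The `++ 0,` line is the merge slotting an initializer for the new matching bit into the positionally-initialized s_tag_refspec. With the field order from struct refspec further down (force, pattern, matching, src, dst), it is equivalent to this C99 designated-initializer spelling, shown only to make the positions readable; the codebase keeps the positional form:

    #include "remote.h"

    static struct refspec s_tag_refspec_spelled_out = {
            .force    = 0,
            .pattern  = 1,          /* "refs/tags/" is a prefix (pattern) match ... */
            .matching = 0,          /* ... not the bare ":" matching refspec        */
            .src      = "refs/tags/",
            .dst      = "refs/tags/",
    };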
struct counted_string {
size_t len;
const char *s;
return 0;
}
remote = make_remote(name, subkey - name);
- if (!value) {
- /* if we ever have a boolean variable, e.g. "remote.*.disabled"
- * [remote "frotz"]
- * disabled
- * is a valid way to set it to true; we get NULL in value so
- * we need to handle it here.
- *
- * if (!strcmp(subkey, ".disabled")) {
- * val = git_config_bool(key, value);
- * return 0;
- * } else
- *
- */
- return 0; /* ignore unknown booleans */
- }
- if (!strcmp(subkey, ".url")) {
- add_url(remote, xstrdup(value));
+ if (!strcmp(subkey, ".mirror"))
+ remote->mirror = git_config_bool(key, value);
+ else if (!strcmp(subkey, ".skipdefaultupdate"))
+ remote->skip_default_update = git_config_bool(key, value);
+
+ else if (!strcmp(subkey, ".url")) {
+ const char *v;
+ if (git_config_string(&v, key, value))
+ return -1;
+ add_url(remote, v);
} else if (!strcmp(subkey, ".push")) {
- add_push_refspec(remote, xstrdup(value));
+ const char *v;
+ if (git_config_string(&v, key, value))
+ return -1;
+ add_push_refspec(remote, v);
} else if (!strcmp(subkey, ".fetch")) {
- add_fetch_refspec(remote, xstrdup(value));
+ const char *v;
+ if (git_config_string(&v, key, value))
+ return -1;
+ add_fetch_refspec(remote, v);
} else if (!strcmp(subkey, ".receivepack")) {
+ const char *v;
+ if (git_config_string(&v, key, value))
+ return -1;
if (!remote->receivepack)
- remote->receivepack = xstrdup(value);
+ remote->receivepack = v;
else
error("more than one receivepack given, using the first");
} else if (!strcmp(subkey, ".uploadpack")) {
+ const char *v;
+ if (git_config_string(&v, key, value))
+ return -1;
if (!remote->uploadpack)
- remote->uploadpack = xstrdup(value);
+ remote->uploadpack = v;
else
error("more than one uploadpack given, using the first");
} else if (!strcmp(subkey, ".tagopt")) {
if (!strcmp(value, "--no-tags"))
remote->fetch_tags = -1;
} else if (!strcmp(subkey, ".proxy")) {
- remote->http_proxy = xstrdup(value);
- } else if (!strcmp(subkey, ".skipdefaultupdate"))
- remote->skip_default_update = 1;
+ return git_config_string((const char **)&remote->http_proxy,
+ key, value);
+ }
return 0;
}
}
rhs = strrchr(lhs, ':');
+
+ /*
+ * Before going on, special case ":" (or "+:") as a refspec
+ * for matching refs.
+ */
+ if (!fetch && rhs == lhs && rhs[1] == '\0') {
+ rs[i].matching = 1;
+ continue;
+ }
+
if (rhs) {
rhs++;
rlen = strlen(rhs);
return ret;
}
+struct ref *alloc_ref_from_str(const char* str)
+{
+ struct ref *ret = alloc_ref(strlen(str) + 1);
+ strcpy(ret->name, str);
+ return ret;
+}
+
static struct ref *copy_ref(const struct ref *ref)
{
struct ref *ret = xmalloc(sizeof(struct ref) + strlen(ref->name) + 1);
return ret;
}
+void free_ref(struct ref *ref)
+{
+ if (!ref)
+ return;
+ free(ref->remote_status);
+ free(ref->symref);
+ free(ref);
+}
+
void free_refs(struct ref *ref)
{
struct ref *next;
while (ref) {
next = ref->next;
free(ref->peer_ref);
- free(ref);
+ free_ref(ref);
ref = next;
}
}
{
unsigned char sha1[20];
struct ref *ref;
- int len;
if (!*name) {
ref = alloc_ref(20);
}
if (get_sha1(name, sha1))
return NULL;
- len = strlen(name) + 1;
- ref = alloc_ref(len);
- memcpy(ref->name, name, len);
+ ref = alloc_ref_from_str(name);
hashcpy(ref->new_sha1, sha1);
return ref;
}
static struct ref *make_linked_ref(const char *name, struct ref ***tail)
{
- struct ref *ret;
- size_t len;
-
- len = strlen(name) + 1;
- ret = alloc_ref(len);
- memcpy(ret->name, name, len);
+ struct ref *ret = alloc_ref_from_str(name);
tail_link_ref(ret, tail);
return ret;
}
const char *dst_value = rs->dst;
char *dst_guess;
- if (rs->pattern)
+ if (rs->pattern || rs->matching)
return errs;
matched_src = matched_dst = NULL;
const struct ref *src)
{
int i;
+ int matching_refs = -1;
for (i = 0; i < rs_nr; i++) {
+ if (rs[i].matching &&
+ (matching_refs == -1 || rs[i].force)) {
+ matching_refs = i;
+ continue;
+ }
+
if (rs[i].pattern &&
!prefixcmp(src->name, rs[i].src) &&
src->name[strlen(rs[i].src)] == '/')
return rs + i;
}
- return NULL;
+ if (matching_refs != -1)
+ return rs + matching_refs;
+ else
+ return NULL;
}
/*
int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
int nr_refspec, const char **refspec, int flags)
{
- struct refspec *rs =
- parse_push_refspec(nr_refspec, (const char **) refspec);
+ struct refspec *rs;
int send_all = flags & MATCH_REFS_ALL;
int send_mirror = flags & MATCH_REFS_MIRROR;
+ static const char *default_refspec[] = { ":", 0 };
+ if (!nr_refspec) {
+ nr_refspec = 1;
+ refspec = default_refspec;
+ }
+ rs = parse_push_refspec(nr_refspec, (const char **) refspec);
if (match_explicit_refs(src, dst, dst_tail, rs, nr_refspec))
return -1;
char *dst_name;
if (src->peer_ref)
continue;
- if (nr_refspec) {
- pat = check_pattern_match(rs, nr_refspec, src);
- if (!pat)
- continue;
- }
- else if (!send_mirror && prefixcmp(src->name, "refs/heads/"))
+
+ pat = check_pattern_match(rs, nr_refspec, src);
+ if (!pat)
+ continue;
+
+ if (pat->matching) {
/*
* "matching refs"; traditionally we pushed everything
* including refs outside refs/heads/ hierarchy, but
* that does not make much sense these days.
*/
- continue;
+ if (!send_mirror && prefixcmp(src->name, "refs/heads/"))
+ continue;
+ dst_name = xstrdup(src->name);
- if (pat) {
+ } else {
const char *dst_side = pat->dst ? pat->dst : pat->src;
dst_name = xmalloc(strlen(dst_side) +
strlen(src->name) -
strlen(pat->src) + 2);
strcpy(dst_name, dst_side);
strcat(dst_name, src->name + strlen(pat->src));
- } else
- dst_name = xstrdup(src->name);
+ }
dst_peer = find_ref_by_name(dst, dst_name);
- if (dst_peer && dst_peer->peer_ref)
- /* We're already sending something to this ref. */
- goto free_name;
+ if (dst_peer) {
+ if (dst_peer->peer_ref)
+ /* We're already sending something to this ref. */
+ goto free_name;
+
+ } else {
+ if (pat->matching && !(send_all || send_mirror))
+ /*
+ * Remote doesn't have it, and we have no
+ * explicit pattern, and we don't have
+ * --all nor --mirror.
+ */
+ goto free_name;
- if (!dst_peer && !nr_refspec && !(send_all || send_mirror))
- /*
- * Remote doesn't have it, and we have no
- * explicit pattern, and we don't have
- * --all nor --mirror.
- */
- goto free_name;
- if (!dst_peer) {
/* Create a new one and link it */
dst_peer = make_linked_ref(dst_name, dst_tail);
hashcpy(dst_peer->new_sha1, src->new_sha1);
}
dst_peer->peer_ref = src;
- if (pat)
- dst_peer->force = pat->force;
+ dst_peer->force = pat->force;
free_name:
free(dst_name);
}
return NULL;
if (!prefixcmp(name, "refs/")) {
- ret = alloc_ref(strlen(name) + 1);
- strcpy(ret->name, name);
- return ret;
+ return alloc_ref_from_str(name);
}
if (!prefixcmp(name, "heads/") ||
return 0;
}
+
+int resolve_remote_symref(struct ref *ref, struct ref *list)
+{
+ if (!ref->symref)
+ return 0;
+ for (; list; list = list->next)
+ if (!strcmp(ref->symref, list->name)) {
+ hashcpy(ref->old_sha1, list->old_sha1);
+ return 0;
+ }
+ return 1;
+}
*/
int fetch_tags;
int skip_default_update;
+ int mirror;
const char *receivepack;
const char *uploadpack;
struct refspec {
unsigned force : 1;
unsigned pattern : 1;
+ unsigned matching : 1;
char *src;
char *dst;
};
+ extern const struct refspec *tag_refspec;
+
struct ref *alloc_ref(unsigned namelen);
+struct ref *alloc_ref_from_str(const char* str);
+
struct ref *copy_ref_list(const struct ref *ref);
int check_ref_type(const struct ref *ref, int flags);
*/
void free_refs(struct ref *ref);
+int resolve_remote_symref(struct ref *ref, struct ref *list);
+
/*
* Removes and frees any duplicate refs in the map.
*/
return base;
}
-char *sha1_pack_name(const unsigned char *sha1)
+static char *sha1_get_pack_name(const unsigned char *sha1,
+	char **name, char **base, const char *which)
{
static const char hex[] = "0123456789abcdef";
- static char *name, *base, *buf;
+ char *buf;
int i;
- if (!base) {
+ if (!*base) {
const char *sha1_file_directory = get_object_directory();
int len = strlen(sha1_file_directory);
- base = xmalloc(len + 60);
- sprintf(base, "%s/pack/pack-1234567890123456789012345678901234567890.pack", sha1_file_directory);
- name = base + len + 11;
+ *base = xmalloc(len + 60);
+ sprintf(*base, "%s/pack/pack-1234567890123456789012345678901234567890.pack", sha1_file_directory);
+ *name = *base + len + 11;
}
- buf = name;
+ buf = *name;
for (i = 0; i < 20; i++) {
unsigned int val = *sha1++;
*buf++ = hex[val & 0xf];
}
- return base;
+ return *base;
}
-char *sha1_pack_index_name(const unsigned char *sha1)
+char *sha1_pack_name(const unsigned char *sha1)
{
- static const char hex[] = "0123456789abcdef";
- static char *name, *base, *buf;
- int i;
-
- if (!base) {
- const char *sha1_file_directory = get_object_directory();
- int len = strlen(sha1_file_directory);
- base = xmalloc(len + 60);
- sprintf(base, "%s/pack/pack-1234567890123456789012345678901234567890.idx", sha1_file_directory);
- name = base + len + 11;
- }
+ static char *name, *base;
- buf = name;
+ return sha1_get_pack_name(sha1, &name, &base);
+}
- for (i = 0; i < 20; i++) {
- unsigned int val = *sha1++;
- *buf++ = hex[val >> 4];
- *buf++ = hex[val & 0xf];
- }
+char *sha1_pack_index_name(const unsigned char *sha1)
+{
+ static char *name, *base;
- return base;
+ return sha1_get_pack_name(sha1, &name, &base);
}
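With the extension passed in (the "pack"/"idx" arguments above), the two public wrappers differ only in the suffix of the path they hand back. For illustration, with an arbitrary hash:

    #include <stdio.h>
    #include "cache.h"

    /* Both paths live under <object dir>/pack/; only the suffix differs. */
    static void show_pack_paths(const unsigned char *sha1)
    {
            printf("%s\n", sha1_pack_name(sha1));        /* .../pack/pack-<40 hex>.pack */
            printf("%s\n", sha1_pack_index_name(sha1));  /* .../pack/pack-<40 hex>.idx  */
    }

Each wrapper reuses its own static buffer, so a caller that needs to keep the string across calls has to copy it.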
struct alternate_object_database *alt_odb_list;
munmap(map, mapsz);
}
+ void add_to_alternates_file(const char *reference)
+ {
+ struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
+ int fd = hold_lock_file_for_append(lock, git_path("objects/info/alternates"), 1);
+ char *alt = mkpath("%s/objects\n", reference);
+ write_or_die(fd, alt, strlen(alt));
+ if (commit_lock_file(lock))
+ die("could not close alternates file");
+ if (alt_odb_tail)
+ link_alt_odb_entries(alt, alt + strlen(alt), '\n', NULL, 0);
+ }
+
void prepare_alt_odb(void)
{
const char *alt;
return 0;
}
-int write_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *returnsha1)
+static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
+ void *buf, unsigned long len, time_t mtime)
{
- int size, ret;
+ int fd, size, ret;
unsigned char *compressed;
z_stream stream;
- unsigned char sha1[20];
char *filename;
static char tmpfile[PATH_MAX];
- char hdr[32];
- int fd, hdrlen;
- /* Normally if we have it in the pack then we do not bother writing
- * it out into .git/objects/??/?{38} file.
- */
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
filename = sha1_file_name(sha1);
- if (returnsha1)
- hashcpy(returnsha1, sha1);
- if (has_sha1_file(sha1))
- return 0;
fd = open(filename, O_RDONLY);
if (fd >= 0) {
/*
die("unable to write sha1 file");
free(compressed);
+ if (mtime) {
+ struct utimbuf utb;
+ utb.actime = mtime;
+ utb.modtime = mtime;
+ if (utime(tmpfile, &utb) < 0)
+ warning("failed utime() on %s: %s",
+ tmpfile, strerror(errno));
+ }
+
return move_temp_to_file(tmpfile, filename);
}
+int write_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *returnsha1)
+{
+ unsigned char sha1[20];
+ char hdr[32];
+ int hdrlen;
+
+ /* Normally if we have it in the pack then we do not bother writing
+ * it out into .git/objects/??/?{38} file.
+ */
+ write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+ if (returnsha1)
+ hashcpy(returnsha1, sha1);
+ if (has_sha1_file(sha1))
+ return 0;
+ return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+}
+
+int force_object_loose(const unsigned char *sha1, time_t mtime)
+{
+ struct stat st;
+ void *buf;
+ unsigned long len;
+ enum object_type type;
+ char hdr[32];
+ int hdrlen;
+
+ if (find_sha1_file(sha1, &st))
+ return 0;
+ buf = read_packed_sha1(sha1, &type, &len);
+ if (!buf)
+ return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
+ hdrlen = sprintf(hdr, "%s %lu", typename(type), len) + 1;
+ return write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+}
+
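force_object_loose() writes an object that is currently only available from a pack back out as a loose object, and write_loose_object() now stamps it with the caller-supplied mtime via utime(). A hedged sketch of the sort of caller this enables, e.g. preserving unreachable objects when repacking, with invented names:

    #include "cache.h"

    /*
     * Illustrative loop (not from the patch): eject a list of objects from
     * their pack as loose objects, stamping them with the pack's mtime so
     * that the normal prune expiry rules apply to them.
     */
    static void loosen_objects(const unsigned char (*sha1s)[20], int nr, time_t pack_mtime)
    {
            int i;
            for (i = 0; i < nr; i++)
                    if (force_object_loose(sha1s[i], pack_mtime))
                            die("unable to force loose object %s", sha1_to_hex(sha1s[i]));
    }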
/*
* We need to unpack and recompress the object for writing
* it out to a different file.
base_dir=`pwd`
+ U=$base_dir/UPLOAD_LOG
+
test_expect_success 'preparing first repository' \
'test_create_repo A && cd A &&
echo first > file1 &&
cd "$base_dir"
+ rm -f $U
+
test_expect_success 'cloning with reference (no -l -s)' \
- 'git clone --reference B "file://$(pwd)/A" D'
-'GIT_DEBUG_SEND_PACK=3 git clone --reference B file://`pwd`/A D 3>$U'
++'GIT_DEBUG_SEND_PACK=3 git clone --reference B "file://$(pwd)/A" D 3>$U'
+
+ test_expect_success 'fetched no objects' \
+ '! grep "^want" $U'
cd "$base_dir"
cd "$base_dir"
+ test_expect_success 'preparing alternate repository #1' \
+ 'test_create_repo F && cd F &&
+ echo first > file1 &&
+ git add file1 &&
+ git commit -m initial'
+
+ cd "$base_dir"
+
+ test_expect_success 'cloning alternate repo #2 and adding changes to repo #1' \
+ 'git clone F G && cd F &&
+ echo second > file2 &&
+ git add file2 &&
+ git commit -m addition'
+
+ cd "$base_dir"
+
+ test_expect_success 'cloning alternate repo #1, using #2 as reference' \
+ 'git clone --reference G F H'
+
+ cd "$base_dir"
+
+ test_expect_success 'cloning with reference being subset of source (-l -s)' \
+ 'git clone -l -s --reference A B E'
+
+ cd "$base_dir"
+
test_done
}
static int fetch_objs_via_rsync(struct transport *transport,
- int nr_objs, struct ref **to_fetch)
+ int nr_objs, const struct ref **to_fetch)
{
struct strbuf buf = STRBUF_INIT;
struct child_process rsync;
#ifndef NO_CURL /* http fetch is the only user */
static int fetch_objs_via_walker(struct transport *transport,
- int nr_objs, struct ref **to_fetch)
+ int nr_objs, const struct ref **to_fetch)
{
char *dest = xstrdup(transport->url);
struct walker *walker = transport->data;
struct ref *ref = NULL;
struct ref *last_ref = NULL;
+ struct walker *walker;
+
if (!transport->data)
transport->data = get_http_walker(transport->url,
transport->remote);
+ walker = transport->data;
+
refs_url = xmalloc(strlen(transport->url) + 11);
sprintf(refs_url, "%s/info/refs", transport->url);
strbuf_release(&buffer);
+ ref = alloc_ref_from_str("HEAD");
+ if (!walker->fetch_ref(walker, ref) &&
+ !resolve_remote_symref(ref, refs)) {
+ ref->next = refs;
+ refs = ref;
+ } else {
+ free(ref);
+ }
+
return refs;
}
static int fetch_objs_via_curl(struct transport *transport,
- int nr_objs, struct ref **to_fetch)
+ int nr_objs, const struct ref **to_fetch)
{
if (!transport->data)
transport->data = get_http_walker(transport->url,
die ("Could not read bundle '%s'.", transport->url);
for (i = 0; i < data->header.references.nr; i++) {
struct ref_list_entry *e = data->header.references.list + i;
- struct ref *ref = alloc_ref(strlen(e->name) + 1);
+ struct ref *ref = alloc_ref_from_str(e->name);
hashcpy(ref->old_sha1, e->sha1);
- strcpy(ref->name, e->name);
ref->next = result;
result = ref;
}
}
static int fetch_refs_from_bundle(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+ int nr_heads, const struct ref **to_fetch)
{
struct bundle_transport_data *data = transport->data;
return unbundle(&data->header, data->fd);
}
static int fetch_refs_via_pack(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+ int nr_heads, const struct ref **to_fetch)
{
struct git_transport_data *data = transport->data;
char **heads = xmalloc(nr_heads * sizeof(*heads));
return transport->remote_refs;
}
- int transport_fetch_refs(struct transport *transport, struct ref *refs)
+ int transport_fetch_refs(struct transport *transport, const struct ref *refs)
{
int rc;
int nr_heads = 0, nr_alloc = 0;
- struct ref **heads = NULL;
- struct ref *rm;
+ const struct ref **heads = NULL;
+ const struct ref *rm;
for (rm = refs; rm; rm = rm->next) {
if (rm->peer_ref &&