#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
+#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "attr.h"
#include "split-index.h"
#include "dir.h"
+#include "submodule.h"
+#include "submodule-config.h"
/*
* Error messages expected by scripts out of plumbing commands such as
/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
"Working tree file '%s' would be removed by sparse checkout update.",
+
+ /* ERROR_WOULD_LOSE_SUBMODULE */
+ "Submodule '%s' cannot checkout new HEAD.",
};
#define ERRORMSG(o,type) \
? ((o)->msgs[(type)]) \
: (unpack_plumbing_errors[(type)]) )
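+/*
+ * Prepend the value of --super-prefix, if any, to a path so that error
+ * messages issued from within a submodule checkout name the path relative
+ * to the superproject.  For example, with --super-prefix=sub/, a template
+ * such as "Entry '%s' not uptodate. Cannot merge." reports 'sub/path'
+ * rather than the submodule-local 'path'.
+ */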
+static const char *super_prefixed(const char *path)
+{
+ /*
+ * It is necessary and sufficient to have two static buffers
+ * here, as the return value of this function is fed to
+ * error() using the unpack_*_errors[] templates we see above.
+ */
+ static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
+ static int super_prefix_len = -1;
+ static unsigned idx = ARRAY_SIZE(buf) - 1;
+
+ if (super_prefix_len < 0) {
+ const char *super_prefix = get_super_prefix();
+ if (!super_prefix) {
+ super_prefix_len = 0;
+ } else {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(buf); i++)
+ strbuf_addstr(&buf[i], super_prefix);
+ super_prefix_len = buf[0].len;
+ }
+ }
+
+ if (!super_prefix_len)
+ return path;
+
+ if (++idx >= ARRAY_SIZE(buf))
+ idx = 0;
+
+ strbuf_setlen(&buf[idx], super_prefix_len);
+ strbuf_addstr(&buf[idx], path);
+
+ return buf[idx].buf;
+}
+
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
const char *cmd)
{
xstrfmt(msg, cmd, cmd);
msgs[ERROR_NOT_UPTODATE_DIR] =
- _("Updating the following directories would lose untracked files in it:\n%s");
+ _("Updating the following directories would lose untracked files in them:\n%s");
if (!strcmp(cmd, "checkout"))
msg = advice_commit_before_merge
msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
_("Cannot update sparse checkout: the following entries are not up-to-date:\n%s");
msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
- _("The following Working tree files would be overwritten by sparse checkout update:\n%s");
+ _("The following working tree files would be overwritten by sparse checkout update:\n%s");
msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
- _("The following Working tree files would be removed by sparse checkout update:\n%s");
+ _("The following working tree files would be removed by sparse checkout update:\n%s");
+ msgs[ERROR_WOULD_LOSE_SUBMODULE] =
+ _("Cannot update submodule:\n%s");
opts->show_all_errors = 1;
/* rejected paths may not have a static buffer */
const char *path)
{
if (!o->show_all_errors)
- return error(ERRORMSG(o, e), path);
+ return error(ERRORMSG(o, e), super_prefixed(path));
/*
* Otherwise, insert in a list for future display by
something_displayed = 1;
for (i = 0; i < rejects->nr; i++)
strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
- error(ERRORMSG(o, e), path.buf);
+ error(ERRORMSG(o, e), super_prefixed(path.buf));
strbuf_release(&path);
}
string_list_clear(rejects, 0);
fprintf(stderr, _("Aborting\n"));
}
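+/*
+ * Check whether the submodule at ce->name could have its HEAD moved from
+ * old_id to new_id under its configured update strategy.  This is a dry
+ * run (SUBMODULE_MOVE_HEAD_DRY_RUN); nothing is changed on disk.  Returns
+ * 0 when the move is possible or the path is not a configured submodule,
+ * and a negative value otherwise.
+ */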
+static int check_submodule_move_head(const struct cache_entry *ce,
+ const char *old_id,
+ const char *new_id,
+ struct unpack_trees_options *o)
+{
+ unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
+ const struct submodule *sub = submodule_from_ce(ce);
+ if (!sub)
+ return 0;
+
+ if (o->reset)
+ flags |= SUBMODULE_MOVE_HEAD_FORCE;
+
+ switch (sub->update_strategy.type) {
+ case SM_UPDATE_UNSPECIFIED:
+ case SM_UPDATE_CHECKOUT:
+ if (submodule_move_head(ce->name, old_id, new_id, flags))
+ return o->gently ? -1 :
+ add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
+ return 0;
+ case SM_UPDATE_NONE:
+ return 0;
+ case SM_UPDATE_REBASE:
+ case SM_UPDATE_MERGE:
+ case SM_UPDATE_COMMAND:
+ default:
+ warning(_("submodule update strategy not supported for submodule '%s'"), ce->name);
+ return -1;
+ }
+}
+
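+/*
+ * If .gitmodules is among the entries scheduled for update, check out its
+ * new version right away and re-read the submodule configuration, so that
+ * later submodule operations see the updated settings.
+ */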
+static void reload_gitmodules_file(struct index_state *index,
+ struct checkout *state)
+{
+ int i;
+ for (i = 0; i < index->cache_nr; i++) {
+ struct cache_entry *ce = index->cache[i];
+ if (ce->ce_flags & CE_UPDATE) {
+ int r = strcmp(ce->name, GITMODULES_FILE);
+ if (r < 0)
+ continue;
+ else if (r == 0) {
+ submodule_free();
+ checkout_entry(ce, state, NULL);
+ gitmodules_config();
+ git_config(submodule_config, NULL);
+ } else
+ break;
+ }
+ }
+}
+
/*
* Unlink the last component and schedule the leading directories for
* removal, such that empty directories get removed.
*/
static void unlink_entry(const struct cache_entry *ce)
{
+ const struct submodule *sub = submodule_from_ce(ce);
+ if (sub) {
+ switch (sub->update_strategy.type) {
+ case SM_UPDATE_UNSPECIFIED:
+ case SM_UPDATE_CHECKOUT:
+ case SM_UPDATE_REBASE:
+ case SM_UPDATE_MERGE:
+ /* state.force is set at the caller. */
+ submodule_move_head(ce->name, "HEAD", NULL,
+ SUBMODULE_MOVE_HEAD_FORCE);
+ break;
+ case SM_UPDATE_NONE:
+ case SM_UPDATE_COMMAND:
+ return; /* Do not touch the submodule. */
+ }
+ }
if (!check_leading_path(ce->name, ce_namelen(ce)))
return;
if (remove_or_warn(ce->ce_mode, ce->name))
schedule_dir_for_removal(ce->name, ce_namelen(ce));
}
-static struct checkout state;
-static int check_updates(struct unpack_trees_options *o)
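+/*
+ * Count the entries that will be updated or removed in the working tree
+ * and set up a delayed progress meter for them; returns NULL when
+ * progress reporting was not requested.
+ */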
+static struct progress *get_progress(struct unpack_trees_options *o)
{
unsigned cnt = 0, total = 0;
+ struct index_state *index = &o->result;
+
+ if (!o->update || !o->verbose_update)
+ return NULL;
+
+ for (; cnt < index->cache_nr; cnt++) {
+ const struct cache_entry *ce = index->cache[cnt];
+ if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
+ total++;
+ }
+
+ return start_progress_delay(_("Checking out files"),
+ total, 50, 1);
+}
+
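+/*
+ * Apply the scheduled working tree changes: remove entries flagged
+ * CE_WT_REMOVE, reload .gitmodules when it is about to change, then
+ * check out the updated entries, finishing any delayed checkout at
+ * the end.
+ */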
+static int check_updates(struct unpack_trees_options *o)
+{
+ unsigned cnt = 0;
+ int errs = 0;
struct progress *progress = NULL;
struct index_state *index = &o->result;
+ struct checkout state = CHECKOUT_INIT;
int i;
- int errs = 0;
- if (o->update && o->verbose_update) {
- for (total = cnt = 0; cnt < index->cache_nr; cnt++) {
- const struct cache_entry *ce = index->cache[cnt];
- if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
- total++;
- }
+ state.force = 1;
+ state.quiet = 1;
+ state.refresh_cache = 1;
+ state.istate = index;
- progress = start_progress_delay(_("Checking out files"),
- total, 50, 1);
- cnt = 0;
- }
+ progress = get_progress(o);
if (o->update)
- git_attr_set_direction(GIT_ATTR_CHECKOUT, &o->result);
+ git_attr_set_direction(GIT_ATTR_CHECKOUT, index);
for (i = 0; i < index->cache_nr; i++) {
const struct cache_entry *ce = index->cache[i];
display_progress(progress, ++cnt);
if (o->update && !o->dry_run)
unlink_entry(ce);
- continue;
}
}
- remove_marked_cache_entries(&o->result);
+ remove_marked_cache_entries(index);
remove_scheduled_dirs();
+ if (should_update_submodules() && o->update && !o->dry_run)
+ reload_gitmodules_file(index, &state);
+
+ enable_delayed_checkout(&state);
for (i = 0; i < index->cache_nr; i++) {
struct cache_entry *ce = index->cache[i];
}
}
}
+ errs |= finish_delayed_checkout(&state);
stop_progress(&progress);
if (o->update)
git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
return errs != 0;
}
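+/*
+ * Two tree entries name the same object only when both actually carry an
+ * oid and those oids compare equal; entries without an oid never match.
+ */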
+static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
+{
+ return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
+}
+
static int traverse_trees_recursive(int n, unsigned long dirmask,
unsigned long df_conflicts,
struct name_entry *names,
struct traverse_info *info)
{
int i, ret, bottom;
+ int nr_buf = 0;
struct tree_desc t[MAX_UNPACK_TREES];
void *buf[MAX_UNPACK_TREES];
struct traverse_info newinfo;
newinfo.pathlen += tree_entry_len(p) + 1;
newinfo.df_conflicts |= df_conflicts;
+ /*
+ * Fetch the tree from the ODB for each peer directory in the
+ * n commits.
+ *
+ * For 2- and 3-way traversals, we try to avoid hitting the
+ * ODB twice for the same OID. This should yield a nice speed
+ * up in checkouts and merges when the commits are similar.
+ *
+ * We don't bother doing the full O(n^2) search for larger n,
+ * because wider traversals don't happen that often and we
+ * avoid the search setup.
+ *
+ * When 2 peer OIDs are the same, we just copy the tree
+ * descriptor data. This implicitly borrows the buffer
+ * data from the earlier cell.
+ */
for (i = 0; i < n; i++, dirmask >>= 1) {
- const unsigned char *sha1 = NULL;
- if (dirmask & 1)
- sha1 = names[i].oid->hash;
- buf[i] = fill_tree_descriptor(t+i, sha1);
+ if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
+ t[i] = t[i - 1];
+ else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
+ t[i] = t[i - 2];
+ else {
+ const unsigned char *sha1 = NULL;
+ if (dirmask & 1)
+ sha1 = names[i].oid->hash;
+ buf[nr_buf++] = fill_tree_descriptor(t+i, sha1);
+ }
}
bottom = switch_cache_bottom(&newinfo);
ret = traverse_trees(n, t, &newinfo);
restore_cache_bottom(&newinfo, bottom);
- for (i = 0; i < n; i++)
+ for (i = 0; i < nr_buf; i++)
free(buf[i]);
return ret;
ce->ce_mode = create_ce_mode(n->mode);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = len;
- hashcpy(ce->sha1, n->oid->hash);
+ oidcpy(&ce->oid, n->oid);
make_traverse_path(ce->name, info, n);
return ce;
struct cache_entry **cache_end;
int dtype = DT_DIR;
int ret = is_excluded_from_list(prefix->buf, prefix->len,
- basename, &dtype, el);
+ basename, &dtype, el, &the_index);
int rc;
strbuf_addch(prefix, '/');
/* Non-directory */
dtype = ce_to_dtype(ce);
ret = is_excluded_from_list(ce->name, ce_namelen(ce),
- name, &dtype, el);
+ name, &dtype, el, &the_index);
if (ret < 0)
ret = defval;
if (ret > 0)
if (len > MAX_UNPACK_TREES)
die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
- memset(&state, 0, sizeof(state));
- state.base_dir = "";
- state.force = 1;
- state.quiet = 1;
- state.refresh_cache = 1;
- state.istate = &o->result;
memset(&el, 0, sizeof(el));
if (!core_apply_sparse_checkout || !o->update)
o->skip_sparse_checkout = 1;
if (!o->skip_sparse_checkout) {
char *sparse = git_pathdup("info/sparse-checkout");
- if (add_excludes_from_file_to_list(sparse, "", 0, &el, 0) < 0)
+ if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0)
o->skip_sparse_checkout = 1;
else
o->el = &el;
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
}
+ move_index_extensions(&o->result, o->dst_index);
discard_index(o->dst_index);
*o->dst_index = o->result;
} else {
if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
return 0;
return a->ce_mode == b->ce_mode &&
- !hashcmp(a->sha1, b->sha1);
+ !oidcmp(&a->oid, &b->oid);
}
if (!lstat(ce->name, &st)) {
int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);
+
+ if (submodule_from_ce(ce)) {
+ int r = check_submodule_move_head(ce,
+ "HEAD", oid_to_hex(&ce->oid), o);
+ if (r)
+ return o->gently ? -1 :
+ add_rejected_path(o, error_type, ce->name);
+ return 0;
+ }
+
if (!changed)
return 0;
/*
- * NEEDSWORK: the current default policy is to allow
- * submodule to be out of sync wrt the superproject
- * index. This needs to be tightened later for
- * submodules that are marked to be automatically
- * checked out.
+ * Historic default policy was to allow submodule to be out
+ * of sync wrt the superproject index. If the submodule was
+ * not considered interesting above, we don't care here.
*/
if (S_ISGITLINK(ce->ce_mode))
return 0;
+
errno = 0;
}
if (errno == ENOENT)
* Currently, git does not checkout subprojects during a superproject
* checkout, so it is not going to overwrite anything.
*/
-static int verify_clean_submodule(const struct cache_entry *ce,
+static int verify_clean_submodule(const char *old_sha1,
+ const struct cache_entry *ce,
enum unpack_trees_error_types error_type,
struct unpack_trees_options *o)
{
- return 0;
+ if (!submodule_from_ce(ce))
+ return 0;
+
+ return check_submodule_move_head(ce, old_sha1,
+ oid_to_hex(&ce->oid), o);
}
static int verify_clean_subdirectory(const struct cache_entry *ce,
struct dir_struct d;
char *pathbuf;
int cnt = 0;
- unsigned char sha1[20];
- if (S_ISGITLINK(ce->ce_mode) &&
- resolve_gitlink_ref(ce->name, "HEAD", sha1) == 0) {
- /* If we are not going to update the submodule, then
+ if (S_ISGITLINK(ce->ce_mode)) {
+ unsigned char sha1[20];
+ int sub_head = resolve_gitlink_ref(ce->name, "HEAD", sha1);
+ /*
+ * If we are not going to update the submodule, then
* we don't care.
*/
- if (!hashcmp(sha1, ce->sha1))
+ if (!sub_head && !hashcmp(sha1, ce->oid.hash))
return 0;
- return verify_clean_submodule(ce, error_type, o);
+ return verify_clean_submodule(sub_head ? NULL : sha1_to_hex(sha1),
+ ce, error_type, o);
}
/*
memset(&d, 0, sizeof(d));
if (o->dir)
d.exclude_per_dir = o->dir->exclude_per_dir;
- i = read_directory(&d, pathbuf, namelen+1, NULL);
+ i = read_directory(&d, &the_index, pathbuf, namelen+1, NULL);
if (i)
return o->gently ? -1 :
add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
return 0;
if (o->dir &&
- is_excluded(o->dir, name, &dtype))
+ is_excluded(o->dir, &the_index, name, &dtype))
/*
* ce->name is explicitly excluded, so it is Ok to
* overwrite it.
path = xmemdupz(ce->name, len);
if (lstat(path, &st))
ret = error_errno("cannot stat '%s'", path);
- else
- ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
- &st, error_type, o);
+ else {
+ if (submodule_from_ce(ce))
+ ret = check_submodule_move_head(ce,
+ oid_to_hex(&ce->oid),
+ NULL, o);
+ else
+ ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
+ &st, error_type, o);
+ }
free(path);
return ret;
} else if (lstat(ce->name, &st)) {
return error_errno("cannot stat '%s'", ce->name);
return 0;
} else {
+ if (submodule_from_ce(ce))
+ return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
+ NULL, o);
+
return check_ok_to_remove(ce->name, ce_namelen(ce),
ce_to_dtype(ce), ce, &st,
error_type, o);
return -1;
}
invalidate_ce_path(merge, o);
+
+ if (submodule_from_ce(ce)) {
+ int ret = check_submodule_move_head(ce, NULL,
+ oid_to_hex(&ce->oid),
+ o);
+ if (ret)
+ return ret;
+ }
+
} else if (!(old->ce_flags & CE_CONFLICTED)) {
/*
* See if we can re-use the old CE directly?
update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
invalidate_ce_path(old, o);
}
+
+ if (submodule_from_ce(ce)) {
+ int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
+ oid_to_hex(&ce->oid),
+ o);
+ if (ret)
+ return ret;
+ }
} else {
/*
* Previously unmerged entry left as an existence
fprintf(o, "%s%06o %s %d\t%s\n",
label,
ce->ce_mode,
- sha1_to_hex(ce->sha1),
+ oid_to_hex(&ce->oid),
ce_stage(ce),
ce->name);
}
o->merge_size);
if (a && old)
return o->gently ? -1 :
- error(ERRORMSG(o, ERROR_BIND_OVERLAP), a->name, old->name);
+ error(ERRORMSG(o, ERROR_BIND_OVERLAP),
+ super_prefixed(a->name),
+ super_prefixed(old->name));
if (!a)
return keep_entry(old, o);
else