Sync with 1.7.7.7
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 80ab6c39f91b30be90203da777fd8e3989abdc2c..ef703dfeb611c0820a68f4c7cebae154689fa5f5 100644
@@ -409,25 +409,56 @@ static unsigned long write_object(struct sha1file *f,
        return hdrlen + datalen;
 }
 
-static int write_one(struct sha1file *f,
-                              struct object_entry *e,
-                              off_t *offset)
+enum write_one_status {
+       WRITE_ONE_SKIP = -1, /* already written */
+       WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
+       WRITE_ONE_WRITTEN = 1, /* normal */
+       WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
+};
+
+static enum write_one_status write_one(struct sha1file *f,
+                                      struct object_entry *e,
+                                      off_t *offset)
 {
        unsigned long size;
+       int recursing;
 
-       /* offset is non zero if object is written already. */
-       if (e->idx.offset || e->preferred_base)
-               return -1;
+       /*
+        * we set offset to 1 (which is an impossible value) to mark
+        * the fact that this object is involved in "write its base
+        * first before writing a deltified object" recursion.
+        */
+       recursing = (e->idx.offset == 1);
+       if (recursing) {
+               warning("recursive delta detected for object %s",
+                       sha1_to_hex(e->idx.sha1));
+               return WRITE_ONE_RECURSIVE;
+       } else if (e->idx.offset || e->preferred_base) {
+               /* offset is non zero if object is written already. */
+               return WRITE_ONE_SKIP;
+       }
 
        /* if we are deltified, write out base object first. */
-       if (e->delta && !write_one(f, e->delta, offset))
-               return 0;
+       if (e->delta) {
+               e->idx.offset = 1; /* now recurse */
+               switch (write_one(f, e->delta, offset)) {
+               case WRITE_ONE_RECURSIVE:
+                       /* we cannot depend on this one */
+                       e->delta = NULL;
+                       break;
+               default:
+                       break;
+               case WRITE_ONE_BREAK:
+                       e->idx.offset = recursing;
+                       return WRITE_ONE_BREAK;
+               }
+       }
 
        e->idx.offset = *offset;
        size = write_object(f, e, *offset);
        if (!size) {
-               e->idx.offset = 0;
-               return 0;
+               e->idx.offset = recursing;
+               return WRITE_ONE_BREAK;
        }
        written_list[nr_written++] = &e->idx;
 
@@ -435,7 +466,7 @@ static int write_one(struct sha1file *f,
        if (signed_add_overflows(*offset, size))
                die("pack too large for current definition of off_t");
        *offset += size;
-       return 1;
+       return WRITE_ONE_WRITTEN;
 }
 
 static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
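The hunks above turn write_one()'s tri-state int into a named status and add a cycle guard: before recursing into its delta base, an object marks itself with the impossible offset 1, so a base chain that loops back is reported as WRITE_ONE_RECURSIVE and the delta dependency is dropped instead of recursing forever. Real pack offsets start past the 12-byte header, which is why 1 is safe as an "in progress" marker. Below is a minimal standalone sketch of that sentinel pattern; struct node, write_chain and the status constants are illustrative names, not Git's.

#include <stdio.h>

struct node {
	const char *name;
	struct node *base;      /* delta base, or NULL */
	unsigned long offset;   /* 0 = not written yet, 1 = write in progress */
};

enum status { SKIPPED, CYCLE, WRITTEN };

static enum status write_chain(struct node *n, unsigned long *offset)
{
	if (n->offset == 1)
		return CYCLE;                   /* already being written above us */
	if (n->offset)
		return SKIPPED;                 /* written earlier */

	if (n->base) {
		n->offset = 1;                  /* sentinel: mark "in progress" */
		if (write_chain(n->base, offset) == CYCLE)
			n->base = NULL;         /* cannot depend on a looping base */
	}

	n->offset = *offset;                    /* record where this object landed */
	printf("writing %s at offset %lu\n", n->name, n->offset);
	*offset += 100;                         /* pretend every object is 100 bytes */
	return WRITTEN;
}

int main(void)
{
	struct node a = { "A", NULL, 0 }, b = { "B", &a, 0 };
	unsigned long offset = 12;              /* like a pack: header written first */

	a.base = &b;                            /* deliberately create a cycle A <-> B */
	write_chain(&a, &offset);
	write_chain(&b, &offset);               /* second call just reports SKIPPED */
	return 0;
}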
@@ -640,7 +671,7 @@ static void write_pack_file(void)
                nr_written = 0;
                for (; i < nr_objects; i++) {
                        struct object_entry *e = write_order[i];
-                       if (!write_one(f, e, &offset))
+                       if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
                                break;
                        display_progress(progress_state, written);
                }
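With the named statuses, the caller's intent reads directly: only WRITE_ONE_BREAK (the next object would bust the pack size limit) ends this inner loop, so the remaining objects spill into the next pack; skip and cycle results keep it going. A rough sketch of that split-on-limit loop shape follows, with invented names (put_item, part-%d.bin) standing in for Git's pack writer.

#include <stdio.h>

enum put_status { PUT_SKIP, PUT_LIMIT, PUT_OK };

/* Toy writer: each value costs sizeof(int) bytes, files are capped at `limit`. */
static enum put_status put_item(FILE *out, int value, long limit)
{
	if (value < 0)
		return PUT_SKIP;                /* nothing to emit for this entry */
	if (ftell(out) + (long)sizeof(value) > limit)
		return PUT_LIMIT;               /* would bust the size limit */
	fwrite(&value, sizeof(value), 1, out);
	return PUT_OK;
}

int main(void)
{
	int order[] = { 1, -1, 2, 3, 4, 5 };
	int i = 0, nr = 6, file_nr = 0;

	while (i < nr) {
		char name[32];
		FILE *out;

		snprintf(name, sizeof(name), "part-%d.bin", file_nr++);
		out = fopen(name, "wb");
		if (!out)
			return 1;
		for (; i < nr; i++)
			if (put_item(out, order[i], 12) == PUT_LIMIT)
				break;          /* only the limit ends this file */
		fclose(out);
	}
	printf("split into %d files\n", file_nr);
	return 0;
}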
@@ -840,6 +871,10 @@ static int add_object_entry(const unsigned char *sha1, enum object_type type,
                off_t offset = find_pack_entry_one(sha1, p);
                if (offset) {
                        if (!found_pack) {
+                               if (!is_pack_valid(p)) {
+                                       warning("packfile %s cannot be accessed", p->pack_name);
+                                       continue;
+                               }
                                found_offset = offset;
                                found_pack = p;
                        }
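The added guard refuses to record a pack that can no longer be opened (for example, one racily removed by a concurrent repack) as the reuse source for an object; without it, the failure would only surface later, when the object data is copied out of the vanished pack. A hedged sketch of the same "probe before depending on it" idea, using a plain POSIX open() as a stand-in for Git's is_pack_valid():

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in: a source file we might copy data from later. */
static int source_is_usable(const char *path)
{
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 0;               /* vanished or unreadable: do not depend on it */
	close(fd);
	return 1;
}

int main(void)
{
	const char *candidates[] = { "objects/pack-a.pack", "objects/pack-b.pack" };
	const char *chosen = NULL;
	int i;

	for (i = 0; i < 2 && !chosen; i++) {
		if (!source_is_usable(candidates[i])) {
			fprintf(stderr, "warning: %s cannot be accessed\n", candidates[i]);
			continue;
		}
		chosen = candidates[i];         /* safe to record as the reuse source */
	}
	printf("chosen: %s\n", chosen ? chosen : "(none)");
	return 0;
}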
@@ -1011,7 +1046,7 @@ static void add_pbase_object(struct tree_desc *tree,
        while (tree_entry(tree,&entry)) {
                if (S_ISGITLINK(entry.mode))
                        continue;
-               cmp = tree_entry_len(entry.path, entry.sha1) != cmplen ? 1 :
+               cmp = tree_entry_len(&entry) != cmplen ? 1 :
                      memcmp(name, entry.path, cmplen);
                if (cmp > 0)
                        continue;
@@ -1421,11 +1456,16 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
                return -1;
 
        /*
-        * We do not bother to try a delta that we discarded
-        * on an earlier try, but only when reusing delta data.
+        * We do not bother to try a delta that we discarded on an
+        * earlier try, but only when reusing delta data.  Note that
+        * src_entry that is marked as the preferred_base should always
+        * be considered, as even if we produce a suboptimal delta against
+        * it, we will still save the transfer cost, as we already know
+        * the other side has it and we won't send src_entry at all.
         */
        if (reuse_delta && trg_entry->in_pack &&
            trg_entry->in_pack == src_entry->in_pack &&
+           !src_entry->preferred_base &&
            trg_entry->in_pack_type != OBJ_REF_DELTA &&
            trg_entry->in_pack_type != OBJ_OFS_DELTA)
                return 0;
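The extended comment explains the exception the new !src_entry->preferred_base test carves out: a pairing rejected on an earlier try is normally not retried while existing deltas are being reused, but a preferred base (an object the receiver is known to have, used for thin packs) is always worth another attempt, since even a mediocre delta against it avoids sending the base at all. A boiled-down sketch of that predicate, with the in-pack delta-type checks left out and illustrative names (struct obj, skip_retry):

#include <stdio.h>

/* Simplified stand-in for the fields the retry decision looks at. */
struct obj {
	int in_pack;            /* id of the pack the object came from, 0 = none */
	int preferred_base;     /* 1 = receiver already has it (thin-pack base)  */
};

/*
 * Skip re-trying a pairing we already rejected when the existing pack
 * delta is being reused -- except against a preferred base, where even a
 * poor delta is a win because the base itself is never sent.
 */
static int skip_retry(const struct obj *trg, const struct obj *src, int reuse_delta)
{
	return reuse_delta &&
	       trg->in_pack &&
	       trg->in_pack == src->in_pack &&
	       !src->preferred_base;
}

int main(void)
{
	struct obj trg = { 1, 0 };
	struct obj same_pack = { 1, 0 };
	struct obj base = { 1, 1 };

	printf("ordinary same-pack source: skip=%d\n", skip_retry(&trg, &same_pack, 1));
	printf("preferred-base source:     skip=%d\n", skip_retry(&trg, &base, 1));
	return 0;
}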
@@ -2109,7 +2149,9 @@ static void show_commit(struct commit *commit, void *data)
        commit->object.flags |= OBJECT_ADDED;
 }
 
-static void show_object(struct object *obj, const struct name_path *path, const char *last)
+static void show_object(struct object *obj,
+                       const struct name_path *path, const char *last,
+                       void *data)
 {
        char *name = path_name(path, last);