Merge branch 'nd/clone-single-branch'
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 6643c1657a7f1dc94e12782000d0780d86c0e155..0f2e7b8f5cb26910679c39550d59ef23353a278f 100644
@@ -76,7 +76,7 @@ static struct pack_idx_option pack_idx_opts;
 static const char *base_name;
 static int progress = 1;
 static int window = 10;
-static unsigned long pack_size_limit, pack_size_limit_cfg;
+static unsigned long pack_size_limit;
 static int depth = 50;
 static int delta_search_threads;
 static int pack_to_stdout;
@@ -409,25 +409,56 @@ static unsigned long write_object(struct sha1file *f,
        return hdrlen + datalen;
 }
 
-static int write_one(struct sha1file *f,
-                              struct object_entry *e,
-                              off_t *offset)
+enum write_one_status {
+       WRITE_ONE_SKIP = -1, /* already written */
+       WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
+       WRITE_ONE_WRITTEN = 1, /* normal */
+       WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
+};
+
+static enum write_one_status write_one(struct sha1file *f,
+                                      struct object_entry *e,
+                                      off_t *offset)
 {
        unsigned long size;
+       int recursing;
 
-       /* offset is non zero if object is written already. */
-       if (e->idx.offset || e->preferred_base)
-               return -1;
+       /*
+        * we set offset to 1 (which is an impossible value) to mark
+        * the fact that this object is involved in "write its base
+        * first before writing a deltified object" recursion.
+        */
+       recursing = (e->idx.offset == 1);
+       if (recursing) {
+               warning("recursive delta detected for object %s",
+                       sha1_to_hex(e->idx.sha1));
+               return WRITE_ONE_RECURSIVE;
+       } else if (e->idx.offset || e->preferred_base) {
+               /* offset is non zero if object is written already. */
+               return WRITE_ONE_SKIP;
+       }
 
        /* if we are deltified, write out base object first. */
-       if (e->delta && !write_one(f, e->delta, offset))
-               return 0;
+       if (e->delta) {
+               e->idx.offset = 1; /* now recurse */
+               switch (write_one(f, e->delta, offset)) {
+               case WRITE_ONE_RECURSIVE:
+                       /* we cannot depend on this one */
+                       e->delta = NULL;
+                       break;
+               default:
+                       break;
+               case WRITE_ONE_BREAK:
+                       e->idx.offset = recursing;
+                       return WRITE_ONE_BREAK;
+               }
+       }
 
        e->idx.offset = *offset;
        size = write_object(f, e, *offset);
        if (!size) {
-               e->idx.offset = 0;
-               return 0;
+               e->idx.offset = recursing;
+               return WRITE_ONE_BREAK;
        }
        written_list[nr_written++] = &e->idx;
 
@@ -435,7 +466,7 @@ static int write_one(struct sha1file *f,
        if (signed_add_overflows(*offset, size))
                die("pack too large for current definition of off_t");
        *offset += size;
-       return 1;
+       return WRITE_ONE_WRITTEN;
 }
 
 static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
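The rewrite above replaces write_one()'s plain int result with an explicit four-way status, and uses idx.offset == 1 as an "in progress" sentinel: 1 can never be a real offset because a pack starts with a 12-byte header, so hitting it again means the delta chain loops back on itself, and the offending base is dropped (e->delta = NULL) instead of recursing forever. The caller in write_pack_file() later stops the loop only on WRITE_ONE_BREAK. Below is a standalone sketch of the sentinel technique on a simplified node type; the names, the fixed 100-byte object size and the deliberately cyclic input are illustrative, and the size-limit (WRITE_ONE_BREAK) path is left out.

#include <stdio.h>

enum emit_status { EMIT_SKIP = -1, EMIT_OK = 1, EMIT_CYCLE = 2 };

struct node {
        const char *name;
        struct node *base;    /* delta base: must be written first */
        unsigned long offset; /* 0 = unwritten, 1 = "being written" sentinel */
};

static enum emit_status emit(struct node *e, unsigned long *offset)
{
        if (e->offset == 1) {
                /* we reached ourselves while writing our own base chain */
                fprintf(stderr, "cycle detected at %s\n", e->name);
                return EMIT_CYCLE;
        }
        if (e->offset)
                return EMIT_SKIP;       /* already written */

        if (e->base) {
                e->offset = 1;          /* mark in-progress, then recurse */
                if (emit(e->base, offset) == EMIT_CYCLE)
                        e->base = NULL; /* cannot depend on it: store whole */
        }

        e->offset = *offset;            /* real offsets start past the header */
        printf("wrote %s at offset %lu\n", e->name, *offset);
        *offset += 100;                 /* pretend every object takes 100 bytes */
        return EMIT_OK;
}

int main(void)
{
        struct node a = { "A", NULL, 0 }, b = { "B", NULL, 0 };
        unsigned long offset = 12;      /* as if a 12-byte header were written */

        a.base = &b;
        b.base = &a;                    /* artificial A <-> B delta cycle */
        emit(&a, &offset);
        emit(&b, &offset);              /* already written: skipped */
        return 0;
}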
@@ -454,8 +485,8 @@ static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
        return 0;
 }
 
-static void add_to_write_order(struct object_entry **wo,
-                              int *endp,
+static inline void add_to_write_order(struct object_entry **wo,
+                              unsigned int *endp,
                               struct object_entry *e)
 {
        if (e->filled)
@@ -465,32 +496,62 @@ static void add_to_write_order(struct object_entry **wo,
 }
 
 static void add_descendants_to_write_order(struct object_entry **wo,
-                                          int *endp,
+                                          unsigned int *endp,
                                           struct object_entry *e)
 {
-       struct object_entry *child;
-
-       for (child = e->delta_child; child; child = child->delta_sibling)
-               add_to_write_order(wo, endp, child);
-       for (child = e->delta_child; child; child = child->delta_sibling)
-               add_descendants_to_write_order(wo, endp, child);
+       int add_to_order = 1;
+       while (e) {
+               if (add_to_order) {
+                       struct object_entry *s;
+                       /* add this node... */
+                       add_to_write_order(wo, endp, e);
+                       /* all its siblings... */
+                       for (s = e->delta_sibling; s; s = s->delta_sibling) {
+                               add_to_write_order(wo, endp, s);
+                       }
+               }
+               /* drop down a level to add left subtree nodes if possible */
+               if (e->delta_child) {
+                       add_to_order = 1;
+                       e = e->delta_child;
+               } else {
+                       add_to_order = 0;
+                       /* our sibling might have some children, it is next */
+                       if (e->delta_sibling) {
+                               e = e->delta_sibling;
+                               continue;
+                       }
+                       /* go back to our parent node */
+                       e = e->delta;
+                       while (e && !e->delta_sibling) {
+                               /* we're on the right side of a subtree, keep
+                                * going up until we can go right again */
+                               e = e->delta;
+                       }
+                       if (!e) {
+                               /* done- we hit our original root node */
+                               return;
+                       }
+                       /* pass it off to sibling at this level */
+                       e = e->delta_sibling;
+               }
+       }
 }
 
 static void add_family_to_write_order(struct object_entry **wo,
-                                     int *endp,
+                                     unsigned int *endp,
                                      struct object_entry *e)
 {
        struct object_entry *root;
 
        for (root = e; root->delta; root = root->delta)
                ; /* nothing */
-       add_to_write_order(wo, endp, root);
        add_descendants_to_write_order(wo, endp, root);
 }
 
 static struct object_entry **compute_write_order(void)
 {
-       int i, wo_end;
+       unsigned int i, wo_end, last_untagged;
 
        struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
 
@@ -506,8 +567,8 @@ static struct object_entry **compute_write_order(void)
         * Make sure delta_sibling is sorted in the original
         * recency order.
         */
-       for (i = nr_objects - 1; 0 <= i; i--) {
-               struct object_entry *e = &objects[i];
+       for (i = nr_objects; i > 0;) {
+               struct object_entry *e = &objects[--i];
                if (!e->delta)
                        continue;
                /* Mark me as the first child */
@@ -521,7 +582,7 @@ static struct object_entry **compute_write_order(void)
        for_each_tag_ref(mark_tagged, NULL);
 
        /*
-        * Give the commits in the original recency order until
+        * Give the objects in the original recency order until
         * we see a tagged tip.
         */
        for (i = wo_end = 0; i < nr_objects; i++) {
@@ -529,6 +590,7 @@ static struct object_entry **compute_write_order(void)
                        break;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
+       last_untagged = i;
 
        /*
         * Then fill all the tagged tips.
@@ -541,7 +603,7 @@ static struct object_entry **compute_write_order(void)
        /*
         * And then all remaining commits and tags.
         */
-       for (i = 0; i < nr_objects; i++) {
+       for (i = last_untagged; i < nr_objects; i++) {
                if (objects[i].type != OBJ_COMMIT &&
                    objects[i].type != OBJ_TAG)
                        continue;
@@ -551,7 +613,7 @@ static struct object_entry **compute_write_order(void)
        /*
         * And then all the trees.
         */
-       for (i = 0; i < nr_objects; i++) {
+       for (i = last_untagged; i < nr_objects; i++) {
                if (objects[i].type != OBJ_TREE)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
@@ -560,8 +622,13 @@ static struct object_entry **compute_write_order(void)
        /*
         * Finally all the rest in really tight order
         */
-       for (i = 0; i < nr_objects; i++)
-               add_family_to_write_order(wo, &wo_end, &objects[i]);
+       for (i = last_untagged; i < nr_objects; i++) {
+               if (!objects[i].filled)
+                       add_family_to_write_order(wo, &wo_end, &objects[i]);
+       }
+
+       if (wo_end != nr_objects)
+               die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);
 
        return wo;
 }
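compute_write_order() now remembers last_untagged, the index at which the first recency-order pass stopped, so the later type-specific passes skip the stretch that is already placed, and the closing check insists that the passes placed every object exactly once; the "exactly once" part rests on the filled bit tested in add_to_write_order(). A minimal sketch of that multi-pass, fill-flag, count-check pattern with toy types (not git's):

#include <stdio.h>

struct item {
        int id;
        unsigned filled : 1;  /* set once the item has been placed */
};

static void place(struct item **out, unsigned int *end, struct item *it)
{
        if (it->filled)
                return;       /* later passes silently skip placed items */
        out[(*end)++] = it;
        it->filled = 1;
}

int main(void)
{
        struct item items[4] = { { 0 }, { 1 }, { 2 }, { 3 } };
        struct item *order[4];
        unsigned int end = 0, i;

        for (i = 0; i < 4; i++)        /* first pass: even ids only */
                if (!(items[i].id & 1))
                        place(order, &end, &items[i]);
        for (i = 0; i < 4; i++)        /* second pass: everything else */
                place(order, &end, &items[i]);

        if (end != 4) {                /* same idea as the die() above */
                fprintf(stderr, "ordered %u items, expected 4\n", end);
                return 1;
        }
        for (i = 0; i < 4; i++)
                printf("%d\n", order[i]->id);
        return 0;
}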
@@ -584,16 +651,10 @@ static void write_pack_file(void)
                unsigned char sha1[20];
                char *pack_tmp_name = NULL;
 
-               if (pack_to_stdout) {
+               if (pack_to_stdout)
                        f = sha1fd_throughput(1, "<stdout>", progress_state);
-               } else {
-                       char tmpname[PATH_MAX];
-                       int fd;
-                       fd = odb_mkstemp(tmpname, sizeof(tmpname),
-                                        "pack/tmp_pack_XXXXXX");
-                       pack_tmp_name = xstrdup(tmpname);
-                       f = sha1fd(fd, pack_tmp_name);
-               }
+               else
+                       f = create_tmp_packfile(&pack_tmp_name);
 
                offset = write_pack_header(f, nr_remaining);
                if (!offset)
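The open-coded odb_mkstemp()/sha1fd() block gives way to create_tmp_packfile(), whose counterpart finish_tmp_packfile() (called further down) writes the index and renames both files into place. Underneath is the usual pattern of writing to a temporary file next to the final location, fixing permissions, then renaming atomically. A standalone POSIX sketch of that pattern, not git's implementation (file names and the 0444 mode are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>

/* Write buf to dir/final_name via a temp file plus an atomic rename(). */
static int write_atomically(const char *dir, const char *final_name,
                            const void *buf, size_t len)
{
        char tmpl[4096], dest[4096];
        int fd, ok;
        FILE *fp;

        /* temp file in the destination directory (cf. odb_mkstemp()) */
        snprintf(tmpl, sizeof(tmpl), "%s/tmp_XXXXXX", dir);
        fd = mkstemp(tmpl);
        if (fd < 0)
                return -1;
        fp = fdopen(fd, "w");
        if (!fp) {
                close(fd);
                unlink(tmpl);
                return -1;
        }
        ok = (fwrite(buf, 1, len, fp) == len);
        if (fclose(fp) != 0)
                ok = 0;
        /* loosen mkstemp()'s 0600 mode (cf. adjust_shared_perm()) */
        if (ok && chmod(tmpl, 0444) < 0)
                ok = 0;
        if (ok) {
                snprintf(dest, sizeof(dest), "%s/%s", dir, final_name);
                /* atomic only within one filesystem, hence the temp
                 * file living next to its final destination */
                if (rename(tmpl, dest) < 0)
                        ok = 0;
        }
        if (!ok)
                unlink(tmpl);
        return ok ? 0 : -1;
}

int main(void)
{
        const char payload[] = "demo payload, not a real pack\n";
        return write_atomically(".", "demo-0123abcd.pack",
                                payload, sizeof(payload) - 1) ? 1 : 0;
}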
@@ -601,7 +662,7 @@ static void write_pack_file(void)
                nr_written = 0;
                for (; i < nr_objects; i++) {
                        struct object_entry *e = write_order[i];
-                       if (!write_one(f, e, &offset))
+                       if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
                                break;
                        display_progress(progress_state, written);
                }
@@ -623,20 +684,8 @@ static void write_pack_file(void)
 
                if (!pack_to_stdout) {
                        struct stat st;
-                       const char *idx_tmp_name;
                        char tmpname[PATH_MAX];
 
-                       idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
-                                                     &pack_idx_opts, sha1);
-
-                       snprintf(tmpname, sizeof(tmpname), "%s-%s.pack",
-                                base_name, sha1_to_hex(sha1));
-                       free_pack_by_name(tmpname);
-                       if (adjust_shared_perm(pack_tmp_name))
-                               die_errno("unable to make temporary pack file readable");
-                       if (rename(pack_tmp_name, tmpname))
-                               die_errno("unable to rename temporary pack file");
-
                        /*
                         * Packs are runtime accessed in their mtime
                         * order since newer packs are more likely to contain
@@ -644,28 +693,27 @@ static void write_pack_file(void)
                         * packs then we should modify the mtime of later ones
                         * to preserve this property.
                         */
-                       if (stat(tmpname, &st) < 0) {
+                       if (stat(pack_tmp_name, &st) < 0) {
                                warning("failed to stat %s: %s",
-                                       tmpname, strerror(errno));
+                                       pack_tmp_name, strerror(errno));
                        } else if (!last_mtime) {
                                last_mtime = st.st_mtime;
                        } else {
                                struct utimbuf utb;
                                utb.actime = st.st_atime;
                                utb.modtime = --last_mtime;
-                               if (utime(tmpname, &utb) < 0)
+                               if (utime(pack_tmp_name, &utb) < 0)
                                        warning("failed utime() on %s: %s",
-                                               tmpname, strerror(errno));
+                                               pack_tmp_name, strerror(errno));
                        }
 
-                       snprintf(tmpname, sizeof(tmpname), "%s-%s.idx",
-                                base_name, sha1_to_hex(sha1));
-                       if (adjust_shared_perm(idx_tmp_name))
-                               die_errno("unable to make temporary index file readable");
-                       if (rename(idx_tmp_name, tmpname))
-                               die_errno("unable to rename temporary index file");
-
-                       free((void *) idx_tmp_name);
+                       /* Enough space for "-<sha-1>.pack"? */
+                       if (sizeof(tmpname) <= strlen(base_name) + 50)
+                               die("pack base name '%s' too long", base_name);
+                       snprintf(tmpname, sizeof(tmpname), "%s-", base_name);
+                       finish_tmp_packfile(tmpname, pack_tmp_name,
+                                           written_list, nr_written,
+                                           &pack_idx_opts, sha1);
                        free(pack_tmp_name);
                        puts(sha1_to_hex(sha1));
                }
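Pack files are consulted in mtime order, so each pack written after the first in a run is backdated one second relative to the previous one; after this change the stat()/utime() calls must use pack_tmp_name, the only name that exists at this point, since the final "<base>-<sha1>.pack" name is only produced by finish_tmp_packfile(). A standalone sketch of the backdating trick (hypothetical helper name, mirroring the logic above):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include <sys/stat.h>
#include <utime.h>

/*
 * Make `path` appear one second older than the previously processed
 * file, keeping its atime, so that files written later in the same
 * run still sort as "older" by mtime.  Returns the updated reference.
 */
static time_t backdate(const char *path, time_t last_mtime)
{
        struct stat st;
        struct utimbuf utb;

        if (stat(path, &st) < 0) {
                fprintf(stderr, "failed to stat %s: %s\n",
                        path, strerror(errno));
                return last_mtime;
        }
        if (!last_mtime)
                return st.st_mtime;    /* first file just sets the reference */

        utb.actime = st.st_atime;
        utb.modtime = --last_mtime;
        if (utime(path, &utb) < 0)
                fprintf(stderr, "failed utime() on %s: %s\n",
                        path, strerror(errno));
        return last_mtime;
}

int main(int argc, char **argv)
{
        time_t last = 0;
        int i;

        for (i = 1; i < argc; i++)
                last = backdate(argv[i], last);
        return 0;
}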
@@ -976,7 +1024,7 @@ static void add_pbase_object(struct tree_desc *tree,
        while (tree_entry(tree,&entry)) {
                if (S_ISGITLINK(entry.mode))
                        continue;
-               cmp = tree_entry_len(entry.path, entry.sha1) != cmplen ? 1 :
+               cmp = tree_entry_len(&entry) != cmplen ? 1 :
                      memcmp(name, entry.path, cmplen);
                if (cmp > 0)
                        continue;
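tree_entry_len() now takes the name_entry itself instead of a separate path/sha1 pair, so callers cannot mix pointers from different entries. The helper can avoid strlen() entirely because of the tree-object entry layout: "<mode> <path>", a NUL byte, then the 20-byte sha1, with both pointers aiming into the same buffer, so the gap between them minus the NUL is the name length. A hedged sketch of that idea using a stand-in struct (the real definition lives in tree-walk.h):

#include <stdio.h>

/* Stand-in for git's struct name_entry (tree-walk.h), with the path
 * and sha1 pointers aiming into the same tree-object buffer. */
struct demo_name_entry {
        const unsigned char *sha1;
        const char *path;
        unsigned int mode;
};

/* Name length without strlen(): the sha1 starts right after the
 * NUL that terminates the path. */
static int demo_tree_entry_len(const struct demo_name_entry *ne)
{
        return (const char *)ne->sha1 - ne->path - 1;
}

int main(void)
{
        /* "Makefile", its terminating NUL, then 20 bytes of fake sha1 */
        static const char raw[] = "Makefile\0" "0123456789abcdef0123";
        struct demo_name_entry ne = {
                (const unsigned char *)raw + sizeof("Makefile"),  /* sha1 */
                raw,                                              /* path */
                0100644                                           /* mode */
        };

        printf("entry len = %d\n", demo_tree_entry_len(&ne)); /* prints 8 */
        return 0;
}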
@@ -1386,11 +1434,16 @@ static int try_delta(struct unpacked *trg, struct unpacked *src,
                return -1;
 
        /*
-        * We do not bother to try a delta that we discarded
-        * on an earlier try, but only when reusing delta data.
+        * We do not bother to try a delta that we discarded on an
+        * earlier try, but only when reusing delta data.  Note that
+        * src_entry that is marked as the preferred_base should always
+        * be considered, as even if we produce a suboptimal delta against
+        * it, we will still save the transfer cost, as we already know
+        * the other side has it and we won't send src_entry at all.
         */
        if (reuse_delta && trg_entry->in_pack &&
            trg_entry->in_pack == src_entry->in_pack &&
+           !src_entry->preferred_base &&
            trg_entry->in_pack_type != OBJ_REF_DELTA &&
            trg_entry->in_pack_type != OBJ_OFS_DELTA)
                return 0;
@@ -2028,10 +2081,6 @@ static int git_pack_config(const char *k, const char *v, void *cb)
                            pack_idx_opts.version);
                return 0;
        }
-       if (!strcmp(k, "pack.packsizelimit")) {
-               pack_size_limit_cfg = git_config_ulong(k, v);
-               return 0;
-       }
        return git_default_config(k, v, cb);
 }