Merge branch 'bc/hash-algo'
diff --git a/builtin/pack-objects.c b/builtin/pack-objects.c
index 81ad914cfc20e30ce7480b76d6143b0f6f0b6230..5c674b2843ccadec710596f7b8c293bbbc6130ea 100644
--- a/builtin/pack-objects.c
+++ b/builtin/pack-objects.c
@@ -164,7 +164,7 @@ static unsigned long do_compress(void **pptr, unsigned long size)
        return stream.total_out;
 }
 
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
                                           const struct object_id *oid)
 {
        git_zstream stream;
@@ -188,7 +188,7 @@ static unsigned long write_large_blob_data(struct git_istream *st, struct sha1fi
                        stream.next_out = obuf;
                        stream.avail_out = sizeof(obuf);
                        zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
-                       sha1write(f, obuf, stream.next_out - obuf);
+                       hashwrite(f, obuf, stream.next_out - obuf);
                        olen += stream.next_out - obuf;
                }
                if (stream.avail_in)
@@ -233,7 +233,7 @@ static int check_pack_inflate(struct packed_git *p,
                stream.total_in == len) ? 0 : -1;
 }
 
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
                struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
@@ -246,14 +246,14 @@ static void copy_pack_data(struct sha1file *f,
                in = use_pack(p, w_curs, offset, &avail);
                if (avail > len)
                        avail = (unsigned long)len;
-               sha1write(f, in, avail);
+               hashwrite(f, in, avail);
                offset += avail;
                len -= avail;
        }
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
                                           unsigned long limit, int usable_delta)
 {
        unsigned long size, datalen;
@@ -326,8 +326,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, dheader + pos, sizeof(dheader) - pos);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
        } else if (type == OBJ_REF_DELTA) {
                /*
@@ -340,8 +340,8 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
@@ -350,13 +350,13 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
                        free(buf);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
+               hashwrite(f, header, hdrlen);
        }
        if (st) {
                datalen = write_large_blob_data(st, f, &entry->idx.oid);
                close_istream(st);
        } else {
-               sha1write(f, buf, datalen);
+               hashwrite(f, buf, datalen);
                free(buf);
        }
 
@@ -364,7 +364,7 @@ static unsigned long write_no_reuse_object(struct sha1file *f, struct object_ent
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
 {
        struct packed_git *p = entry->in_pack;
@@ -415,8 +415,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, dheader + pos, sizeof(dheader) - pos);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
                reused_delta++;
        } else if (type == OBJ_REF_DELTA) {
@@ -424,8 +424,8 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
-               sha1write(f, entry->delta->idx.oid.hash, 20);
+               hashwrite(f, header, hdrlen);
+               hashwrite(f, entry->delta->idx.oid.hash, 20);
                hdrlen += 20;
                reused_delta++;
        } else {
@@ -433,7 +433,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                        unuse_pack(&w_curs);
                        return 0;
                }
-               sha1write(f, header, hdrlen);
+               hashwrite(f, header, hdrlen);
        }
        copy_pack_data(f, p, &w_curs, offset, datalen);
        unuse_pack(&w_curs);
@@ -442,7 +442,7 @@ static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
 }
 
 /* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
                          struct object_entry *entry,
                          off_t write_offset)
 {
@@ -515,7 +515,7 @@ enum write_one_status {
        WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
 };
 
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
                                       struct object_entry *e,
                                       off_t *offset)
 {
@@ -734,7 +734,7 @@ static struct object_entry **compute_write_order(void)
        return wo;
 }
 
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
 {
        unsigned char buffer[8192];
        off_t to_write, total;
@@ -765,7 +765,7 @@ static off_t write_reused_pack(struct sha1file *f)
                if (read_pack > to_write)
                        read_pack = to_write;
 
-               sha1write(f, buffer, read_pack);
+               hashwrite(f, buffer, read_pack);
                to_write -= read_pack;
 
                /*
@@ -794,7 +794,7 @@ static const char no_split_warning[] = N_(
 static void write_pack_file(void)
 {
        uint32_t i = 0, j;
-       struct sha1file *f;
+       struct hashfile *f;
        off_t offset;
        uint32_t nr_remaining = nr_result;
        time_t last_mtime = 0;
@@ -810,7 +810,7 @@ static void write_pack_file(void)
                char *pack_tmp_name = NULL;
 
                if (pack_to_stdout)
-                       f = sha1fd_throughput(1, "<stdout>", progress_state);
+                       f = hashfd_throughput(1, "<stdout>", progress_state);
                else
                        f = create_tmp_packfile(&pack_tmp_name);
 
@@ -837,11 +837,11 @@ static void write_pack_file(void)
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
-                       sha1close(f, oid.hash, CSUM_CLOSE);
+                       hashclose(f, oid.hash, CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
-                       sha1close(f, oid.hash, CSUM_FSYNC);
+                       hashclose(f, oid.hash, CSUM_FSYNC);
                } else {
-                       int fd = sha1close(f, oid.hash, 0);
+                       int fd = hashclose(f, oid.hash, 0);
                        fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
                                                 nr_written, oid.hash, offset);
                        close(fd);
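
A minimal usage sketch of the renamed csum-file API that this patch converts pack-objects.c to (not part of the diff above; the wrapper function, descriptor, and buffer are hypothetical, and it assumes compilation inside the git tree where cache.h and csum-file.h are available):

#include "cache.h"
#include "csum-file.h"

static void write_payload_with_trailer(int fd, const void *buf, unsigned int len)
{
	unsigned char trailer[20];              /* hash width is still hard-coded to SHA-1 at this point */
	struct hashfile *f = hashfd(fd, "example");     /* was sha1fd() */

	hashwrite(f, buf, len);                 /* was sha1write(); feeds the running checksum as it writes */
	hashclose(f, trailer, CSUM_CLOSE);      /* was sha1close(); appends the checksum and closes fd */
}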