Merge branch 'jk/for-each-object-iteration'
author Junio C Hamano <gitster@pobox.com>
Mon, 20 Aug 2018 18:33:52 +0000 (11:33 -0700)
committer Junio C Hamano <gitster@pobox.com>
Mon, 20 Aug 2018 18:33:52 +0000 (11:33 -0700)
The API to iterate over all objects learned to optionally list
objects in the order they appear in packfiles, which helps locality
of access if the caller accesses these objects as they are
enumerated.
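
For illustration, a minimal sketch of how a caller might use the packed-object
iterator added by this series. It assumes the declarations now live in
object-store.h (per the topic list below) and that the pack-order flag is named
FOR_EACH_OBJECT_PACK_ORDER; the counting callback itself is hypothetical:

    #include "cache.h"
    #include "object-store.h"   /* for_each_*_object declarations moved here */

    /* each_packed_object_fn: called once per object found in a pack */
    static int count_object(const struct object_id *oid,
                            struct packed_git *pack,
                            uint32_t pos, void *data)
    {
            unsigned long *count = data;
            (*count)++;         /* a real caller would read the object here */
            return 0;           /* returning non-zero stops the iteration */
    }

    static unsigned long count_packed_objects(void)
    {
            unsigned long count = 0;
            /*
             * Requesting pack order means the callback sees objects in the
             * order they are stored, which improves locality of access.
             */
            for_each_packed_object(count_object, &count,
                                   FOR_EACH_OBJECT_PACK_ORDER);
            return count;
    }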

* jk/for-each-object-iteration:
for_each_*_object: move declarations to object-store.h
cat-file: use a single strbuf for all output
cat-file: split batch "buf" into two variables
cat-file: use oidset check-and-insert
cat-file: support "unordered" output for --batch-all-objects
cat-file: rename batch_{loose,packed}_object callbacks
t1006: test cat-file --batch-all-objects with duplicates
for_each_packed_object: support iterating in pack-order
for_each_*_object: give more comprehensive docstrings
for_each_*_object: take flag arguments as enum
for_each_*_object: store flag definitions in a single location

cache.h
commit-graph.c
packfile.h
sha1-file.c
diff --combined cache.h
index 1398b2a4e4aa18958044acb49607c69ed0e6c125,aee36afa545abd1952eae9b965df0313628ac45e..66d3b91cdb1d14bbe2499f90a0e1f6d9785b4b37
+++ b/cache.h
@@@ -868,9 -868,11 +868,9 @@@ void reset_shared_repository(void)
   * Do replace refs need to be checked this run?  This variable is
   * initialized to true unless --no-replace-object is used or
   * $GIT_NO_REPLACE_OBJECTS is set, but is set to false by some
 - * commands that do not want replace references to be active.  As an
 - * optimization it is also set to false if replace references have
 - * been sought but there were none.
 + * commands that do not want replace references to be active.
   */
 -extern int check_replace_refs;
 +extern int read_replace_refs;
  extern char *git_replace_ref_base;
  
  extern int fsync_object_files;
@@@ -1423,20 -1425,18 +1423,20 @@@ extern void *read_object_with_reference
  extern struct object *peel_to_type(const char *name, int namelen,
                                   struct object *o, enum object_type);
  
 +enum date_mode_type {
 +      DATE_NORMAL = 0,
 +      DATE_RELATIVE,
 +      DATE_SHORT,
 +      DATE_ISO8601,
 +      DATE_ISO8601_STRICT,
 +      DATE_RFC2822,
 +      DATE_STRFTIME,
 +      DATE_RAW,
 +      DATE_UNIX
 +};
 +
  struct date_mode {
 -      enum date_mode_type {
 -              DATE_NORMAL = 0,
 -              DATE_RELATIVE,
 -              DATE_SHORT,
 -              DATE_ISO8601,
 -              DATE_ISO8601_STRICT,
 -              DATE_RFC2822,
 -              DATE_STRFTIME,
 -              DATE_RAW,
 -              DATE_UNIX
 -      } type;
 +      enum date_mode_type type;
        const char *strftime_fmt;
        int local;
  };
@@@ -1575,62 -1575,6 +1575,6 @@@ extern int odb_mkstemp(struct strbuf *t
   */
  extern int odb_pack_keep(const char *name);
  
- /*
-  * Iterate over the files in the loose-object parts of the object
-  * directory "path", triggering the following callbacks:
-  *
-  *  - loose_object is called for each loose object we find.
-  *
-  *  - loose_cruft is called for any files that do not appear to be
-  *    loose objects. Note that we only look in the loose object
-  *    directories "objects/[0-9a-f]{2}/", so we will not report
-  *    "objects/foobar" as cruft.
-  *
-  *  - loose_subdir is called for each top-level hashed subdirectory
-  *    of the object directory (e.g., "$OBJDIR/f0"). It is called
-  *    after the objects in the directory are processed.
-  *
-  * Any callback that is NULL will be ignored. Callbacks returning non-zero
-  * will end the iteration.
-  *
-  * In the "buf" variant, "path" is a strbuf which will also be used as a
-  * scratch buffer, but restored to its original contents before
-  * the function returns.
-  */
- typedef int each_loose_object_fn(const struct object_id *oid,
-                                const char *path,
-                                void *data);
- typedef int each_loose_cruft_fn(const char *basename,
-                               const char *path,
-                               void *data);
- typedef int each_loose_subdir_fn(unsigned int nr,
-                                const char *path,
-                                void *data);
- int for_each_file_in_obj_subdir(unsigned int subdir_nr,
-                               struct strbuf *path,
-                               each_loose_object_fn obj_cb,
-                               each_loose_cruft_fn cruft_cb,
-                               each_loose_subdir_fn subdir_cb,
-                               void *data);
- int for_each_loose_file_in_objdir(const char *path,
-                                 each_loose_object_fn obj_cb,
-                                 each_loose_cruft_fn cruft_cb,
-                                 each_loose_subdir_fn subdir_cb,
-                                 void *data);
- int for_each_loose_file_in_objdir_buf(struct strbuf *path,
-                                     each_loose_object_fn obj_cb,
-                                     each_loose_cruft_fn cruft_cb,
-                                     each_loose_subdir_fn subdir_cb,
-                                     void *data);
- /*
-  * Iterate over loose objects in both the local
-  * repository and any alternates repositories (unless the
-  * LOCAL_ONLY flag is set).
-  */
- #define FOR_EACH_OBJECT_LOCAL_ONLY 0x1
- extern int for_each_loose_object(each_loose_object_fn, void *, unsigned flags);
  /*
   * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
   * blobs. This has a difference only if extensions.partialClone is set.
diff --combined commit-graph.c
index 0034740c26b48eda147d45258505df278b84cc0a,69a0d1c203c7093a9abe06d488b1a382cd0d6ca9..8a1bec7b8aa420dd3d4ecadc95dee31029533c07
@@@ -80,28 -80,28 +80,28 @@@ struct commit_graph *load_commit_graph_
  
        if (graph_size < GRAPH_MIN_SIZE) {
                close(fd);
 -              die("graph file %s is too small", graph_file);
 +              die(_("graph file %s is too small"), graph_file);
        }
        graph_map = xmmap(NULL, graph_size, PROT_READ, MAP_PRIVATE, fd, 0);
        data = (const unsigned char *)graph_map;
  
        graph_signature = get_be32(data);
        if (graph_signature != GRAPH_SIGNATURE) {
 -              error("graph signature %X does not match signature %X",
 +              error(_("graph signature %X does not match signature %X"),
                      graph_signature, GRAPH_SIGNATURE);
                goto cleanup_fail;
        }
  
        graph_version = *(unsigned char*)(data + 4);
        if (graph_version != GRAPH_VERSION) {
 -              error("graph version %X does not match version %X",
 +              error(_("graph version %X does not match version %X"),
                      graph_version, GRAPH_VERSION);
                goto cleanup_fail;
        }
  
        hash_version = *(unsigned char*)(data + 5);
        if (hash_version != GRAPH_OID_VERSION) {
 -              error("hash version %X does not match version %X",
 +              error(_("hash version %X does not match version %X"),
                      hash_version, GRAPH_OID_VERSION);
                goto cleanup_fail;
        }
                chunk_lookup += GRAPH_CHUNKLOOKUP_WIDTH;
  
                if (chunk_offset > graph_size - GIT_MAX_RAWSZ) {
 -                      error("improper chunk offset %08x%08x", (uint32_t)(chunk_offset >> 32),
 +                      error(_("improper chunk offset %08x%08x"), (uint32_t)(chunk_offset >> 32),
                              (uint32_t)chunk_offset);
                        goto cleanup_fail;
                }
                }
  
                if (chunk_repeated) {
 -                      error("chunk id %08x appears multiple times", chunk_id);
 +                      error(_("chunk id %08x appears multiple times"), chunk_id);
                        goto cleanup_fail;
                }
  
@@@ -258,7 -258,7 +258,7 @@@ static struct commit_list **insert_pare
        hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos);
        c = lookup_commit(the_repository, &oid);
        if (!c)
 -              die("could not find commit %s", oid_to_hex(&oid));
 +              die(_("could not find commit %s"), oid_to_hex(&oid));
        c->graph_pos = pos;
        return &commit_list_insert(c, pptr)->next;
  }
@@@ -562,7 -562,7 +562,7 @@@ static int add_packed_commits(const str
  
        oi.typep = &type;
        if (packed_object_info(the_repository, pack, offset, &oi) < 0)
 -              die("unable to get type of object %s", oid_to_hex(oid));
 +              die(_("unable to get type of object %s"), oid_to_hex(oid));
  
        if (type != OBJ_COMMIT)
                return 0;
@@@ -727,10 -727,10 +727,10 @@@ void write_commit_graph(const char *obj
                        strbuf_addstr(&packname, pack_indexes->items[i].string);
                        p = add_packed_git(packname.buf, packname.len, 1);
                        if (!p)
 -                              die("error adding pack %s", packname.buf);
 +                              die(_("error adding pack %s"), packname.buf);
                        if (open_pack_index(p))
 -                              die("error opening index for %s", packname.buf);
 +                              die(_("error opening index for %s"), packname.buf);
-                       for_each_object_in_pack(p, add_packed_commits, &oids);
+                       for_each_object_in_pack(p, add_packed_commits, &oids, 0);
                        close_pack(p);
                }
                strbuf_release(&packname);
diff --combined packfile.h
index fa36c473adaf02df7395329587baf07e47febb34,d91e43fe734a4e04cc9dfda36ca7c187c16dc154..630f35cf31ef74975c04d17820314a85bba675af
@@@ -1,12 -1,12 +1,12 @@@
  #ifndef PACKFILE_H
  #define PACKFILE_H
  
 +#include "cache.h"
  #include "oidset.h"
  
  /* in object-store.h */
  struct packed_git;
  struct object_info;
 -enum object_type;
  
  /*
   * Generate the filename to be used for a pack file with checksum "sha1" and
@@@ -148,23 -148,6 +148,6 @@@ extern int has_object_pack(const struc
  
  extern int has_pack_index(const unsigned char *sha1);
  
- /*
-  * Only iterate over packs obtained from the promisor remote.
-  */
- #define FOR_EACH_OBJECT_PROMISOR_ONLY 2
- /*
-  * Iterate over packed objects in both the local
-  * repository and any alternates repositories (unless the
-  * FOR_EACH_OBJECT_LOCAL_ONLY flag, defined in cache.h, is set).
-  */
- typedef int each_packed_object_fn(const struct object_id *oid,
-                                 struct packed_git *pack,
-                                 uint32_t pos,
-                                 void *data);
- extern int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn, void *data);
- extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
  /*
   * Return 1 if an object in a promisor packfile is or refers to the given
   * object, 0 otherwise.
diff --combined sha1-file.c
index c6ca960eb2aa6b70841350d894bfda77856d980a,cc0b57a751bd35082003350e8f6b282aa1a955ef..56e5329caf23c50d783c88b4058363cc1d847d57
@@@ -71,17 -71,17 +71,17 @@@ static void git_hash_sha1_final(unsigne
  
  static void git_hash_unknown_init(git_hash_ctx *ctx)
  {
 -      die("trying to init unknown hash");
 +      BUG("trying to init unknown hash");
  }
  
  static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
  {
 -      die("trying to update unknown hash");
 +      BUG("trying to update unknown hash");
  }
  
  static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
  {
 -      die("trying to finalize unknown hash");
 +      BUG("trying to finalize unknown hash");
  }
  
  const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
@@@ -378,8 -378,8 +378,8 @@@ static int alt_odb_usable(struct raw_ob
  
        /* Detect cases where alternate disappeared */
        if (!is_directory(path->buf)) {
 -              error("object directory %s does not exist; "
 -                    "check .git/objects/info/alternates.",
 +              error(_("object directory %s does not exist; "
 +                      "check .git/objects/info/alternates"),
                      path->buf);
                return 0;
        }
@@@ -429,7 -429,7 +429,7 @@@ static int link_alt_odb_entry(struct re
        strbuf_addstr(&pathbuf, entry);
  
        if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
 -              error("unable to normalize alternate object path: %s",
 +              error(_("unable to normalize alternate object path: %s"),
                      pathbuf.buf);
                strbuf_release(&pathbuf);
                return -1;
@@@ -500,14 -500,14 +500,14 @@@ static void link_alt_odb_entries(struc
                return;
  
        if (depth > 5) {
 -              error("%s: ignoring alternate object stores, nesting too deep.",
 +              error(_("%s: ignoring alternate object stores, nesting too deep"),
                                relative_base);
                return;
        }
  
        strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
        if (strbuf_normalize_path(&objdirbuf) < 0)
 -              die("unable to normalize object directory: %s",
 +              die(_("unable to normalize object directory: %s"),
                    objdirbuf.buf);
  
        while (*alt) {
@@@ -562,7 -562,7 +562,7 @@@ void add_to_alternates_file(const char 
        hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
        out = fdopen_lock_file(&lock, "w");
        if (!out)
 -              die_errno("unable to fdopen alternates lockfile");
 +              die_errno(_("unable to fdopen alternates lockfile"));
  
        in = fopen(alts, "r");
        if (in) {
                fclose(in);
        }
        else if (errno != ENOENT)
 -              die_errno("unable to read alternates file");
 +              die_errno(_("unable to read alternates file"));
  
        if (found) {
                rollback_lock_file(&lock);
        } else {
                fprintf_or_die(out, "%s\n", reference);
                if (commit_lock_file(&lock))
 -                      die_errno("unable to move new alternates file into place");
 +                      die_errno(_("unable to move new alternates file into place"));
                if (the_repository->objects->alt_odb_tail)
                        link_alt_odb_entries(the_repository, reference,
                                             '\n', NULL, 0);
@@@ -778,7 -778,7 +778,7 @@@ static void mmap_limit_check(size_t len
                        limit = SIZE_MAX;
        }
        if (length > limit)
 -              die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
 +              die(_("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX),
                    (uintmax_t)length, (uintmax_t)limit);
  }
  
@@@ -803,7 -803,7 +803,7 @@@ void *xmmap(void *start, size_t length
  {
        void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
        if (ret == MAP_FAILED)
 -              die_errno("mmap failed");
 +              die_errno(_("mmap failed"));
        return ret;
  }
  
@@@ -970,7 -970,7 +970,7 @@@ static void *map_sha1_file_1(struct rep
                        *size = xsize_t(st.st_size);
                        if (!*size) {
                                /* mmap() is forbidden on empty files */
 -                              error("object file %s is empty", path);
 +                              error(_("object file %s is empty"), path);
                                return NULL;
                        }
                        map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
@@@ -1090,9 -1090,9 +1090,9 @@@ static void *unpack_sha1_rest(git_zstre
        }
  
        if (status < 0)
 -              error("corrupt loose object '%s'", sha1_to_hex(sha1));
 +              error(_("corrupt loose object '%s'"), sha1_to_hex(sha1));
        else if (stream->avail_in)
 -              error("garbage at end of loose object '%s'",
 +              error(_("garbage at end of loose object '%s'"),
                      sha1_to_hex(sha1));
        free(buf);
        return NULL;
@@@ -1134,7 -1134,7 +1134,7 @@@ static int parse_sha1_header_extended(c
        if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
                type = 0;
        else if (type < 0)
 -              die("invalid object type");
 +              die(_("invalid object type"));
        if (oi->typep)
                *oi->typep = type;
  
@@@ -1216,19 -1216,19 +1216,19 @@@ static int sha1_loose_object_info(struc
                *oi->disk_sizep = mapsize;
        if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
                if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
 -                      status = error("unable to unpack %s header with --allow-unknown-type",
 +                      status = error(_("unable to unpack %s header with --allow-unknown-type"),
                                       sha1_to_hex(sha1));
        } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
 -              status = error("unable to unpack %s header",
 +              status = error(_("unable to unpack %s header"),
                               sha1_to_hex(sha1));
        if (status < 0)
                ; /* Do nothing */
        else if (hdrbuf.len) {
                if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
 -                      status = error("unable to parse %s header with --allow-unknown-type",
 +                      status = error(_("unable to parse %s header with --allow-unknown-type"),
                                       sha1_to_hex(sha1));
        } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
 -              status = error("unable to parse %s header", sha1_to_hex(sha1));
 +              status = error(_("unable to parse %s header"), sha1_to_hex(sha1));
  
        if (status >= 0 && oi->contentp) {
                *oi->contentp = unpack_sha1_rest(&stream, hdr,
@@@ -1419,19 -1419,19 +1419,19 @@@ void *read_object_file_extended(const s
                return data;
  
        if (errno && errno != ENOENT)
 -              die_errno("failed to read object %s", oid_to_hex(oid));
 +              die_errno(_("failed to read object %s"), oid_to_hex(oid));
  
        /* die if we replaced an object with one that does not exist */
        if (repl != oid)
 -              die("replacement %s not found for %s",
 +              die(_("replacement %s not found for %s"),
                    oid_to_hex(repl), oid_to_hex(oid));
  
        if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
 -              die("loose object %s (stored in %s) is corrupt",
 +              die(_("loose object %s (stored in %s) is corrupt"),
                    oid_to_hex(repl), path);
  
        if ((p = has_packed_and_bad(repl->hash)) != NULL)
 -              die("packed object %s (stored in %s) is corrupt",
 +              die(_("packed object %s (stored in %s) is corrupt"),
                    oid_to_hex(repl), p->pack_name);
  
        return NULL;
@@@ -1533,21 -1533,21 +1533,21 @@@ int finalize_object_file(const char *tm
        unlink_or_warn(tmpfile);
        if (ret) {
                if (ret != EEXIST) {
 -                      return error_errno("unable to write sha1 filename %s", filename);
 +                      return error_errno(_("unable to write sha1 filename %s"), filename);
                }
                /* FIXME!!! Collision check here ? */
        }
  
  out:
        if (adjust_shared_perm(filename))
 -              return error("unable to set permission to '%s'", filename);
 +              return error(_("unable to set permission to '%s'"), filename);
        return 0;
  }
  
  static int write_buffer(int fd, const void *buf, size_t len)
  {
        if (write_in_full(fd, buf, len) < 0)
 -              return error_errno("file write error");
 +              return error_errno(_("file write error"));
        return 0;
  }
  
@@@ -1566,7 -1566,7 +1566,7 @@@ static void close_sha1_file(int fd
        if (fsync_object_files)
                fsync_or_die(fd, "sha1 file");
        if (close(fd) != 0)
 -              die_errno("error when closing sha1 file");
 +              die_errno(_("error when closing sha1 file"));
  }
  
  /* Size of directory component, including the ending '/' */
@@@ -1632,9 -1632,9 +1632,9 @@@ static int write_loose_object(const str
        fd = create_tmpfile(&tmp_file, filename.buf);
        if (fd < 0) {
                if (errno == EACCES)
 -                      return error("insufficient permission for adding an object to repository database %s", get_object_directory());
 +                      return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory());
                else
 -                      return error_errno("unable to create temporary file");
 +                      return error_errno(_("unable to create temporary file"));
        }
  
        /* Set it up */
                ret = git_deflate(&stream, Z_FINISH);
                the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
                if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
 -                      die("unable to write sha1 file");
 +                      die(_("unable to write sha1 file"));
                stream.next_out = compressed;
                stream.avail_out = sizeof(compressed);
        } while (ret == Z_OK);
  
        if (ret != Z_STREAM_END)
 -              die("unable to deflate new object %s (%d)", oid_to_hex(oid),
 +              die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
                    ret);
        ret = git_deflate_end_gently(&stream);
        if (ret != Z_OK)
 -              die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
 +              die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
                    ret);
        the_hash_algo->final_fn(parano_oid.hash, &c);
        if (oidcmp(oid, &parano_oid) != 0)
 -              die("confused by unstable object source data for %s",
 +              die(_("confused by unstable object source data for %s"),
                    oid_to_hex(oid));
  
        close_sha1_file(fd);
                utb.actime = mtime;
                utb.modtime = mtime;
                if (utime(tmp_file.buf, &utb) < 0)
 -                      warning_errno("failed utime() on %s", tmp_file.buf);
 +                      warning_errno(_("failed utime() on %s"), tmp_file.buf);
        }
  
        return finalize_object_file(tmp_file.buf, filename.buf);
@@@ -1757,7 -1757,7 +1757,7 @@@ int force_object_loose(const struct obj
                return 0;
        buf = read_object(oid->hash, &type, &len);
        if (!buf)
 -              return error("cannot read sha1_file for %s", oid_to_hex(oid));
 +              return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
        hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
        ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
        free(buf);
@@@ -1802,7 -1802,7 +1802,7 @@@ static void check_commit(const void *bu
        struct commit c;
        memset(&c, 0, sizeof(c));
        if (parse_commit_buffer(the_repository, &c, buf, size, 0))
 -              die("corrupt commit");
 +              die(_("corrupt commit"));
  }
  
  static void check_tag(const void *buf, size_t size)
        struct tag t;
        memset(&t, 0, sizeof(t));
        if (parse_tag_buffer(the_repository, &t, buf, size))
 -              die("corrupt tag");
 +              die(_("corrupt tag"));
  }
  
  static int index_mem(struct object_id *oid, void *buf, size_t size,
@@@ -1903,10 -1903,10 +1903,10 @@@ static int index_core(struct object_id 
                char *buf = xmalloc(size);
                ssize_t read_result = read_in_full(fd, buf, size);
                if (read_result < 0)
 -                      ret = error_errno("read error while indexing %s",
 +                      ret = error_errno(_("read error while indexing %s"),
                                          path ? path : "<unknown>");
                else if (read_result != size)
 -                      ret = error("short read while indexing %s",
 +                      ret = error(_("short read while indexing %s"),
                                    path ? path : "<unknown>");
                else
                        ret = index_mem(oid, buf, size, type, path, flags);
@@@ -1977,7 -1977,7 +1977,7 @@@ int index_path(struct object_id *oid, c
                if (fd < 0)
                        return error_errno("open(\"%s\")", path);
                if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
 -                      return error("%s: failed to insert into database",
 +                      return error(_("%s: failed to insert into database"),
                                     path);
                break;
        case S_IFLNK:
                if (!(flags & HASH_WRITE_OBJECT))
                        hash_object_file(sb.buf, sb.len, blob_type, oid);
                else if (write_object_file(sb.buf, sb.len, blob_type, oid))
 -                      rc = error("%s: failed to insert into database", path);
 +                      rc = error(_("%s: failed to insert into database"), path);
                strbuf_release(&sb);
                break;
        case S_IFDIR:
                return resolve_gitlink_ref(path, "HEAD", oid);
        default:
 -              return error("%s: unsupported file type", path);
 +              return error(_("%s: unsupported file type"), path);
        }
        return rc;
  }
@@@ -2016,9 -2016,9 +2016,9 @@@ void assert_oid_type(const struct objec
  {
        enum object_type type = oid_object_info(the_repository, oid, NULL);
        if (type < 0)
 -              die("%s is not a valid object", oid_to_hex(oid));
 +              die(_("%s is not a valid object"), oid_to_hex(oid));
        if (type != expect)
 -              die("%s is not a valid '%s' object", oid_to_hex(oid),
 +              die(_("%s is not a valid '%s' object"), oid_to_hex(oid),
                    type_name(expect));
  }
  
@@@ -2045,7 -2045,7 +2045,7 @@@ int for_each_file_in_obj_subdir(unsigne
        dir = opendir(path->buf);
        if (!dir) {
                if (errno != ENOENT)
 -                      r = error_errno("unable to open %s", path->buf);
 +                      r = error_errno(_("unable to open %s"), path->buf);
                strbuf_setlen(path, origlen);
                return r;
        }
@@@ -2146,7 -2146,8 +2146,8 @@@ static int loose_from_alt_odb(struct al
        return r;
  }
  
- int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
+ int for_each_loose_object(each_loose_object_fn cb, void *data,
+                         enum for_each_object_flags flags)
  {
        struct loose_alt_odb_data alt;
        int r;
@@@ -2202,18 -2203,18 +2203,18 @@@ static int check_stream_sha1(git_zstrea
        git_inflate_end(stream);
  
        if (status != Z_STREAM_END) {
 -              error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
 +              error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1));
                return -1;
        }
        if (stream->avail_in) {
 -              error("garbage at end of loose object '%s'",
 +              error(_("garbage at end of loose object '%s'"),
                      sha1_to_hex(expected_sha1));
                return -1;
        }
  
        the_hash_algo->final_fn(real_sha1, &c);
        if (hashcmp(expected_sha1, real_sha1)) {
 -              error("sha1 mismatch for %s (expected %s)", path,
 +              error(_("sha1 mismatch for %s (expected %s)"), path,
                      sha1_to_hex(expected_sha1));
                return -1;
        }
@@@ -2237,18 -2238,18 +2238,18 @@@ int read_loose_object(const char *path
  
        map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
        if (!map) {
 -              error_errno("unable to mmap %s", path);
 +              error_errno(_("unable to mmap %s"), path);
                goto out;
        }
  
        if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
 -              error("unable to unpack header of %s", path);
 +              error(_("unable to unpack header of %s"), path);
                goto out;
        }
  
        *type = parse_sha1_header(hdr, size);
        if (*type < 0) {
 -              error("unable to parse header of %s", path);
 +              error(_("unable to parse header of %s"), path);
                git_inflate_end(&stream);
                goto out;
        }
        } else {
                *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
                if (!*contents) {
 -                      error("unable to unpack contents of %s", path);
 +                      error(_("unable to unpack contents of %s"), path);
                        git_inflate_end(&stream);
                        goto out;
                }
                if (check_object_signature(expected_oid, *contents,
                                         *size, type_name(*type))) {
 -                      error("sha1 mismatch for %s (expected %s)", path,
 +                      error(_("sha1 mismatch for %s (expected %s)"), path,
                              oid_to_hex(expected_oid));
                        free(*contents);
                        goto out;