Merge branch 'mh/mmap-packed-refs'
author Junio C Hamano <gitster@pobox.com>
Tue, 3 Oct 2017 06:42:50 +0000 (15:42 +0900)
committer Junio C Hamano <gitster@pobox.com>
Tue, 3 Oct 2017 06:42:50 +0000 (15:42 +0900)
Operations that do not touch (the majority of) packed refs have been
optimized by making accesses to the packed-refs file lazy; we no longer
pre-parse everything, and an access to a single ref in the
packed-refs file does not touch the majority of irrelevant refs, either.

* mh/mmap-packed-refs: (21 commits)
packed-backend.c: rename a bunch of things and update comments
mmapped_ref_iterator: inline into `packed_ref_iterator`
ref_cache: remove support for storing peeled values
packed_ref_store: get rid of the `ref_cache` entirely
ref_store: implement `refs_peel_ref()` generically
packed_read_raw_ref(): read the reference from the mmapped buffer
packed_ref_iterator_begin(): iterate using `mmapped_ref_iterator`
read_packed_refs(): ensure that references are ordered when read
packed_ref_cache: keep the `packed-refs` file mmapped if possible
packed-backend.c: reorder some definitions
mmapped_ref_iterator_advance(): no peeled value for broken refs
mmapped_ref_iterator: add iterator over a packed-refs file
packed_ref_cache: remember the file-wide peeling state
read_packed_refs(): read references with minimal copying
read_packed_refs(): make parsing of the header line more robust
read_packed_refs(): only check for a header at the top of the file
read_packed_refs(): use mmap to read the `packed-refs` file
die_unterminated_line(), die_invalid_line(): new functions
packed_ref_cache: add a backlink to the associated `packed_ref_store`
prefix_ref_iterator: break when we leave the prefix
...

1  2 
Makefile
refs.c
refs/files-backend.c
refs/packed-backend.c
refs/refs-internal.h
diff --cc Makefile
Simple merge
diff --cc refs.c
Simple merge
index 4b46cd2e26dbcb782d58bcf4a7bc0c0494b715a4,7d12de88d015a73ce8cc77da5e01ad1a43086025..1b6141e718a5c449226c22fdeefb80b591ca3f7e
@@@ -2094,8 -2049,8 +2059,8 @@@ static struct ref_iterator *reflog_iter
        struct ref_iterator *ref_iterator = &iter->base;
        struct strbuf sb = STRBUF_INIT;
  
-       base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
+       base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
 -      files_reflog_path(refs, &sb, NULL);
 +      strbuf_addf(&sb, "%s/logs", gitdir);
        iter->dir_iterator = dir_iterator_begin(sb.buf);
        iter->ref_store = ref_store;
        strbuf_release(&sb);
        return ref_iterator;
  }
  
 +static enum iterator_selection reflog_iterator_select(
 +      struct ref_iterator *iter_worktree,
 +      struct ref_iterator *iter_common,
 +      void *cb_data)
 +{
 +      if (iter_worktree) {
 +              /*
 +               * We're a bit loose here. We probably should ignore
 +               * common refs if they are accidentally added as
 +               * per-worktree refs.
 +               */
 +              return ITER_SELECT_0;
 +      } else if (iter_common) {
 +              if (ref_type(iter_common->refname) == REF_TYPE_NORMAL)
 +                      return ITER_SELECT_1;
 +
 +              /*
 +               * The main ref store may contain main worktree's
 +               * per-worktree refs, which should be ignored
 +               */
 +              return ITER_SKIP_1;
 +      } else
 +              return ITER_DONE;
 +}
 +
 +static struct ref_iterator *files_reflog_iterator_begin(struct ref_store *ref_store)
 +{
 +      struct files_ref_store *refs =
 +              files_downcast(ref_store, REF_STORE_READ,
 +                             "reflog_iterator_begin");
 +
 +      if (!strcmp(refs->gitdir, refs->gitcommondir)) {
 +              return reflog_iterator_begin(ref_store, refs->gitcommondir);
 +      } else {
 +              return merge_ref_iterator_begin(
++                      0,
 +                      reflog_iterator_begin(ref_store, refs->gitdir),
 +                      reflog_iterator_begin(ref_store, refs->gitcommondir),
 +                      reflog_iterator_select, refs);
 +      }
 +}
 +
  /*
   * If update is a direct update of head_ref (the reference pointed to
   * by HEAD), then add an extra REF_LOG_ONLY update for HEAD.
index 9c0d685c7f4e8d770f151ddd9e0aeac48b9e252b,d500ebfaa5311fc37ae0b35144c8a6bd130b54be..6e85b0bf0bd5efad7617698a4c2f740a9a28fb05
@@@ -75,9 -143,52 +143,52 @@@ struct packed_ref_store 
         * "packed-refs" file. Note that this (and thus the enclosing
         * `packed_ref_store`) must not be freed.
         */
 -      struct tempfile tempfile;
 +      struct tempfile *tempfile;
  };
  
+ /*
+  * Increment the reference count of `*snapshot`.
+  */
+ static void acquire_snapshot(struct snapshot *snapshot)
+ {
+       snapshot->referrers++;
+ }
+ /*
+  * If the buffer in `snapshot` is active, then either munmap the
+  * memory and close the file, or free the memory. Then set the buffer
+  * pointers to NULL.
+  */
+ static void clear_snapshot_buffer(struct snapshot *snapshot)
+ {
+       if (snapshot->mmapped) {
+               if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
+                       die_errno("error ummapping packed-refs file %s",
+                                 snapshot->refs->path);
+               snapshot->mmapped = 0;
+       } else {
+               free(snapshot->buf);
+       }
+       snapshot->buf = snapshot->eof = NULL;
+       snapshot->header_len = 0;
+ }
+ /*
+  * Decrease the reference count of `*snapshot`. If it goes to zero,
+  * free `*snapshot` and return true; otherwise return false.
+  */
+ static int release_snapshot(struct snapshot *snapshot)
+ {
+       if (!--snapshot->referrers) {
+               stat_validity_clear(&snapshot->validity);
+               clear_snapshot_buffer(snapshot);
+               free(snapshot);
+               return 1;
+       } else {
+               return 0;
+       }
+ }
  struct ref_store *packed_ref_store_create(const char *path,
                                          unsigned int store_flags)
  {
Simple merge