Merge branch 'sb/test-helper-excludes'
diff --git a/refs/packed-backend.c b/refs/packed-backend.c
index 3279d42c5a696434bbcdb85d6244cded1ddbc758..023243fd5f1833f3c5f0b6fd3cd82b2e0c69644e 100644
@@ -716,7 +716,7 @@ static struct snapshot *get_snapshot(struct packed_ref_store *refs)
 }
 
 static int packed_read_raw_ref(struct ref_store *ref_store,
-                              const char *refname, unsigned char *sha1,
+                              const char *refname, struct object_id *oid,
                               struct strbuf *referent, unsigned int *type)
 {
        struct packed_ref_store *refs =
@@ -734,7 +734,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store,
                return -1;
        }
 
-       if (get_sha1_hex(rec, sha1))
+       if (get_oid_hex(rec, oid))
                die_invalid_line(refs->path, rec, snapshot->eof - rec);
 
        *type = REF_ISPACKED;
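
These first two hunks are the same mechanical conversion seen throughout the
patch: `packed_read_raw_ref()` stops filling a bare `unsigned char *sha1`
buffer and instead fills a typed `struct object_id`, so the hex parsing moves
from `get_sha1_hex()` to `get_oid_hex()`. As a rough standalone illustration
of what that parsing step does, here is a minimal sketch assuming a simplified
struct and a fixed 20-byte SHA-1 (Git's real definitions live in its own
headers and differ in detail):

    /* Sketch only: a simplified object id and hex parser, not Git's code. */
    #define RAWSZ 20                /* SHA-1 binary length (assumption) */

    struct object_id_sketch {
            unsigned char hash[RAWSZ];
    };

    static int hexval(char c)
    {
            if (c >= '0' && c <= '9')
                    return c - '0';
            if (c >= 'a' && c <= 'f')
                    return c - 'a' + 10;
            return -1;
    }

    /* Parse 2*RAWSZ hex digits into oid; return 0 on success, -1 on bad input. */
    static int get_oid_hex_sketch(const char *hex, struct object_id_sketch *oid)
    {
            int i;

            for (i = 0; i < RAWSZ; i++) {
                    int hi = hexval(hex[2 * i]);
                    int lo = (hi < 0) ? -1 : hexval(hex[2 * i + 1]);

                    if (hi < 0 || lo < 0)
                            return -1;
                    oid->hash[i] = (hi << 4) | lo;
            }
            return 0;
    }

The practical gain of the typed struct is that the compiler can now flag a raw
byte buffer being passed where an object name is expected.
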
@@ -744,7 +744,7 @@ static int packed_read_raw_ref(struct ref_store *ref_store,
 /*
  * This value is set in `base.flags` if the peeled value of the
  * current reference is known. In that case, `peeled` contains the
- * correct peeled value for the reference, which might be `null_sha1`
+ * correct peeled value for the reference, which might be `null_oid`
  * if the reference is not a tag or if it is broken.
  */
 #define REF_KNOWS_PEELED 0x40
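
`null_oid` is the all-zero `struct object_id`, taking over the role the
all-zero `null_sha1` buffer used to play for "no / unknown object". A minimal
sketch of the corresponding check, with hypothetical names (Git ships its own
`null_oid` and `is_null_oid()`):

    /* Sketch only; Git provides its own null_oid and is_null_oid(). */
    #include <string.h>

    #define RAWSZ 20
    struct object_id_sketch { unsigned char hash[RAWSZ]; };

    static const struct object_id_sketch null_oid_sketch;  /* all zero bytes */

    static int is_null_oid_sketch(const struct object_id_sketch *oid)
    {
            return !memcmp(oid->hash, null_oid_sketch.hash, RAWSZ);
    }
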
@@ -880,7 +880,7 @@ static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
        } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
                return -1;
        } else {
-               return !!peel_object(iter->oid.hash, peeled->hash);
+               return !!peel_object(&iter->oid, peeled);
        }
 }
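
`peel_object()` likewise now takes and fills `struct object_id` pointers
rather than raw hash buffers, and the `!!` collapses its return value into the
plain zero/non-zero answer the iterator interface reports. A hedged
caller-side sketch; the prototype below is what the patch implies, written out
here as an assumption rather than quoted from Git's headers:

    /* Sketch of the calling pattern after the conversion (not Git's code). */
    struct object_id;

    /* Assumed shape of the converted helper: 0 on success, non-zero otherwise. */
    int peel_object(const struct object_id *name, struct object_id *oid);

    static int peel_entry_sketch(const struct object_id *tag_oid,
                                 struct object_id *peeled)
    {
            /* Normalize to 0 (peeled successfully) or 1 (could not peel). */
            return !!peel_object(tag_oid, peeled);
    }
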
 
@@ -961,11 +961,11 @@ static struct ref_iterator *packed_ref_iterator_begin(
  * by the failing call to `fprintf()`.
  */
 static int write_packed_entry(FILE *fh, const char *refname,
-                             const unsigned char *sha1,
-                             const unsigned char *peeled)
+                             const struct object_id *oid,
+                             const struct object_id *peeled)
 {
-       if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
-           (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
+       if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
+           (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
                return -1;
 
        return 0;
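
`write_packed_entry()` produces the on-disk `packed-refs` format: one
"<hex-oid> <refname>" line per reference, optionally followed by a
"^<hex-oid>" line carrying a tag's peeled value. A standalone sketch of the
same output shape (the names and the fixed 20-byte hash are assumptions of
this sketch; Git's own `oid_to_hex()` returns a pointer into a rotating static
buffer rather than taking one from the caller):

    #include <stdio.h>

    #define RAWSZ 20
    struct object_id_sketch { unsigned char hash[RAWSZ]; };

    static const char *oid_to_hex_sketch(const struct object_id_sketch *oid,
                                         char *buf)
    {
            int i;

            for (i = 0; i < RAWSZ; i++)
                    sprintf(buf + 2 * i, "%02x", oid->hash[i]);
            return buf;
    }

    /* Emit "<hex> <refname>\n" plus an optional "^<peeled-hex>\n" line. */
    static int write_packed_entry_sketch(FILE *fh, const char *refname,
                                         const struct object_id_sketch *oid,
                                         const struct object_id_sketch *peeled)
    {
            char hex[2 * RAWSZ + 1];

            if (fprintf(fh, "%s %s\n", oid_to_hex_sketch(oid, hex), refname) < 0)
                    return -1;
            if (peeled && fprintf(fh, "^%s\n", oid_to_hex_sketch(peeled, hex)) < 0)
                    return -1;
            return 0;
    }
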
@@ -1203,8 +1203,8 @@ static int write_with_updates(struct packed_ref_store *refs,
                        int peel_error = ref_iterator_peel(iter, &peeled);
 
                        if (write_packed_entry(out, iter->refname,
-                                              iter->oid->hash,
-                                              peel_error ? NULL : peeled.hash))
+                                              iter->oid,
+                                              peel_error ? NULL : &peeled))
                                goto write_error;
 
                        if ((ok = ref_iterator_advance(iter)) != ITER_OK)
@@ -1220,12 +1220,12 @@ static int write_with_updates(struct packed_ref_store *refs,
                        i++;
                } else {
                        struct object_id peeled;
-                       int peel_error = peel_object(update->new_oid.hash,
-                                                    peeled.hash);
+                       int peel_error = peel_object(&update->new_oid,
+                                                    &peeled);
 
                        if (write_packed_entry(out, update->refname,
-                                              update->new_oid.hash,
-                                              peel_error ? NULL : peeled.hash))
+                                              &update->new_oid,
+                                              peel_error ? NULL : &peeled))
                                goto write_error;
 
                        i++;
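
Both call sites above live in `write_with_updates()`, which walks the existing
packed refs (via an iterator) and the transaction's updates in parallel, both
sorted by refname, emitting whichever entry comes first and letting an update
supersede the old packed entry of the same name. A rough sketch of that merge
shape, with hypothetical names and with deletions simply emitting nothing (the
idea, not Git's implementation):

    #include <stddef.h>
    #include <string.h>

    struct entry_sketch {
            const char *refname;
            int is_deletion;        /* an update whose new value is null */
    };

    /* Merge two refname-sorted arrays; an update overrides the packed entry. */
    static void merge_packed_sketch(const struct entry_sketch *packed,
                                    size_t n_packed,
                                    const struct entry_sketch *updates,
                                    size_t n_updates,
                                    void (*emit)(const struct entry_sketch *))
    {
            size_t i = 0, j = 0;

            while (i < n_packed || j < n_updates) {
                    int cmp;

                    if (i >= n_packed)
                            cmp = 1;        /* only updates remain */
                    else if (j >= n_updates)
                            cmp = -1;       /* only old packed entries remain */
                    else
                            cmp = strcmp(packed[i].refname, updates[j].refname);

                    if (cmp < 0) {
                            emit(&packed[i++]);     /* untouched packed ref */
                    } else {
                            if (!updates[j].is_deletion)
                                    emit(&updates[j]);  /* new or changed value */
                            j++;
                            if (cmp == 0)
                                    i++;            /* old entry is superseded */
                    }
            }
    }
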
@@ -1261,6 +1261,100 @@ static int write_with_updates(struct packed_ref_store *refs,
        return -1;
 }
 
+int is_packed_transaction_needed(struct ref_store *ref_store,
+                                struct ref_transaction *transaction)
+{
+       struct packed_ref_store *refs = packed_downcast(
+                       ref_store,
+                       REF_STORE_READ,
+                       "is_packed_transaction_needed");
+       struct strbuf referent = STRBUF_INIT;
+       size_t i;
+       int ret;
+
+       if (!is_lock_file_locked(&refs->lock))
+               BUG("is_packed_transaction_needed() called while unlocked");
+
+       /*
+        * We're only going to bother returning false for the common,
+        * trivial case that references are only being deleted, their
+        * old values are not being checked, and the old `packed-refs`
+        * file doesn't contain any of those reference(s). This gives
+        * false positives for some other cases that could
+        * theoretically be optimized away:
+        *
+        * 1. It could be that the old value is being verified without
+        *    setting a new value. In this case, we could verify the
+        *    old value here and skip the update if it agrees. If it
+        *    disagrees, we could either let the update go through
+        *    (the actual commit would re-detect and report the
+        *    problem), or come up with a way of reporting such an
+        *    error to *our* caller.
+        *
+        * 2. It could be that a new value is being set, but that it
+        *    is identical to the current packed value of the
+        *    reference.
+        *
+        * Neither of these cases will come up in the current code,
+        * because the only caller of this function passes to it a
+        * transaction that only includes `delete` updates with no
+        * `old_id`. Even if that ever changes, false positives only
+        * cause an optimization to be missed; they do not affect
+        * correctness.
+        */
+
+       /*
+        * Start with the cheap checks that don't require old
+        * reference values to be read:
+        */
+       for (i = 0; i < transaction->nr; i++) {
+               struct ref_update *update = transaction->updates[i];
+
+               if (update->flags & REF_HAVE_OLD)
+                       /* Have to check the old value -> needed. */
+                       return 1;
+
+               if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
+                       /* Have to set a new value -> needed. */
+                       return 1;
+       }
+
+       /*
+        * The transaction isn't checking any old values nor is it
+        * setting any nonzero new values, so it still might be able
+        * to be skipped. Now do the more expensive check: the update
+        * is needed if any of the updates is a delete, and the old
+        * `packed-refs` file contains a value for that reference.
+        */
+       ret = 0;
+       for (i = 0; i < transaction->nr; i++) {
+               struct ref_update *update = transaction->updates[i];
+               unsigned int type;
+               struct object_id oid;
+
+               if (!(update->flags & REF_HAVE_NEW))
+                       /*
+                        * This reference isn't being deleted -> not
+                        * needed.
+                        */
+                       continue;
+
+               if (!refs_read_raw_ref(ref_store, update->refname,
+                                      &oid, &referent, &type) ||
+                   errno != ENOENT) {
+                       /*
+                        * We have to actually delete that reference
+                        * -> this transaction is needed.
+                        */
+                       ret = 1;
+                       break;
+               }
+       }
+
+       strbuf_release(&referent);
+       return ret;
+}
+
 struct packed_transaction_backend_data {
        /* True iff the transaction owns the packed-refs lock. */
        int own_lock;
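
The new `is_packed_transaction_needed()` lets a caller that has already taken
the `packed-refs` lock skip rewriting the file when the transaction cannot
change it, typically loose-ref deletions whose names were never packed. A
hedged sketch of the intended calling pattern; every prototype below is an
assumption about the surrounding API for illustration, not a quote from Git's
headers:

    /* Sketch only: how a caller might use the new query (not Git's code). */
    struct ref_store;
    struct ref_transaction;
    struct strbuf;

    int packed_refs_lock(struct ref_store *refs, int flags, struct strbuf *err);
    void packed_refs_unlock(struct ref_store *refs);
    int is_packed_transaction_needed(struct ref_store *refs,
                                     struct ref_transaction *transaction);
    int ref_transaction_prepare(struct ref_transaction *transaction,
                                struct strbuf *err);

    static int prepare_packed_deletions(struct ref_store *packed_refs,
                                        struct ref_transaction *packed_transaction,
                                        struct strbuf *err)
    {
            if (packed_refs_lock(packed_refs, 0, err))
                    return -1;

            if (!is_packed_transaction_needed(packed_refs, packed_transaction)) {
                    /*
                     * None of the refs being deleted are present in the
                     * packed-refs file, so rewriting it would be a no-op:
                     * drop the lock and skip the packed transaction.
                     */
                    packed_refs_unlock(packed_refs);
                    return 0;
            }

            return ref_transaction_prepare(packed_transaction, err);
    }

A false positive from the query only costs an unnecessary (but still correct)
rewrite of the file, which is why the simple two-pass scan in the new function
is enough.
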
@@ -1519,7 +1613,7 @@ static int packed_delete_reflog(struct ref_store *ref_store,
 }
 
 static int packed_reflog_expire(struct ref_store *ref_store,
-                               const char *refname, const unsigned char *sha1,
+                               const char *refname, const struct object_id *oid,
                                unsigned int flags,
                                reflog_expiry_prepare_fn prepare_fn,
                                reflog_expiry_should_prune_fn should_prune_fn,