#include "../cache.h"
#include "../refs.h"
#include "refs-internal.h"
#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"

struct packed_ref_cache {
	struct ref_cache *cache;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};

/*
 * Increment the reference count of *packed_refs.
 */
static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	packed_refs->referrers++;
}

/*
 * Decrease the reference count of *packed_refs. If it goes to zero,
 * free *packed_refs and return true; otherwise return false.
 */
static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
{
	if (!--packed_refs->referrers) {
		free_ref_cache(packed_refs->cache);
		stat_validity_clear(&packed_refs->validity);
		free(packed_refs);
		return 1;
	} else {
		return 0;
	}
}

/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	struct ref_store base;

	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};

struct ref_store *packed_ref_store_create(const char *path,
					  unsigned int store_flags)
{
	struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
	struct ref_store *ref_store = (struct ref_store *)refs;

	base_ref_store_init(ref_store, &refs_be_packed);
	refs->store_flags = store_flags;

	refs->path = xstrdup(path);
	return ref_store;
}

/*
 * Die if `refs` is not the main ref store. `caller` is used in any
 * necessary error messages.
 */
static void packed_assert_main_repository(struct packed_ref_store *refs,
					  const char *caller)
{
	if (refs->store_flags & REF_STORE_MAIN)
		return;

	die("BUG: operation %s only allowed for main ref store", caller);
}

/*
 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
 * support at least the flags specified in `required_flags`. `caller`
 * is used in any necessary error messages.
 */
static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
						unsigned int required_flags,
						const char *caller)
{
	struct packed_ref_store *refs;

	if (ref_store->be != &refs_be_packed)
		die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
		    ref_store->be->name, caller);

	refs = (struct packed_ref_store *)ref_store;

	if ((refs->store_flags & required_flags) != required_flags)
		die("BUG: unallowed operation (%s), requires %x, has %x\n",
		    caller, required_flags, refs->store_flags);

	return refs;
}

static void clear_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache) {
		struct packed_ref_cache *cache = refs->cache;

		if (is_lock_file_locked(&refs->lock))
			die("BUG: packed-ref cache cleared while locked");
		refs->cache = NULL;
		release_packed_ref_cache(cache);
	}
}

/* The length of a peeled reference line in packed-refs, including EOL: */
#define PEELED_LINE_LENGTH 42

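/*
 * For illustration only, a packed-refs entry for an annotated tag,
 * followed by its optional peeled line, looks like this (the object
 * names below are placeholders):
 *
 *   1234567890123456789012345678901234567890 refs/tags/v1.0
 *   ^abcdefabcdefabcdefabcdefabcdefabcdefabcd
 *
 * The "^" line records the object that the tag peels to; including
 * its trailing newline it is PEELED_LINE_LENGTH bytes long.
 */
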
/*
 * Parse one line from a packed-refs file. Write the object ID to
 * `oid`. Return a pointer to the refname within the line
 * (null-terminated), or NULL if there was a problem.
 */
static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
{
	const char *ref;

	if (parse_oid_hex(line->buf, oid, &ref) < 0)
		return NULL;
	if (!isspace(*ref++))
		return NULL;

	if (isspace(*ref))
		return NULL;

	if (line->buf[line->len - 1] != '\n')
		return NULL;
	line->buf[--line->len] = 0;

	return ref;
}

/*
 * Read from `packed_refs_file` into a newly-allocated
 * `packed_ref_cache` and return it. The return value will already
 * have its reference count incremented.
 *
 * A comment line of the form "# pack-refs with: " may contain zero or
 * more traits. We interpret the traits as follows:
 *
 * No traits:
 *
 *      Probably no references are peeled. But if the file contains a
 *      peeled value for a reference, we will use it.
 *
 * peeled:
 *
 *      References under "refs/tags/", if they *can* be peeled, *are*
 *      peeled in this file. References outside of "refs/tags/" are
 *      probably not peeled even if they could have been, but if we
 *      find a peeled value for such a reference we will use it.
 *
 * fully-peeled:
 *
 *      All references in the file that can be peeled are peeled.
 *      Inversely (and this is more important), any references in the
 *      file for which no peeled value is recorded are not peelable.
 *      This trait should typically be written alongside "peeled" for
 *      compatibility with older clients, but we do not require it
 *      (i.e., "peeled" is a no-op if "fully-peeled" is set).
 */
static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
{
	FILE *f;
	struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
	struct ref_entry *last = NULL;
	struct strbuf line = STRBUF_INIT;
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
	struct ref_dir *dir;

	acquire_packed_ref_cache(packed_refs);
	packed_refs->cache = create_ref_cache(NULL, NULL);
	packed_refs->cache->root->flag &= ~REF_INCOMPLETE;

	f = fopen(packed_refs_file, "r");
	if (!f) {
		if (errno == ENOENT) {
			/*
			 * This is OK; it just means that no
			 * "packed-refs" file has been written yet,
			 * which is equivalent to it being empty.
			 */
			return packed_refs;
		} else {
			die_errno("couldn't read %s", packed_refs_file);
		}
	}

	stat_validity_update(&packed_refs->validity, fileno(f));

	dir = get_ref_dir(packed_refs->cache->root);
	while (strbuf_getwholeline(&line, f, '\n') != EOF) {
		struct object_id oid;
		const char *refname;
		const char *traits;

		if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
			if (strstr(traits, " fully-peeled "))
				peeled = PEELED_FULLY;
			else if (strstr(traits, " peeled "))
				peeled = PEELED_TAGS;
			/* perhaps other traits later as well */
			continue;
		}

		refname = parse_ref_line(&line, &oid);
		if (refname) {
			int flag = REF_ISPACKED;

			if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
				if (!refname_is_safe(refname))
					die("packed refname is dangerous: %s", refname);
				oidclr(&oid);
				flag |= REF_BAD_NAME | REF_ISBROKEN;
			}
			last = create_ref_entry(refname, &oid, flag);
			if (peeled == PEELED_FULLY ||
			    (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
				last->flag |= REF_KNOWS_PEELED;
			add_ref_entry(dir, last);
			continue;
		}
		if (last &&
		    line.buf[0] == '^' &&
		    line.len == PEELED_LINE_LENGTH &&
		    line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
		    !get_oid_hex(line.buf + 1, &oid)) {
			oidcpy(&last->u.value.peeled, &oid);
			/*
			 * Regardless of what the file header said,
			 * we definitely know the value of *this*
			 * reference:
			 */
			last->flag |= REF_KNOWS_PEELED;
		}
	}

	fclose(f);
	strbuf_release(&line);

	return packed_refs;
}

/*
 * Check that the packed refs cache (if any) still reflects the
 * contents of the file. If not, clear the cache.
 */
static void validate_packed_ref_cache(struct packed_ref_store *refs)
{
	if (refs->cache &&
	    !stat_validity_check(&refs->cache->validity, refs->path))
		clear_packed_ref_cache(refs);
}

/*
 * Get the packed_ref_cache for the specified packed_ref_store,
 * creating and populating it if it hasn't been read before or if the
 * file has been changed (according to its `validity` field) since it
 * was last read. On the other hand, if we hold the lock, then assume
 * that the file hasn't been changed out from under us, so skip the
 * extra `stat()` call in `stat_validity_check()`.
 */
static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
{
	if (!is_lock_file_locked(&refs->lock))
		validate_packed_ref_cache(refs);

	if (!refs->cache)
		refs->cache = read_packed_refs(refs->path);

	return refs->cache;
}

static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}

static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}

/*
 * Add or overwrite a reference in the in-memory packed reference
 * cache. This may only be called while the packed-refs file is locked
 * (see packed_refs_lock()). To actually write the packed-refs file,
 * call commit_packed_refs().
 */
void add_packed_ref(struct ref_store *ref_store,
		    const char *refname, const struct object_id *oid)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE,
				"add_packed_ref");
	struct ref_dir *packed_refs;
	struct ref_entry *packed_entry;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed refs not locked");

	if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
		die("Reference has invalid format: '%s'", refname);

	packed_refs = get_packed_refs(refs);
	packed_entry = find_ref_entry(packed_refs, refname);
	if (packed_entry) {
		/* Overwrite the existing entry: */
		oidcpy(&packed_entry->u.value.oid, oid);
		packed_entry->flag = REF_ISPACKED;
		oidclr(&packed_entry->u.value.peeled);
	} else {
		packed_entry = create_ref_entry(refname, oid, REF_ISPACKED);
		add_ref_entry(packed_refs, packed_entry);
	}
}

/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}

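/*
 * Look `refname` up in the packed-ref cache and copy its object name
 * into `sha1`, setting `*type` to REF_ISPACKED. Packed refs are never
 * symbolic, so `referent` is left untouched. If the reference is not
 * found, return -1 with errno set to ENOENT.
 */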
static int packed_read_raw_ref(struct ref_store *ref_store,
			       const char *refname, unsigned char *sha1,
			       struct strbuf *referent, unsigned int *type)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");

	struct ref_entry *entry;

	*type = 0;

	entry = get_packed_ref(refs, refname);
	if (!entry) {
		errno = ENOENT;
		return -1;
	}

	hashcpy(sha1, entry->u.value.oid.hash);
	*type = REF_ISPACKED;
	return 0;
}

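/*
 * Peel the packed reference `refname` (e.g. resolve an annotated tag
 * to the object that it points at), using the peeled value recorded
 * in the packed-ref cache, and write the result to `sha1`. Return -1
 * if the reference does not exist or cannot be peeled.
 */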
static int packed_peel_ref(struct ref_store *ref_store,
			   const char *refname, unsigned char *sha1)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
				"peel_ref");
	struct ref_entry *r = get_packed_ref(refs, refname);

	if (!r || peel_entry(r, 0))
		return -1;

	hashcpy(sha1, r->u.value.peeled.hash);
	return 0;
}

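/*
 * An iterator over the entries of a packed_ref_cache. `cache` is the
 * reference-counted cache being iterated over; the reference is held
 * for the lifetime of the iteration and released in
 * packed_ref_iterator_abort(). `iter0` is the underlying cache
 * iterator, and `flags` holds the DO_FOR_EACH_* flags that control
 * which entries are reported.
 */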
struct packed_ref_iterator {
	struct ref_iterator base;

	struct packed_ref_cache *cache;
	struct ref_iterator *iter0;
	unsigned int flags;
};

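/*
 * Advance to the next entry, skipping any entries that the iteration
 * flags exclude: when DO_FOR_EACH_PER_WORKTREE_ONLY is set, only
 * per-worktree references are reported, and unless
 * DO_FOR_EACH_INCLUDE_BROKEN is set, references that do not resolve
 * to an object are skipped.
 */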
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}

static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
				    struct object_id *peeled)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;

	return ref_iterator_peel(iter->iter0, peeled);
}

static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok = ITER_DONE;

	if (iter->iter0)
		ok = ref_iterator_abort(iter->iter0);

	release_packed_ref_cache(iter->cache);
	base_ref_iterator_free(ref_iterator);
	return ok;
}

static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};

static struct ref_iterator *packed_ref_iterator_begin(
		struct ref_store *ref_store,
		const char *prefix, unsigned int flags)
{
	struct packed_ref_store *refs;
	struct packed_ref_iterator *iter;
	struct ref_iterator *ref_iterator;
	unsigned int required_flags = REF_STORE_READ;

	if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
		required_flags |= REF_STORE_ODB;
	refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;
	base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);

	/*
	 * Note that get_packed_ref_cache() internally checks whether
	 * the packed-ref cache is up to date with what is on disk,
	 * and re-reads it if not.
	 */

	iter->cache = get_packed_ref_cache(refs);
	acquire_packed_ref_cache(iter->cache);
	iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);

	iter->flags = flags;

	return ref_iterator;
}

/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
	    (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
		return -1;

	return 0;
}

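/*
 * Take the lock on the "packed-refs" file, waiting up to
 * `core.packedrefstimeout` milliseconds (1000 by default) for it to
 * become free. On success, also make sure that the in-memory cache
 * matches the newly-locked file and take an extra reference on the
 * cache; packed_refs_unlock() drops both again. On failure, write a
 * message to `err` and return -1.
 */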
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;
	struct packed_ref_cache *packed_ref_cache;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	packed_ref_cache = get_packed_ref_cache(refs);
	/* Increment the reference count to prevent it from being freed: */
	acquire_packed_ref_cache(packed_ref_cache);
	return 0;
}

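/*
 * Release the "packed-refs" lock taken by packed_refs_lock() and drop
 * the extra reference on the cache that it acquired.
 */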
void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
	release_packed_ref_cache(refs->cache);
}

int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}

/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later. The trailing space is required.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";

/*
 * Write the current version of the packed refs cache from memory to
 * disk. The packed-refs file must already be locked for writing (see
 * packed_refs_lock()). Return zero on success. On errors, rollback
 * the lockfile, write an error message to `err`, and return a nonzero
 * value.
 */
int commit_packed_refs(struct ref_store *ref_store, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"commit_packed_refs");
	struct packed_ref_cache *packed_ref_cache =
		get_packed_ref_cache(refs);
	int ok;
	int ret = -1;
	struct strbuf sb = STRBUF_INIT;
	FILE *out;
	struct ref_iterator *iter;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: commit_packed_refs() called when unlocked");

	strbuf_addf(&sb, "%s.new", refs->path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		goto out;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0) {
		strbuf_addf(err, "error writing to %s: %s",
			    get_tempfile_path(&refs->tempfile), strerror(errno));
		goto error;
	}

	iter = cache_ref_iterator_begin(packed_ref_cache->cache, NULL, 0);
	while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
		struct object_id peeled;
		int peel_error = ref_iterator_peel(iter, &peeled);

		if (write_packed_entry(out, iter->refname, iter->oid->hash,
				       peel_error ? NULL : peeled.hash)) {
			strbuf_addf(err, "error writing to %s: %s",
				    get_tempfile_path(&refs->tempfile),
				    strerror(errno));
			ref_iterator_abort(iter);
			goto error;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to rewrite packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (rename_tempfile(&refs->tempfile, refs->path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto out;
	}

	ret = 0;
	goto out;

error:
	delete_tempfile(&refs->tempfile);

out:
	packed_refs_unlock(ref_store);
	return ret;
}

/*
 * Rollback the lockfile for the packed-refs file, and discard the
 * in-memory packed reference cache. (The packed-refs file will be
 * read anew if it is needed again after this function is called.)
 */
static void rollback_packed_refs(struct packed_ref_store *refs)
{
	packed_assert_main_repository(refs, "rollback_packed_refs");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed-refs not locked");
	packed_refs_unlock(&refs->base);
	clear_packed_ref_cache(refs);
}

/*
 * Rewrite the packed-refs file, omitting any refs listed in
 * 'refnames'. On error, leave packed-refs unchanged, write an error
 * message to 'err', and return a nonzero value.
 *
 * The refs in 'refnames' needn't be sorted. `err` must not be NULL.
 */
int repack_without_refs(struct ref_store *ref_store,
			struct string_list *refnames, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"repack_without_refs");
	struct ref_dir *packed;
	struct string_list_item *refname;
	int needs_repacking = 0, removed = 0;

	packed_assert_main_repository(refs, "repack_without_refs");
	assert(err);

	/* Look for a packed ref */
	for_each_string_list_item(refname, refnames) {
		if (get_packed_ref(refs, refname->string)) {
			needs_repacking = 1;
			break;
		}
	}

	/* Avoid locking if we have nothing to do */
	if (!needs_repacking)
		return 0; /* no refname exists in packed refs */

	if (packed_refs_lock(&refs->base, 0, err))
		return -1;

	packed = get_packed_refs(refs);

	/* Remove refnames from the cache */
	for_each_string_list_item(refname, refnames)
		if (remove_entry_from_dir(packed, refname->string) != -1)
			removed = 1;
	if (!removed) {
		/*
		 * All packed entries disappeared while we were
		 * acquiring the lock.
		 */
		rollback_packed_refs(refs);
		return 0;
	}

	/* Write what remains */
	return commit_packed_refs(&refs->base, err);
}

static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}

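/*
 * Reference transactions are not implemented for the packed backend
 * in this file yet; most of the transaction-related functions below
 * are placeholders that report a bug if they are ever called.
 */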
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	die("BUG: not implemented yet");
}

static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	die("BUG: not implemented yet");
}

static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	die("BUG: not implemented yet");
}

static int packed_initial_transaction_commit(struct ref_store *ref_store,
					     struct ref_transaction *transaction,
					     struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}

static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			      struct string_list *refnames, unsigned int flags)
{
	die("BUG: not implemented yet");
}

static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}

static int packed_create_symref(struct ref_store *ref_store,
				const char *refname, const char *target,
				const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}

static int packed_rename_ref(struct ref_store *ref_store,
			     const char *oldrefname, const char *newrefname,
			     const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}

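/*
 * The packed-refs file does not keep reflogs, so the reflog methods
 * below either report that nothing exists, iterate over nothing, or
 * report a bug for operations that make no sense for this backend.
 */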
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}

static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}

static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}

static int packed_reflog_exists(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_create_reflog(struct ref_store *ref_store,
				const char *refname, int force_create,
				struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}

static int packed_delete_reflog(struct ref_store *ref_store,
				const char *refname)
{
	return 0;
}

static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}

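/*
 * The virtual method table for the "packed" reference backend. The
 * entries are positional and correspond, in order, to the members of
 * `struct ref_storage_be` (see refs-internal.h).
 */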
struct ref_storage_be refs_be_packed = {
	NULL,
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};