312116a99dba3029906c40388ce5ef7e79aa914d
1#include "../cache.h"
2#include "../config.h"
3#include "../refs.h"
4#include "refs-internal.h"
5#include "ref-cache.h"
6#include "packed-backend.h"
7#include "../iterator.h"
8#include "../lockfile.h"
9
/* Forward declaration; the full definition appears below. */
struct packed_ref_store;

/*
 * An in-memory representation of the contents of a "packed-refs"
 * file. Instances are reference-counted (see `referrers`) because
 * iterators may outlive the store's pointer to the cache.
 */
struct packed_ref_cache {
	/*
	 * A back-pointer to the packed_ref_store with which this
	 * cache is associated:
	 */
	struct packed_ref_store *refs;

	/* The parsed reference entries read from the "packed-refs" file. */
	struct ref_cache *cache;

	/*
	 * What is the peeled state of this cache? (This is usually
	 * determined from the header of the "packed-refs" file.)
	 */
	enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;

	/*
	 * Count of references to the data structure in this instance,
	 * including the pointer from files_ref_store::packed if any.
	 * The data will not be freed as long as the reference count
	 * is nonzero.
	 */
	unsigned int referrers;

	/* The metadata from when this packed-refs cache was read */
	struct stat_validity validity;
};
38
39/*
40 * Increment the reference count of *packed_refs.
41 */
42static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
43{
44 packed_refs->referrers++;
45}
46
47/*
48 * Decrease the reference count of *packed_refs. If it goes to zero,
49 * free *packed_refs and return true; otherwise return false.
50 */
51static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
52{
53 if (!--packed_refs->referrers) {
54 free_ref_cache(packed_refs->cache);
55 stat_validity_clear(&packed_refs->validity);
56 free(packed_refs);
57 return 1;
58 } else {
59 return 0;
60 }
61}
62
/*
 * A container for `packed-refs`-related data. It is not (yet) a
 * `ref_store`.
 */
struct packed_ref_store {
	/* Must be the first member, so downcasts work. */
	struct ref_store base;

	/* REF_STORE_* capability flags for this store. */
	unsigned int store_flags;

	/* The path of the "packed-refs" file: */
	char *path;

	/*
	 * A cache of the values read from the `packed-refs` file, if
	 * it might still be current; otherwise, NULL.
	 */
	struct packed_ref_cache *cache;

	/*
	 * Lock used for the "packed-refs" file. Note that this (and
	 * thus the enclosing `packed_ref_store`) must not be freed.
	 */
	struct lock_file lock;

	/*
	 * Temporary file used when rewriting new contents to the
	 * "packed-refs" file. Note that this (and thus the enclosing
	 * `packed_ref_store`) must not be freed.
	 */
	struct tempfile tempfile;
};
94
95struct ref_store *packed_ref_store_create(const char *path,
96 unsigned int store_flags)
97{
98 struct packed_ref_store *refs = xcalloc(1, sizeof(*refs));
99 struct ref_store *ref_store = (struct ref_store *)refs;
100
101 base_ref_store_init(ref_store, &refs_be_packed);
102 refs->store_flags = store_flags;
103
104 refs->path = xstrdup(path);
105 return ref_store;
106}
107
108/*
109 * Downcast `ref_store` to `packed_ref_store`. Die if `ref_store` is
110 * not a `packed_ref_store`. Also die if `packed_ref_store` doesn't
111 * support at least the flags specified in `required_flags`. `caller`
112 * is used in any necessary error messages.
113 */
114static struct packed_ref_store *packed_downcast(struct ref_store *ref_store,
115 unsigned int required_flags,
116 const char *caller)
117{
118 struct packed_ref_store *refs;
119
120 if (ref_store->be != &refs_be_packed)
121 die("BUG: ref_store is type \"%s\" not \"packed\" in %s",
122 ref_store->be->name, caller);
123
124 refs = (struct packed_ref_store *)ref_store;
125
126 if ((refs->store_flags & required_flags) != required_flags)
127 die("BUG: unallowed operation (%s), requires %x, has %x\n",
128 caller, required_flags, refs->store_flags);
129
130 return refs;
131}
132
133static void clear_packed_ref_cache(struct packed_ref_store *refs)
134{
135 if (refs->cache) {
136 struct packed_ref_cache *cache = refs->cache;
137
138 refs->cache = NULL;
139 release_packed_ref_cache(cache);
140 }
141}
142
143static NORETURN void die_unterminated_line(const char *path,
144 const char *p, size_t len)
145{
146 if (len < 80)
147 die("unterminated line in %s: %.*s", path, (int)len, p);
148 else
149 die("unterminated line in %s: %.75s...", path, p);
150}
151
152static NORETURN void die_invalid_line(const char *path,
153 const char *p, size_t len)
154{
155 const char *eol = memchr(p, '\n', len);
156
157 if (!eol)
158 die_unterminated_line(path, p, len);
159 else if (eol - p < 80)
160 die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
161 else
162 die("unexpected line in %s: %.75s...", path, p);
163
164}
165
/*
 * An iterator over a packed-refs file that is currently mmapped.
 */
struct mmapped_ref_iterator {
	/* Must be the first member, so downcasts work. */
	struct ref_iterator base;

	/* The cache being populated; we hold a reference on it. */
	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	/* Value (and, if known, peeled value) of the current reference. */
	struct object_id oid, peeled;

	/* Backing storage for base.refname of the current reference. */
	struct strbuf refname_buf;
};
184
/*
 * Parse the next reference record at iter->pos, filling in iter->oid,
 * iter->base.refname, iter->base.flags and, if a "^" continuation
 * line follows, iter->peeled. Return ITER_OK, or abort the iteration
 * (returning ITER_DONE) at end of file. Dies on malformed input.
 */
static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	/* A record must hold at least "<oid> <name>\n": */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		/* Tolerate the bad name, but clear the value and mark broken: */
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	/* The file header may already guarantee a peeled value: */
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* A following "^<oid>\n" line records this ref's peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference:
		 */
		iter->base.flags |= REF_KNOWS_PEELED;
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
246
247static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
248 struct object_id *peeled)
249{
250 struct mmapped_ref_iterator *iter =
251 (struct mmapped_ref_iterator *)ref_iterator;
252
253 if ((iter->base.flags & REF_KNOWS_PEELED)) {
254 oidcpy(peeled, &iter->peeled);
255 return is_null_oid(&iter->peeled) ? -1 : 0;
256 } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
257 return -1;
258 } else {
259 return !!peel_object(iter->oid.hash, peeled->hash);
260 }
261}
262
263static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
264{
265 struct mmapped_ref_iterator *iter =
266 (struct mmapped_ref_iterator *)ref_iterator;
267
268 release_packed_ref_cache(iter->packed_refs);
269 strbuf_release(&iter->refname_buf);
270 base_ref_iterator_free(ref_iterator);
271 return ITER_DONE;
272}
273
/* Virtual method table for iterating over a raw mmapped packed-refs file. */
static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};
279
280struct ref_iterator *mmapped_ref_iterator_begin(
281 const char *packed_refs_file,
282 struct packed_ref_cache *packed_refs,
283 const char *pos, const char *eof)
284{
285 struct mmapped_ref_iterator *iter = xcalloc(1, sizeof(*iter));
286 struct ref_iterator *ref_iterator = &iter->base;
287
288 base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 0);
289
290 iter->packed_refs = packed_refs;
291 acquire_packed_ref_cache(iter->packed_refs);
292 iter->pos = pos;
293 iter->eof = eof;
294 strbuf_init(&iter->refname_buf, 0);
295
296 iter->base.oid = &iter->oid;
297
298 return ref_iterator;
299}
300
301/*
302 * Read from the `packed-refs` file into a newly-allocated
303 * `packed_ref_cache` and return it. The return value will already
304 * have its reference count incremented.
305 *
306 * A comment line of the form "# pack-refs with: " may contain zero or
307 * more traits. We interpret the traits as follows:
308 *
309 * No traits:
310 *
311 * Probably no references are peeled. But if the file contains a
312 * peeled value for a reference, we will use it.
313 *
314 * peeled:
315 *
316 * References under "refs/tags/", if they *can* be peeled, *are*
317 * peeled in this file. References outside of "refs/tags/" are
318 * probably not peeled even if they could have been, but if we find
319 * a peeled value for such a reference we will use it.
320 *
321 * fully-peeled:
322 *
323 * All references in the file that can be peeled are peeled.
324 * Inversely (and this is more important), any references in the
325 * file for which no peeled value is recorded is not peelable. This
326 * trait should typically be written alongside "peeled" for
327 * compatibility with older clients, but we do not require it
328 * (i.e., "peeled" is a no-op if "fully-peeled" is set).
329 */
330static struct packed_ref_cache *read_packed_refs(struct packed_ref_store *refs)
331{
332 struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
333 int fd;
334 struct stat st;
335 size_t size;
336 char *buf;
337 const char *pos, *eof;
338 struct ref_dir *dir;
339 struct ref_iterator *iter;
340 int ok;
341
342 packed_refs->refs = refs;
343 acquire_packed_ref_cache(packed_refs);
344 packed_refs->cache = create_ref_cache(NULL, NULL);
345 packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
346 packed_refs->peeled = PEELED_NONE;
347
348 fd = open(refs->path, O_RDONLY);
349 if (fd < 0) {
350 if (errno == ENOENT) {
351 /*
352 * This is OK; it just means that no
353 * "packed-refs" file has been written yet,
354 * which is equivalent to it being empty.
355 */
356 return packed_refs;
357 } else {
358 die_errno("couldn't read %s", refs->path);
359 }
360 }
361
362 stat_validity_update(&packed_refs->validity, fd);
363
364 if (fstat(fd, &st) < 0)
365 die_errno("couldn't stat %s", refs->path);
366
367 size = xsize_t(st.st_size);
368 buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
369 pos = buf;
370 eof = buf + size;
371
372 /* If the file has a header line, process it: */
373 if (pos < eof && *pos == '#') {
374 struct strbuf tmp = STRBUF_INIT;
375 char *p;
376 const char *eol;
377 struct string_list traits = STRING_LIST_INIT_NODUP;
378
379 eol = memchr(pos, '\n', eof - pos);
380 if (!eol)
381 die_unterminated_line(refs->path, pos, eof - pos);
382
383 strbuf_add(&tmp, pos, eol - pos);
384
385 if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
386 die_invalid_line(refs->path, pos, eof - pos);
387
388 string_list_split_in_place(&traits, p, ' ', -1);
389
390 if (unsorted_string_list_has_string(&traits, "fully-peeled"))
391 packed_refs->peeled = PEELED_FULLY;
392 else if (unsorted_string_list_has_string(&traits, "peeled"))
393 packed_refs->peeled = PEELED_TAGS;
394 /* perhaps other traits later as well */
395
396 /* The "+ 1" is for the LF character. */
397 pos = eol + 1;
398
399 string_list_clear(&traits, 0);
400 strbuf_release(&tmp);
401 }
402
403 dir = get_ref_dir(packed_refs->cache->root);
404 iter = mmapped_ref_iterator_begin(refs->path, packed_refs, pos, eof);
405 while ((ok = ref_iterator_advance(iter)) == ITER_OK) {
406 struct ref_entry *entry =
407 create_ref_entry(iter->refname, iter->oid, iter->flags);
408
409 if ((iter->flags & REF_KNOWS_PEELED))
410 ref_iterator_peel(iter, &entry->u.value.peeled);
411 add_ref_entry(dir, entry);
412 }
413
414 if (ok != ITER_DONE)
415 die("error reading packed-refs file %s", refs->path);
416
417 if (munmap(buf, size))
418 die_errno("error ummapping packed-refs file %s", refs->path);
419
420 close(fd);
421
422 return packed_refs;
423}
424
425/*
426 * Check that the packed refs cache (if any) still reflects the
427 * contents of the file. If not, clear the cache.
428 */
429static void validate_packed_ref_cache(struct packed_ref_store *refs)
430{
431 if (refs->cache &&
432 !stat_validity_check(&refs->cache->validity, refs->path))
433 clear_packed_ref_cache(refs);
434}
435
436/*
437 * Get the packed_ref_cache for the specified packed_ref_store,
438 * creating and populating it if it hasn't been read before or if the
439 * file has been changed (according to its `validity` field) since it
440 * was last read. On the other hand, if we hold the lock, then assume
441 * that the file hasn't been changed out from under us, so skip the
442 * extra `stat()` call in `stat_validity_check()`.
443 */
444static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
445{
446 if (!is_lock_file_locked(&refs->lock))
447 validate_packed_ref_cache(refs);
448
449 if (!refs->cache)
450 refs->cache = read_packed_refs(refs);
451
452 return refs->cache;
453}
454
/* Return the root ref_dir of the given packed-refs cache. */
static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
{
	return get_ref_dir(packed_ref_cache->cache->root);
}
459
/* Return the (possibly just-loaded) root ref_dir of the given store. */
static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
{
	return get_packed_ref_dir(get_packed_ref_cache(refs));
}
464
/*
 * Return the ref_entry for the given refname from the packed
 * references. If it does not exist, return NULL. The entry is owned
 * by the cache; the caller must not free it.
 */
static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
					const char *refname)
{
	return find_ref_entry(get_packed_refs(refs), refname);
}
474
475static int packed_read_raw_ref(struct ref_store *ref_store,
476 const char *refname, unsigned char *sha1,
477 struct strbuf *referent, unsigned int *type)
478{
479 struct packed_ref_store *refs =
480 packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
481
482 struct ref_entry *entry;
483
484 *type = 0;
485
486 entry = get_packed_ref(refs, refname);
487 if (!entry) {
488 errno = ENOENT;
489 return -1;
490 }
491
492 hashcpy(sha1, entry->u.value.oid.hash);
493 *type = REF_ISPACKED;
494 return 0;
495}
496
497static int packed_peel_ref(struct ref_store *ref_store,
498 const char *refname, unsigned char *sha1)
499{
500 struct packed_ref_store *refs =
501 packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
502 "peel_ref");
503 struct ref_entry *r = get_packed_ref(refs, refname);
504
505 if (!r || peel_entry(r, 0))
506 return -1;
507
508 hashcpy(sha1, r->u.value.peeled.hash);
509 return 0;
510}
511
/*
 * An iterator over the entries of a packed_ref_cache, filtering
 * according to `flags` (DO_FOR_EACH_*).
 */
struct packed_ref_iterator {
	/* Must be the first member, so downcasts work. */
	struct ref_iterator base;

	/* The cache being iterated; we hold a reference on it. */
	struct packed_ref_cache *cache;
	/* The underlying cache iterator; NULL once exhausted/aborted. */
	struct ref_iterator *iter0;
	/* DO_FOR_EACH_* flags controlling filtering. */
	unsigned int flags;
};
519
/*
 * Advance to the next reference that passes the iterator's filters
 * (worktree-only and/or broken-ref filtering per `flags`), copying it
 * into `base`. When iter0 is exhausted or errors, abort ourselves and
 * propagate iter0's result.
 */
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct packed_ref_iterator *iter =
		(struct packed_ref_iterator *)ref_iterator;
	int ok;

	while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
		/* Skip non-per-worktree refs if only those were requested: */
		if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
		    ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
			continue;

		/* Unless broken refs were requested, skip dangling entries: */
		if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
		    !ref_resolves_to_object(iter->iter0->refname,
					    iter->iter0->oid,
					    iter->iter0->flags))
			continue;

		iter->base.refname = iter->iter0->refname;
		iter->base.oid = iter->iter0->oid;
		iter->base.flags = iter->iter0->flags;
		return ITER_OK;
	}

	/* iter0 has already freed itself on non-OK advance: */
	iter->iter0 = NULL;
	if (ref_iterator_abort(ref_iterator) != ITER_DONE)
		ok = ITER_ERROR;

	return ok;
}
549
550static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
551 struct object_id *peeled)
552{
553 struct packed_ref_iterator *iter =
554 (struct packed_ref_iterator *)ref_iterator;
555
556 return ref_iterator_peel(iter->iter0, peeled);
557}
558
559static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
560{
561 struct packed_ref_iterator *iter =
562 (struct packed_ref_iterator *)ref_iterator;
563 int ok = ITER_DONE;
564
565 if (iter->iter0)
566 ok = ref_iterator_abort(iter->iter0);
567
568 release_packed_ref_cache(iter->cache);
569 base_ref_iterator_free(ref_iterator);
570 return ok;
571}
572
/* Virtual method table for the filtering packed-refs iterator. */
static struct ref_iterator_vtable packed_ref_iterator_vtable = {
	packed_ref_iterator_advance,
	packed_ref_iterator_peel,
	packed_ref_iterator_abort
};
578
579static struct ref_iterator *packed_ref_iterator_begin(
580 struct ref_store *ref_store,
581 const char *prefix, unsigned int flags)
582{
583 struct packed_ref_store *refs;
584 struct packed_ref_iterator *iter;
585 struct ref_iterator *ref_iterator;
586 unsigned int required_flags = REF_STORE_READ;
587
588 if (!(flags & DO_FOR_EACH_INCLUDE_BROKEN))
589 required_flags |= REF_STORE_ODB;
590 refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");
591
592 iter = xcalloc(1, sizeof(*iter));
593 ref_iterator = &iter->base;
594 base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
595
596 /*
597 * Note that get_packed_ref_cache() internally checks whether
598 * the packed-ref cache is up to date with what is on disk,
599 * and re-reads it if not.
600 */
601
602 iter->cache = get_packed_ref_cache(refs);
603 acquire_packed_ref_cache(iter->cache);
604 iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);
605
606 iter->flags = flags;
607
608 return ref_iterator;
609}
610
/*
 * Write an entry to the packed-refs file for the specified refname.
 * If peeled is non-NULL, write it as the entry's peeled value. On
 * error, return a nonzero value and leave errno set at the value left
 * by the failing call to `fprintf()`.
 */
static int write_packed_entry(FILE *fh, const char *refname,
			      const unsigned char *sha1,
			      const unsigned char *peeled)
{
	if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0)
		return -1;
	if (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0)
		return -1;

	return 0;
}
627
/*
 * Acquire the lock on the "packed-refs" file, honoring the
 * core.packedRefsTimeout config (default 1000 ms), and ensure our
 * cache reflects the file as it exists in the locked state. Return 0
 * on success; on failure, append a message to `err` and return -1.
 */
int packed_refs_lock(struct ref_store *ref_store, int flags, struct strbuf *err)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE | REF_STORE_MAIN,
				"packed_refs_lock");
	static int timeout_configured = 0;
	static int timeout_value = 1000;

	if (!timeout_configured) {
		git_config_get_int("core.packedrefstimeout", &timeout_value);
		timeout_configured = 1;
	}

	/*
	 * Note that we close the lockfile immediately because we
	 * don't write new content to it, but rather to a separate
	 * tempfile.
	 */
	if (hold_lock_file_for_update_timeout(
			    &refs->lock,
			    refs->path,
			    flags, timeout_value) < 0) {
		unable_to_lock_message(refs->path, errno, err);
		return -1;
	}

	if (close_lock_file(&refs->lock)) {
		strbuf_addf(err, "unable to close %s: %s", refs->path, strerror(errno));
		return -1;
	}

	/*
	 * Now that we hold the `packed-refs` lock, make sure that our
	 * cache matches the current version of the file. Normally
	 * `get_packed_ref_cache()` does that for us, but that
	 * function assumes that when the file is locked, any existing
	 * cache is still valid. We've just locked the file, but it
	 * might have changed the moment *before* we locked it.
	 */
	validate_packed_ref_cache(refs);

	/*
	 * Now make sure that the packed-refs file as it exists in the
	 * locked state is loaded into the cache:
	 */
	get_packed_ref_cache(refs);
	return 0;
}
676
/*
 * Release the lock on the "packed-refs" file without changing its
 * contents. It is a bug to call this when the lock is not held.
 */
void packed_refs_unlock(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_unlock");

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: packed_refs_unlock() called when not locked");
	rollback_lock_file(&refs->lock);
}
688
/* Return true iff we currently hold the "packed-refs" lock. */
int packed_refs_is_locked(struct ref_store *ref_store)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE,
			"packed_refs_is_locked");

	return is_lock_file_locked(&refs->lock);
}
698
/*
 * The packed-refs header line that we write out. Perhaps other
 * traits will be added later.
 *
 * Note that earlier versions of Git used to parse these traits by
 * looking for " trait " in the line. For this reason, the space after
 * the colon and the trailing space are both required for
 * compatibility with them.
 */
static const char PACKED_REFS_HEADER[] =
	"# pack-refs with: peeled fully-peeled \n";
709
/*
 * Initialize the on-disk state for this backend. A missing
 * "packed-refs" file is equivalent to an empty one, so there is
 * nothing to create.
 */
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
	/* Nothing to do. */
	return 0;
}
715
/*
 * Write the packed-refs from the cache to the packed-refs tempfile,
 * incorporating any changes from `updates`. `updates` must be a
 * sorted string list whose keys are the refnames and whose util
 * values are `struct ref_update *`. On error, rollback the tempfile,
 * write an error message to `err`, and return a nonzero value.
 *
 * The packfile must be locked before calling this function and will
 * remain locked when it is done.
 */
static int write_with_updates(struct packed_ref_store *refs,
			      struct string_list *updates,
			      struct strbuf *err)
{
	struct ref_iterator *iter = NULL;
	size_t i;
	int ok;
	FILE *out;
	struct strbuf sb = STRBUF_INIT;
	char *packed_refs_path;

	if (!is_lock_file_locked(&refs->lock))
		die("BUG: write_with_updates() called while unlocked");

	/*
	 * If packed-refs is a symlink, we want to overwrite the
	 * symlinked-to file, not the symlink itself. Also, put the
	 * staging file next to it:
	 */
	packed_refs_path = get_locked_file_path(&refs->lock);
	strbuf_addf(&sb, "%s.new", packed_refs_path);
	free(packed_refs_path);
	if (create_tempfile(&refs->tempfile, sb.buf) < 0) {
		strbuf_addf(err, "unable to create file %s: %s",
			    sb.buf, strerror(errno));
		strbuf_release(&sb);
		return -1;
	}
	strbuf_release(&sb);

	out = fdopen_tempfile(&refs->tempfile, "w");
	if (!out) {
		strbuf_addf(err, "unable to fdopen packed-refs tempfile: %s",
			    strerror(errno));
		goto error;
	}

	if (fprintf(out, "%s", PACKED_REFS_HEADER) < 0)
		goto write_error;

	/*
	 * We iterate in parallel through the current list of refs and
	 * the list of updates, processing an entry from at least one
	 * of the lists each time through the loop. When the current
	 * list of refs is exhausted, set iter to NULL. When the list
	 * of updates is exhausted, leave i set to updates->nr.
	 */
	iter = packed_ref_iterator_begin(&refs->base, "",
					 DO_FOR_EACH_INCLUDE_BROKEN);
	if ((ok = ref_iterator_advance(iter)) != ITER_OK)
		iter = NULL;

	i = 0;

	while (iter || i < updates->nr) {
		struct ref_update *update = NULL;
		int cmp;

		/*
		 * cmp < 0: the old ref sorts first (or updates are done);
		 * cmp > 0: the update sorts first (or old refs are done);
		 * cmp == 0: the same refname appears in both lists.
		 */
		if (i >= updates->nr) {
			cmp = -1;
		} else {
			update = updates->items[i].util;

			if (!iter)
				cmp = +1;
			else
				cmp = strcmp(iter->refname, update->refname);
		}

		if (!cmp) {
			/*
			 * There is both an old value and an update
			 * for this reference. Check the old value if
			 * necessary:
			 */
			if ((update->flags & REF_HAVE_OLD)) {
				if (is_null_oid(&update->old_oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "reference already exists",
						    update->refname);
					goto error;
				} else if (oidcmp(&update->old_oid, iter->oid)) {
					strbuf_addf(err, "cannot update ref '%s': "
						    "is at %s but expected %s",
						    update->refname,
						    oid_to_hex(iter->oid),
						    oid_to_hex(&update->old_oid));
					goto error;
				}
			}

			/* Now figure out what to use for the new value: */
			if ((update->flags & REF_HAVE_NEW)) {
				/*
				 * The update takes precedence. Skip
				 * the iterator over the unneeded
				 * value.
				 */
				if ((ok = ref_iterator_advance(iter)) != ITER_OK)
					iter = NULL;
				cmp = +1;
			} else {
				/*
				 * The update doesn't actually want to
				 * change anything. We're done with it.
				 */
				i++;
				cmp = -1;
			}
		} else if (cmp > 0) {
			/*
			 * There is no old value but there is an
			 * update for this reference. Make sure that
			 * the update didn't expect an existing value:
			 */
			if ((update->flags & REF_HAVE_OLD) &&
			    !is_null_oid(&update->old_oid)) {
				strbuf_addf(err, "cannot update ref '%s': "
					    "reference is missing but expected %s",
					    update->refname,
					    oid_to_hex(&update->old_oid));
				goto error;
			}
		}

		if (cmp < 0) {
			/* Pass the old reference through. */

			struct object_id peeled;
			int peel_error = ref_iterator_peel(iter, &peeled);

			if (write_packed_entry(out, iter->refname,
					       iter->oid->hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			if ((ok = ref_iterator_advance(iter)) != ITER_OK)
				iter = NULL;
		} else if (is_null_oid(&update->new_oid)) {
			/*
			 * The update wants to delete the reference,
			 * and the reference either didn't exist or we
			 * have already skipped it. So we're done with
			 * the update (and don't have to write
			 * anything).
			 */
			i++;
		} else {
			/* Write the updated value, peeling it ourselves: */
			struct object_id peeled;
			int peel_error = peel_object(update->new_oid.hash,
						     peeled.hash);

			if (write_packed_entry(out, update->refname,
					       update->new_oid.hash,
					       peel_error ? NULL : peeled.hash))
				goto write_error;

			i++;
		}
	}

	if (ok != ITER_DONE) {
		strbuf_addf(err, "unable to write packed-refs file: "
			    "error iterating over old contents");
		goto error;
	}

	if (close_tempfile(&refs->tempfile)) {
		strbuf_addf(err, "error closing file %s: %s",
			    get_tempfile_path(&refs->tempfile),
			    strerror(errno));
		strbuf_release(&sb);
		return -1;
	}

	return 0;

write_error:
	strbuf_addf(err, "error writing to %s: %s",
		    get_tempfile_path(&refs->tempfile), strerror(errno));

error:
	if (iter)
		ref_iterator_abort(iter);

	delete_tempfile(&refs->tempfile);
	return -1;
}
914
/* Per-transaction private state for this backend. */
struct packed_transaction_backend_data {
	/* True iff the transaction owns the packed-refs lock. */
	int own_lock;

	/* Sorted updates keyed by refname; util is `struct ref_update *`. */
	struct string_list updates;
};
921
922static void packed_transaction_cleanup(struct packed_ref_store *refs,
923 struct ref_transaction *transaction)
924{
925 struct packed_transaction_backend_data *data = transaction->backend_data;
926
927 if (data) {
928 string_list_clear(&data->updates, 0);
929
930 if (is_tempfile_active(&refs->tempfile))
931 delete_tempfile(&refs->tempfile);
932
933 if (data->own_lock && is_lock_file_locked(&refs->lock)) {
934 packed_refs_unlock(&refs->base);
935 data->own_lock = 0;
936 }
937
938 free(data);
939 transaction->backend_data = NULL;
940 }
941
942 transaction->state = REF_TRANSACTION_CLOSED;
943}
944
/*
 * Prepare `transaction` for commit: sort the updates, reject
 * duplicates, take the packed-refs lock if not already held, and
 * stage the new file contents in the tempfile. On failure, append a
 * message to `err`, clean up, and return a nonzero value.
 */
static int packed_transaction_prepare(struct ref_store *ref_store,
				      struct ref_transaction *transaction,
				      struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_prepare");
	struct packed_transaction_backend_data *data;
	size_t i;
	int ret = TRANSACTION_GENERIC_ERROR;

	/*
	 * Note that we *don't* skip transactions with zero updates,
	 * because such a transaction might be executed for the side
	 * effect of ensuring that all of the references are peeled.
	 * If the caller wants to optimize away empty transactions, it
	 * should do so itself.
	 */

	data = xcalloc(1, sizeof(*data));
	string_list_init(&data->updates, 0);

	transaction->backend_data = data;

	/*
	 * Stick the updates in a string list by refname so that we
	 * can sort them:
	 */
	for (i = 0; i < transaction->nr; i++) {
		struct ref_update *update = transaction->updates[i];
		struct string_list_item *item =
			string_list_append(&data->updates, update->refname);

		/* Store a pointer to update in item->util: */
		item->util = update;
	}
	string_list_sort(&data->updates);

	if (ref_update_reject_duplicates(&data->updates, err))
		goto failure;

	/* Take the lock ourselves unless the caller already holds it: */
	if (!is_lock_file_locked(&refs->lock)) {
		if (packed_refs_lock(ref_store, 0, err))
			goto failure;
		data->own_lock = 1;
	}

	if (write_with_updates(refs, &data->updates, err))
		goto failure;

	transaction->state = REF_TRANSACTION_PREPARED;
	return 0;

failure:
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1003
/*
 * Abort a (possibly prepared) transaction, discarding any staged
 * contents and releasing the lock if this transaction owns it.
 */
static int packed_transaction_abort(struct ref_store *ref_store,
				    struct ref_transaction *transaction,
				    struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_abort");

	packed_transaction_cleanup(refs, transaction);
	return 0;
}
1016
/*
 * Commit a prepared transaction by atomically renaming the staged
 * tempfile over the "packed-refs" file, then invalidating our cache.
 * On failure, append a message to `err` and return a nonzero value.
 */
static int packed_transaction_finish(struct ref_store *ref_store,
				     struct ref_transaction *transaction,
				     struct strbuf *err)
{
	struct packed_ref_store *refs = packed_downcast(
			ref_store,
			REF_STORE_READ | REF_STORE_WRITE | REF_STORE_ODB,
			"ref_transaction_finish");
	int ret = TRANSACTION_GENERIC_ERROR;
	char *packed_refs_path;

	/* Rename onto the lock's target so a symlinked file is followed: */
	packed_refs_path = get_locked_file_path(&refs->lock);
	if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
		strbuf_addf(err, "error replacing %s: %s",
			    refs->path, strerror(errno));
		goto cleanup;
	}

	/* The file changed on disk, so the old cache is now stale: */
	clear_packed_ref_cache(refs);
	ret = 0;

cleanup:
	free(packed_refs_path);
	packed_transaction_cleanup(refs, transaction);
	return ret;
}
1043
/*
 * For this backend an "initial" commit needs no special handling;
 * just run the normal commit path.
 */
static int packed_initial_transaction_commit(struct ref_store *ref_store,
					    struct ref_transaction *transaction,
					    struct strbuf *err)
{
	return ref_transaction_commit(transaction, err);
}
1050
/*
 * Delete the specified references (best-effort, without old-value
 * checks) in a single transaction, logging `msg`. Failures to queue
 * an individual deletion produce a warning; a failed commit produces
 * an error. Return the commit's result (0 on success).
 */
static int packed_delete_refs(struct ref_store *ref_store, const char *msg,
			     struct string_list *refnames, unsigned int flags)
{
	struct packed_ref_store *refs =
		packed_downcast(ref_store, REF_STORE_WRITE, "delete_refs");
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction;
	struct string_list_item *item;
	int ret;

	(void)refs; /* We need the check above, but don't use the variable */

	if (!refnames->nr)
		return 0;

	/*
	 * Since we don't check the references' old_oids, the
	 * individual updates can't fail, so we can pack all of the
	 * updates into a single transaction.
	 */

	transaction = ref_store_transaction_begin(ref_store, &err);
	if (!transaction)
		return -1;

	for_each_string_list_item(item, refnames) {
		if (ref_transaction_delete(transaction, item->string, NULL,
					   flags, msg, &err)) {
			warning(_("could not delete reference %s: %s"),
				item->string, err.buf);
			strbuf_reset(&err);
		}
	}

	ret = ref_transaction_commit(transaction, &err);

	if (ret) {
		if (refnames->nr == 1)
			error(_("could not delete reference %s: %s"),
			      refnames->items[0].string, err.buf);
		else
			error(_("could not delete references: %s"), err.buf);
	}

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}
1099
/* "pack-refs" is a no-op for a store whose refs are already packed. */
static int packed_pack_refs(struct ref_store *ref_store, unsigned int flags)
{
	/*
	 * Packed refs are already packed. It might be that loose refs
	 * are packed *into* a packed refs store, but that is done by
	 * updating the packed references via a transaction.
	 */
	return 0;
}
1109
/* Symbolic refs cannot live in a packed-refs file; calling this is a bug. */
static int packed_create_symref(struct ref_store *ref_store,
			       const char *refname, const char *target,
			       const char *logmsg)
{
	die("BUG: packed reference store does not support symrefs");
}
1116
/* Renames must be done at a higher level; calling this directly is a bug. */
static int packed_rename_ref(struct ref_store *ref_store,
			    const char *oldrefname, const char *newrefname,
			    const char *logmsg)
{
	die("BUG: packed reference store does not support renaming references");
}
1123
/* Packed refs have no reflogs, so iterate over an empty set. */
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
	return empty_ref_iterator_begin();
}
1128
/* No reflogs exist for packed refs; report success without calling fn. */
static int packed_for_each_reflog_ent(struct ref_store *ref_store,
				      const char *refname,
				      each_reflog_ent_fn fn, void *cb_data)
{
	return 0;
}
1135
/* No reflogs exist for packed refs; report success without calling fn. */
static int packed_for_each_reflog_ent_reverse(struct ref_store *ref_store,
					      const char *refname,
					      each_reflog_ent_fn fn,
					      void *cb_data)
{
	return 0;
}
1143
/* Packed refs never have reflogs. */
static int packed_reflog_exists(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1149
/* Reflogs cannot be created in this backend; calling this is a bug. */
static int packed_create_reflog(struct ref_store *ref_store,
			       const char *refname, int force_create,
			       struct strbuf *err)
{
	die("BUG: packed reference store does not support reflogs");
}
1156
/* There is never a reflog to delete; trivially succeed. */
static int packed_delete_reflog(struct ref_store *ref_store,
			       const char *refname)
{
	return 0;
}
1162
/* There is never a reflog to expire; trivially succeed. */
static int packed_reflog_expire(struct ref_store *ref_store,
				const char *refname, const unsigned char *sha1,
				unsigned int flags,
				reflog_expiry_prepare_fn prepare_fn,
				reflog_expiry_should_prune_fn should_prune_fn,
				reflog_expiry_cleanup_fn cleanup_fn,
				void *policy_cb_data)
{
	return 0;
}
1173
/* The ref-storage backend definition for "packed" reference stores. */
struct ref_storage_be refs_be_packed = {
	NULL, /* next */
	"packed",
	packed_ref_store_create,
	packed_init_db,
	packed_transaction_prepare,
	packed_transaction_finish,
	packed_transaction_abort,
	packed_initial_transaction_commit,

	packed_pack_refs,
	packed_peel_ref,
	packed_create_symref,
	packed_delete_refs,
	packed_rename_ref,

	packed_ref_iterator_begin,
	packed_read_raw_ref,

	/* Reflog operations are all no-ops or bugs for this backend: */
	packed_reflog_iterator_begin,
	packed_for_each_reflog_ent,
	packed_for_each_reflog_ent_reverse,
	packed_reflog_exists,
	packed_create_reflog,
	packed_delete_reflog,
	packed_reflog_expire
};