1#include"cache.h" 2#include"pack-revindex.h" 3 4/* 5 * Pack index for existing packs give us easy access to the offsets into 6 * corresponding pack file where each object's data starts, but the entries 7 * do not store the size of the compressed representation (uncompressed 8 * size is easily available by examining the pack entry header). It is 9 * also rather expensive to find the sha1 for an object given its offset. 10 * 11 * We build a hashtable of existing packs (pack_revindex), and keep reverse 12 * index here -- pack index file is sorted by object name mapping to offset; 13 * this pack_revindex[].revindex array is a list of offset/index_nr pairs 14 * ordered by offset, so if you know the offset of an object, next offset 15 * is where its packed representation ends and the index_nr can be used to 16 * get the object sha1 from the main index. 17 */ 18 19static struct pack_revindex *pack_revindex; 20static int pack_revindex_hashsz; 21 22static intpack_revindex_ix(struct packed_git *p) 23{ 24unsigned long ui = (unsigned long)(intptr_t)p; 25int i; 26 27 ui = ui ^ (ui >>16);/* defeat structure alignment */ 28 i = (int)(ui % pack_revindex_hashsz); 29while(pack_revindex[i].p) { 30if(pack_revindex[i].p == p) 31return i; 32if(++i == pack_revindex_hashsz) 33 i =0; 34} 35return-1- i; 36} 37 38static voidinit_pack_revindex(void) 39{ 40int num; 41struct packed_git *p; 42 43for(num =0, p = packed_git; p; p = p->next) 44 num++; 45if(!num) 46return; 47 pack_revindex_hashsz = num *11; 48 pack_revindex =xcalloc(pack_revindex_hashsz,sizeof(*pack_revindex)); 49for(p = packed_git; p; p = p->next) { 50 num =pack_revindex_ix(p); 51 num = -1- num; 52 pack_revindex[num].p = p; 53} 54/* revindex elements are lazily initialized */ 55} 56 57/* 58 * This is a least-significant-digit radix sort. 59 * 60 * It sorts each of the "n" items in "entries" by its offset field. The "max" 61 * parameter must be at least as large as the largest offset in the array, 62 * and lets us quit the sort early. 63 */ 64static voidsort_revindex(struct revindex_entry *entries,unsigned n, off_t max) 65{ 66/* 67 * We use a "digit" size of 16 bits. That keeps our memory 68 * usage reasonable, and we can generally (for a 4G or smaller 69 * packfile) quit after two rounds of radix-sorting. 70 */ 71#define DIGIT_SIZE (16) 72#define BUCKETS (1 << DIGIT_SIZE) 73/* 74 * We want to know the bucket that a[i] will go into when we are using 75 * the digit that is N bits from the (least significant) end. 76 */ 77#define BUCKET_FOR(a, i, bits) (((a)[(i)].offset >> (bits)) & (BUCKETS-1)) 78 79/* 80 * We need O(n) temporary storage. Rather than do an extra copy of the 81 * partial results into "entries", we sort back and forth between the 82 * real array and temporary storage. In each iteration of the loop, we 83 * keep track of them with alias pointers, always sorting from "from" 84 * to "to". 85 */ 86struct revindex_entry *tmp =xmalloc(n *sizeof(*tmp)); 87struct revindex_entry *from = entries, *to = tmp; 88int bits; 89unsigned*pos =xmalloc(BUCKETS *sizeof(*pos)); 90 91/* 92 * If (max >> bits) is zero, then we know that the radix digit we are 93 * on (and any higher) will be zero for all entries, and our loop will 94 * be a no-op, as everybody lands in the same zero-th bucket. 
	 */
	for (bits = 0; max >> bits; bits += DIGIT_SIZE) {
		struct revindex_entry *swap;
		unsigned i;

		memset(pos, 0, BUCKETS * sizeof(*pos));

		/*
		 * We want pos[i] to store the index of the last element that
		 * will go in bucket "i" (actually one past the last element).
		 * To do this, we first count the items that will go in each
		 * bucket, which gives us a relative offset from the last
		 * bucket. We can then cumulatively add the index from the
		 * previous bucket to get the true index.
		 */
		for (i = 0; i < n; i++)
			pos[BUCKET_FOR(from, i, bits)]++;
		for (i = 1; i < BUCKETS; i++)
			pos[i] += pos[i-1];

		/*
		 * Now we can drop the elements into their correct buckets (in
		 * our temporary array). We iterate the pos counter backwards
		 * to avoid using an extra index to count up. And since we are
		 * going backwards there, we must also go backwards through the
		 * array itself, to keep the sort stable.
		 *
		 * Note that we use an unsigned iterator to make sure we can
		 * handle 2^32-1 objects, even on a 32-bit system. But this
		 * means we cannot use the more obvious "i >= 0" loop condition
		 * for counting backwards, and must instead check for
		 * wrap-around with UINT_MAX.
		 */
		for (i = n - 1; i != UINT_MAX; i--)
			to[--pos[BUCKET_FOR(from, i, bits)]] = from[i];

		/*
		 * Now "to" contains the most sorted list, so we swap "from" and
		 * "to" for the next iteration.
		 */
		swap = from;
		from = to;
		to = swap;
	}

	/*
	 * If we ended with our data in the original array, great. If not,
	 * we have to move it back from the temporary storage.
	 */
	if (from != entries)
		memcpy(entries, tmp, n * sizeof(*entries));
	free(tmp);
	free(pos);

#undef BUCKET_FOR
#undef BUCKETS
#undef DIGIT_SIZE
}

/*
 * Ordered list of offsets of objects in the pack.
 */
static void create_pack_revindex(struct pack_revindex *rix)
{
	struct packed_git *p = rix->p;
	unsigned num_ent = p->num_objects;
	unsigned i;
	const char *index = p->index_data;

	rix->revindex = xmalloc(sizeof(*rix->revindex) * (num_ent + 1));
	index += 4 * 256;

	if (p->index_version > 1) {
		const uint32_t *off_32 =
			(uint32_t *)(index + 8 + p->num_objects * (20 + 4));
		const uint32_t *off_64 = off_32 + p->num_objects;
		for (i = 0; i < num_ent; i++) {
			uint32_t off = ntohl(*off_32++);
			if (!(off & 0x80000000)) {
				rix->revindex[i].offset = off;
			} else {
				rix->revindex[i].offset =
					((uint64_t)ntohl(*off_64++)) << 32;
				rix->revindex[i].offset |=
					ntohl(*off_64++);
			}
			rix->revindex[i].nr = i;
		}
	} else {
		for (i = 0; i < num_ent; i++) {
			uint32_t hl = *((uint32_t *)(index + 24 * i));
			rix->revindex[i].offset = ntohl(hl);
			rix->revindex[i].nr = i;
		}
	}

	/* This knows the pack format -- the 20-byte trailer
	 * follows immediately after the last object data.
	 */
	rix->revindex[num_ent].offset = p->pack_size - 20;
	rix->revindex[num_ent].nr = -1;
	sort_revindex(rix->revindex, num_ent, p->pack_size);
}

struct pack_revindex *revindex_for_pack(struct packed_git *p)
{
	int num;
	struct pack_revindex *rix;

	if (!pack_revindex_hashsz)
		init_pack_revindex();

	num = pack_revindex_ix(p);
	if (num < 0)
		die("internal error: pack revindex fubar");

	rix = &pack_revindex[num];
	if (!rix->revindex)
		create_pack_revindex(rix);

	return rix;
}

int find_revindex_position(struct pack_revindex *pridx, off_t ofs)
{
	int lo = 0;
	int hi = pridx->p->num_objects + 1;
	struct revindex_entry *revindex = pridx->revindex;

	do {
		unsigned mi = lo + (hi - lo) / 2;
		if (revindex[mi].offset == ofs) {
			return mi;
		} else if (ofs < revindex[mi].offset)
			hi = mi;
		else
			lo = mi + 1;
	} while (lo < hi);

	error("bad offset for revindex");
	return -1;
}

struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
{
	struct pack_revindex *pridx = revindex_for_pack(p);
	int pos = find_revindex_position(pridx, ofs);

	if (pos < 0)
		return NULL;

	return pridx->revindex + pos;
}
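
/*
 * Illustrative sketch, not part of the original file: the typical consumer
 * pattern described in the header comment above. The entry returned by
 * find_pack_revindex() and its successor bound an object's packed
 * (compressed) representation on disk, and the sentinel entry appended by
 * create_pack_revindex() guarantees that even the last object has a "next
 * offset" to subtract from; the .nr field indexes back into the pack's .idx
 * data (e.g. via nth_packed_object_sha1()). The helper name below is
 * hypothetical; real callers do the equivalent inline.
 */
static inline off_t packed_object_disk_size(struct packed_git *p,
					    off_t obj_offset)
{
	struct revindex_entry *entry = find_pack_revindex(p, obj_offset);

	if (!entry)
		return 0;	/* obj_offset does not start an object in this pack */

	return entry[1].offset - entry->offset;
}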