/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include "cache.h"
#include "config.h"
#include "diff.h"
#include "diffcore.h"
#include "tempfile.h"
#include "lockfile.h"
#include "cache-tree.h"
#include "refs.h"
#include "dir.h"
#include "object-store.h"
#include "tree.h"
#include "commit.h"
#include "blob.h"
#include "resolve-undo.h"
#include "strbuf.h"
#include "varint.h"
#include "split-index.h"
#include "utf8.h"
#include "fsmonitor.h"
#include "thread-utils.h"
#include "progress.h"

/* Mask for the name length in ce_flags in the on-disk index */

#define CE_NAMEMASK  (0x0fff)

/* Index extensions.
 *
 * The first letter should be 'A'..'Z' for extensions that are not
 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
 * order to correctly interpret the index file, pick a character that
 * is outside the range, to cause the reader to abort.
 */

#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
#define CACHE_EXT_LINK 0x6c696e6b	  /* "link" */
#define CACHE_EXT_UNTRACKED 0x554E5452	  /* "UNTR" */
#define CACHE_EXT_FSMONITOR 0x46534D4E	  /* "FSMN" */
#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945	/* "EOIE" */
#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */

/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
		 CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
		 SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)


/*
 * This is an estimate of the pathname length in the index.  We use
 * this for V4 index files to guess the un-deltafied size of the index
 * in memory because of pathname deltafication.  This is not required
 * for V2/V3 index formats because their pathnames are not compressed.
 * If the initial amount of memory set aside is not sufficient, the
 * mem pool will allocate extra memory.
 */
#define CACHE_ENTRY_PATH_LENGTH 80
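
/*
 * Note: the mem-pool helpers below draw cache entries from a pool
 * sized with this estimate; see estimate_cache_size_from_compressed()
 * further down, which reserves roughly
 *
 *	entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH)
 *
 * bytes for a v4 index.
 */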

static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
{
	struct cache_entry *ce;
	ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
	ce->mem_pool_allocated = 1;
	return ce;
}

static struct mem_pool *find_mem_pool(struct index_state *istate)
{
	struct mem_pool **pool_ptr;

	if (istate->split_index && istate->split_index->base)
		pool_ptr = &istate->split_index->base->ce_mem_pool;
	else
		pool_ptr = &istate->ce_mem_pool;

	if (!*pool_ptr)
		mem_pool_init(pool_ptr, 0);

	return *pool_ptr;
}

static const char *alternate_index_output;

static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	istate->cache[nr] = ce;
	add_name_hash(istate, ce);
}

static void replace_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
	struct cache_entry *old = istate->cache[nr];

	replace_index_entry_in_base(istate, old, ce);
	remove_name_hash(istate, old);
	discard_cache_entry(old);
	ce->ce_flags &= ~CE_HASHED;
	set_index_entry(istate, nr, ce);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;
}

void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
	struct cache_entry *old_entry = istate->cache[nr], *new_entry;
	int namelen = strlen(new_name);

	new_entry = make_empty_cache_entry(istate, namelen);
	copy_cache_entry(new_entry, old_entry);
	new_entry->ce_flags &= ~CE_HASHED;
	new_entry->ce_namelen = namelen;
	new_entry->index = 0;
	memcpy(new_entry->name, new_name, namelen + 1);

	cache_tree_invalidate_path(istate, old_entry->name);
	untracked_cache_remove_from_index(istate, old_entry->name);
	remove_index_entry_at(istate, nr);
	add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}

void fill_stat_data(struct stat_data *sd, struct stat *st)
{
	sd->sd_ctime.sec = (unsigned int)st->st_ctime;
	sd->sd_mtime.sec = (unsigned int)st->st_mtime;
	sd->sd_ctime.nsec = ST_CTIME_NSEC(*st);
	sd->sd_mtime.nsec = ST_MTIME_NSEC(*st);
	sd->sd_dev = st->st_dev;
	sd->sd_ino = st->st_ino;
	sd->sd_uid = st->st_uid;
	sd->sd_gid = st->st_gid;
	sd->sd_size = st->st_size;
}
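
/*
 * match_stat_data() below returns a bitmask of *_CHANGED flags
 * (0 when nothing relevant differs).  Which fields participate is
 * governed by the check_stat and trust_ctime globals, i.e. the
 * core.checkStat and core.trustctime configuration.
 */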
int match_stat_data(const struct stat_data *sd, struct stat *st)
{
	int changed = 0;

	if (sd->sd_mtime.sec != (unsigned int)st->st_mtime)
		changed |= MTIME_CHANGED;
	if (trust_ctime && check_stat &&
	    sd->sd_ctime.sec != (unsigned int)st->st_ctime)
		changed |= CTIME_CHANGED;

#ifdef USE_NSEC
	if (check_stat && sd->sd_mtime.nsec != ST_MTIME_NSEC(*st))
		changed |= MTIME_CHANGED;
	if (trust_ctime && check_stat &&
	    sd->sd_ctime.nsec != ST_CTIME_NSEC(*st))
		changed |= CTIME_CHANGED;
#endif

	if (check_stat) {
		if (sd->sd_uid != (unsigned int) st->st_uid ||
		    sd->sd_gid != (unsigned int) st->st_gid)
			changed |= OWNER_CHANGED;
		if (sd->sd_ino != (unsigned int) st->st_ino)
			changed |= INODE_CHANGED;
	}

#ifdef USE_STDEV
	/*
	 * st_dev breaks on network filesystems where different
	 * clients will have different views of what "device"
	 * the filesystem is on
	 */
	if (check_stat && sd->sd_dev != (unsigned int) st->st_dev)
		changed |= INODE_CHANGED;
#endif

	if (sd->sd_size != (unsigned int) st->st_size)
		changed |= DATA_CHANGED;

	return changed;
}

/*
 * This only updates the "non-critical" parts of the directory
 * cache, ie the parts that aren't tracked by GIT, and only used
 * to validate the cache.
 */
void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
{
	fill_stat_data(&ce->ce_stat_data, st);

	if (assume_unchanged)
		ce->ce_flags |= CE_VALID;

	if (S_ISREG(st->st_mode)) {
		ce_mark_uptodate(ce);
		mark_fsmonitor_valid(ce);
	}
}

static int ce_compare_data(struct index_state *istate,
			   const struct cache_entry *ce,
			   struct stat *st)
{
	int match = -1;
	int fd = git_open_cloexec(ce->name, O_RDONLY);

	if (fd >= 0) {
		struct object_id oid;
		if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
			match = !oideq(&oid, &ce->oid);
		/* index_fd() closed the file descriptor already */
	}
	return match;
}

static int ce_compare_link(const struct cache_entry *ce, size_t expected_size)
{
	int match = -1;
	void *buffer;
	unsigned long size;
	enum object_type type;
	struct strbuf sb = STRBUF_INIT;

	if (strbuf_readlink(&sb, ce->name, expected_size))
		return -1;

	buffer = read_object_file(&ce->oid, &type, &size);
	if (buffer) {
		if (size == sb.len)
			match = memcmp(buffer, sb.buf, size);
		free(buffer);
	}
	strbuf_release(&sb);
	return match;
}

static int ce_compare_gitlink(const struct cache_entry *ce)
{
	struct object_id oid;

	/*
	 * We don't actually require that the .git directory
	 * under GITLINK directory be a valid git directory. It
	 * might even be missing (in case nobody populated that
	 * sub-project).
	 *
	 * If so, we consider it always to match.
	 */
	if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
		return 0;
	return !oideq(&oid, &ce->oid);
}

static int ce_modified_check_fs(struct index_state *istate,
				const struct cache_entry *ce,
				struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(istate, ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, xsize_t(st->st_size)))
			return DATA_CHANGED;
		break;
	case S_IFDIR:
		if (S_ISGITLINK(ce->ce_mode))
			return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
		/* else fallthrough */
	default:
		return TYPE_CHANGED;
	}
	return 0;
}
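
/*
 * ce_match_stat_basic() below checks an entry against stat(2) data
 * only; it never opens the file.  Content comparison is left to
 * ce_modified_check_fs() above.
 */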
static int ce_match_stat_basic(const struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	if (ce->ce_flags & CE_REMOVE)
		return MODE_CHANGED | DATA_CHANGED | TYPE_CHANGED;

	switch (ce->ce_mode & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ce->ce_mode ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		if (!S_ISLNK(st->st_mode) &&
		    (has_symlinks || !S_ISREG(st->st_mode)))
			changed |= TYPE_CHANGED;
		break;
	case S_IFGITLINK:
		/* We ignore most of the st_xxx fields for gitlinks */
		if (!S_ISDIR(st->st_mode))
			changed |= TYPE_CHANGED;
		else if (ce_compare_gitlink(ce))
			changed |= DATA_CHANGED;
		return changed;
	default:
		BUG("unsupported ce_mode: %o", ce->ce_mode);
	}

	changed |= match_stat_data(&ce->ce_stat_data, st);

	/* Racily smudged entry? */
	if (!ce->ce_stat_data.sd_size) {
		if (!is_empty_blob_sha1(ce->oid.hash))
			changed |= DATA_CHANGED;
	}

	return changed;
}

static int is_racy_stat(const struct index_state *istate,
			const struct stat_data *sd)
{
	return (istate->timestamp.sec &&
#ifdef USE_NSEC
		 /* nanosecond timestamped files can also be racy! */
		(istate->timestamp.sec < sd->sd_mtime.sec ||
		 (istate->timestamp.sec == sd->sd_mtime.sec &&
		  istate->timestamp.nsec <= sd->sd_mtime.nsec))
#else
		istate->timestamp.sec <= sd->sd_mtime.sec
#endif
		);
}

int is_racy_timestamp(const struct index_state *istate,
		      const struct cache_entry *ce)
{
	return (!S_ISGITLINK(ce->ce_mode) &&
		is_racy_stat(istate, &ce->ce_stat_data));
}

int match_stat_data_racy(const struct index_state *istate,
			 const struct stat_data *sd, struct stat *st)
{
	if (is_racy_stat(istate, sd))
		return MTIME_CHANGED;
	return match_stat_data(sd, st);
}
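
/*
 * ie_match_stat() below is the stat-only check; ie_modified() further
 * down additionally falls back to comparing file contents when the
 * stat data alone cannot be trusted (e.g. the zero length recorded by
 * "update-index --cacheinfo").
 */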
int ie_match_stat(struct index_state *istate,
		  const struct cache_entry *ce, struct stat *st,
		  unsigned int options)
{
	unsigned int changed;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 *
	 * skip-worktree has the same effect with higher precedence
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce))
		return 0;
	if (!ignore_valid && (ce->ce_flags & CE_VALID))
		return 0;
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
		return 0;

	/*
	 * Intent-to-add entries have not been added, so the index entry
	 * by definition never matches what is in the work tree until it
	 * actually gets added.
	 */
	if (ce_intent_to_add(ce))
		return DATA_CHANGED | TYPE_CHANGED | MODE_CHANGED;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 * 	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 * 	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime are the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed && is_racy_timestamp(istate, ce)) {
		if (assume_racy_is_modified)
			changed |= DATA_CHANGED;
		else
			changed |= ce_modified_check_fs(istate, ce, st);
	}

	return changed;
}

int ie_modified(struct index_state *istate,
		const struct cache_entry *ce,
		struct stat *st, unsigned int options)
{
	int changed, changed_fs;

	changed = ie_match_stat(istate, ce, st, options);
	if (!changed)
		return 0;
	/*
	 * If the mode or type has changed, there's no point in trying
	 * to refresh the entry - it's not going to match
	 */
	if (changed & (MODE_CHANGED | TYPE_CHANGED))
		return changed;

	/*
	 * Immediately after read-tree or update-index --cacheinfo,
	 * the length field is zero, as we have never even read the
	 * lstat(2) information once, and we cannot trust DATA_CHANGED
	 * returned by ie_match_stat() which in turn was returned by
	 * ce_match_stat_basic() to signal that the filesize of the
	 * blob changed.  We have to actually go to the filesystem to
	 * see if the contents match, and if so, should answer "unchanged".
	 *
	 * The logic does not apply to gitlinks, as ce_match_stat_basic()
	 * already has checked the actual HEAD from the filesystem in the
	 * subproject.  If ie_match_stat() already said it is different,
	 * then we know it is.
	 */
	if ((changed & DATA_CHANGED) &&
	    (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
		return changed;

	changed_fs = ce_modified_check_fs(istate, ce, st);
	if (changed_fs)
		return changed | changed_fs;
	return 0;
}

int base_name_compare(const char *name1, int len1, int mode1,
		      const char *name2, int len2, int mode2)
{
	unsigned char c1, c2;
	int len = len1 < len2 ? len1 : len2;
	int cmp;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
}

/*
 * df_name_compare() is identical to base_name_compare(), except it
 * compares conflicting directory/file entries as equal. Note that
 * while a directory name compares as equal to a regular file, they
 * then individually compare _differently_ to a filename that has
 * a dot after the basename (because '\0' < '.' < '/').
 *
 * This is used by routines that want to traverse the git namespace
 * but then handle conflicting entries together when possible.
 */
int df_name_compare(const char *name1, int len1, int mode1,
		    const char *name2, int len2, int mode2)
{
	int len = len1 < len2 ? len1 : len2, cmp;
	unsigned char c1, c2;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	/* Directories and files compare equal (same length, same name) */
	if (len1 == len2)
		return 0;
	c1 = name1[len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	c2 = name2[len];
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	if (c1 == '/' && !c2)
		return 0;
	if (c2 == '/' && !c1)
		return 0;
	return c1 - c2;
}
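
/*
 * Illustration of the '\0' < '.' < '/' ordering above: under
 * base_name_compare(), the file "foo" sorts before "foo.c", but the
 * directory "foo" (compared as if it were "foo/") sorts after it.
 */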
int name_compare(const char *name1, size_t len1, const char *name2, size_t len2)
{
	size_t min_len = (len1 < len2) ? len1 : len2;
	int cmp = memcmp(name1, name2, min_len);
	if (cmp)
		return cmp;
	if (len1 < len2)
		return -1;
	if (len1 > len2)
		return 1;
	return 0;
}

int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2)
{
	int cmp;

	cmp = name_compare(name1, len1, name2, len2);
	if (cmp)
		return cmp;

	if (stage1 < stage2)
		return -1;
	if (stage1 > stage2)
		return 1;
	return 0;
}

static int index_name_stage_pos(const struct index_state *istate, const char *name, int namelen, int stage)
{
	int first, last;

	first = 0;
	last = istate->cache_nr;
	while (last > first) {
		int next = (last + first) >> 1;
		struct cache_entry *ce = istate->cache[next];
		int cmp = cache_name_stage_compare(name, namelen, stage, ce->name, ce_namelen(ce), ce_stage(ce));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

int index_name_pos(const struct index_state *istate, const char *name, int namelen)
{
	return index_name_stage_pos(istate, name, namelen, 0);
}
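
/*
 * When the binary search above misses, the insertion point is encoded
 * in the return value: a result r < 0 means the entry would belong at
 * position -r-1, so callers recover it with "pos = -pos-1" (see
 * remove_file_from_index() below for an example).
 */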
int remove_index_entry_at(struct index_state *istate, int pos)
{
	struct cache_entry *ce = istate->cache[pos];

	record_resolve_undo(istate, ce);
	remove_name_hash(istate, ce);
	save_or_free_index_entry(istate, ce);
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr--;
	if (pos >= istate->cache_nr)
		return 0;
	MOVE_ARRAY(istate->cache + pos, istate->cache + pos + 1,
		   istate->cache_nr - pos);
	return 1;
}

/*
 * Remove all cache entries marked for removal, that is where
 * CE_REMOVE is set in ce_flags.  This is much more effective than
 * calling remove_index_entry_at() for each entry to be removed.
 */
void remove_marked_cache_entries(struct index_state *istate)
{
	struct cache_entry **ce_array = istate->cache;
	unsigned int i, j;

	for (i = j = 0; i < istate->cache_nr; i++) {
		if (ce_array[i]->ce_flags & CE_REMOVE) {
			remove_name_hash(istate, ce_array[i]);
			save_or_free_index_entry(istate, ce_array[i]);
		}
		else
			ce_array[j++] = ce_array[i];
	}
	if (j == istate->cache_nr)
		return;
	istate->cache_changed |= CE_ENTRY_REMOVED;
	istate->cache_nr = j;
}

int remove_file_from_index(struct index_state *istate, const char *path)
{
	int pos = index_name_pos(istate, path, strlen(path));
	if (pos < 0)
		pos = -pos-1;
	cache_tree_invalidate_path(istate, path);
	untracked_cache_remove_from_index(istate, path);
	while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
		remove_index_entry_at(istate, pos);
	return 0;
}

static int compare_name(struct cache_entry *ce, const char *path, int namelen)
{
	return namelen != ce_namelen(ce) || memcmp(path, ce->name, namelen);
}

static int index_name_pos_also_unmerged(struct index_state *istate,
	const char *path, int namelen)
{
	int pos = index_name_pos(istate, path, namelen);
	struct cache_entry *ce;

	if (pos >= 0)
		return pos;

	/* maybe unmerged? */
	pos = -1 - pos;
	if (pos >= istate->cache_nr ||
	    compare_name((ce = istate->cache[pos]), path, namelen))
		return -1;

	/* order of preference: stage 2, 1, 3 */
	if (ce_stage(ce) == 1 && pos + 1 < istate->cache_nr &&
	    ce_stage((ce = istate->cache[pos + 1])) == 2 &&
	    !compare_name(ce, path, namelen))
		pos++;
	return pos;
}

static int different_name(struct cache_entry *ce, struct cache_entry *alias)
{
	int len = ce_namelen(ce);
	return ce_namelen(alias) != len || memcmp(ce->name, alias->name, len);
}

/*
 * If we add a filename that aliases in the cache, we will use the
 * name that we already have - but we don't want to update the same
 * alias twice, because that implies that there were actually two
 * different files with aliasing names!
 *
 * So we use the CE_ADDED flag to verify that the alias was an old
 * one before we accept it as a replacement.
 */
static struct cache_entry *create_alias_ce(struct index_state *istate,
					   struct cache_entry *ce,
					   struct cache_entry *alias)
{
	int len;
	struct cache_entry *new_entry;

	if (alias->ce_flags & CE_ADDED)
		die(_("will not add file alias '%s' ('%s' already exists in index)"),
		    ce->name, alias->name);

	/* Ok, create the new entry using the name of the existing alias */
	len = ce_namelen(alias);
	new_entry = make_empty_cache_entry(istate, len);
	memcpy(new_entry->name, alias->name, len);
	copy_cache_entry(new_entry, ce);
	save_or_free_index_entry(istate, ce);
	return new_entry;
}

void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
	struct object_id oid;
	if (write_object_file("", 0, blob_type, &oid))
		die(_("cannot create an empty blob in the object database"));
	oidcpy(&ce->oid, &oid);
}
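
/*
 * add_to_index() below is the workhorse behind "git add": it builds a
 * cache entry from the on-disk file (or an intent-to-add stub), deals
 * with case-insensitive aliases, and inserts the result into the index.
 */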
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
	int namelen, was_same;
	mode_t st_mode = st->st_mode;
	struct cache_entry *ce, *alias = NULL;
	unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
	int verbose = flags & (ADD_CACHE_VERBOSE | ADD_CACHE_PRETEND);
	int pretend = flags & ADD_CACHE_PRETEND;
	int intent_only = flags & ADD_CACHE_INTENT;
	int add_option = (ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|
			  (intent_only ? ADD_CACHE_NEW_ONLY : 0));
	int hash_flags = HASH_WRITE_OBJECT;

	if (flags & ADD_CACHE_RENORMALIZE)
		hash_flags |= HASH_RENORMALIZE;

	if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
		return error(_("%s: can only add regular files, symbolic links or git-directories"), path);

	namelen = strlen(path);
	if (S_ISDIR(st_mode)) {
		while (namelen && path[namelen-1] == '/')
			namelen--;
	}
	ce = make_empty_cache_entry(istate, namelen);
	memcpy(ce->name, path, namelen);
	ce->ce_namelen = namelen;
	if (!intent_only)
		fill_stat_cache_info(ce, st);
	else
		ce->ce_flags |= CE_INTENT_TO_ADD;


	if (trust_executable_bit && has_symlinks) {
		ce->ce_mode = create_ce_mode(st_mode);
	} else {
		/* If there is an existing entry, pick the mode bits and type
		 * from it, otherwise assume unexecutable regular file.
		 */
		struct cache_entry *ent;
		int pos = index_name_pos_also_unmerged(istate, path, namelen);

		ent = (0 <= pos) ? istate->cache[pos] : NULL;
		ce->ce_mode = ce_mode_from_stat(ent, st_mode);
	}

	/* When core.ignorecase=true, determine if a directory of the same name but differing
	 * case already exists within the Git repository.  If it does, ensure the directory
	 * case of the file being added to the repository matches (is folded into) the existing
	 * entry's directory case.
	 */
	if (ignore_case) {
		adjust_dirname_case(istate, ce->name);
	}
	if (!(flags & ADD_CACHE_RENORMALIZE)) {
		alias = index_file_exists(istate, ce->name,
					  ce_namelen(ce), ignore_case);
		if (alias &&
		    !ce_stage(alias) &&
		    !ie_match_stat(istate, alias, st, ce_option)) {
			/* Nothing changed, really */
			if (!S_ISGITLINK(alias->ce_mode))
				ce_mark_uptodate(alias);
			alias->ce_flags |= CE_ADDED;

			discard_cache_entry(ce);
			return 0;
		}
	}
	if (!intent_only) {
		if (index_path(istate, &ce->oid, path, st, hash_flags)) {
			discard_cache_entry(ce);
			return error(_("unable to index file '%s'"), path);
		}
	} else
		set_object_name_for_intent_to_add_entry(ce);

	if (ignore_case && alias && different_name(ce, alias))
		ce = create_alias_ce(istate, ce, alias);
	ce->ce_flags |= CE_ADDED;

	/* It was suspected to be racily clean, but it turns out to be Ok */
	was_same = (alias &&
		    !ce_stage(alias) &&
		    oideq(&alias->oid, &ce->oid) &&
		    ce->ce_mode == alias->ce_mode);

	if (pretend)
		discard_cache_entry(ce);
	else if (add_index_entry(istate, ce, add_option)) {
		discard_cache_entry(ce);
		return error(_("unable to add '%s' to index"), path);
	}
	if (verbose && !was_same)
		printf("add '%s'\n", path);
	return 0;
}

int add_file_to_index(struct index_state *istate, const char *path, int flags)
{
	struct stat st;
	if (lstat(path, &st))
		die_errno(_("unable to stat '%s'"), path);
	return add_to_index(istate, path, &st, flags);
}

struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
{
	return mem_pool__ce_calloc(find_mem_pool(istate), len);
}

struct cache_entry *make_empty_transient_cache_entry(size_t len)
{
	return xcalloc(1, cache_entry_size(len));
}

struct cache_entry *make_cache_entry(struct index_state *istate,
				     unsigned int mode,
				     const struct object_id *oid,
				     const char *path,
				     int stage,
				     unsigned int refresh_options)
{
	struct cache_entry *ce, *ret;
	int len;

	if (!verify_path(path, mode)) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_cache_entry(istate, len);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	ret = refresh_cache_entry(istate, ce, refresh_options);
	if (ret != ce)
		discard_cache_entry(ce);
	return ret;
}

struct cache_entry *make_transient_cache_entry(unsigned int mode, const struct object_id *oid,
					       const char *path, int stage)
{
	struct cache_entry *ce;
	int len;

	if (!verify_path(path, mode)) {
		error(_("invalid path '%s'"), path);
		return NULL;
	}

	len = strlen(path);
	ce = make_empty_transient_cache_entry(len);

	oidcpy(&ce->oid, oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);

	return ce;
}

/*
 * Chmod an index entry with either +x or -x.
 *
 * Returns -1 if the chmod for the particular cache entry failed (if it's
 * not a regular file), -2 if an invalid flip argument is passed in, 0
 * otherwise.
 */
int chmod_index_entry(struct index_state *istate, struct cache_entry *ce,
		      char flip)
{
	if (!S_ISREG(ce->ce_mode))
		return -1;
	switch (flip) {
	case '+':
		ce->ce_mode |= 0111;
		break;
	case '-':
		ce->ce_mode &= ~0111;
		break;
	default:
		return -2;
	}
	cache_tree_invalidate_path(istate, ce->name);
	ce->ce_flags |= CE_UPDATE_IN_BASE;
	mark_fsmonitor_invalid(istate, ce);
	istate->cache_changed |= CE_ENTRY_CHANGED;

	return 0;
}

int ce_same_name(const struct cache_entry *a, const struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}

/*
 * We fundamentally don't like some paths: we don't want
 * dot or dot-dot anywhere, and for obvious reasons don't
 * want to recurse into ".git" either.
 *
 * Also, we don't want double slashes or slashes at the
 * end that can make pathnames ambiguous.
 */
static int verify_dotfile(const char *rest, unsigned mode)
{
	/*
	 * The first character was '.', but that
	 * has already been discarded, we now test
	 * the rest.
	 */

	/* "." is not allowed */
	if (*rest == '\0' || is_dir_sep(*rest))
		return 0;

	switch (*rest) {
	/*
	 * ".git" followed by NUL or slash is bad. Note that we match
	 * case-insensitively here, even if ignore_case is not set.
	 * This outlaws ".GIT" everywhere out of an abundance of caution,
	 * since there's really no good reason to allow it.
	 *
	 * Once we've seen ".git", we can also find ".gitmodules", etc (also
	 * case-insensitively).
	 */
	case 'g':
	case 'G':
		if (rest[1] != 'i' && rest[1] != 'I')
			break;
		if (rest[2] != 't' && rest[2] != 'T')
			break;
		if (rest[3] == '\0' || is_dir_sep(rest[3]))
			return 0;
		if (S_ISLNK(mode)) {
			rest += 3;
			if (skip_iprefix(rest, "modules", &rest) &&
			    (*rest == '\0' || is_dir_sep(*rest)))
				return 0;
		}
		break;
	case '.':
		if (rest[1] == '\0' || is_dir_sep(rest[1]))
			return 0;
	}
	return 1;
}

int verify_path(const char *path, unsigned mode)
{
	char c;

	if (has_dos_drive_prefix(path))
		return 0;

	goto inside;
	for (;;) {
		if (!c)
			return 1;
		if (is_dir_sep(c)) {
inside:
			if (protect_hfs) {
				if (is_hfs_dotgit(path))
					return 0;
				if (S_ISLNK(mode)) {
					if (is_hfs_dotgitmodules(path))
						return 0;
				}
			}
			if (protect_ntfs) {
				if (is_ntfs_dotgit(path))
					return 0;
				if (S_ISLNK(mode)) {
					if (is_ntfs_dotgitmodules(path))
						return 0;
				}
			}

			c = *path++;
			if ((c == '.' && !verify_dotfile(path, mode)) ||
			    is_dir_sep(c) || c == '\0')
				return 0;
		}
		c = *path++;
	}
}
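
/*
 * For example, verify_path() rejects ".git/config", "a/./b", "a//b"
 * and "a/b/" (trailing slash), while "a/.gitfoo" is fine: the dotfile
 * check only refuses ".", ".." and the ".git"/".gitmodules" family as
 * whole path components.
 */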

/*
 * Do we have another file that has the beginning components being a
 * proper superset of the name we're trying to add?
 */
static int has_file_name(struct index_state *istate,
			 const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int len = ce_namelen(ce);
	int stage = ce_stage(ce);
	const char *name = ce->name;

	while (pos < istate->cache_nr) {
		struct cache_entry *p = istate->cache[pos++];

		if (len >= ce_namelen(p))
			break;
		if (memcmp(name, p->name, len))
			break;
		if (ce_stage(p) != stage)
			continue;
		if (p->name[len] != '/')
			continue;
		if (p->ce_flags & CE_REMOVE)
			continue;
		retval = -1;
		if (!ok_to_replace)
			break;
		remove_index_entry_at(istate, --pos);
	}
	return retval;
}


/*
 * Like strcmp(), but also return the offset of the first change.
 * If the strings are equal, the reported offset is their length.
 */
int strcmp_offset(const char *s1, const char *s2, size_t *first_change)
{
	size_t k;

	if (!first_change)
		return strcmp(s1, s2);

	for (k = 0; s1[k] == s2[k]; k++)
		if (s1[k] == '\0')
			break;

	*first_change = k;
	return (unsigned char)s1[k] - (unsigned char)s2[k];
}
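
/*
 * E.g. strcmp_offset("abc/def", "abc/xyz", &off) returns a negative
 * value and sets off to 4, the index of the first differing byte.
 */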

/*
 * Do we have another file with a pathname that is a proper
 * subset of the name we're trying to add?
 *
 * That is, is there another file in the index with a path
 * that matches a sub-directory in the given entry?
 */
static int has_dir_name(struct index_state *istate,
			const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int stage = ce_stage(ce);
	const char *name = ce->name;
	const char *slash = name + ce_namelen(ce);
	size_t len_eq_last;
	int cmp_last = 0;

	/*
	 * We are frequently called during an iteration on a sorted
	 * list of pathnames and while building a new index.  Therefore,
	 * there is a high probability that this entry will eventually
	 * be appended to the index, rather than inserted in the middle.
	 * If we can confirm that, we can avoid binary searches on the
	 * components of the pathname.
	 *
	 * Compare the entry's full path with the last path in the index.
	 */
	if (istate->cache_nr > 0) {
		cmp_last = strcmp_offset(name,
			istate->cache[istate->cache_nr - 1]->name,
			&len_eq_last);
		if (cmp_last > 0) {
			if (len_eq_last == 0) {
				/*
				 * The entry sorts AFTER the last one in the
				 * index and their paths have no common prefix,
				 * so there cannot be a F/D conflict.
				 */
				return retval;
			} else {
				/*
				 * The entry sorts AFTER the last one in the
				 * index, but has a common prefix.  Fall through
				 * to the loop below to dissect the entry's path
				 * and see where the difference is.
				 */
			}
		} else if (cmp_last == 0) {
			/*
			 * The entry exactly matches the last one in the
			 * index, but because of multiple stage and CE_REMOVE
			 * items, we fall through and let the regular search
			 * code handle it.
			 */
		}
	}

	for (;;) {
		size_t len;

		for (;;) {
			if (*--slash == '/')
				break;
			if (slash <= ce->name)
				return retval;
		}
		len = slash - name;

		if (cmp_last > 0) {
			/*
			 * (len + 1) is a directory boundary (including
			 * the trailing slash).  And since the loop is
			 * decrementing "slash", the first iteration is
			 * the longest directory prefix; subsequent
			 * iterations consider parent directories.
			 */

			if (len + 1 <= len_eq_last) {
				/*
				 * The directory prefix (including the trailing
				 * slash) also appears as a prefix in the last
				 * entry, so the remainder cannot collide (because
				 * strcmp said the whole path was greater).
				 *
				 * EQ: last: xxx/A
				 *     this: xxx/B
				 *
				 * LT: last: xxx/file_A
				 *     this: xxx/file_B
				 */
				return retval;
			}

			if (len > len_eq_last) {
				/*
				 * This part of the directory prefix (excluding
				 * the trailing slash) is longer than the known
				 * equal portions, so this sub-directory cannot
				 * collide with a file.
				 *
				 * GT: last: xxxA
				 *     this: xxxB/file
				 */
				return retval;
			}

			if (istate->cache_nr > 0 &&
			    ce_namelen(istate->cache[istate->cache_nr - 1]) > len) {
				/*
				 * The directory prefix lines up with part of
				 * a longer file or directory name, but sorts
				 * after it, so this sub-directory cannot
				 * collide with a file.
				 *
				 * last: xxx/yy-file (because '-' sorts before '/')
				 * this: xxx/yy/abc
				 */
				return retval;
			}

			/*
			 * This is a possible collision. Fall through and
			 * let the regular search code handle it.
			 *
			 * last: xxx
			 * this: xxx/file
			 */
		}

		pos = index_name_stage_pos(istate, name, len, stage);
		if (pos >= 0) {
			/*
			 * Found one, but not so fast.  This could
			 * be a marker that says "I was here, but
			 * I am being removed".  Such an entry is
			 * not a part of the resulting tree, and
			 * it is Ok to have a directory at the same
			 * path.
			 */
			if (!(istate->cache[pos]->ce_flags & CE_REMOVE)) {
				retval = -1;
				if (!ok_to_replace)
					break;
				remove_index_entry_at(istate, pos);
				continue;
			}
		}
		else
			pos = -pos-1;

		/*
		 * Trivial optimization: if we find an entry that
		 * already matches the sub-directory, then we know
		 * we're ok, and we can exit.
		 */
		while (pos < istate->cache_nr) {
			struct cache_entry *p = istate->cache[pos];
			if ((ce_namelen(p) <= len) ||
			    (p->name[len] != '/') ||
			    memcmp(p->name, name, len))
				break; /* not our subdirectory */
			if (ce_stage(p) == stage && !(p->ce_flags & CE_REMOVE))
				/*
				 * p is at the same stage as our entry, and
				 * is a subdirectory of what we are looking
				 * at, so we cannot have conflicts at our
				 * level or anything shorter.
				 */
				return retval;
			pos++;
		}
	}
	return retval;
}

/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added.  Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out.  Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(struct index_state *istate,
					 const struct cache_entry *ce,
					 int pos, int ok_to_replace)
{
	int retval;

	/*
	 * When ce is an "I am going away" entry, we allow it to be added
	 */
	if (ce->ce_flags & CE_REMOVE)
		return 0;

	/*
	 * We check if the path is a sub-path of a subsequent pathname
	 * first, since removing those will not change the position
	 * in the array.
	 */
	retval = has_file_name(istate, ce, pos, ok_to_replace);

	/*
	 * Then check if the path might have a clashing sub-directory
	 * before it.
	 */
	return retval + has_dir_name(istate, ce, pos, ok_to_replace);
}

static int add_index_entry_with_check(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;
	int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
	int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
	int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
	int new_only = option & ADD_CACHE_NEW_ONLY;

	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		cache_tree_invalidate_path(istate, ce->name);

	/*
	 * If this entry's path sorts after the last entry in the index,
	 * we can avoid searching for it.
	 */
	if (istate->cache_nr > 0 &&
	    strcmp(ce->name, istate->cache[istate->cache_nr - 1]->name) > 0)
		pos = -istate->cache_nr - 1;
	else
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));

	/* existing match? Just replace it. */
	if (pos >= 0) {
		if (!new_only)
			replace_index_entry(istate, pos, ce);
		return 0;
	}
	pos = -pos-1;

	if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
		untracked_cache_add_to_index(istate, ce->name);

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries..
	 */
	if (pos < istate->cache_nr && ce_stage(ce) == 0) {
		while (ce_same_name(istate->cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_index_entry_at(istate, pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;
	if (!verify_path(ce->name, ce->ce_mode))
		return error(_("invalid path '%s'"), ce->name);

	if (!skip_df_check &&
	    check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
		if (!ok_to_replace)
			return error(_("'%s' appears as both a file and as a directory"),
				     ce->name);
		pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
		pos = -pos-1;
	}
	return pos + 1;
}

int add_index_entry(struct index_state *istate, struct cache_entry *ce, int option)
{
	int pos;

	if (option & ADD_CACHE_JUST_APPEND)
		pos = istate->cache_nr;
	else {
		int ret;
		ret = add_index_entry_with_check(istate, ce, option);
		if (ret <= 0)
			return ret;
		pos = ret - 1;
	}

	/* Make sure the array is big enough .. */
	ALLOC_GROW(istate->cache, istate->cache_nr + 1, istate->cache_alloc);

	/* Add it in.. */
	istate->cache_nr++;
	if (istate->cache_nr > pos + 1)
		MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
			   istate->cache_nr - pos - 1);
	set_index_entry(istate, pos, ce);
	istate->cache_changed |= CE_ENTRY_ADDED;
	return 0;
}

/*
 * "refresh" does not calculate a new sha1 file or bring the
 * cache up-to-date for mode/content changes.  But what it
 * _does_ do is to "re-match" the stat information of a file
 * with the cache, so that you can refresh the cache for a
 * file that hasn't been changed but where the stat entry is
 * out of date.
 *
 * For example, you'd want to do this after doing a "git-read-tree",
 * to link up the stat cache details with the proper files.
 */
static struct cache_entry *refresh_cache_ent(struct index_state *istate,
					     struct cache_entry *ce,
					     unsigned int options, int *err,
					     int *changed_ret)
{
	struct stat st;
	struct cache_entry *updated;
	int changed;
	int refresh = options & CE_MATCH_REFRESH;
	int ignore_valid = options & CE_MATCH_IGNORE_VALID;
	int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
	int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
	int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;

	if (!refresh || ce_uptodate(ce))
		return ce;

	if (!ignore_fsmonitor)
		refresh_fsmonitor(istate);
	/*
	 * CE_VALID or CE_SKIP_WORKTREE means the user promised us
	 * that the change to the work tree does not matter and told
	 * us not to worry.
	 */
	if (!ignore_skip_worktree && ce_skip_worktree(ce)) {
		ce_mark_uptodate(ce);
		return ce;
	}
	if (!ignore_valid && (ce->ce_flags & CE_VALID)) {
		ce_mark_uptodate(ce);
		return ce;
	}
	if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
		ce_mark_uptodate(ce);
		return ce;
	}

	if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
		if (ignore_missing)
			return ce;
		if (err)
			*err = ENOENT;
		return NULL;
	}

	if (lstat(ce->name, &st) < 0) {
		if (ignore_missing && errno == ENOENT)
			return ce;
		if (err)
			*err = errno;
		return NULL;
	}

	changed = ie_match_stat(istate, ce, &st, options);
	if (changed_ret)
		*changed_ret = changed;
	if (!changed) {
		/*
		 * The path is unchanged.  If we were told to ignore
		 * valid bit, then we did the actual stat check and
		 * found that the entry is unmodified.  If the entry
		 * is not marked VALID, this is the place to mark it
		 * valid again, under "assume unchanged" mode.
		 */
		if (ignore_valid && assume_unchanged &&
		    !(ce->ce_flags & CE_VALID))
			; /* mark this one VALID again */
		else {
			/*
			 * We do not mark the index itself "modified"
			 * because CE_UPTODATE flag is in-core only;
			 * we are not going to write this change out.
			 */
			if (!S_ISGITLINK(ce->ce_mode)) {
				ce_mark_uptodate(ce);
				mark_fsmonitor_valid(ce);
			}
			return ce;
		}
	}

	if (ie_modified(istate, ce, &st, options)) {
		if (err)
			*err = EINVAL;
		return NULL;
	}

	updated = make_empty_cache_entry(istate, ce_namelen(ce));
	copy_cache_entry(updated, ce);
	memcpy(updated->name, ce->name, ce->ce_namelen + 1);
	fill_stat_cache_info(updated, &st);
	/*
	 * If ignore_valid is not set, we should leave CE_VALID bit
	 * alone.  Otherwise, paths marked with --no-assume-unchanged
	 * (i.e. things to be edited) will reacquire CE_VALID bit
	 * automatically, which is not really what we want.
	 */
	if (!ignore_valid && assume_unchanged &&
	    !(ce->ce_flags & CE_VALID))
		updated->ce_flags &= ~CE_VALID;

	/* istate->cache_changed is updated in the caller */
	return updated;
}

static void show_file(const char *fmt, const char *name, int in_porcelain,
		      int *first, const char *header_msg)
{
	if (in_porcelain && *first && header_msg) {
		printf("%s\n", header_msg);
		*first = 0;
	}
	printf(fmt, name);
}
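
/*
 * refresh_index() below walks every cache entry, re-checks it against
 * the working tree via refresh_cache_ent(), reports entries that need
 * an update through show_file(), and returns non-zero when any entry
 * could not be refreshed.
 */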
int refresh_index(struct index_state *istate, unsigned int flags,
		  const struct pathspec *pathspec,
		  char *seen, const char *header_msg)
{
	int i;
	int has_errors = 0;
	int really = (flags & REFRESH_REALLY) != 0;
	int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
	int quiet = (flags & REFRESH_QUIET) != 0;
	int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;
	int ignore_submodules = (flags & REFRESH_IGNORE_SUBMODULES) != 0;
	int first = 1;
	int in_porcelain = (flags & REFRESH_IN_PORCELAIN);
	unsigned int options = (CE_MATCH_REFRESH |
				(really ? CE_MATCH_IGNORE_VALID : 0) |
				(not_new ? CE_MATCH_IGNORE_MISSING : 0));
	const char *modified_fmt;
	const char *deleted_fmt;
	const char *typechange_fmt;
	const char *added_fmt;
	const char *unmerged_fmt;
	struct progress *progress = NULL;

	if (flags & REFRESH_PROGRESS && isatty(2))
		progress = start_delayed_progress(_("Refresh index"),
						  istate->cache_nr);

	trace_performance_enter();
	modified_fmt   = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
	deleted_fmt    = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
	typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
	added_fmt      = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
	unmerged_fmt   = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
	/*
	 * Use the multi-threaded preload_index() to refresh most of the
	 * cache entries quickly then in the single threaded loop below,
	 * we only have to do the special cases that are left.
	 */
	preload_index(istate, pathspec, 0);
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce, *new_entry;
		int cache_errno = 0;
		int changed = 0;
		int filtered = 0;

		ce = istate->cache[i];
		if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
			continue;

		if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
			filtered = 1;

		if (ce_stage(ce)) {
			while ((i < istate->cache_nr) &&
			       !strcmp(istate->cache[i]->name, ce->name))
				i++;
			i--;
			if (allow_unmerged)
				continue;
			if (!filtered)
				show_file(unmerged_fmt, ce->name, in_porcelain,
					  &first, header_msg);
			has_errors = 1;
			continue;
		}

		if (filtered)
			continue;

		new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
		if (new_entry == ce)
			continue;
		if (progress)
			display_progress(progress, i);
		if (!new_entry) {
			const char *fmt;

			if (really && cache_errno == EINVAL) {
				/* If we are doing --really-refresh that
				 * means the index is not valid anymore.
				 */
				ce->ce_flags &= ~CE_VALID;
				ce->ce_flags |= CE_UPDATE_IN_BASE;
				mark_fsmonitor_invalid(istate, ce);
				istate->cache_changed |= CE_ENTRY_CHANGED;
			}
			if (quiet)
				continue;

			if (cache_errno == ENOENT)
				fmt = deleted_fmt;
			else if (ce_intent_to_add(ce))
				fmt = added_fmt; /* must be before other checks */
			else if (changed & TYPE_CHANGED)
				fmt = typechange_fmt;
			else
				fmt = modified_fmt;
			show_file(fmt,
				  ce->name, in_porcelain, &first, header_msg);
			has_errors = 1;
			continue;
		}

		replace_index_entry(istate, i, new_entry);
	}
	if (progress) {
		display_progress(progress, istate->cache_nr);
		stop_progress(&progress);
	}
	trace_performance_leave("refresh index");
	return has_errors;
}

struct cache_entry *refresh_cache_entry(struct index_state *istate,
					struct cache_entry *ce,
					unsigned int options)
{
	return refresh_cache_ent(istate, ce, options, NULL, NULL);
}


/*****************************************************************
 * Index File I/O
 *****************************************************************/

#define INDEX_FORMAT_DEFAULT 3

static unsigned int get_index_format_default(void)
{
	char *envversion = getenv("GIT_INDEX_VERSION");
	char *endp;
	int value;
	unsigned int version = INDEX_FORMAT_DEFAULT;

	if (!envversion) {
		if (!git_config_get_int("index.version", &value))
			version = value;
		if (version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
			warning(_("index.version set, but the value is invalid.\n"
				  "Using version %i"), INDEX_FORMAT_DEFAULT);
			return INDEX_FORMAT_DEFAULT;
		}
		return version;
	}

	version = strtoul(envversion, &endp, 10);
	if (*endp ||
	    version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < version) {
		warning(_("GIT_INDEX_VERSION set, but the value is invalid.\n"
			  "Using version %i"), INDEX_FORMAT_DEFAULT);
		version = INDEX_FORMAT_DEFAULT;
	}
	return version;
}

/*
 * dev/ino/uid/gid/size are also just tracked to the low 32 bits
 * Again - this is just a (very strong in practice) heuristic that
 * the inode hasn't changed.
 *
 * We save the fields in big-endian order to allow using the
 * index file over NFS transparently.
 */
struct ondisk_cache_entry {
	struct cache_time ctime;
	struct cache_time mtime;
	uint32_t dev;
	uint32_t ino;
	uint32_t mode;
	uint32_t uid;
	uint32_t gid;
	uint32_t size;
	unsigned char sha1[20];
	uint16_t flags;
	char name[FLEX_ARRAY]; /* more */
};

/*
 * This struct is used when CE_EXTENDED bit is 1
 * The struct must match ondisk_cache_entry exactly from
 * ctime till flags
 */
struct ondisk_cache_entry_extended {
	struct cache_time ctime;
	struct cache_time mtime;
	uint32_t dev;
	uint32_t ino;
	uint32_t mode;
	uint32_t uid;
	uint32_t gid;
	uint32_t size;
	unsigned char sha1[20];
	uint16_t flags;
	uint16_t flags2;
	char name[FLEX_ARRAY]; /* more */
};

/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
#define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
#define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len)
#define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? \
			    ondisk_cache_entry_extended_size(ce_namelen(ce)) : \
			    ondisk_cache_entry_size(ce_namelen(ce)))
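
/*
 * Illustrative arithmetic (assuming no compiler padding before "name",
 * i.e. offsetof(struct ondisk_cache_entry, name) == 62): a non-extended
 * entry with a 5-byte pathname occupies (62 + 5 + 8) & ~7 == 72 bytes,
 * i.e. the name plus its NUL terminator padded out to the next multiple
 * of 8.
 */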

/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;

/* Allow fsck to force verification of the cache entry order. */
int verify_ce_order;

static int verify_hdr(const struct cache_header *hdr, unsigned long size)
{
	git_hash_ctx c;
	unsigned char hash[GIT_MAX_RAWSZ];
	int hdr_version;

	if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
		return error(_("bad signature 0x%08x"), hdr->hdr_signature);
	hdr_version = ntohl(hdr->hdr_version);
	if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
		return error(_("bad index version %d"), hdr_version);

	if (!verify_index_checksum)
		return 0;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
		return error(_("bad index file sha1 signature"));
	return 0;
}

static int read_index_extension(struct index_state *istate,
				const char *ext, const char *data, unsigned long sz)
{
	switch (CACHE_EXT(ext)) {
	case CACHE_EXT_TREE:
		istate->cache_tree = cache_tree_read(data, sz);
		break;
	case CACHE_EXT_RESOLVE_UNDO:
		istate->resolve_undo = resolve_undo_read(data, sz);
		break;
	case CACHE_EXT_LINK:
		if (read_link_extension(istate, data, sz))
			return -1;
		break;
	case CACHE_EXT_UNTRACKED:
		istate->untracked = read_untracked_extension(data, sz);
		break;
	case CACHE_EXT_FSMONITOR:
		read_fsmonitor_extension(istate, data, sz);
		break;
	case CACHE_EXT_ENDOFINDEXENTRIES:
	case CACHE_EXT_INDEXENTRYOFFSETTABLE:
		/* already handled in do_read_index() */
		break;
	default:
		if (*ext < 'A' || 'Z' < *ext)
			return error(_("index uses %.4s extension, which we do not understand"),
				     ext);
		fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
		break;
	}
	return 0;
}
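
/*
 * Worked example of the v4 name compression handled below: if the
 * previous entry's name is "foo/bar.c" and the on-disk record says
 * "strip 5 bytes, then append 'quux.c'", the reconstructed name is
 * "foo/" + "quux.c" == "foo/quux.c".
 */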
static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
					    unsigned int version,
					    struct ondisk_cache_entry *ondisk,
					    unsigned long *ent_size,
					    const struct cache_entry *previous_ce)
{
	struct cache_entry *ce;
	size_t len;
	const char *name;
	unsigned int flags;
	size_t copy_len = 0;
	/*
	 * Adjacent cache entries tend to share the leading paths, so it makes
	 * sense to only store the differences in later entries.  In the v4
	 * on-disk format of the index, each on-disk cache entry stores the
	 * number of bytes to be stripped from the end of the previous name,
	 * and the bytes to append to the result, to come up with its name.
	 */
	int expand_name_field = version == 4;

	/* On-disk flags are just 16 bits */
	flags = get_be16(&ondisk->flags);
	len = flags & CE_NAMEMASK;

	if (flags & CE_EXTENDED) {
		struct ondisk_cache_entry_extended *ondisk2;
		int extended_flags;
		ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
		extended_flags = get_be16(&ondisk2->flags2) << 16;
		/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
		if (extended_flags & ~CE_EXTENDED_FLAGS)
			die(_("unknown index entry format 0x%08x"), extended_flags);
		flags |= extended_flags;
		name = ondisk2->name;
	}
	else
		name = ondisk->name;

	if (expand_name_field) {
		const unsigned char *cp = (const unsigned char *)name;
		size_t strip_len, previous_len;

		/* If we're at the beginning of a block, ignore the previous name */
		strip_len = decode_varint(&cp);
		if (previous_ce) {
			previous_len = previous_ce->ce_namelen;
			if (previous_len < strip_len)
				die(_("malformed name field in the index, near path '%s'"),
				    previous_ce->name);
			copy_len = previous_len - strip_len;
		}
		name = (const char *)cp;
	}

	if (len == CE_NAMEMASK) {
		len = strlen(name);
		if (expand_name_field)
			len += copy_len;
	}

	ce = mem_pool__ce_alloc(ce_mem_pool, len);

	ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
	ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
	ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
	ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
	ce->ce_stat_data.sd_dev  = get_be32(&ondisk->dev);
	ce->ce_stat_data.sd_ino  = get_be32(&ondisk->ino);
	ce->ce_mode = get_be32(&ondisk->mode);
	ce->ce_stat_data.sd_uid  = get_be32(&ondisk->uid);
	ce->ce_stat_data.sd_gid  = get_be32(&ondisk->gid);
	ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
	ce->ce_flags = flags & ~CE_NAMEMASK;
	ce->ce_namelen = len;
	ce->index = 0;
	hashcpy(ce->oid.hash, ondisk->sha1);

	if (expand_name_field) {
		if (copy_len)
			memcpy(ce->name, previous_ce->name, copy_len);
		memcpy(ce->name + copy_len, name, len + 1 - copy_len);
		*ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
	} else {
		memcpy(ce->name, name, len + 1);
		*ent_size = ondisk_ce_size(ce);
	}
	return ce;
}

static void check_ce_order(struct index_state *istate)
{
	unsigned int i;

	if (!verify_ce_order)
		return;

	for (i = 1; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i - 1];
		struct cache_entry *next_ce = istate->cache[i];
		int name_compare = strcmp(ce->name, next_ce->name);

		if (0 < name_compare)
			die(_("unordered stage entries in index"));
		if (!name_compare) {
			if (!ce_stage(ce))
				die(_("multiple stage entries for merged file '%s'"),
				    ce->name);
			if (ce_stage(ce) > ce_stage(next_ce))
				die(_("unordered stage entries for '%s'"),
				    ce->name);
		}
	}
}

static void tweak_untracked_cache(struct index_state *istate)
{
	switch (git_config_get_untracked_cache()) {
	case -1: /* keep: do nothing */
		break;
	case 0: /* false */
		remove_untracked_cache(istate);
		break;
	case 1: /* true */
		add_untracked_cache(istate);
		break;
	default: /* unknown value: do nothing */
		break;
	}
}
static void tweak_split_index(struct index_state *istate)
{
	switch (git_config_get_split_index()) {
	case -1: /* unset: do nothing */
		break;
	case 0: /* false */
		remove_split_index(istate);
		break;
	case 1: /* true */
		add_split_index(istate);
		break;
	default: /* unknown value: do nothing */
		break;
	}
}

static void post_read_index_from(struct index_state *istate)
{
	check_ce_order(istate);
	tweak_untracked_cache(istate);
	tweak_split_index(istate);
	tweak_fsmonitor(istate);
}

static size_t estimate_cache_size_from_compressed(unsigned int entries)
{
	return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
}

static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
{
	long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);

	/*
	 * Account for potential alignment differences.
	 */
	per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry));
	return ondisk_size + entries * per_entry;
}

struct index_entry_offset
{
	/* starting byte offset into index file, count of index entries in this block */
	int offset, nr;
};

struct index_entry_offset_table
{
	int nr;
	struct index_entry_offset entries[FLEX_ARRAY];
};

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);

static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);

struct load_index_extensions
{
	pthread_t pthread;
	struct index_state *istate;
	const char *mmap;
	size_t mmap_size;
	unsigned long src_offset;
};

static void *load_index_extensions(void *_data)
{
	struct load_index_extensions *p = _data;
	unsigned long src_offset = p->src_offset;

	while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {
		/* After an array of active_nr index entries,
		 * there can be arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
		uint32_t extsize = get_be32(p->mmap + src_offset + 4);
		if (read_index_extension(p->istate,
					 p->mmap + src_offset,
					 p->mmap + src_offset + 8,
					 extsize) < 0) {
			munmap((void *)p->mmap, p->mmap_size);
			die(_("index file corrupt"));
		}
		src_offset += 8;
		src_offset += extsize;
	}

	return NULL;
}

/*
 * A helper function that will load the specified range of cache entries
 * from the memory mapped file and add them to the given index.
 */
static unsigned long load_cache_entry_block(struct index_state *istate,
			struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,
			unsigned long start_offset, const struct cache_entry *previous_ce)
{
	int i;
	unsigned long src_offset = start_offset;

	for (i = offset; i < offset + nr; i++) {
		struct ondisk_cache_entry *disk_ce;
		struct cache_entry *ce;
		unsigned long consumed;

		disk_ce = (struct ondisk_cache_entry *)(mmap + src_offset);
		ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce);
		set_index_entry(istate, i, ce);

		src_offset += consumed;
		previous_ce = ce;
	}
	return src_offset - start_offset;
}
static unsigned long load_all_cache_entries(struct index_state *istate,
			const char *mmap, size_t mmap_size, unsigned long src_offset)
{
	unsigned long consumed;

	if (istate->version == 4) {
		mem_pool_init(&istate->ce_mem_pool,
			      estimate_cache_size_from_compressed(istate->cache_nr));
	} else {
		mem_pool_init(&istate->ce_mem_pool,
			      estimate_cache_size(mmap_size, istate->cache_nr));
	}

	consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
					  0, istate->cache_nr, mmap, src_offset, NULL);
	return consumed;
}

/*
 * Mostly randomly chosen maximum thread counts: we
 * cap the parallelism to online_cpus() threads, and we want
 * to have at least 10000 cache entries per thread for it to
 * be worth starting a thread.
 */

#define THREAD_COST		(10000)

struct load_cache_entries_thread_data
{
	pthread_t pthread;
	struct index_state *istate;
	struct mem_pool *ce_mem_pool;
	int offset;
	const char *mmap;
	struct index_entry_offset_table *ieot;
	int ieot_start;		/* starting index into the ieot array */
	int ieot_blocks;	/* count of ieot entries to process */
	unsigned long consumed;	/* return # of bytes in index file processed */
};

/*
 * A thread proc to run the load_cache_entries() computation
 * across multiple background threads.
 */
static void *load_cache_entries_thread(void *_data)
{
	struct load_cache_entries_thread_data *p = _data;
	int i;

	/* iterate across all ieot blocks assigned to this thread */
	for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
		p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
			p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
		p->offset += p->ieot->entries[i].nr;
	}
	return NULL;
}
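
/*
 * Illustration of the block distribution below: with ieot->nr == 8
 * blocks and nr_threads == 3, DIV_ROUND_UP(8, 3) assigns 3 blocks to
 * each of the first two threads and the remaining 2 to the last one.
 */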
static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
						 unsigned long src_offset, int nr_threads, struct index_entry_offset_table *ieot)
{
	int i, offset, ieot_blocks, ieot_start, err;
	struct load_cache_entries_thread_data *data;
	unsigned long consumed = 0;

	/* a little sanity checking */
	if (istate->name_hash_initialized)
		BUG("the name hash isn't thread safe");

	mem_pool_init(&istate->ce_mem_pool, 0);

	/* ensure we have no more threads than we have blocks to process */
	if (nr_threads > ieot->nr)
		nr_threads = ieot->nr;
	data = xcalloc(nr_threads, sizeof(*data));

	offset = ieot_start = 0;
	ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];
		int nr, j;

		if (ieot_start + ieot_blocks > ieot->nr)
			ieot_blocks = ieot->nr - ieot_start;

		p->istate = istate;
		p->offset = offset;
		p->mmap = mmap;
		p->ieot = ieot;
		p->ieot_start = ieot_start;
		p->ieot_blocks = ieot_blocks;

		/* create a mem_pool for each thread */
		nr = 0;
		for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
			nr += p->ieot->entries[j].nr;
		if (istate->version == 4) {
			mem_pool_init(&p->ce_mem_pool,
				      estimate_cache_size_from_compressed(nr));
		} else {
			mem_pool_init(&p->ce_mem_pool,
				      estimate_cache_size(mmap_size, nr));
		}

		err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
		if (err)
			die(_("unable to create load_cache_entries thread: %s"), strerror(err));

		/* increment by the number of cache entries in the ieot block being processed */
		for (j = 0; j < ieot_blocks; j++)
			offset += ieot->entries[ieot_start + j].nr;
		ieot_start += ieot_blocks;
	}

	for (i = 0; i < nr_threads; i++) {
		struct load_cache_entries_thread_data *p = &data[i];

		err = pthread_join(p->pthread, NULL);
		if (err)
			die(_("unable to join load_cache_entries thread: %s"), strerror(err));
		mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
		consumed += p->consumed;
	}

	free(data);

	return consumed;
}
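/*
 * Sketch of the default sizing below when index.threads is "auto"
 * (figures are hypothetical): 100,000 cache entries on an 8-core
 * machine gives 100000 / THREAD_COST = 10 candidate threads, capped
 * to online_cpus() = 8; 5,000 entries gives 0, i.e. the single-threaded
 * load_all_cache_entries() path is taken instead.
 */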
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
	int fd;
	struct stat st;
	unsigned long src_offset;
	const struct cache_header *hdr;
	const char *mmap;
	size_t mmap_size;
	struct load_index_extensions p;
	size_t extension_offset = 0;
	int nr_threads, cpus;
	struct index_entry_offset_table *ieot = NULL;

	if (istate->initialized)
		return istate->cache_nr;

	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		if (!must_exist && errno == ENOENT)
			return 0;
		die_errno(_("%s: index file open failed"), path);
	}

	if (fstat(fd, &st))
		die_errno(_("%s: cannot stat the open index"), path);

	mmap_size = xsize_t(st.st_size);
	if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		die(_("%s: index file smaller than expected"), path);

	mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (mmap == MAP_FAILED)
		die_errno(_("%s: unable to map index file"), path);
	close(fd);

	hdr = (const struct cache_header *)mmap;
	if (verify_hdr(hdr, mmap_size) < 0)
		goto unmap;

	hashcpy(istate->oid.hash, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
	istate->version = ntohl(hdr->hdr_version);
	istate->cache_nr = ntohl(hdr->hdr_entries);
	istate->cache_alloc = alloc_nr(istate->cache_nr);
	istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
	istate->initialized = 1;

	p.istate = istate;
	p.mmap = mmap;
	p.mmap_size = mmap_size;

	src_offset = sizeof(*hdr);

	if (git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	/* TODO: does creating more threads than cores help? */
	if (!nr_threads) {
		nr_threads = istate->cache_nr / THREAD_COST;
		cpus = online_cpus();
		if (nr_threads > cpus)
			nr_threads = cpus;
	}

	if (!HAVE_THREADS)
		nr_threads = 1;

	if (nr_threads > 1) {
		extension_offset = read_eoie_extension(mmap, mmap_size);
		if (extension_offset) {
			int err;

			p.src_offset = extension_offset;
			err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
			if (err)
				die(_("unable to create load_index_extensions thread: %s"), strerror(err));

			nr_threads--;
		}
	}

	/*
	 * Locate and read the index entry offset table so that we can use it
	 * to multi-thread the reading of the cache entries.
	 */
	if (extension_offset && nr_threads > 1)
		ieot = read_ieot_extension(mmap, mmap_size, extension_offset);

	if (ieot) {
		src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, src_offset, nr_threads, ieot);
		free(ieot);
	} else {
		src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
	}

	istate->timestamp.sec = st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);

	/* if we created a thread, join it; otherwise load the extensions on the primary thread */
	if (extension_offset) {
		int ret = pthread_join(p.pthread, NULL);
		if (ret)
			die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
	} else {
		p.src_offset = src_offset;
		load_index_extensions(&p);
	}
	munmap((void *)mmap, mmap_size);
	return istate->cache_nr;

unmap:
	munmap((void *)mmap, mmap_size);
	die(_("index file corrupt"));
}

/*
 * Signal that the shared index is used by updating its mtime.
 *
 * This way, shared indexes can be removed if they have not been used
 * for some time.
 */
static void freshen_shared_index(const char *shared_index, int warn)
{
	if (!check_and_freshen_file(shared_index, 1) && warn)
		warning(_("could not freshen shared index '%s'"), shared_index);
}

int read_index_from(struct index_state *istate, const char *path,
		    const char *gitdir)
{
	struct split_index *split_index;
	int ret;
	char *base_oid_hex;
	char *base_path;

	/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
	if (istate->initialized)
		return istate->cache_nr;

	trace_performance_enter();
	ret = do_read_index(istate, path, 0);
	trace_performance_leave("read cache %s", path);

	split_index = istate->split_index;
	if (!split_index || is_null_oid(&split_index->base_oid)) {
		post_read_index_from(istate);
		return ret;
	}

	trace_performance_enter();
	if (split_index->base)
		discard_index(split_index->base);
	else
		split_index->base = xcalloc(1, sizeof(*split_index->base));

	base_oid_hex = oid_to_hex(&split_index->base_oid);
	base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
	ret = do_read_index(split_index->base, base_path, 1);
	if (!oideq(&split_index->base_oid, &split_index->base->oid))
		die(_("broken index, expect %s in %s, got %s"),
		    base_oid_hex, base_path,
		    oid_to_hex(&split_index->base->oid));

	freshen_shared_index(base_path, 0);
	merge_base_index(istate);
	post_read_index_from(istate);
	trace_performance_leave("read cache %s", base_path);
	free(base_path);
	return ret;
}

int is_index_unborn(struct index_state *istate)
{
	return (!istate->cache_nr && !istate->timestamp.sec);
}
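/*
 * For reference, the files involved in the split-index read above
 * (the hash is illustrative):
 *
 *	.git/index                    <- entries + "link" extension
 *	                                 naming split_index->base_oid
 *	.git/sharedindex.4b825dc...   <- the shared (base) entries
 *
 * merge_base_index() folds the shared entries back in, so callers of
 * read_index_from() always see one flat index.
 */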
int discard_index(struct index_state *istate)
{
	/*
	 * Cache entries in istate->cache[] should have been allocated
	 * from the memory pool associated with this index, or from an
	 * associated split_index. There is no need to free individual
	 * cache entries. validate_cache_entries can detect when this
	 * assertion does not hold.
	 */
	validate_cache_entries(istate);

	resolve_undo_clear_index(istate);
	istate->cache_nr = 0;
	istate->cache_changed = 0;
	istate->timestamp.sec = 0;
	istate->timestamp.nsec = 0;
	free_name_hash(istate);
	cache_tree_free(&(istate->cache_tree));
	istate->initialized = 0;
	FREE_AND_NULL(istate->cache);
	istate->cache_alloc = 0;
	discard_split_index(istate);
	free_untracked_cache(istate->untracked);
	istate->untracked = NULL;

	if (istate->ce_mem_pool) {
		mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
		istate->ce_mem_pool = NULL;
	}

	return 0;
}

/*
 * Validate the cache entries of this index.
 * All cache entries associated with this index
 * should have been allocated by the memory pool
 * associated with this index, or by a referenced
 * split index.
 */
void validate_cache_entries(const struct index_state *istate)
{
	int i;

	if (!should_validate_cache_entries() || !istate || !istate->initialized)
		return;

	for (i = 0; i < istate->cache_nr; i++) {
		if (!istate->cache[i]) {
			BUG("cache entry is not allocated from expected memory pool");
		} else if (!istate->ce_mem_pool ||
			   !mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
			if (!istate->split_index ||
			    !istate->split_index->base ||
			    !istate->split_index->base->ce_mem_pool ||
			    !mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
				BUG("cache entry is not allocated from expected memory pool");
			}
		}
	}

	if (istate->split_index)
		validate_cache_entries(istate->split_index->base);
}

int unmerged_index(const struct index_state *istate)
{
	int i;
	for (i = 0; i < istate->cache_nr; i++) {
		if (ce_stage(istate->cache[i]))
			return 1;
	}
	return 0;
}

int repo_index_has_changes(struct repository *repo,
			   struct tree *tree,
			   struct strbuf *sb)
{
	struct index_state *istate = repo->index;
	struct object_id cmp;
	int i;

	if (tree)
		cmp = tree->object.oid;
	if (tree || !get_oid_tree("HEAD", &cmp)) {
		struct diff_options opt;

		repo_diff_setup(repo, &opt);
		opt.flags.exit_with_status = 1;
		if (!sb)
			opt.flags.quick = 1;
		do_diff_cache(&cmp, &opt);
		diffcore_std(&opt);
		for (i = 0; sb && i < diff_queued_diff.nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
		}
		diff_flush(&opt);
		return opt.flags.has_changes != 0;
	} else {
		for (i = 0; sb && i < istate->cache_nr; i++) {
			if (i)
				strbuf_addch(sb, ' ');
			strbuf_addstr(sb, istate->cache[i]->name);
		}
		return !!istate->cache_nr;
	}
}

#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;

static int ce_write_flush(git_hash_ctx *context, int fd)
{
	unsigned int buffered = write_buffer_len;
	if (buffered) {
		the_hash_algo->update_fn(context, write_buffer, buffered);
		if (write_in_full(fd, write_buffer, buffered) < 0)
			return -1;
		write_buffer_len = 0;
	}
	return 0;
}
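/*
 * Hypothetical usage sketch for the buffered writers that follow
 * (error handling elided): every byte of the index is routed through
 * the same staging buffer so it is hashed and written in one pass,
 * which is how the trailing checksum always matches what reached disk.
 *
 *	git_hash_ctx c;
 *	the_hash_algo->init_fn(&c);
 *	ce_write(&c, fd, &hdr, sizeof(hdr));
 *	...more ce_write() calls...
 *	ce_flush(&c, fd, istate->oid.hash);
 */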
static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
{
	while (len) {
		unsigned int buffered = write_buffer_len;
		unsigned int partial = WRITE_BUFFER_SIZE - buffered;
		if (partial > len)
			partial = len;
		memcpy(write_buffer + buffered, data, partial);
		buffered += partial;
		if (buffered == WRITE_BUFFER_SIZE) {
			write_buffer_len = buffered;
			if (ce_write_flush(context, fd))
				return -1;
			buffered = 0;
		}
		write_buffer_len = buffered;
		len -= partial;
		data = (char *)data + partial;
	}
	return 0;
}

static int write_index_ext_header(git_hash_ctx *context, git_hash_ctx *eoie_context,
				  int fd, unsigned int ext, unsigned int sz)
{
	ext = htonl(ext);
	sz = htonl(sz);
	if (eoie_context) {
		the_hash_algo->update_fn(eoie_context, &ext, 4);
		the_hash_algo->update_fn(eoie_context, &sz, 4);
	}
	return ((ce_write(context, fd, &ext, 4) < 0) ||
		(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}

static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
{
	unsigned int left = write_buffer_len;

	if (left) {
		write_buffer_len = 0;
		the_hash_algo->update_fn(context, write_buffer, left);
	}

	/* Flush first if not enough space for hash signature */
	if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
		if (write_in_full(fd, write_buffer, left) < 0)
			return -1;
		left = 0;
	}

	/* Append the hash signature at the end */
	the_hash_algo->final_fn(write_buffer + left, context);
	hashcpy(hash, write_buffer + left);
	left += the_hash_algo->rawsz;
	return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
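/*
 * Background for the smudging below: an entry is "racily clean" when
 * the file and the index were modified within the same mtime
 * granularity, e.g. (times are illustrative):
 *
 *	t=100	echo foo >file; git update-index file
 *		(index mtime == file mtime == 100)
 *	t=100	echo bar >file
 *		(file mtime still 100; stat data still matches)
 *
 * Stat data alone cannot tell these apart, so the cached size is
 * smudged to 0 to force a content comparison on the next refresh.
 */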
static void ce_smudge_racily_clean_entry(struct index_state *istate,
					 struct cache_entry *ce)
{
	/*
	 * The only thing we care about in this function is to smudge the
	 * falsely clean entry due to touch-update-touch race, so we leave
	 * everything else as it is.  We are called for entries whose
	 * ce_stat_data.sd_mtime match the index file mtime.
	 *
	 * Note that this actually does not do much for gitlinks, for
	 * which ce_match_stat_basic() always goes to the actual
	 * contents.  The caller checks with is_racy_timestamp() which
	 * always says "no" for gitlinks, so we are not called for them ;-)
	 */
	struct stat st;

	if (lstat(ce->name, &st) < 0)
		return;
	if (ce_match_stat_basic(ce, &st))
		return;
	if (ce_modified_check_fs(istate, ce, &st)) {
		/* This is "racily clean"; smudge it.  Note that this
		 * is tricky code.  At first glance, it may appear
		 * that it can break with this sequence:
		 *
		 * $ echo xyzzy >frotz
		 * $ git-update-index --add frotz
		 * $ : >frotz
		 * $ sleep 3
		 * $ echo filfre >nitfol
		 * $ git-update-index --add nitfol
		 *
		 * but it does not.  When the second update-index runs,
		 * it notices that the entry "frotz" has the same timestamp
		 * as index, and if we were to smudge it by resetting its
		 * size to zero here, then the object name recorded
		 * in index is the 6-byte file but the cached stat information
		 * becomes zero --- which would then match what we would
		 * obtain from the filesystem next time we stat("frotz").
		 *
		 * However, the second update-index, before calling
		 * this function, notices that the cached size is 6
		 * bytes and what is on the filesystem is an empty
		 * file, and never calls us, so the cached size information
		 * for "frotz" stays 6 which does not match the filesystem.
		 */
		ce->ce_stat_data.sd_size = 0;
	}
}

/* Copy miscellaneous fields but not the name */
static void copy_cache_entry_to_ondisk(struct ondisk_cache_entry *ondisk,
				       struct cache_entry *ce)
{
	short flags;

	ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
	ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
	ondisk->ctime.nsec = htonl(ce->ce_stat_data.sd_ctime.nsec);
	ondisk->mtime.nsec = htonl(ce->ce_stat_data.sd_mtime.nsec);
	ondisk->dev  = htonl(ce->ce_stat_data.sd_dev);
	ondisk->ino  = htonl(ce->ce_stat_data.sd_ino);
	ondisk->mode = htonl(ce->ce_mode);
	ondisk->uid  = htonl(ce->ce_stat_data.sd_uid);
	ondisk->gid  = htonl(ce->ce_stat_data.sd_gid);
	ondisk->size = htonl(ce->ce_stat_data.sd_size);
	hashcpy(ondisk->sha1, ce->oid.hash);

	flags = ce->ce_flags & ~CE_NAMEMASK;
	flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
	ondisk->flags = htons(flags);
	if (ce->ce_flags & CE_EXTENDED) {
		struct ondisk_cache_entry_extended *ondisk2;
		ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
		ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
	}
}
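/*
 * Worked example of the index v4 prefix compression implemented
 * below (paths are hypothetical): with previous_name "foo/bar.c" and
 * the current name "foo/baz.c", the common prefix "foo/ba" is 6 bytes,
 * so to_remove = 9 - 6 = 3 is emitted as a varint followed by the
 * NUL-terminated suffix "z.c"; a reader reconstructs
 * "foo/ba" + "z.c" == "foo/baz.c".
 */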
static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
			  struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
	int size;
	int result;
	unsigned int saved_namelen;
	int stripped_name = 0;
	static unsigned char padding[8] = { 0x00 };

	if (ce->ce_flags & CE_STRIP_NAME) {
		saved_namelen = ce_namelen(ce);
		ce->ce_namelen = 0;
		stripped_name = 1;
	}

	if (ce->ce_flags & CE_EXTENDED)
		size = offsetof(struct ondisk_cache_entry_extended, name);
	else
		size = offsetof(struct ondisk_cache_entry, name);

	if (!previous_name) {
		int len = ce_namelen(ce);
		copy_cache_entry_to_ondisk(ondisk, ce);
		result = ce_write(c, fd, ondisk, size);
		if (!result)
			result = ce_write(c, fd, ce->name, len);
		if (!result)
			result = ce_write(c, fd, padding, align_padding_size(size, len));
	} else {
		int common, to_remove, prefix_size;
		unsigned char to_remove_vi[16];
		for (common = 0;
		     (ce->name[common] &&
		      common < previous_name->len &&
		      ce->name[common] == previous_name->buf[common]);
		     common++)
			; /* still matching */
		to_remove = previous_name->len - common;
		prefix_size = encode_varint(to_remove, to_remove_vi);

		copy_cache_entry_to_ondisk(ondisk, ce);
		result = ce_write(c, fd, ondisk, size);
		if (!result)
			result = ce_write(c, fd, to_remove_vi, prefix_size);
		if (!result)
			result = ce_write(c, fd, ce->name + common, ce_namelen(ce) - common);
		if (!result)
			result = ce_write(c, fd, padding, 1);

		strbuf_splice(previous_name, common, to_remove,
			      ce->name + common, ce_namelen(ce) - common);
	}
	if (stripped_name) {
		ce->ce_namelen = saved_namelen;
		ce->ce_flags &= ~CE_STRIP_NAME;
	}

	return result;
}

/*
 * This function verifies that index_state matches the checksum recorded
 * at the end of the index file.  Don't die on any other failure; just
 * return 0.
 */
static int verify_index_from(const struct index_state *istate, const char *path)
{
	int fd;
	ssize_t n;
	struct stat st;
	unsigned char hash[GIT_MAX_RAWSZ];

	if (!istate->initialized)
		return 0;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;

	if (fstat(fd, &st))
		goto out;

	if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
		goto out;

	n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
	if (n != the_hash_algo->rawsz)
		goto out;

	if (!hasheq(istate->oid.hash, hash))
		goto out;

	close(fd);
	return 1;

out:
	close(fd);
	return 0;
}

static int repo_verify_index(struct repository *repo)
{
	return verify_index_from(repo->index, repo->index_file);
}

static int has_racy_timestamp(struct index_state *istate)
{
	int entries = istate->cache_nr;
	int i;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (is_racy_timestamp(istate, ce))
			return 1;
	}
	return 0;
}

void repo_update_index_if_able(struct repository *repo,
			       struct lock_file *lockfile)
{
	if ((repo->index->cache_changed ||
	     has_racy_timestamp(repo->index)) &&
	    repo_verify_index(repo))
		write_locked_index(repo->index, lockfile, COMMIT_LOCK);
	else
		rollback_lock_file(lockfile);
}

static int record_eoie(void)
{
	int val;

	if (!git_config_get_bool("index.recordendofindexentries", &val))
		return val;

	/*
	 * As a convenience, the end of index entries extension
	 * used for threading is written by default if the user
	 * explicitly requested threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}

static int record_ieot(void)
{
	int val;

	if (!git_config_get_bool("index.recordoffsettable", &val))
		return val;

	/*
	 * As a convenience, the offset table used for threading is
	 * written by default if the user explicitly requested
	 * threaded index reads.
	 */
	return !git_config_get_index_threads(&val) && val != 1;
}
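/*
 * Both helpers above follow the same convention; in config terms
 * (values are illustrative):
 *
 *	git config index.threads true                    # EOIE and IEOT written
 *	git config index.threads 1                       # neither written
 *	git config index.recordEndOfIndexEntries false   # EOIE suppressed
 *	git config index.recordOffsetTable false         # IEOT suppressed
 *
 * i.e. an explicit index.record* setting wins, and otherwise the
 * extension is written whenever threaded reads were requested.
 */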
/*
 * On success, `tempfile` is closed.  If it is the temporary file
 * of a `struct lock_file`, we will therefore effectively perform
 * a `close_lock_file_gently()`.  Since that is an implementation
 * detail of lockfiles, callers of `do_write_index()` should not
 * rely on it.
 */
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
			  int strip_extensions)
{
	uint64_t start = getnanotime();
	int newfd = tempfile->fd;
	git_hash_ctx c, eoie_c;
	struct cache_header hdr;
	int i, err = 0, removed, extended, hdr_version;
	struct cache_entry **cache = istate->cache;
	int entries = istate->cache_nr;
	struct stat st;
	struct ondisk_cache_entry_extended ondisk;
	struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
	int drop_cache_tree = istate->drop_cache_tree;
	off_t offset;
	int ieot_entries = 1;
	struct index_entry_offset_table *ieot = NULL;
	int nr, nr_threads;

	for (i = removed = extended = 0; i < entries; i++) {
		if (cache[i]->ce_flags & CE_REMOVE)
			removed++;

		/* reduce extended entries if possible */
		cache[i]->ce_flags &= ~CE_EXTENDED;
		if (cache[i]->ce_flags & CE_EXTENDED_FLAGS) {
			extended++;
			cache[i]->ce_flags |= CE_EXTENDED;
		}
	}

	if (!istate->version) {
		istate->version = get_index_format_default();
		if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
			init_split_index(istate);
	}

	/* demote version 3 to version 2 when the latter suffices */
	if (istate->version == 3 || istate->version == 2)
		istate->version = extended ? 3 : 2;

	hdr_version = istate->version;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(hdr_version);
	hdr.hdr_entries = htonl(entries - removed);

	the_hash_algo->init_fn(&c);
	if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
		return -1;

	if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
		nr_threads = 1;

	if (nr_threads != 1 && record_ieot()) {
		int ieot_blocks, cpus;

		/*
		 * ensure default number of ieot blocks maps evenly to the
		 * default number of threads that will process them leaving
		 * room for the thread to load the index extensions.
		 */
		if (!nr_threads) {
			ieot_blocks = istate->cache_nr / THREAD_COST;
			cpus = online_cpus();
			if (ieot_blocks > cpus - 1)
				ieot_blocks = cpus - 1;
		} else {
			ieot_blocks = nr_threads;
			if (ieot_blocks > istate->cache_nr)
				ieot_blocks = istate->cache_nr;
		}

		/*
		 * no reason to write out the IEOT extension if we don't
		 * have enough blocks to utilize multi-threading
		 */
		if (ieot_blocks > 1) {
			ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
				       + (ieot_blocks * sizeof(struct index_entry_offset)));
			ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
		}
	}

	offset = lseek(newfd, 0, SEEK_CUR);
	if (offset < 0) {
		free(ieot);
		return -1;
	}
	offset += write_buffer_len;
	nr = 0;
	previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;
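	/*
	 * Note on the offsets recorded for the IEOT below: bytes may
	 * still be sitting in write_buffer, so the true file position
	 * of the next entry is lseek(newfd, 0, SEEK_CUR) plus
	 * write_buffer_len, as computed above; the same adjustment is
	 * repeated at every block boundary.
	 */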
	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (ce->ce_flags & CE_REMOVE)
			continue;
		if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
			ce_smudge_racily_clean_entry(istate, ce);
		if (is_null_oid(&ce->oid)) {
			static const char msg[] = "cache entry has null sha1: %s";
			static int allow = -1;

			if (allow < 0)
				allow = git_env_bool("GIT_ALLOW_NULL_SHA1", 0);
			if (allow)
				warning(msg, ce->name);
			else
				err = error(msg, ce->name);

			drop_cache_tree = 1;
		}
		if (ieot && i && (i % ieot_entries == 0)) {
			ieot->entries[ieot->nr].nr = nr;
			ieot->entries[ieot->nr].offset = offset;
			ieot->nr++;
			/*
			 * If we have a V4 index, set the first byte to an invalid
			 * character to ensure there is nothing common with the previous
			 * entry
			 */
			if (previous_name)
				previous_name->buf[0] = 0;
			nr = 0;
			offset = lseek(newfd, 0, SEEK_CUR);
			if (offset < 0) {
				free(ieot);
				return -1;
			}
			offset += write_buffer_len;
		}
		if (ce_write_entry(&c, newfd, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
			err = -1;

		if (err)
			break;
		nr++;
	}
	if (ieot && nr) {
		ieot->entries[ieot->nr].nr = nr;
		ieot->entries[ieot->nr].offset = offset;
		ieot->nr++;
	}
	strbuf_release(&previous_name_buf);

	if (err) {
		free(ieot);
		return err;
	}

	/* Write extension data here */
	offset = lseek(newfd, 0, SEEK_CUR);
	if (offset < 0) {
		free(ieot);
		return -1;
	}
	offset += write_buffer_len;
	the_hash_algo->init_fn(&eoie_c);

	/*
	 * Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
	 * can minimize the number of extensions we have to scan through to
	 * find it during load.  Write it out regardless of the
	 * strip_extensions parameter as we need it when loading the shared
	 * index.
	 */
	if (ieot) {
		struct strbuf sb = STRBUF_INIT;

		write_ieot_extension(&sb, ieot);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		free(ieot);
		if (err)
			return -1;
	}

	if (!strip_extensions && istate->split_index) {
		struct strbuf sb = STRBUF_INIT;

		err = write_link_extension(&sb, istate) < 0 ||
			write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_LINK,
					       sb.len) < 0 ||
			ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && !drop_cache_tree && istate->cache_tree) {
		struct strbuf sb = STRBUF_INIT;

		cache_tree_write(&sb, istate->cache_tree);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_TREE, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->resolve_undo) {
		struct strbuf sb = STRBUF_INIT;

		resolve_undo_write(&sb, istate->resolve_undo);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_RESOLVE_UNDO,
					     sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
	if (!strip_extensions && istate->untracked) {
		struct strbuf sb = STRBUF_INIT;

		write_untracked_extension(&sb, istate->untracked);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_UNTRACKED,
					     sb.len) < 0 ||
			ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}
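	/*
	 * All of the extension blocks above and below share one on-disk
	 * shape: a 4-byte signature, a 4-byte network-order payload
	 * length, then the payload, e.g.
	 *
	 *	"UNTR" <32-bit size> <untracked-cache data>
	 *
	 * which is exactly what write_index_ext_header() plus ce_write()
	 * emit.
	 */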
	if (!strip_extensions && istate->fsmonitor_last_update) {
		struct strbuf sb = STRBUF_INIT;

		write_fsmonitor_extension(&sb, istate);
		err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}

	/*
	 * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
	 * so that it can be found and processed before all the index entries are
	 * read.  Write it out regardless of the strip_extensions parameter as we need it
	 * when loading the shared index.
	 */
	if (offset && record_eoie()) {
		struct strbuf sb = STRBUF_INIT;

		write_eoie_extension(&sb, &eoie_c, offset);
		err = write_index_ext_header(&c, NULL, newfd, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0
			|| ce_write(&c, newfd, sb.buf, sb.len) < 0;
		strbuf_release(&sb);
		if (err)
			return -1;
	}

	if (ce_flush(&c, newfd, istate->oid.hash))
		return -1;
	if (close_tempfile_gently(tempfile)) {
		error(_("could not close '%s'"), tempfile->filename.buf);
		return -1;
	}
	if (stat(tempfile->filename.buf, &st))
		return -1;
	istate->timestamp.sec = (unsigned int)st.st_mtime;
	istate->timestamp.nsec = ST_MTIME_NSEC(st);
	trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
	return 0;
}

void set_alternate_index_output(const char *name)
{
	alternate_index_output = name;
}

static int commit_locked_index(struct lock_file *lk)
{
	if (alternate_index_output)
		return commit_lock_file_to(lk, alternate_index_output);
	else
		return commit_lock_file(lk);
}

static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
				 unsigned flags)
{
	int ret = do_write_index(istate, lock->tempfile, 0);
	if (ret)
		return ret;
	if (flags & COMMIT_LOCK)
		return commit_locked_index(lock);
	return close_lock_file_gently(lock);
}

static int write_split_index(struct index_state *istate,
			     struct lock_file *lock,
			     unsigned flags)
{
	int ret;
	prepare_to_write_split_index(istate);
	ret = do_write_locked_index(istate, lock, flags);
	finish_writing_split_index(istate);
	return ret;
}

static const char *shared_index_expire = "2.weeks.ago";

static unsigned long get_shared_index_expire_date(void)
{
	static unsigned long shared_index_expire_date;
	static int shared_index_expire_date_prepared;

	if (!shared_index_expire_date_prepared) {
		git_config_get_expiry("splitindex.sharedindexexpire",
				      &shared_index_expire);
		shared_index_expire_date = approxidate(shared_index_expire);
		shared_index_expire_date_prepared = 1;
	}

	return shared_index_expire_date;
}

static int should_delete_shared_index(const char *shared_index_path)
{
	struct stat st;
	unsigned long expiration;

	/* Check timestamp */
	expiration = get_shared_index_expire_date();
	if (!expiration)
		return 0;
	if (stat(shared_index_path, &st))
		return error_errno(_("could not stat '%s'"), shared_index_path);
	if (st.st_mtime > expiration)
		return 0;

	return 1;
}
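/*
 * The expiry window tested above is configurable; illustrative
 * settings:
 *
 *	git config splitIndex.sharedIndexExpire "2.weeks.ago"  # default
 *	git config splitIndex.sharedIndexExpire now            # drop unused at once
 *	git config splitIndex.sharedIndexExpire never          # keep forever
 */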
static int clean_shared_index_files(const char *current_hex)
{
	struct dirent *de;
	DIR *dir = opendir(get_git_dir());

	if (!dir)
		return error_errno(_("unable to open git dir: %s"), get_git_dir());

	while ((de = readdir(dir)) != NULL) {
		const char *sha1_hex;
		const char *shared_index_path;
		if (!skip_prefix(de->d_name, "sharedindex.", &sha1_hex))
			continue;
		if (!strcmp(sha1_hex, current_hex))
			continue;
		shared_index_path = git_path("%s", de->d_name);
		if (should_delete_shared_index(shared_index_path) > 0 &&
		    unlink(shared_index_path))
			warning_errno(_("unable to unlink: %s"), shared_index_path);
	}
	closedir(dir);

	return 0;
}

static int write_shared_index(struct index_state *istate,
			      struct tempfile **temp)
{
	struct split_index *si = istate->split_index;
	int ret;

	move_cache_to_base_index(istate);
	ret = do_write_index(si->base, *temp, 1);
	if (ret)
		return ret;
	ret = adjust_shared_perm(get_tempfile_path(*temp));
	if (ret) {
		error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
		return ret;
	}
	ret = rename_tempfile(temp,
			      git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
	if (!ret) {
		oidcpy(&si->base_oid, &si->base->oid);
		clean_shared_index_files(oid_to_hex(&si->base->oid));
	}

	return ret;
}

static const int default_max_percent_split_change = 20;

static int too_many_not_shared_entries(struct index_state *istate)
{
	int i, not_shared = 0;
	int max_split = git_config_get_max_percent_split_change();

	switch (max_split) {
	case -1:
		/* not or badly configured: use the default value */
		max_split = default_max_percent_split_change;
		break;
	case 0:
		return 1; /* 0% means always write a new shared index */
	case 100:
		return 0; /* 100% means never write a new shared index */
	default:
		break; /* just use the configured value */
	}

	/* Count not shared entries */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		if (!ce->index)
			not_shared++;
	}

	return (int64_t)istate->cache_nr * max_split < (int64_t)not_shared * 100;
}
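/*
 * Worked example of the overflow-safe comparison above: with
 * cache_nr == 1000 and the default max_split of 20%, the expression
 * 1000 * 20 < not_shared * 100 first becomes true at not_shared == 201,
 * i.e. a new shared index is requested once more than 20% of the
 * entries live outside the shared index.
 */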
int write_locked_index(struct index_state *istate, struct lock_file *lock,
		       unsigned flags)
{
	int new_shared_index, ret;
	struct split_index *si = istate->split_index;

	if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
		cache_tree_verify(the_repository, istate);

	if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
		if (flags & COMMIT_LOCK)
			rollback_lock_file(lock);
		return 0;
	}

	if (istate->fsmonitor_last_update)
		fill_fsmonitor_bitmap(istate);

	if (!si || alternate_index_output ||
	    (istate->cache_changed & ~EXTMASK)) {
		if (si)
			oidclr(&si->base_oid);
		ret = do_write_locked_index(istate, lock, flags);
		goto out;
	}

	if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
		int v = si->base_oid.hash[0];
		if ((v & 15) < 6)
			istate->cache_changed |= SPLIT_INDEX_ORDERED;
	}
	if (too_many_not_shared_entries(istate))
		istate->cache_changed |= SPLIT_INDEX_ORDERED;

	new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;

	if (new_shared_index) {
		struct tempfile *temp;
		int saved_errno;

		/* Same initial permissions as the main .git/index file */
		temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
		if (!temp) {
			oidclr(&si->base_oid);
			ret = do_write_locked_index(istate, lock, flags);
			goto out;
		}
		ret = write_shared_index(istate, &temp);

		saved_errno = errno;
		if (is_tempfile_active(temp))
			delete_tempfile(&temp);
		errno = saved_errno;

		if (ret)
			goto out;
	}

	ret = write_split_index(istate, lock, flags);

	/* Freshen the shared index only if the split-index was written */
	if (!ret && !new_shared_index) {
		const char *shared_index = git_path("sharedindex.%s",
						    oid_to_hex(&si->base_oid));
		freshen_shared_index(shared_index, 1);
	}

out:
	if (flags & COMMIT_LOCK)
		rollback_lock_file(lock);
	return ret;
}

/*
 * Read the index file that is potentially unmerged into given
 * index_state, dropping any unmerged entries to stage #0 (potentially
 * resulting in a path appearing as both a file and a directory in the
 * index; the caller is responsible for clearing out the extra entries
 * before writing the index to a tree).  Returns true if the index is
 * unmerged.  Callers who want to refuse to work from an unmerged
 * state can call this and check its return value, instead of calling
 * read_cache().
 */
int repo_read_index_unmerged(struct repository *repo)
{
	struct index_state *istate;
	int i;
	int unmerged = 0;

	repo_read_index(repo);
	istate = repo->index;
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];
		struct cache_entry *new_ce;
		int len;

		if (!ce_stage(ce))
			continue;
		unmerged = 1;
		len = ce_namelen(ce);
		new_ce = make_empty_cache_entry(istate, len);
		memcpy(new_ce->name, ce->name, len);
		new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
		new_ce->ce_namelen = len;
		new_ce->ce_mode = ce->ce_mode;
		if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
			return error(_("%s: cannot drop to stage #0"),
				     new_ce->name);
	}
	return unmerged;
}

/*
 * Returns 1 if the path is an "other" path with respect to
 * the index; that is, the path is not mentioned in the index at all,
 * either as a file, a directory with some files in the index,
 * or as an unmerged entry.
 *
 * We helpfully remove a trailing "/" from directories so that
 * the output of read_directory can be used as-is.
 */
int index_name_is_other(const struct index_state *istate, const char *name,
			int namelen)
{
	int pos;
	if (namelen && name[namelen - 1] == '/')
		namelen--;
	pos = index_name_pos(istate, name, namelen);
	if (0 <= pos)
		return 0;	/* exact match */
	pos = -pos - 1;
	if (pos < istate->cache_nr) {
		struct cache_entry *ce = istate->cache[pos];
		if (ce_namelen(ce) == namelen &&
		    !memcmp(ce->name, name, namelen))
			return 0; /* Yup, this one exists unmerged */
	}
	return 1;
}

void *read_blob_data_from_index(const struct index_state *istate,
				const char *path, unsigned long *size)
{
	int pos, len;
	unsigned long sz;
	enum object_type type;
	void *data;

	len = strlen(path);
	pos = index_name_pos(istate, path, len);
	if (pos < 0) {
		/*
		 * We might be in the middle of a merge, in which
		 * case we would read stage #2 (ours).
		 */
		int i;
		for (i = -pos - 1;
		     (pos < 0 && i < istate->cache_nr &&
		      !strcmp(istate->cache[i]->name, path));
		     i++)
			if (ce_stage(istate->cache[i]) == 2)
				pos = i;
	}
	if (pos < 0)
		return NULL;
	data = read_object_file(&istate->cache[pos]->oid, &type, &sz);
	if (!data || type != OBJ_BLOB) {
		free(data);
		return NULL;
	}
	if (size)
		*size = sz;
	return data;
}

void stat_validity_clear(struct stat_validity *sv)
{
	FREE_AND_NULL(sv->sd);
}

int stat_validity_check(struct stat_validity *sv, const char *path)
{
	struct stat st;

	if (stat(path, &st) < 0)
		return sv->sd == NULL;
	if (!sv->sd)
		return 0;
	return S_ISREG(st.st_mode) && !match_stat_data(sv->sd, &st);
}
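/*
 * Typical stat_validity usage, sketched with a hypothetical caller
 * that caches the contents of some file:
 *
 *	if (!stat_validity_check(&sv, path)) {
 *		int fd = open(path, O_RDONLY);
 *		...re-read and re-parse the file...
 *		stat_validity_update(&sv, fd);
 *		close(fd);
 *	}
 *
 * stat_validity_check() answers "is my cached copy still good?", and
 * stat_validity_update() (below) re-records the stat data right after
 * the file has been re-read.
 */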
void stat_validity_update(struct stat_validity *sv, int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode))
		stat_validity_clear(sv);
	else {
		if (!sv->sd)
			sv->sd = xcalloc(1, sizeof(struct stat_data));
		fill_stat_data(sv->sd, &st);
	}
}

void move_index_extensions(struct index_state *dst, struct index_state *src)
{
	dst->untracked = src->untracked;
	src->untracked = NULL;
	dst->cache_tree = src->cache_tree;
	src->cache_tree = NULL;
}

struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
				    struct index_state *istate)
{
	unsigned int size = ce_size(ce);
	int mem_pool_allocated;
	struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
	mem_pool_allocated = new_entry->mem_pool_allocated;

	memcpy(new_entry, ce, size);
	new_entry->mem_pool_allocated = mem_pool_allocated;
	return new_entry;
}

void discard_cache_entry(struct cache_entry *ce)
{
	if (ce && should_validate_cache_entries())
		memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));

	if (ce && ce->mem_pool_allocated)
		return;

	free(ce);
}

int should_validate_cache_entries(void)
{
	static int validate_index_cache_entries = -1;

	if (validate_index_cache_entries < 0) {
		if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
			validate_index_cache_entries = 1;
		else
			validate_index_cache_entries = 0;
	}

	return validate_index_cache_entries;
}

#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */
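/*
 * For orientation, the on-disk layout that the EOIE lookup below
 * navigates (sizes assume SHA-1; not to scale):
 *
 *	+---------------------------+ 0
 *	| header (12 bytes)         |
 *	+---------------------------+
 *	| cache entries ...         |
 *	+---------------------------+ <- the "offset" stored in EOIE
 *	| other extensions ...      |
 *	+---------------------------+ <- EOIE itself, always last
 *	| trailing hash (20 bytes)  |
 *	+---------------------------+ mmap_size
 */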
static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
{
	/*
	 * The end of index entries (EOIE) extension is guaranteed to be last
	 * so that it can be found by scanning backwards from the EOF.
	 *
	 * "EOIE"
	 * <4-byte length>
	 * <4-byte offset>
	 * <20-byte hash>
	 */
	const char *index, *eoie;
	uint32_t extsize;
	size_t offset, src_offset;
	unsigned char hash[GIT_MAX_RAWSZ];
	git_hash_ctx c;

	/* ensure we have an index big enough to contain an EOIE extension */
	if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
		return 0;

	/* validate the extension signature */
	index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
	if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
		return 0;
	index += sizeof(uint32_t);

	/* validate the extension size */
	extsize = get_be32(index);
	if (extsize != EOIE_SIZE)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * Validate the offset we're going to look for the first extension
	 * signature is after the index header and before the eoie extension.
	 */
	offset = get_be32(index);
	if (mmap + offset < mmap + sizeof(struct cache_header))
		return 0;
	if (mmap + offset >= eoie)
		return 0;
	index += sizeof(uint32_t);

	/*
	 * The hash is computed over extension types and their sizes (but not
	 * their contents).  E.g. if we have "TREE" extension that is N-bytes
	 * long, "REUC" extension that is M-bytes long, followed by "EOIE",
	 * then the hash would be:
	 *
	 * SHA-1("TREE" + <binary representation of N> +
	 *	 "REUC" + <binary representation of M>)
	 */
	src_offset = offset;
	the_hash_algo->init_fn(&c);
	while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
		/* After an array of active_nr index entries,
		 * there can be arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
		uint32_t extsize;
		memcpy(&extsize, mmap + src_offset + 4, 4);
		extsize = ntohl(extsize);

		/* verify the extension size isn't so large it will wrap around */
		if (src_offset + 8 + extsize < src_offset)
			return 0;

		the_hash_algo->update_fn(&c, mmap + src_offset, 8);

		src_offset += 8;
		src_offset += extsize;
	}
	the_hash_algo->final_fn(hash, &c);
	if (!hasheq(hash, (const unsigned char *)index))
		return 0;

	/* Validate that the extension offsets returned us back to the eoie extension. */
	if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
		return 0;

	return offset;
}

static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
{
	uint32_t buffer;
	unsigned char hash[GIT_MAX_RAWSZ];

	/* offset */
	put_be32(&buffer, offset);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* hash */
	the_hash_algo->final_fn(hash, eoie_context);
	strbuf_add(sb, hash, the_hash_algo->rawsz);
}

#define IEOT_VERSION	(1)

static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
{
	const char *index = NULL;
	uint32_t extsize, ext_version;
	struct index_entry_offset_table *ieot;
	int i, nr;

	/* find the IEOT extension */
	if (!offset)
		return NULL;
	while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
		extsize = get_be32(mmap + offset + 4);
		if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
			index = mmap + offset + 4 + 4;
			break;
		}
		offset += 8;
		offset += extsize;
	}
	if (!index)
		return NULL;

	/* validate the version is IEOT_VERSION */
	ext_version = get_be32(index);
	if (ext_version != IEOT_VERSION) {
		error("invalid IEOT version %d", ext_version);
		return NULL;
	}
	index += sizeof(uint32_t);

	/* extension size - version bytes / bytes per entry */
	nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
	if (!nr) {
		error("invalid number of IEOT entries %d", nr);
		return NULL;
	}
	ieot = xmalloc(sizeof(struct index_entry_offset_table)
		       + (nr * sizeof(struct index_entry_offset)));
	ieot->nr = nr;
	for (i = 0; i < nr; i++) {
		ieot->entries[i].offset = get_be32(index);
		index += sizeof(uint32_t);
		ieot->entries[i].nr = get_be32(index);
		index += sizeof(uint32_t);
	}

	return ieot;
}
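/*
 * A concrete (hypothetical) IEOT payload for an index whose 2,500
 * entries are split into two blocks, as parsed above and written
 * below; all fields are 32-bit network byte order:
 *
 *	00000001            version 1
 *	0000000c 000004e2   block 0: offset 12 (0xc), 1250 entries
 *	0002a000 000004e2   block 1: offset 0x2a000,  1250 entries
 *
 * Each reader thread seeks straight to its block's offset and parses
 * exactly "nr" entries, never touching another thread's range.
 */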
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
{
	uint32_t buffer;
	int i;

	/* version */
	put_be32(&buffer, IEOT_VERSION);
	strbuf_add(sb, &buffer, sizeof(uint32_t));

	/* ieot */
	for (i = 0; i < ieot->nr; i++) {

		/* offset */
		put_be32(&buffer, ieot->entries[i].offset);
		strbuf_add(sb, &buffer, sizeof(uint32_t));

		/* count */
		put_be32(&buffer, ieot->entries[i].nr);
		strbuf_add(sb, &buffer, sizeof(uint32_t));
	}
}