*/
#include "builtin.h"
+#include "repository.h"
#include "config.h"
#include "tempfile.h"
#include "lockfile.h"
#include "sigchain.h"
#include "argv-array.h"
#include "commit.h"
+#include "commit-graph.h"
#include "packfile.h"
+#include "object-store.h"
+#include "pack.h"
+#include "pack-objects.h"
+#include "blob.h"
+#include "tree.h"
#define FAILED_RUN "failed to run %s"
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
static int gc_auto_pack_limit = 50;
+static int gc_write_commit_graph;
static int detach_auto = 1;
static timestamp_t gc_log_expire_time;
static const char *gc_log_expire = "1.day.ago";
static const char *prune_expire = "2.weeks.ago";
static const char *prune_worktrees_expire = "3.months.ago";
+static unsigned long big_pack_threshold;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static struct argv_array pack_refs_cmd = ARGV_ARRAY_INIT;
static struct argv_array reflog = ARGV_ARRAY_INIT;
git_config_get_int("gc.aggressivedepth", &aggressive_depth);
git_config_get_int("gc.auto", &gc_auto_threshold);
git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
+ git_config_get_bool("gc.writecommitgraph", &gc_write_commit_graph);
git_config_get_bool("gc.autodetach", &detach_auto);
git_config_get_expiry("gc.pruneexpire", &prune_expire);
git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
git_config_get_expiry("gc.logexpiry", &gc_log_expire);
+ git_config_get_ulong("gc.bigpackthreshold", &big_pack_threshold);
+ git_config_get_ulong("pack.deltacachesize", &max_delta_cache_size);
+
git_config(git_default_config, NULL);
}
return needed;
}
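+/*
+ * With a non-zero "limit", append the name of every local pack at
+ * least that large to "packs" and return NULL.  With a zero limit,
+ * append only the largest local pack and return it.
+ */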
+static struct packed_git *find_base_packs(struct string_list *packs,
+ unsigned long limit)
+{
+ struct packed_git *p, *base = NULL;
+
+ for (p = get_all_packs(the_repository); p; p = p->next) {
+ if (!p->pack_local)
+ continue;
+ if (limit) {
+ if (p->pack_size >= limit)
+ string_list_append(packs, p->pack_name);
+ } else if (!base || base->pack_size < p->pack_size) {
+ base = p;
+ }
+ }
+
+ if (base)
+ string_list_append(packs, base->pack_name);
+
+ return base;
+}
+
static int too_many_packs(void)
{
	struct packed_git *p;
	int cnt;
if (gc_auto_pack_limit <= 0)
return 0;
- prepare_packed_git();
- for (cnt = 0, p = packed_git; p; p = p->next) {
+ for (cnt = 0, p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
 		if (p->pack_keep)
 			continue;
 		cnt++;
 	}
 	return gc_auto_pack_limit < cnt;
 }
-static void add_repack_all_option(void)
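+/*
+ * Best-effort estimate of the physical memory available on this
+ * system; returns 0 when it cannot be determined.
+ */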
+static uint64_t total_ram(void)
+{
+#if defined(HAVE_SYSINFO)
+ struct sysinfo si;
+
+ if (!sysinfo(&si))
+ return si.totalram;
+#elif defined(HAVE_BSD_SYSCTL) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM))
+ int64_t physical_memory;
+ int mib[2];
+ size_t length;
+
+ mib[0] = CTL_HW;
+# if defined(HW_MEMSIZE)
+ mib[1] = HW_MEMSIZE;
+# else
+ mib[1] = HW_PHYSMEM;
+# endif
+ length = sizeof(int64_t);
+ if (!sysctl(mib, 2, &physical_memory, &length, NULL, 0))
+ return physical_memory;
+#elif defined(GIT_WINDOWS_NATIVE)
+ MEMORYSTATUSEX memInfo;
+
+ memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+ if (GlobalMemoryStatusEx(&memInfo))
+ return memInfo.ullTotalPhys;
+#endif
+ return 0;
+}
+
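+/*
+ * Rough estimate of the memory needed to repack everything into a
+ * single pack: OS file cache for the base pack plus the heap that
+ * pack-objects is expected to allocate.
+ */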
+static uint64_t estimate_repack_memory(struct packed_git *pack)
+{
+ unsigned long nr_objects = approximate_object_count();
+ size_t os_cache, heap;
+
+ if (!pack || !nr_objects)
+ return 0;
+
+ /*
+ * First we have to scan through at least one pack.
+ * Assume enough room in OS file cache to keep the entire pack
+ * or we may accidentally evict data of other processes from
+ * the cache.
+ */
+ os_cache = pack->pack_size + pack->index_size;
+	/* then pack-objects needs lots more for bookkeeping */
+ heap = sizeof(struct object_entry) * nr_objects;
+ /*
+ * internal rev-list --all --objects takes up some memory too,
+ * let's say half of it is for blobs
+ */
+ heap += sizeof(struct blob) * nr_objects / 2;
+ /*
+ * and the other half is for trees (commits and tags are
+ * usually insignificant)
+ */
+ heap += sizeof(struct tree) * nr_objects / 2;
+ /* and then obj_hash[], underestimated in fact */
+ heap += sizeof(struct object *) * nr_objects;
+ /* revindex is used also */
+ heap += sizeof(struct revindex_entry) * nr_objects;
+ /*
+ * read_sha1_file() (either at delta calculation phase, or
+ * writing phase) also fills up the delta base cache
+ */
+ heap += delta_base_cache_limit;
+ /* and of course pack-objects has its own delta cache */
+ heap += max_delta_cache_size;
+
+ return os_cache + heap;
+}
+
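+/* string_list callback: ask pack-objects not to repack this pack */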
+static int keep_one_pack(struct string_list_item *item, void *data)
+{
+ argv_array_pushf(&repack, "--keep-pack=%s", basename(item->string));
+ return 0;
+}
+
+static void add_repack_all_option(struct string_list *keep_pack)
{
 	if (prune_expire && !strcmp(prune_expire, "now"))
 		argv_array_push(&repack, "-a");
 	else {
 		argv_array_push(&repack, "-A");
 		if (prune_expire)
 			argv_array_pushf(&repack, "--unpack-unreachable=%s", prune_expire);
 	}
+
+ if (keep_pack)
+ for_each_string_list(keep_pack, keep_one_pack, NULL);
}
static void add_repack_incremental_option(void)
* we run "repack -A -d -l". Otherwise we tell the caller
* there is no need.
*/
- if (too_many_packs())
- add_repack_all_option();
- else if (too_many_loose_objects())
+ if (too_many_packs()) {
+ struct string_list keep_pack = STRING_LIST_INIT_NODUP;
+
+ if (big_pack_threshold) {
+ find_base_packs(&keep_pack, big_pack_threshold);
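+			/*
+			 * If the packs above the threshold alone are
+			 * already at or over gc.autoPackLimit, keeping
+			 * them all cannot bring us under the limit;
+			 * fall back to keeping only the largest pack.
+			 */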
+ if (keep_pack.nr >= gc_auto_pack_limit) {
+ big_pack_threshold = 0;
+ string_list_clear(&keep_pack, 0);
+ find_base_packs(&keep_pack, 0);
+ }
+ } else {
+ struct packed_git *p = find_base_packs(&keep_pack, 0);
+ uint64_t mem_have, mem_want;
+
+ mem_have = total_ram();
+ mem_want = estimate_repack_memory(p);
+
+ /*
+ * Only allow 1/2 of memory for pack-objects, leave
+ * the rest for the OS and other processes in the
+ * system.
+ */
+ if (!mem_have || mem_want < mem_have / 2)
+ string_list_clear(&keep_pack, 0);
+ }
+
+ add_repack_all_option(&keep_pack);
+ string_list_clear(&keep_pack, 0);
+ } else if (too_many_loose_objects())
add_repack_incremental_option();
else
return 0;
/* return NULL on success, else hostname running the gc */
static const char *lock_repo_for_gc(int force, pid_t* ret_pid)
{
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
char my_host[HOST_NAME_MAX + 1];
struct strbuf sb = STRBUF_INIT;
struct stat st;
return NULL;
}
+/*
+ * Returns 0 if there was no previous error and gc can proceed, 1 if
+ * gc should not proceed due to an error in the last run. Prints a
+ * message and returns -1 if an error occurred while reading gc.log
+ */
static int report_last_gc_error(void)
{
struct strbuf sb = STRBUF_INIT;
int ret = 0;
+ ssize_t len;
struct stat st;
char *gc_log_path = git_pathdup("gc.log");
if (errno == ENOENT)
goto done;
- ret = error_errno(_("Can't stat %s"), gc_log_path);
+ ret = error_errno(_("cannot stat '%s'"), gc_log_path);
goto done;
}
if (st.st_mtime < gc_log_expire_time)
goto done;
- ret = strbuf_read_file(&sb, gc_log_path, 0);
- if (ret > 0)
- ret = error(_("The last gc run reported the following. "
+ len = strbuf_read_file(&sb, gc_log_path, 0);
+ if (len < 0)
+ ret = error_errno(_("cannot read '%s'"), gc_log_path);
+ else if (len > 0) {
+ /*
+ * A previous gc failed. Report the error, and don't
+ * bother with an automatic gc run since it is likely
+ * to fail in the same way.
+ */
+ warning(_("The last gc run reported the following. "
"Please correct the root cause\n"
"and remove %s.\n"
"Automatic cleanup will not be performed "
"until the file is removed.\n\n"
"%s"),
gc_log_path, sb.buf);
+ ret = 1;
+ }
strbuf_release(&sb);
done:
free(gc_log_path);
return ret;
}
-static int gc_before_repack(void)
+static void gc_before_repack(void)
{
if (pack_refs && run_command_v_opt(pack_refs_cmd.argv, RUN_GIT_CMD))
- return error(FAILED_RUN, pack_refs_cmd.argv[0]);
+ die(FAILED_RUN, pack_refs_cmd.argv[0]);
if (prune_reflogs && run_command_v_opt(reflog.argv, RUN_GIT_CMD))
- return error(FAILED_RUN, reflog.argv[0]);
+ die(FAILED_RUN, reflog.argv[0]);
pack_refs = 0;
prune_reflogs = 0;
- return 0;
}
int cmd_gc(int argc, const char **argv, const char *prefix)
const char *name;
pid_t pid;
int daemonized = 0;
+ int keep_base_pack = -1;
+ timestamp_t dummy;
struct option builtin_gc_options[] = {
OPT__QUIET(&quiet, N_("suppress progress reporting")),
OPT_BOOL_F(0, "force", &force,
N_("force running gc even if there may be another gc running"),
PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL(0, "keep-largest-pack", &keep_base_pack,
+ N_("repack all other packs except the largest pack")),
OPT_END()
};
/* default expiry time, overwritten in gc_config */
gc_config();
if (parse_expiry_date(gc_log_expire, &gc_log_expire_time))
- die(_("Failed to parse gc.logexpiry value %s"), gc_log_expire);
+ die(_("failed to parse gc.logexpiry value %s"), gc_log_expire);
if (pack_refs < 0)
pack_refs = !is_bare_repository();
if (argc > 0)
usage_with_options(builtin_gc_usage, builtin_gc_options);
+ if (prune_expire && parse_expiry_date(prune_expire, &dummy))
+ die(_("failed to parse prune expiry value %s"), prune_expire);
+
if (aggressive) {
argv_array_push(&repack, "-f");
if (aggressive_depth > 0)
fprintf(stderr, _("See \"git help gc\" for manual housekeeping.\n"));
}
if (detach_auto) {
- if (report_last_gc_error())
- return -1;
+ int ret = report_last_gc_error();
+ if (ret < 0)
+			/* an I/O error occurred, already reported */
+ exit(128);
+ if (ret == 1)
+ /* Last gc --auto failed. Skip this one. */
+ return 0;
if (lock_repo_for_gc(force, &pid))
return 0;
- if (gc_before_repack())
- return -1;
+ gc_before_repack(); /* dies on failure */
delete_tempfile(&pidfile);
/*
*/
daemonized = !daemonize();
}
- } else
- add_repack_all_option();
+ } else {
+ struct string_list keep_pack = STRING_LIST_INIT_NODUP;
+
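+		/*
+		 * An explicit --[no-]keep-largest-pack takes
+		 * precedence over gc.bigPackThreshold.
+		 */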
+ if (keep_base_pack != -1) {
+ if (keep_base_pack)
+ find_base_packs(&keep_pack, 0);
+ } else if (big_pack_threshold) {
+ find_base_packs(&keep_pack, big_pack_threshold);
+ }
+
+ add_repack_all_option(&keep_pack);
+ string_list_clear(&keep_pack, 0);
+ }
name = lock_repo_for_gc(force, &pid);
if (name) {
atexit(process_log_file_at_exit);
}
- if (gc_before_repack())
- return -1;
+ gc_before_repack();
if (!repository_format_precious_objects) {
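+		/*
+		 * Close our pack files before spawning repack, which
+		 * may delete or replace them.
+		 */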
+ close_all_packs(the_repository->objects);
if (run_command_v_opt(repack.argv, RUN_GIT_CMD))
- return error(FAILED_RUN, repack.argv[0]);
+ die(FAILED_RUN, repack.argv[0]);
if (prune_expire) {
argv_array_push(&prune, prune_expire);
argv_array_push(&prune,
"--exclude-promisor-objects");
if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
- return error(FAILED_RUN, prune.argv[0]);
+ die(FAILED_RUN, prune.argv[0]);
}
}
if (prune_worktrees_expire) {
argv_array_push(&prune_worktrees, prune_worktrees_expire);
if (run_command_v_opt(prune_worktrees.argv, RUN_GIT_CMD))
- return error(FAILED_RUN, prune_worktrees.argv[0]);
+ die(FAILED_RUN, prune_worktrees.argv[0]);
}
if (run_command_v_opt(rerere.argv, RUN_GIT_CMD))
- return error(FAILED_RUN, rerere.argv[0]);
+ die(FAILED_RUN, rerere.argv[0]);
report_garbage = report_pack_garbage;
- reprepare_packed_git();
+ reprepare_packed_git(the_repository);
if (pack_garbage.nr > 0)
clean_pack_garbage();
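+	/*
+	 * Now that repacking and pruning are done, write a
+	 * commit-graph covering everything reachable from the refs.
+	 */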
+ if (gc_write_commit_graph)
+ write_commit_graph_reachable(get_object_directory(), 0,
+ !quiet && !daemonized);
+
if (auto_gc && too_many_loose_objects())
warning(_("There are too many unreachable loose objects; "
"run 'git prune' to remove them."));