stage2: namespace cache dir with C source path
This is not strictly necessary but it increases the likelihood of cache hits because foo.c and bar.c now will have different cache directories and can be updated independently without clobbering each other's cache data.
parent
0379d7b431
commit
060c91b97f
|
@ -427,6 +427,13 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
|
|||
// modified between incremental updates.
|
||||
var hash = cache.hash;
|
||||
|
||||
// Here we put the root source file path name, but *not* with addFile. We want the
|
||||
// hash to be the same regardless of the contents of the source file, because
|
||||
// incremental compilation will handle it, but we do want to namespace different
|
||||
// source file names because they are likely different compilations and therefore this
|
||||
// would be likely to cause cache hits.
|
||||
hash.addBytes(root_pkg.root_src_path);
|
||||
hash.addOptionalBytes(root_pkg.root_src_directory.path);
|
||||
hash.add(valgrind);
|
||||
hash.add(single_threaded);
|
||||
switch (options.target.os.getVersionRange()) {
|
||||
|
@ -512,7 +519,17 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
|
|||
const bin_directory = emit_bin.directory orelse blk: {
|
||||
if (module) |zm| break :blk zm.zig_cache_artifact_directory;
|
||||
|
||||
const digest = cache.hash.peek();
|
||||
// We could use the cache hash as is no problem, however, we increase
|
||||
// the likelihood of cache hits by adding the first C source file
|
||||
// path name (not contents) to the hash. This way if the user is compiling
|
||||
// foo.c and bar.c as separate compilations, they get different cache
|
||||
// directories.
|
||||
var hash = cache.hash;
|
||||
if (options.c_source_files.len >= 1) {
|
||||
hash.addBytes(options.c_source_files[0].src_path);
|
||||
}
|
||||
|
||||
const digest = hash.final();
|
||||
const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
|
||||
var artifact_dir = try options.zig_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
|
||||
owned_link_dir = artifact_dir;
|
||||
|
|
|
@ -1288,15 +1288,15 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
|
|||
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
|
||||
log.debug("ELF LLD new_digest={} readlink error: {}", .{digest, @errorName(err)});
|
||||
// Handle this as a cache miss.
|
||||
mem.set(u8, &prev_digest_buf, 0);
|
||||
break :blk &prev_digest_buf;
|
||||
break :blk prev_digest_buf[0..0];
|
||||
};
|
||||
log.debug("ELF LLD prev_digest={} new_digest={}", .{prev_digest, digest});
|
||||
if (mem.eql(u8, prev_digest, &digest)) {
|
||||
log.debug("ELF LLD digest={} match - skipping invocation", .{digest});
|
||||
// Hot diggity dog! The output binary is already there.
|
||||
self.base.lock = ch.toOwnedLock();
|
||||
return;
|
||||
}
|
||||
log.debug("ELF LLD prev_digest={} new_digest={}", .{prev_digest, digest});
|
||||
|
||||
// We are about to change the output file to be different, so we invalidate the build hash now.
|
||||
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
|
||||
|
|
Loading…
Reference in New Issue