Refactor the FileSystem API to allow for different hash functions.

Refactor the FileSystem class to carry the hash function as an
instance field. This allows each FileSystem instance to use its own
hash function and removes technical debt: previously this was only
approximated by a hack, a static method that set the hash function
for all FileSystem instances at once.

The FileSystem's default hash function remains MD5.
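
For illustration, a minimal sketch of the new shape. Only
FileSystem.HashFunction and the MD5/SHA256 values appear in this
change; the constructor and accessor names below are assumptions,
not necessarily the real API:

  // Sketch only: constructor/accessor names are illustrative.
  public abstract class FileSystem {
    /** Hash functions a FileSystem can use for file digests. */
    public enum HashFunction { MD5, SHA256 }  // other values elided

    // Per-instance state, replacing the old process-wide static
    // setter that changed the hash function for every FileSystem.
    private final HashFunction digestFunction;

    protected FileSystem() {
      this(HashFunction.MD5);  // the default remains MD5
    }

    protected FileSystem(HashFunction digestFunction) {
      this.digestFunction = digestFunction;
    }

    public HashFunction getDigestFunction() {
      return digestFunction;
    }
  }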

RELNOTES: None
PiperOrigin-RevId: 177479772
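
On the test side, digest computation now goes through a DigestUtil
constructed with an explicit hash function, rather than the old
static Digests helpers. The diff below boils down to this pattern
(blob and CHUNK_SIZE as declared in the test):

  import com.google.devtools.build.lib.vfs.FileSystem.HashFunction;

  DigestUtil digestUtil = new DigestUtil(HashFunction.SHA256);
  // Chunker now receives the DigestUtil so its digests use the
  // configured hash function.
  Chunker chunker = new Chunker(blob, CHUNK_SIZE, digestUtil);
  // Action keys come from the DigestUtil instance instead of
  // Digests.unsafeActionKeyFromDigest(...).
  Context ctx = TracingMetadataUtils.contextWithMetadata(
      "build-req-id", "command-id", digestUtil.asActionKey(chunker.digest()));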
diff --git a/src/test/java/com/google/devtools/build/lib/remote/ByteStreamUploaderTest.java b/src/test/java/com/google/devtools/build/lib/remote/ByteStreamUploaderTest.java
index e809b74..0b70f36 100644
--- a/src/test/java/com/google/devtools/build/lib/remote/ByteStreamUploaderTest.java
+++ b/src/test/java/com/google/devtools/build/lib/remote/ByteStreamUploaderTest.java
@@ -26,6 +26,7 @@
 import com.google.common.util.concurrent.ListeningScheduledExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import com.google.devtools.build.lib.analysis.BlazeVersionInfo;
+import com.google.devtools.build.lib.vfs.FileSystem.HashFunction;
 import com.google.devtools.remoteexecution.v1test.Digest;
 import com.google.devtools.remoteexecution.v1test.RequestMetadata;
 import com.google.protobuf.ByteString;
@@ -75,6 +76,8 @@
 @RunWith(JUnit4.class)
 public class ByteStreamUploaderTest {
 
+  private static final DigestUtil DIGEST_UTIL = new DigestUtil(HashFunction.SHA256);
+
   private static final int CHUNK_SIZE = 10;
   private static final String INSTANCE_NAME = "foo";
 
@@ -99,7 +102,7 @@
     channel = InProcessChannelBuilder.forName(serverName).build();
     withEmptyMetadata =
         TracingMetadataUtils.contextWithMetadata(
-            "none", "none", Digests.unsafeActionKeyFromDigest(Digest.getDefaultInstance()));
+            "none", "none", DIGEST_UTIL.asActionKey(Digest.getDefaultInstance()));
     // Needs to be repeated in every test that uses the timeout setting, since the tests run
     // on different threads than the setUp.
     withEmptyMetadata.attach();
@@ -121,7 +124,7 @@
     byte[] blob = new byte[CHUNK_SIZE * 2 + 1];
     new Random().nextBytes(blob);
 
-    Chunker chunker = new Chunker(blob, CHUNK_SIZE);
+    Chunker chunker = new Chunker(blob, CHUNK_SIZE, DIGEST_UTIL);
 
     serviceRegistry.addService(new ByteStreamImplBase() {
           @Override
@@ -195,7 +198,7 @@
       int blobSize = rand.nextInt(CHUNK_SIZE * 10) + CHUNK_SIZE;
       byte[] blob = new byte[blobSize];
       rand.nextBytes(blob);
-      Chunker chunker = new Chunker(blob, CHUNK_SIZE);
+      Chunker chunker = new Chunker(blob, CHUNK_SIZE, DIGEST_UTIL);
       builders.add(chunker);
       blobsByHash.put(chunker.digest().getHash(), blob);
     }
@@ -281,7 +284,7 @@
     List<Chunker> builders = new ArrayList<>(toUpload.size());
     Map<String, Integer> uploadsFailed = new HashMap<>();
     for (String s : toUpload) {
-      Chunker chunker = new Chunker(s.getBytes(UTF_8), /* chunkSize=*/ 3);
+      Chunker chunker = new Chunker(s.getBytes(UTF_8), /* chunkSize=*/ 3, DIGEST_UTIL);
       builders.add(chunker);
       uploadsFailed.put(chunker.digest().getHash(), 0);
     }
@@ -342,7 +345,7 @@
     for (Chunker chunker : builders) {
       Context ctx =
           TracingMetadataUtils.contextWithMetadata(
-              "build-req-id", "command-id", Digests.unsafeActionKeyFromDigest(chunker.digest()));
+              "build-req-id", "command-id", DIGEST_UTIL.asActionKey(chunker.digest()));
       ctx.call(
           () -> {
             uploads.add(uploader.uploadBlobAsync(chunker));
@@ -367,7 +370,7 @@
         new ByteStreamUploader(INSTANCE_NAME, channel, null, 3, retrier, retryService);
 
     byte[] blob = new byte[CHUNK_SIZE * 10];
-    Chunker chunker = new Chunker(blob, CHUNK_SIZE);
+    Chunker chunker = new Chunker(blob, CHUNK_SIZE, DIGEST_UTIL);
 
     AtomicInteger numWriteCalls = new AtomicInteger();
     CountDownLatch blocker = new CountDownLatch(1);
@@ -426,7 +429,7 @@
         new ByteStreamUploader(INSTANCE_NAME, channel, null, 3, retrier, retryService);
 
     byte[] blob = new byte[CHUNK_SIZE];
-    Chunker chunker = new Chunker(blob, CHUNK_SIZE);
+    Chunker chunker = new Chunker(blob, CHUNK_SIZE, DIGEST_UTIL);
 
     serviceRegistry.addService(new ByteStreamImplBase() {
       @Override
@@ -477,10 +480,10 @@
     serviceRegistry.addService(service);
 
     byte[] blob1 = new byte[CHUNK_SIZE];
-    Chunker chunker1 = new Chunker(blob1, CHUNK_SIZE);
+    Chunker chunker1 = new Chunker(blob1, CHUNK_SIZE, DIGEST_UTIL);
 
     byte[] blob2 = new byte[CHUNK_SIZE + 1];
-    Chunker chunker2 = new Chunker(blob2, CHUNK_SIZE);
+    Chunker chunker2 = new Chunker(blob2, CHUNK_SIZE, DIGEST_UTIL);
 
     ListenableFuture<Void> f1 = uploader.uploadBlobAsync(chunker1);
     ListenableFuture<Void> f2 = uploader.uploadBlobAsync(chunker2);
@@ -519,7 +522,7 @@
     assertThat(retryService.isShutdown()).isTrue();
 
     byte[] blob = new byte[1];
-    Chunker chunker = new Chunker(blob, CHUNK_SIZE);
+    Chunker chunker = new Chunker(blob, CHUNK_SIZE, DIGEST_UTIL);
     try {
       uploader.uploadBlob(chunker);
       fail("Should have thrown an exception.");
@@ -560,7 +563,7 @@
     });
 
     byte[] blob = new byte[1];
-    Chunker chunker = new Chunker(blob, CHUNK_SIZE);
+    Chunker chunker = new Chunker(blob, CHUNK_SIZE, DIGEST_UTIL);
 
     uploader.uploadBlob(chunker);
   }
@@ -585,7 +588,7 @@
     });
 
     byte[] blob = new byte[1];
-    Chunker chunker = new Chunker(blob, CHUNK_SIZE);
+    Chunker chunker = new Chunker(blob, CHUNK_SIZE, DIGEST_UTIL);
 
     try {
       uploader.uploadBlob(chunker);