This change adds support for a docker_bundle rule, refactoring a number of internal interfaces so they can be shared with docker_build.
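
A minimal usage sketch (the target names here are hypothetical):

    docker_bundle(
        name = "bundle",
        images = {
            "docker.io/ubuntu:latest": ":base_image",
            "gcr.io/my-project/my-app:canary": "//app:image",
        },
    )

Each key is a fully qualified tag, and each value is a docker_build target
whose image receives that tag when the bundle is loaded.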

This also fixes "docker load -i <bazel output>", which was broken with newer Docker clients because manifest.json lacked tag data in its RepoTags entries.
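
With this change, each image's manifest.json entry carries its tags; the
entries look roughly like the following (schematic, with placeholder values):

    [{
        "Config": "<image-id>.json",
        "Layers": ["<layer-id>/layer.tar"],
        "RepoTags": ["bazel/<package>:<target>"]
    }]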

--
PiperOrigin-RevId: 148767383
MOS_MIGRATED_REVID=148767383
diff --git a/tools/build_defs/docker/BUILD b/tools/build_defs/docker/BUILD
index db6a375..af41218 100644
--- a/tools/build_defs/docker/BUILD
+++ b/tools/build_defs/docker/BUILD
@@ -48,6 +48,7 @@
     "//tools/build_defs/docker/testdata:root_data_path_image.tar",
     "//tools/build_defs/docker/testdata:dummy_repository.tar",
     "//tools/build_defs/docker/testdata:extras_with_deb.tar",
+    "//tools/build_defs/docker/testdata:bundle_test.tar",
 ]
 
 sh_test(
diff --git a/tools/build_defs/docker/build_test.sh b/tools/build_defs/docker/build_test.sh
index 82df66b..c0aabeb 100755
--- a/tools/build_defs/docker/build_test.sh
+++ b/tools/build_defs/docker/build_test.sh
@@ -35,6 +35,10 @@
     || fail "$message"
 }
 
+function no_check() {
+  echo "${@}"
+}
+
 function check_property() {
   local property="${1}"
   local tarball="${2}"
@@ -48,6 +52,18 @@
   EXPECT_CONTAINS "${metadata}" "\"${property}\": ${expected}"
 }
 
+function check_manifest_property() {
+  local property="${1}"
+  local tarball="${2}"
+  local expected="${3}"
+  local test_data="${TEST_DATA_DIR}/${tarball}.tar"
+
+  local metadata="$(tar xOf "${test_data}" "./manifest.json")"
+
+  # This would be much more accurate if we had 'jq' everywhere.
+  EXPECT_CONTAINS "${metadata}" "\"${property}\": ${expected}"
+}
+
 function check_no_property() {
   local property="${1}"
   local tarball="${2}"
@@ -132,6 +148,8 @@
 }
 
 function check_layers_aux() {
+  local ancestry_check=${1}
+  shift 1
   local input=${1}
   shift 1
   local expected_layers=(${*})
@@ -176,7 +194,7 @@
 
     # Check that the layer contains its predecessor as its parent in the JSON.
     if [[ -n "${parent}" ]]; then
-      check_parent "${input}" "${layer}" "\"${parent}\""
+      "${ancestry_check}" "${input}" "${layer}" "\"${parent}\""
     fi
 
     # Check that the layer's size metadata matches the layer's tarball's size.
@@ -191,8 +209,8 @@
 function check_layers() {
   local input=$1
   shift
-  check_layers_aux "$input" "$@"
-  check_layers_aux "notop_$input" "$@"
+  check_layers_aux "check_parent" "$input" "$@"
+  check_layers_aux "check_parent" "notop_$input" "$@"
 }
 
 function test_gen_image() {
@@ -203,7 +221,7 @@
 function test_dummy_repository() {
   local layer="0279f3ce8b08d10506abcf452393b3e48439f5eca41b836fae59a0d509fbafea"
   local test_data="${TEST_DATA_DIR}/dummy_repository.tar"
-  check_layers_aux "dummy_repository" "$layer"
+  check_layers_aux "check_parent" "dummy_repository" "$layer"
 
 
   local repositories="$(tar xOf "${test_data}" "./repositories")"
@@ -341,6 +359,11 @@
   check_env "with_env" \
     "42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624" \
     '["bar=blah blah blah", "foo=/asdf"]'
+
+  # We should have a tag in our manifest; otherwise the image will be
+  # untagged when loaded by newer clients.
+  check_manifest_property "RepoTags" "with_env" \
+    "[\"bazel/${TEST_DATA_TARGET_BASE}:with_env\"]"
 }
 
 function test_with_double_env() {
@@ -396,10 +419,10 @@
   local absolute_data_path_sha="f196c42ab4f3eb850d9655b950b824db2c99c01527703ac486a7b48bb2a34f44"
   local root_data_path_sha="19d7fd26d67bfaeedd6232dcd441f14ee163bc81c56ed565cc20e73311c418b6"
 
-  check_layers_aux "no_data_path_image" "${no_data_path_sha}"
-  check_layers_aux "data_path_image" "${data_path_sha}"
-  check_layers_aux "absolute_data_path_image" "${absolute_data_path_sha}"
-  check_layers_aux "root_data_path_image" "${root_data_path_sha}"
+  check_layers_aux "check_parent" "no_data_path_image" "${no_data_path_sha}"
+  check_layers_aux "check_parent" "data_path_image" "${data_path_sha}"
+  check_layers_aux "check_parent" "absolute_data_path_image" "${absolute_data_path_sha}"
+  check_layers_aux "check_parent" "root_data_path_image" "${root_data_path_sha}"
 
   # Without data_path = "." the file will be inserted as `./test`
   # (since it is the path in the package) and with data_path = "."
@@ -454,4 +477,26 @@
 ./usr/titi"
 }
 
+function test_bundle() {
+  # Check that we have these layers, but ignore the parent check, since
+  # this is a tree, not a list.
+  check_layers_aux "no_check" "bundle_test" \
+    "125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
+    "42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624" \
+    "4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
+    "576a9fd9c690be04dc7aacbb9dbd1f14816e32dbbcc510f4d42325bbff7163dd" \
+    "82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710" \
+    "e5cfc312de72ce09488d789f525189a26a686d60fcc1c74249a3d7ce62986a82"
+
+  # Our bundle should have the following aliases.
+  check_manifest_property "RepoTags" "bundle_test" \
+    "[\"bazel/${TEST_DATA_TARGET_BASE}:base_with_entrypoint\", \"docker.io/ubuntu:latest\"]"
+
+  check_manifest_property "RepoTags" "bundle_test" \
+    "[\"bazel/${TEST_DATA_TARGET_BASE}:link_with_files_base\", \"us.gcr.io/google-appengine/base:fresh\"]"
+
+  check_manifest_property "RepoTags" "bundle_test" \
+    "[\"bazel/${TEST_DATA_TARGET_BASE}:with_double_env\", \"gcr.io/google-containers/pause:2.0\"]"
+}
+
 run_suite "build_test"
diff --git a/tools/build_defs/docker/docker.bzl b/tools/build_defs/docker/docker.bzl
index 03a5805..fb5c1ff 100644
--- a/tools/build_defs/docker/docker.bzl
+++ b/tools/build_defs/docker/docker.bzl
@@ -143,15 +143,21 @@
 def _serialize_dict(dict_value):
     return ",".join(["%s=%s" % (k, dict_value[k]) for k in dict_value])
 
+def _string_to_label(label_list, string_list):
+  """Form a mapping from label strings to the resolved label."""
+  label_string_dict = dict()
+  for i in range(len(label_list)):
+    string = string_list[i]
+    label = label_list[i]
+    label_string_dict[string] = label
+  return label_string_dict
+
 def _image_config(ctx, layer_names):
   """Create the configuration for a new docker image."""
   config = ctx.new_file(ctx.label.name + ".config")
 
-  label_file_dict = dict()
-  for i in range(len(ctx.files.label_files)):
-    fname = ctx.attr.label_file_strings[i]
-    file = ctx.files.label_files[i]
-    label_file_dict[fname] = file
+  label_file_dict = _string_to_label(
+      ctx.files.label_files, ctx.attr.label_file_strings)
 
   labels = dict()
   for l in ctx.attr.labels:
@@ -199,11 +205,8 @@
   """Generate the action to create the JSON metadata for the layer."""
   rewrite_tool = ctx.executable.rewrite_tool
 
-  label_file_dict = dict()
-  for i in range(len(ctx.files.label_files)):
-    fname = ctx.attr.label_file_strings[i]
-    file = ctx.files.label_files[i]
-    label_file_dict[fname] = file
+  label_file_dict = _string_to_label(
+      ctx.files.label_files, ctx.attr.label_file_strings)
 
   labels = dict()
   for l in ctx.attr.labels:
@@ -274,16 +277,16 @@
   _metadata_action(ctx, layer, name, metadata)
   return metadata
 
-def _create_image(ctx, layers, id, config, name, metadata):
+def _create_image(ctx, layers, identifier, config, name, metadata, tags):
   """Create the new image."""
   args = [
       "--output=" + ctx.outputs.layer.path,
-      "--id=@" + id.path,
+      "--id=@" + identifier.path,
       "--config=" + config.path,
-      ]
+      ] + ["--tag=" + tag for tag in tags]
 
   args += ["--layer=@%s=%s" % (l["name"].path, l["layer"].path) for l in layers]
-  inputs = [id, config] + [l["name"] for l in layers] + [l["layer"] for l in layers]
+  inputs = [identifier, config] + [l["name"] for l in layers] + [l["layer"] for l in layers]
 
   if name:
     args += ["--legacy_id=@" + name.path]
@@ -312,23 +315,23 @@
       mnemonic = "CreateImage",
       )
 
-def _assemble_image(ctx, layers, name):
+def _assemble_image(ctx, layers, tags_to_names):
   """Create the full image from the list of layers."""
   layers = [l["layer"] for l in layers]
   args = [
       "--output=" + ctx.outputs.out.path,
-      "--id=@" + name.path,
-      "--repository=" + _repository_name(ctx),
-      "--name=" + ctx.label.name
-      ] + ["--layer=" + l.path for l in layers]
-  inputs = [name] + layers
+  ] + [
+      "--tags=" + tag + "=@" + tags_to_names[tag].path
+      for tag in tags_to_names
+  ] + ["--layer=" + l.path for l in layers]
+  inputs = layers + tags_to_names.values()
   ctx.action(
       executable = ctx.executable.join_layers,
       arguments = args,
       inputs = inputs,
       outputs = [ctx.outputs.out],
       mnemonic = "JoinLayers"
-      )
+  )
 
 def _repository_name(ctx):
   """Compute the repository name for the current rule."""
@@ -358,14 +361,17 @@
   layer_sha = _sha256(ctx, layer)
 
   config = _image_config(ctx, [layer_sha])
-  id = _sha256(ctx, config)
+  identifier = _sha256(ctx, config)
 
   name = _compute_layer_name(ctx, layer)
   metadata = _metadata(ctx, layer, name)
 
+  # Construct a temporary tag based on the build target.
+  tags = [_repository_name(ctx) + ":" + ctx.label.name]
+
   # creating a partial image so only pass the layers that belong to it
   image_layer = {"layer": layer, "name": layer_sha}
-  _create_image(ctx, [image_layer], id, config, name, metadata)
+  _create_image(ctx, [image_layer], identifier, config, name, metadata, tags)
 
   # Compute the layers transitive provider.
   # It includes the current layers, and, if they exists the layer from
@@ -373,25 +379,14 @@
   # a base tarball as they probably do not respect the convention on
   # layer naming that our rules use.
   layers =  [
-      {"layer": ctx.outputs.layer, "id": id, "name": name}
-      ] + getattr(ctx.attr.base, "docker_layers", [])
+      {"layer": ctx.outputs.layer, "id": identifier, "name": name}
+  ] + getattr(ctx.attr.base, "docker_layers", [])
+
   # Generate the incremental load statement
-  ctx.template_action(
-      template = ctx.file.incremental_load_template,
-      substitutions = {
-        "%{load_statements}": "\n".join([
-            "incr_load '%s' '%s' '%s'" % (_get_runfile_path(ctx, l["name"]),
-                                          _get_runfile_path(ctx, l["id"]),
-                                          _get_runfile_path(ctx, l["layer"]))
-            # The last layer is the first in the list of layers.
-            # We reverse to load the layer from the parent to the child.
-            for l in reverse(layers)]),
-        "%{repository}": _repository_name(ctx),
-        "%{tag}": ctx.label.name,
-        },
-      output = ctx.outputs.executable,
-      executable = True)
-  _assemble_image(ctx, reverse(layers), name)
+  _incr_load(ctx, layers, {tag_name: {"name": name, "id": identifier}
+                           for tag_name in tags})
+
+  _assemble_image(ctx, reverse(layers), {tag_name: name for tag_name in tags})
   runfiles = ctx.runfiles(
       files = [l["name"] for l in layers] +
               [l["id"] for l in layers] +
@@ -577,3 +572,121 @@
   if "entrypoint" in kwargs:
     kwargs["entrypoint"] = _validate_command("entrypoint", kwargs["entrypoint"])
   docker_build_(**kwargs)
+
+
+def _incr_load(ctx, layers, images):
+  """Generate the incremental load statement."""
+  ctx.template_action(
+      template = ctx.file.incremental_load_template,
+      substitutions = {
+          "%{load_statements}": "\n".join([
+              "incr_load '%s' '%s' '%s'" % (_get_runfile_path(ctx, l["name"]),
+                                            _get_runfile_path(ctx, l["id"]),
+                                            _get_runfile_path(ctx, l["layer"]))
+              # The last layer is the first in the list of layers.
+              # We reverse the list to load layers from parent to child.
+              for l in reverse(layers)]),
+          "%{tag_statements}": "\n".join([
+              "tag_layer '%s' '%s' '%s'" % (
+                  img,
+                  _get_runfile_path(ctx, images[img]["name"]),
+                  _get_runfile_path(ctx, images[img]["id"]))
+              for img in images
+          ])
+      },
+      output = ctx.outputs.executable,
+      executable = True)
+
+
+def _docker_bundle_impl(ctx):
+  """Implementation for the docker_bundle rule."""
+
+  # Compute the set of layers from the image_targets.
+  image_target_dict = _string_to_label(
+      ctx.attr.image_targets, ctx.attr.image_target_strings)
+
+  seen_names = []
+  layers = []
+  for image in ctx.attr.image_targets:
+    for layer in getattr(image, "docker_layers", []):
+      if layer["name"].path in seen_names:
+        continue
+      seen_names.append(layer["name"].path)
+      layers.append(layer)
+
+  images = dict()
+  for unresolved_tag in ctx.attr.images:
+    # Allow users to put make variables into the tag name.
+    tag = ctx.expand_make_variables("images", unresolved_tag, {})
+
+    target = ctx.attr.images[unresolved_tag]
+    target = image_target_dict[target]
+    images[tag] = getattr(target, "docker_layers", [])[0]
+
+  _incr_load(ctx, layers, images)
+
+  _assemble_image(ctx, reverse(layers), {
+      # Create a new dictionary with the same keyspace that
+      # points to the name of the layer.
+      k: images[k]["name"]
+      for k in images
+  })
+
+  runfiles = ctx.runfiles(
+      files = ([l["name"] for l in layers] +
+               [l["id"] for l in layers] +
+               [l["layer"] for l in layers]))
+
+  return struct(runfiles = runfiles,
+                files = set(),
+                docker_layers = layers)
+
+docker_bundle_ = rule(
+    implementation = _docker_bundle_impl,
+    attrs = {
+        "images": attr.string_dict(),
+        # Implicit dependencies.
+        "image_targets": attr.label_list(allow_files=True),
+        "image_target_strings": attr.string_list(),
+        "incremental_load_template": attr.label(
+            default=Label("//tools/build_defs/docker:incremental_load_template"),
+            single_file=True,
+            allow_files=True),
+        "join_layers": attr.label(
+            default=Label("//tools/build_defs/docker:join_layers"),
+            cfg="host",
+            executable=True,
+            allow_files=True),
+    },
+    outputs = {
+        "out": "%{name}.tar",
+    },
+    executable = True)
+
+
+# Produces a new docker image tarball compatible with 'docker load', which
+# contains the N listed 'images', each aliased with its key.
+#
+# Example:
+#   docker_bundle(
+#     name = "foo",
+#     images = {
+#       "ubuntu:latest": ":blah",
+#       "foo.io/bar:canary": "//baz:asdf",
+#     }
+#   )
+def docker_bundle(**kwargs):
+  """Package several docker images into a single tarball.
+
+  Args:
+    **kwargs: See above.
+  """
+  for reserved in ["image_targets", "image_target_strings"]:
+    if reserved in kwargs:
+      fail("reserved for internal use by docker_bundle macro", attr=reserved)
+
+  if "images" in kwargs:
+    kwargs["image_targets"] = kwargs["images"].values()
+    kwargs["image_target_strings"] = kwargs["images"].values()
+
+  docker_bundle_(**kwargs)
diff --git a/tools/build_defs/docker/incremental_load.sh.tpl b/tools/build_defs/docker/incremental_load.sh.tpl
index 2210ceb..c607380 100644
--- a/tools/build_defs/docker/incremental_load.sh.tpl
+++ b/tools/build_defs/docker/incremental_load.sh.tpl
@@ -56,17 +56,27 @@
   fi
 }
 
-# List of 'incr_load' statements for all layers.
-# This generated and injected by docker_build.
-%{load_statements}
+function tag_layer() {
+  if [ "$LEGACY_DOCKER" = true ]; then
+    name=$(cat ${RUNFILES}/$2)
+  else
+    name=$(cat ${RUNFILES}/$3)
+  fi
 
-# Tag the last layer.
-if [ -n "${name}" ]; then
-  TAG="${1:-%{repository}:%{tag}}"
+  TAG="$1"
   echo "Tagging ${name} as ${TAG}"
   if [ "$LEGACY_DOCKER" = true ]; then
     "${DOCKER}" tag -f ${name} ${TAG}
   else
     "${DOCKER}" tag ${name} ${TAG}
   fi
-fi
+}
+
+# List of 'incr_load' statements for all layers.
+# This is generated and injected by docker_build.
+%{load_statements}
+
+# List of 'tag_layer' statements for all tags.
+# This is generated and injected by docker_build.
+%{tag_statements}
+
diff --git a/tools/build_defs/docker/join_layers.py b/tools/build_defs/docker/join_layers.py
index 7cf8381..97e4793 100644
--- a/tools/build_defs/docker/join_layers.py
+++ b/tools/build_defs/docker/join_layers.py
@@ -32,16 +32,10 @@
 
 gflags.DEFINE_multistring('layer', [], 'The tar files for layers to join.')
 
-gflags.DEFINE_string(
-    'id', None, 'The hex identifier of the top layer (hexstring or @filename).')
-
-gflags.DEFINE_string(
-    'repository', None,
-    'The name of the repository to add this image (use with --id and --name).')
-
-gflags.DEFINE_string(
-    'name', None,
-    'The symbolic name of this image (use with --id and --repository).')
+gflags.DEFINE_multistring(
+    'tags', [],
+    'A fully qualified tag name and the layer id it tags, as key=value; '
+    'may be repeated, e.g. --tags=ubuntu:latest=deadbeef.')
 
 FLAGS = gflags.FLAGS
 
@@ -51,25 +45,57 @@
   return basename not in ('manifest.json', 'top', 'repositories')
 
 
-def create_image(output, layers, identifier=None,
-                 name=None, repository=None):
+def _add_top(tar, repositories):
+  # Don't add 'top' if there are multiple images in this bundle.
+  if len(repositories) != 1:
+    return
+
+  # Walk the single-item dictionary, and if there is a single tag
+  # for the single repository, then emit a 'top' file pointing to
+  # the single image in this bundle.
+  for (unused_x, tags) in repositories.iteritems():
+    if len(tags) != 1:
+      continue
+    for (unused_y, layer_id) in tags.iteritems():
+      tar.add_file('top', content=layer_id)
+
+
+def create_image(output, layers, repositories=None):
   """Creates a Docker image from a list of layers.
 
   Args:
     output: the name of the docker image file to create.
     layers: the layers (tar files) to join to the image.
-    identifier: the identifier of the top layer for this image.
-    name: symbolic name for this docker image.
-    repository: repository name for this docker image.
+    repositories: a two-level dictionary keyed by repository name at the
+                  top level and by tag name at the second level, with each
+                  tag mapping to a layer id.
   """
-  manifest = []
+  # Compute a map from layer tarball names to the tags that should apply to them.
+  layers_to_tag = {}
+  for repo in repositories:
+    tags = repositories[repo]
+    for tag in tags:
+      layer_name = tags[tag] + '/layer.tar'
+      fq_name = '%s:%s' % (repo, tag)
+      layer_tags = layers_to_tag.get(layer_name, [])
+      layer_tags.append(fq_name)
+      layers_to_tag[layer_name] = layer_tags
 
+  manifests = []
   tar = archive.TarFileWriter(output)
   for layer in layers:
     tar.add_tar(layer, name_filter=_layer_filter)
-    manifest += utils.GetManifestFromTar(layer)
+    layer_manifests = utils.GetManifestFromTar(layer)
 
-  manifest_content = json.dumps(manifest, sort_keys=True)
+    # Augment each manifest with any tags that should apply to its top layer.
+    for manifest in layer_manifests:
+      top_layer = manifest['Layers'][-1]
+      manifest['RepoTags'] = list(sorted(set(manifest['RepoTags'] +
+                                             layers_to_tag.get(top_layer, []))))
+
+    manifests += layer_manifests
+
+  manifest_content = json.dumps(manifests, sort_keys=True)
   tar.add_file('manifest.json', content=manifest_content)
 
   # In addition to N layers of the form described above, there might be
@@ -82,28 +108,49 @@
   #   },
   #   ...
   # }
-  if identifier:
+  # This is the exact structure we expect repositories to have.
+  if repositories:
     # If the identifier is not provided, then the resulted layer will be
-    # created without a 'top' file. Docker doesn't needs that file nor
+    # created without a 'top' file.  Docker doesn't need that file nor
     # the repository to load the image and for intermediate layer,
     # docker_build store the name of the layer in a separate artifact so
     # this 'top' file is not needed.
-    tar.add_file('top', content=identifier)
-    if repository and name:
-      tar.add_file('repositories',
-                   content='\n'.join([
-                       '{', '  "%s": {' % repository, '    "%s": "%s"' % (
-                           name, identifier), '  }', '}'
-                   ]))
+    _add_top(tar, repositories)
+    tar.add_file('repositories',
+                 content=json.dumps(repositories, sort_keys=True))
+
+
+def resolve_layer(identifier):
+  if not identifier:
+    # TODO(mattmoor): This should not happen.
+    return None
+
+  if not identifier.startswith('@'):
+    return identifier
+
+  with open(identifier[1:], 'r') as f:
+    return f.read()
 
 
 def main(unused_argv):
-  identifier = FLAGS.id
-  if identifier and identifier.startswith('@'):
-    with open(identifier[1:], 'r') as f:
-      identifier = f.read()
-  create_image(FLAGS.output, FLAGS.layer, identifier, FLAGS.name,
-               FLAGS.repository)
+  repositories = {}
+  for entry in FLAGS.tags:
+    elts = entry.split('=')
+    if len(elts) != 2:
+      raise Exception('Expected associative list key=value, got: %s' % entry)
+    (fq_tag, layer_id) = elts
+
+    tag_parts = fq_tag.rsplit(':', 1)
+    if len(tag_parts) != 2:
+      raise Exception('Expected fully-qualified tag name (e.g. ubuntu:latest), '
+                      'got: %s' % fq_tag)
+    (repository, tag) = tag_parts
+
+    others = repositories.get(repository, {})
+    others[tag] = resolve_layer(layer_id)
+    repositories[repository] = others
+
+  create_image(FLAGS.output, FLAGS.layer, repositories)
 
 
 if __name__ == '__main__':
diff --git a/tools/build_defs/docker/testdata/BUILD b/tools/build_defs/docker/testdata/BUILD
index c430ca2..2f46f20 100644
--- a/tools/build_defs/docker/testdata/BUILD
+++ b/tools/build_defs/docker/testdata/BUILD
@@ -4,7 +4,7 @@
     ],
 )
 
-load("//tools/build_defs/docker:docker.bzl", "docker_build")
+load("//tools/build_defs/docker:docker.bzl", "docker_build", "docker_bundle")
 
 filegroup(
     name = "srcs",
@@ -372,6 +372,15 @@
     },
 )
 
+docker_bundle(
+    name = "bundle_test",
+    images = {
+        "docker.io/ubuntu:latest": ":base_with_entrypoint",
+        "us.gcr.io/google-appengine/base:fresh": ":link_with_files_base",
+        "gcr.io/google-containers/pause:2.0": ":with_double_env",
+    },
+)
+
 # Generate a dummy debian package with a test/ directory
 py_binary(
     name = "gen_deb",
diff --git a/tools/build_defs/docker/testenv.sh b/tools/build_defs/docker/testenv.sh
index 6840ad0..e6a5bee 100755
--- a/tools/build_defs/docker/testenv.sh
+++ b/tools/build_defs/docker/testenv.sh
@@ -23,3 +23,5 @@
   { echo "Failed to source unittest.bash" >&2; exit 1; }
 
 readonly TEST_DATA_DIR="${TEST_SRCDIR}/io_bazel/tools/build_defs/docker/testdata"
+
+readonly TEST_DATA_TARGET_BASE="io_bazel/tools/build_defs/docker/testdata"