| // Copyright 2018 The Bazel Authors. |
| // |
| // Licensed under the Apache License, Version 2.0 (the "License"); |
| // you may not use this file except in compliance with the License. |
| // You may obtain a copy of the License at |
| // |
| // http://www.apache.org/licenses/LICENSE-2.0 |
| // |
| // Unless required by applicable law or agreed to in writing, software |
| // distributed under the License is distributed on an "AS IS" BASIS, |
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| // See the License for the specific language governing permissions and |
| // limitations under the License. |
| |
| syntax = "proto3"; |
| |
| package build.bazel.remote.execution.v2; |
| |
| import "build/bazel/semver/semver.proto"; |
| import "google/api/annotations.proto"; |
| import "google/longrunning/operations.proto"; |
| import "google/protobuf/any.proto"; |
| import "google/protobuf/duration.proto"; |
| import "google/protobuf/timestamp.proto"; |
| import "google/protobuf/wrappers.proto"; |
| import "google/rpc/status.proto"; |
| |
| option csharp_namespace = "Build.Bazel.Remote.Execution.V2"; |
| option go_package = "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2;remoteexecution"; |
| option java_multiple_files = true; |
| option java_outer_classname = "RemoteExecutionProto"; |
| option java_package = "build.bazel.remote.execution.v2"; |
| option objc_class_prefix = "REX"; |
| |
| |
| // The Remote Execution API is used to execute an |
| // [Action][build.bazel.remote.execution.v2.Action] on the remote |
| // workers. |
| // |
| // As with other services in the Remote Execution API, any call may return an |
| // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing |
| // information about when the client should retry the request; clients SHOULD |
| // respect the information provided. |
| service Execution { |
| // Execute an action remotely. |
| // |
| // In order to execute an action, the client must first upload all of the |
| // inputs, the |
| // [Command][build.bazel.remote.execution.v2.Command] to run, and the |
| // [Action][build.bazel.remote.execution.v2.Action] into the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. |
| // It then calls `Execute` with an `action_digest` referring to them. The |
| // server will run the action and eventually return the result. |
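| // |
| // As a purely illustrative sketch (instance name and digest values are |
| // hypothetical and shortened, in the abbreviated notation used by the other |
| // examples in this file), such a call might carry an `ExecuteRequest` like: |
| // |
| // ```json |
| // { |
| //   instance_name: "main", |
| //   action_digest: { |
| //     hash: "fc7e3564de...", |
| //     size: 147 |
| //   } |
| // } |
| // ``` |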
| // |
| // The input `Action`'s fields MUST meet the various canonicalization |
| // requirements specified in the documentation for their types so that it has |
| // the same digest as other logically equivalent `Action`s. The server MAY |
| // enforce the requirements and return errors if a non-canonical input is |
| // received. It MAY also proceed without verifying some or all of the |
| // requirements, such as for performance reasons. If the server does not |
| // verify the requirement, then it will treat the `Action` as distinct from |
| // another logically equivalent action if they hash differently. |
| // |
| // Returns a stream of |
| // [google.longrunning.Operation][google.longrunning.Operation] messages |
| // describing the resulting execution, with eventual `response` |
| // [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The |
| // `metadata` on the operation is of type |
| // [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata]. |
| // |
| // If the client remains connected after the first response is returned by |
| // the server, then updates are streamed as if the client had called |
| // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution] |
| // until the execution completes or the request reaches an error. The |
| // operation can also be queried using [Operations |
| // API][google.longrunning.Operations.GetOperation]. |
| // |
| // The server NEED NOT implement other methods or functionality of the |
| // Operations API. |
| // |
| // Errors discovered during creation of the `Operation` will be reported |
| // as gRPC Status errors, while errors that occurred while running the |
| // action will be reported in the `status` field of the `ExecuteResponse`. The |
| // server MUST NOT set the `error` field of the `Operation` proto. |
| // The possible errors include: |
| // |
| // * `INVALID_ARGUMENT`: One or more arguments are invalid. |
| // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the |
| // action requested, such as a missing input or command or no worker being |
| // available. The client may be able to fix the errors and retry. |
| // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run |
| // the action. |
| // * `UNAVAILABLE`: Due to a transient condition, such as all workers being |
| // occupied (and the server does not support a queue), the action could not |
| // be started. The client should retry. |
| // * `INTERNAL`: An internal error occurred in the execution engine or the |
| // worker. |
| // * `DEADLINE_EXCEEDED`: The execution timed out. |
| // * `CANCELLED`: The operation was cancelled by the client. This status is |
| // only possible if the server implements the Operations API CancelOperation |
| // method, and it was called for the current execution. |
| // |
| // In the case of a missing input or command, the server SHOULD additionally |
| // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail |
| // where, for each requested blob not present in the CAS, there is a |
| // `Violation` with a `type` of `MISSING` and a `subject` of |
| // `"blobs/{hash}/{size}"` indicating the digest of the missing blob. |
| // |
| // The server does not need to guarantee that a call to this method leads to |
| // at most one execution of the action. The server MAY execute the action |
| // multiple times, potentially in parallel. These redundant executions MAY |
| // continue to run, even if the operation is completed. |
| rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) { |
| option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" }; |
| } |
| |
| // Wait for an execution operation to complete. When the client initially |
| // makes the request, the server immediately responds with the current status |
| // of the execution. The server will leave the request stream open until the |
| // operation completes, and then respond with the completed operation. The |
| // server MAY choose to stream additional updates as execution progresses, |
| // such as to provide an update as to the state of the execution. |
| rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) { |
| option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" }; |
| } |
| } |
| |
| // The action cache API is used to query whether a given action has already been |
| // performed and, if so, retrieve its result. Unlike the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage], |
| // which addresses blobs by their own content, the action cache addresses the |
| // [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a |
| // digest of the encoded [Action][build.bazel.remote.execution.v2.Action] |
| // which produced it. |
| // |
| // The lifetime of entries in the action cache is implementation-specific, but |
| // the server SHOULD assume that more recently used entries are more likely to |
| // be used again. |
| // |
| // As with other services in the Remote Execution API, any call may return an |
| // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing |
| // information about when the client should retry the request; clients SHOULD |
| // respect the information provided. |
| service ActionCache { |
| // Retrieve a cached execution result. |
| // |
| // Implementations SHOULD ensure that any blobs referenced from the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] |
| // are available at the time of returning the |
| // [ActionResult][build.bazel.remote.execution.v2.ActionResult] and will be |
| // for some period of time afterwards. The lifetimes of the referenced blobs SHOULD be increased |
| // if necessary and applicable. |
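| // |
| // As a purely illustrative sketch (instance name and digest values are |
| // hypothetical and shortened), a `GetActionResultRequest` for a previously |
| // executed action might look like: |
| // |
| // ```json |
| // { |
| //   instance_name: "main", |
| //   action_digest: { |
| //     hash: "fc7e3564de...", |
| //     size: 147 |
| //   } |
| // } |
| // ``` |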
| // |
| // Errors: |
| // |
| // * `NOT_FOUND`: The requested `ActionResult` is not in the cache. |
| rpc GetActionResult(GetActionResultRequest) returns (ActionResult) { |
| option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" }; |
| } |
| |
| // Upload a new execution result. |
| // |
| // In order to allow the server to perform access control based on the type of |
| // action, and to assist with client debugging, the client MUST first upload |
| // the [Action][build.bazel.remote.execution.v2.Action] that produced the |
| // result, along with its |
| // [Command][build.bazel.remote.execution.v2.Command], into the |
| // `ContentAddressableStorage`. |
| // |
| // Server implementations MAY modify the |
| // `UpdateActionResultRequest.action_result` and return an equivalent value. |
| // |
| // Errors: |
| // |
| // * `INVALID_ARGUMENT`: One or more arguments are invalid. |
| // * `FAILED_PRECONDITION`: One or more errors occurred in updating the |
| // action result, such as a missing command or action. |
| // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the |
| // entry to the cache. |
| rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) { |
| option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" }; |
| } |
| } |
| |
| // The CAS (content-addressable storage) is used to store the inputs to and |
| // outputs from the execution service. Each piece of content is addressed by the |
| // digest of its binary data. |
| // |
| // Most of the binary data stored in the CAS is opaque to the execution engine, |
| // and is only used as a communication medium. In order to build an |
| // [Action][build.bazel.remote.execution.v2.Action], |
| // however, the client will need to also upload the |
| // [Command][build.bazel.remote.execution.v2.Command] and input root |
| // [Directory][build.bazel.remote.execution.v2.Directory] for the Action. |
| // The Command and Directory messages must be marshalled to wire format and then |
| // uploaded under the hash as with any other piece of content. In practice, the |
| // input root directory is likely to refer to other Directories in its |
| // hierarchy, which must also each be uploaded on their own. |
| // |
| // For small file uploads, the client should group them together and call |
| // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]. |
| // |
| // For large uploads, the client must use the |
| // [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. |
| // |
| // For uncompressed data, the `WriteRequest.resource_name` is of the following form: |
| // `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}` |
| // |
| // Where: |
| // * `instance_name` is an identifier, possibly containing multiple path |
| // segments, used to distinguish between the various instances on the server, |
| // in a manner defined by the server. If it is the empty path, the leading |
| // slash is omitted, so that the `resource_name` becomes |
| // `uploads/{uuid}/blobs/{hash}/{size}{/optional_metadata}`. |
| // To simplify parsing, a path segment cannot equal any of the following |
| // keywords: `blobs`, `uploads`, `actions`, `actionResults`, `operations`, |
| // `capabilities` or `compressed-blobs`. |
| // * `uuid` is a version 4 UUID generated by the client, used to avoid |
| // collisions between concurrent uploads of the same data. Clients MAY |
| // reuse the same `uuid` for uploading different blobs. |
| // * `hash` and `size` refer to the [Digest][build.bazel.remote.execution.v2.Digest] |
| // of the data being uploaded. |
| // * `optional_metadata` is implementation specific data, which clients MAY omit. |
| // Servers MAY ignore this metadata. |
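| // |
| // For example (instance name, UUID and digest hypothetical), an uncompressed |
| // upload of a 1024-byte blob might use the resource name: |
| // |
| // `main/uploads/1bb7258f-8e68-4e92-a4f5-c1ab1b09a0c7/blobs/62f74d5e10.../1024` |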
| // |
| // Data can alternatively be uploaded in compressed form, with the following |
| // `WriteRequest.resource_name` form: |
| // `{instance_name}/uploads/{uuid}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}{/optional_metadata}` |
| // |
| // Where: |
| // * `instance_name`, `uuid` and `optional_metadata` are defined as above. |
| // * `compressor` is the lowercase string form of a `Compressor.Value` enum |
| // value other than `identity` that is supported by the server and advertised in |
| // [CacheCapabilities.supported_compressor][build.bazel.remote.execution.v2.CacheCapabilities.supported_compressor]. |
| // * `uncompressed_hash` and `uncompressed_size` refer to the |
| // [Digest][build.bazel.remote.execution.v2.Digest] of the data being |
| // uploaded, once uncompressed. Servers MUST verify that these match |
| // the uploaded data once uncompressed, and MUST return an |
| // `INVALID_ARGUMENT` error in the case of mismatch. |
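| // |
| // For example (assuming the server advertises a `zstd` compressor; instance |
| // name, UUID and digest hypothetical), the same blob could be uploaded in |
| // compressed form under: |
| // |
| // `main/uploads/1bb7258f-8e68-4e92-a4f5-c1ab1b09a0c7/compressed-blobs/zstd/62f74d5e10.../1024` |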
| // |
| // Note that when writing compressed blobs, the `WriteRequest.write_offset` in |
| // the initial request in a stream refers to the offset in the uncompressed form |
| // of the blob. In subsequent requests, `WriteRequest.write_offset` MUST be the |
| // sum of the first request's `WriteRequest.write_offset` and the total size of |
| // all the compressed data bundles in the previous requests. |
| // Note that this mixes an uncompressed offset with a compressed byte length, |
| // which is nonsensical, but it is done to fit the semantics of the existing |
| // ByteStream protocol. |
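| // |
| // For example (sizes hypothetical), if the first `WriteRequest` of a |
| // compressed upload uses a `write_offset` of 0 and carries 1000 bytes of |
| // compressed data, the next `WriteRequest` MUST use a `write_offset` of |
| // 1000 (0 + 1000), regardless of how many uncompressed bytes those 1000 |
| // compressed bytes represent. |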
| // |
| // Uploads of the same data MAY occur concurrently in any form, compressed or |
| // uncompressed. |
| // |
| // Clients SHOULD NOT use gRPC-level compression for ByteStream API `Write` |
| // calls of compressed blobs, since this would compress already-compressed data. |
| // |
| // When attempting an upload, if another client has already completed the upload |
| // (which may occur in the middle of a single upload if another client uploads |
| // the same blob concurrently), the request will terminate immediately without |
| // error, and with a response whose `committed_size` is the value `-1` if this |
| // is a compressed upload, or with the full size of the uploaded file if this is |
| // an uncompressed upload (regardless of how much data was transmitted by the |
| // client). If the client completes the upload but the |
| // [Digest][build.bazel.remote.execution.v2.Digest] does not match, an |
| // `INVALID_ARGUMENT` error will be returned. In either case, the client should |
| // not attempt to retry the upload. |
| // |
| // Small downloads can be grouped and requested in a batch via |
| // [BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs]. |
| // |
| // For large downloads, the client must use the |
| // [Read method][google.bytestream.ByteStream.Read] of the ByteStream API. |
| // |
| // For uncompressed data, the `ReadRequest.resource_name` is of the following form: |
| // `{instance_name}/blobs/{hash}/{size}` |
| // Where `instance_name`, `hash` and `size` are defined as for uploads. |
| // |
| // Data can alternatively be downloaded in compressed form, with the following |
| // `ReadRequest.resource_name` form: |
| // `{instance_name}/compressed-blobs/{compressor}/{uncompressed_hash}/{uncompressed_size}` |
| // |
| // Where: |
| // * `instance_name` and `compressor` are defined as for uploads. |
| // * `uncompressed_hash` and `uncompressed_size` refer to the |
| // [Digest][build.bazel.remote.execution.v2.Digest] of the data being |
| // downloaded, once uncompressed. Clients MUST verify that these match |
| // the downloaded data once uncompressed, and take appropriate steps in |
| // the case of failure such as retrying a limited number of times or |
| // surfacing an error to the user. |
| // |
| // When downloading compressed blobs: |
| // * `ReadRequest.read_offset` refers to the offset in the uncompressed form |
| // of the blob. |
| // * Servers MUST return `INVALID_ARGUMENT` if `ReadRequest.read_limit` is |
| // non-zero. |
| // * Servers MAY use any compression level they choose, including different |
| // levels for different blobs (e.g. choosing a level designed for maximum |
| // speed for data known to be incompressible). |
| // * Clients SHOULD NOT use gRPC-level compression, since this would compress |
| // already-compressed data. |
| // |
| // Servers MUST be able to provide data for all recently advertised blobs in |
| // each of the compression formats that the server supports, as well as in |
| // uncompressed form. |
| // |
| // The lifetime of entries in the CAS is implementation specific, but it SHOULD |
| // be long enough to allow for newly-added and recently looked-up entries to be |
| // used in subsequent calls (e.g. to |
| // [Execute][build.bazel.remote.execution.v2.Execution.Execute]). |
| // |
| // Servers MUST behave as though empty blobs are always available, even if they |
| // have not been uploaded. Clients MAY optimize away the uploading or |
| // downloading of empty blobs. |
| // |
| // As with other services in the Remote Execution API, any call may return an |
| // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing |
| // information about when the client should retry the request; clients SHOULD |
| // respect the information provided. |
| service ContentAddressableStorage { |
| // Determine if blobs are present in the CAS. |
| // |
| // Clients can use this API before uploading blobs to determine which ones are |
| // already present in the CAS and do not need to be uploaded again. |
| // |
| // Servers SHOULD increase the lifetimes of the referenced blobs if necessary and |
| // applicable. |
| // |
| // There are no method-specific errors. |
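| // |
| // As a purely illustrative sketch (digests hypothetical and shortened; |
| // `blob_digests` and `missing_blob_digests` are assumed to be the relevant |
| // request and response fields), querying two blobs of which one is missing |
| // might look like: |
| // |
| // ```json |
| // // FindMissingBlobsRequest: |
| // { |
| //   instance_name: "main", |
| //   blob_digests: [ |
| //     { hash: "4a73bc9d03...", size: 65534 }, |
| //     { hash: "b2c941073e...", size: 1294 } |
| //   ] |
| // } |
| // |
| // // FindMissingBlobsResponse (only the second blob needs to be uploaded): |
| // { |
| //   missing_blob_digests: [ |
| //     { hash: "b2c941073e...", size: 1294 } |
| //   ] |
| // } |
| // ``` |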
| rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) { |
| option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" }; |
| } |
| |
| // Upload many blobs at once. |
| // |
| // The server may enforce a limit on the combined total size of blobs |
| // to be uploaded using this API. This limit may be obtained using the |
| // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. |
| // Requests exceeding the limit should either be split into smaller |
| // chunks or uploaded using the |
| // [ByteStream API][google.bytestream.ByteStream], as appropriate. |
| // |
| // This request is equivalent to calling a ByteStream `Write` request |
| // on each individual blob, in parallel. The requests may succeed or fail |
| // independently. |
| // |
| // Errors: |
| // |
| // * `INVALID_ARGUMENT`: The client attempted to upload more than the |
| // server-supported limit. |
| // |
| // Additionally, individual requests may return the following errors: |
| // |
| // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob. |
| // * `INVALID_ARGUMENT`: The |
| // [Digest][build.bazel.remote.execution.v2.Digest] does not match the |
| // provided data. |
| rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) { |
| option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" }; |
| } |
| |
| // Download many blobs at once. |
| // |
| // The server may enforce a limit on the combined total size of blobs |
| // to be downloaded using this API. This limit may be obtained using the |
| // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. |
| // Requests exceeding the limit should either be split into smaller |
| // chunks or downloaded using the |
| // [ByteStream API][google.bytestream.ByteStream], as appropriate. |
| // |
| // This request is equivalent to calling a ByteStream `Read` request |
| // on each individual blob, in parallel. The requests may succeed or fail |
| // independently. |
| // |
| // Errors: |
| // |
| // * `INVALID_ARGUMENT`: The client attempted to read more than the |
| // server-supported limit. |
| // |
| // Every error on an individual read will be returned in the corresponding |
| // digest status. |
| rpc BatchReadBlobs(BatchReadBlobsRequest) returns (BatchReadBlobsResponse) { |
| option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchRead" body: "*" }; |
| } |
| |
| // Fetch the entire directory tree rooted at a node. |
| // |
| // This request must be targeted at a |
| // [Directory][build.bazel.remote.execution.v2.Directory] stored in the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] |
| // (CAS). The server will enumerate the `Directory` tree recursively and |
| // return every node descended from the root. |
| // |
| // The GetTreeRequest.page_token parameter can be used to skip ahead in |
| // the stream (e.g. when retrying a partially completed and aborted request), |
| // by setting it to a value taken from GetTreeResponse.next_page_token of the |
| // last successfully processed GetTreeResponse. |
| // |
| // The exact traversal order is unspecified and, unless retrieving subsequent |
| // pages from an earlier request, is not guaranteed to be stable across |
| // multiple invocations of `GetTree`. |
| // |
| // If part of the tree is missing from the CAS, the server will return the |
| // portion present and omit the rest. |
| // |
| // Errors: |
| // |
| // * `NOT_FOUND`: The requested tree root is not present in the CAS. |
| rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) { |
| option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" }; |
| } |
| } |
| |
| // The Capabilities service may be used by remote execution clients to query |
| // various server properties, in order to self-configure or return meaningful |
| // error messages. |
| // |
| // The query may include a particular `instance_name`, in which case the values |
| // returned will pertain to that instance. |
| service Capabilities { |
| // GetCapabilities returns the server capabilities configuration of the |
| // remote endpoint. |
| // Only the capabilities of the services supported by the endpoint will |
| // be returned: |
| // * Execution + CAS + Action Cache endpoints should return both |
| // CacheCapabilities and ExecutionCapabilities. |
| // * Execution only endpoints should return ExecutionCapabilities. |
| // * CAS + Action Cache only endpoints should return CacheCapabilities. |
| // |
| // There are no method-specific errors. |
| rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) { |
| option (google.api.http) = { |
| get: "/v2/{instance_name=**}/capabilities" |
| }; |
| } |
| } |
| |
| // An `Action` captures all the information about an execution which is required |
| // to reproduce it. |
| // |
| // `Action`s are the core component of the |
| // [Execution][build.bazel.remote.execution.v2.Execution] service. A single |
| // `Action` represents a repeatable action that can be performed by the |
| // execution service. `Action`s can be succinctly identified by the digest of |
| // their wire format encoding and, once an `Action` has been executed, will be |
| // cached in the action cache. Future requests can then use the cached result |
| // rather than needing to run afresh. |
| // |
| // When a server completes execution of an |
| // [Action][build.bazel.remote.execution.v2.Action], it MAY choose to |
| // cache the [result][build.bazel.remote.execution.v2.ActionResult] in |
| // the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless |
| // `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By |
| // default, future calls to |
| // [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same |
| // `Action` will also serve their results from the cache. Clients must take care |
| // to understand the caching behaviour. Ideally, all `Action`s will be |
| // reproducible so that serving a result from cache is always desirable and |
| // correct. |
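| // |
| // As a purely illustrative example (all values hypothetical, digests |
| // shortened, in the abbreviated notation used by the other examples in this |
| // file), an `Action` might look like: |
| // |
| // ```json |
| // { |
| //   command_digest: { |
| //     hash: "1b1a4e0b9f...", |
| //     size: 142 |
| //   }, |
| //   input_root_digest: { |
| //     hash: "4cf2eda940...", |
| //     size: 43 |
| //   }, |
| //   timeout: "120s", |
| //   platform: { |
| //     properties: [ |
| //       { name: "OSFamily", value: "Linux" } |
| //     ] |
| //   } |
| // } |
| // ``` |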
| message Action { |
| // The digest of the [Command][build.bazel.remote.execution.v2.Command] |
| // to run, which MUST be present in the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. |
| Digest command_digest = 1; |
| |
| // The digest of the root |
| // [Directory][build.bazel.remote.execution.v2.Directory] for the input |
| // files. The files in the directory tree are available in the correct |
| // location on the build machine before the command is executed. The root |
| // directory, as well as every subdirectory and content blob referred to, MUST |
| // be in the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. |
| Digest input_root_digest = 2; |
| |
| reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command]. |
| |
| // A timeout after which the execution should be killed. If the timeout is |
| // absent, then the client is specifying that the execution should continue |
| // as long as the server will let it. The server SHOULD impose a timeout if |
| // the client does not specify one, however, if the client does specify a |
| // timeout that is longer than the server's maximum timeout, the server MUST |
| // reject the request. |
| // |
| // The timeout is only intended to cover the "execution" of the specified |
| // action and not time in queue nor any overheads before or after execution |
| // such as marshalling inputs/outputs. The server SHOULD avoid including time |
| // that the client doesn't have control over, and MAY extend or reduce the |
| // timeout to account for delays or speedups that occur during execution |
| // itself (e.g., lazily loading data from the Content Addressable Storage, |
| // live migration of virtual machines, emulation overhead). |
| // |
| // The timeout is a part of the |
| // [Action][build.bazel.remote.execution.v2.Action] message, and |
| // therefore two `Actions` with different timeouts are different, even if they |
| // are otherwise identical. This is because, if they were not, running an |
| // `Action` with a lower timeout than is required might result in a cache hit |
| // from an execution run with a longer timeout, hiding the fact that the |
| // timeout is too short. By encoding it directly in the `Action`, a lower |
| // timeout will result in a cache miss and the execution timeout will fail |
| // immediately, rather than whenever the cache entry gets evicted. |
| google.protobuf.Duration timeout = 6; |
| |
| // If true, then the `Action`'s result cannot be cached, and in-flight |
| // requests for the same `Action` may not be merged. |
| bool do_not_cache = 7; |
| |
| reserved 8; // Used for field moved to [Command][build.bazel.remote.execution.v2.Command]. |
| |
| // An optional additional salt value used to place this `Action` into a |
| // separate cache namespace from other instances having the same field |
| // contents. This salt typically comes from operational configuration |
| // specific to sources such as repo and service configuration, |
| // and allows disowning an entire set of ActionResults that might have been |
| // poisoned by buggy software or tool failures. |
| bytes salt = 9; |
| |
| // The optional platform requirements for the execution environment. The |
| // server MAY choose to execute the action on any worker satisfying the |
| // requirements, so the client SHOULD ensure that running the action on any |
| // such worker will have the same result. A detailed lexicon for this can be |
| // found in the accompanying platform.md. |
| // New in version 2.2: clients SHOULD set these platform properties as well |
| // as those in the [Command][build.bazel.remote.execution.v2.Command]. Servers |
| // SHOULD prefer those set here. |
| Platform platform = 10; |
| } |
| |
| // A `Command` is the actual command executed by a worker running an |
| // [Action][build.bazel.remote.execution.v2.Action] and specifications of its |
| // environment. |
| // |
| // Except as otherwise required, the environment (such as which system |
| // libraries or binaries are available, and what filesystems are mounted where) |
| // is defined by and specific to the implementation of the remote execution API. |
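| // |
| // As a purely illustrative example (paths and values hypothetical), a |
| // canonical `Command` compiling a single file might look like: |
| // |
| // ```json |
| // { |
| //   arguments: ["cc", "-c", "foo.c", "-o", "out/foo.o"], |
| //   environment_variables: [ |
| //     { name: "LANG", value: "C" }, |
| //     { name: "PATH", value: "/usr/bin:/bin" } |
| //   ], |
| //   output_paths: ["out/foo.o"], |
| //   working_directory: "" |
| // } |
| // ``` |
| // |
| // Note that the environment variables are sorted by name (`LANG` before |
| // `PATH`) and that `output_paths` is sorted and deduplicated, as required |
| // for consistent hashing. |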
| message Command { |
| // An `EnvironmentVariable` is one variable to set in the running program's |
| // environment. |
| message EnvironmentVariable { |
| // The variable name. |
| string name = 1; |
| |
| // The variable value. |
| string value = 2; |
| } |
| |
| // The arguments to the command. |
| // |
| // The first argument specifies the command to run, which may be either an |
| // absolute path, a path relative to the working directory, or an unqualified |
| // path (without path separators) which will be resolved using the operating |
| // system's equivalent of the PATH environment variable. Path separators |
| // native to the operating system running on the worker SHOULD be used. If the |
| // `environment_variables` list contains an entry for the PATH environment |
| // variable, it SHOULD be respected. If not, the resolution process is |
| // implementation-defined. |
| // |
| // Changed in v2.3. v2.2 and older require that no PATH lookups are performed, |
| // and that relative paths are resolved relative to the input root. However, |
| // this behavior cannot be relied upon, as most implementations already |
| // followed the rules described above. |
| repeated string arguments = 1; |
| |
| // The environment variables to set when running the program. The worker may |
| // provide its own default environment variables; these defaults can be |
| // overridden using this field. Additional variables can also be specified. |
| // |
| // In order to ensure that equivalent |
| // [Command][build.bazel.remote.execution.v2.Command]s always hash to the same |
| // value, the environment variables MUST be lexicographically sorted by name. |
| // Sorting of strings is done by code point or, equivalently, by the UTF-8 bytes. |
| repeated EnvironmentVariable environment_variables = 2; |
| |
| // A list of the output files that the client expects to retrieve from the |
| // action. Only the listed files, as well as directories listed in |
| // `output_directories`, will be returned to the client as output. |
| // Other files or directories that may be created during command execution |
| // are discarded. |
| // |
| // The paths are relative to the working directory of the action execution. |
| // The paths are specified using a single forward slash (`/`) as a path |
| // separator, even if the execution platform natively uses a different |
| // separator. The path MUST NOT include a trailing slash, nor a leading slash, |
| // since it is a relative path. |
| // |
| // In order to ensure consistent hashing of the same Action, the output paths |
| // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 |
| // bytes). |
| // |
| // An output file cannot be duplicated, be a parent of another output file, or |
| // have the same path as any of the listed output directories. |
| // |
| // Directories leading up to the output files are created by the worker prior |
| // to execution, even if they are not explicitly part of the input root. |
| // |
| // DEPRECATED since v2.1: Use `output_paths` instead. |
| repeated string output_files = 3; |
| |
| // A list of the output directories that the client expects to retrieve from |
| // the action. Only the listed directories will be returned (an entire |
| // directory structure will be returned as a |
| // [Tree][build.bazel.remote.execution.v2.Tree] message digest, see |
| // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]), as |
| // well as files listed in `output_files`. Other files or directories that |
| // may be created during command execution are discarded. |
| // |
| // The paths are relative to the working directory of the action execution. |
| // The paths are specified using a single forward slash (`/`) as a path |
| // separator, even if the execution platform natively uses a different |
| // separator. The path MUST NOT include a trailing slash, nor a leading slash, |
| // since it is a relative path. The special value of empty string is allowed, |
| // although not recommended, and can be used to capture the entire working |
| // directory tree, including inputs. |
| // |
| // In order to ensure consistent hashing of the same Action, the output paths |
| // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 |
| // bytes). |
| // |
| // An output directory cannot be duplicated or have the same path as any of |
| // the listed output files. An output directory is allowed to be a parent of |
| // another output directory. |
| // |
| // Directories leading up to the output directories (but not the output |
| // directories themselves) are created by the worker prior to execution, even |
| // if they are not explicitly part of the input root. |
| // |
| // DEPRECATED since 2.1: Use `output_paths` instead. |
| repeated string output_directories = 4; |
| |
| // A list of the output paths that the client expects to retrieve from the |
| // action. Only the listed paths will be returned to the client as output. |
| // The type of the output (file or directory) is not specified, and will be |
| // determined by the server after action execution. If the resulting path is |
| // a file, it will be returned in an |
| // [OutputFile][build.bazel.remote.execution.v2.OutputFile] typed field. |
| // If the path is a directory, the entire directory structure will be returned |
| // as a [Tree][build.bazel.remote.execution.v2.Tree] message digest, see |
| // [OutputDirectory][build.bazel.remote.execution.v2.OutputDirectory]. |
| // Other files or directories that may be created during command execution |
| // are discarded. |
| // |
| // The paths are relative to the working directory of the action execution. |
| // The paths are specified using a single forward slash (`/`) as a path |
| // separator, even if the execution platform natively uses a different |
| // separator. The path MUST NOT include a trailing slash, nor a leading slash, |
| // since it is a relative path. |
| // |
| // In order to ensure consistent hashing of the same Action, the output paths |
| // MUST be deduplicated and sorted lexicographically by code point (or, |
| // equivalently, by UTF-8 bytes). |
| // |
| // Directories leading up to the output paths are created by the worker prior |
| // to execution, even if they are not explicitly part of the input root. |
| // |
| // New in v2.1: this field supersedes the DEPRECATED `output_files` and |
| // `output_directories` fields. If `output_paths` is used, `output_files` and |
| // `output_directories` will be ignored! |
| repeated string output_paths = 7; |
| |
| // The platform requirements for the execution environment. The server MAY |
| // choose to execute the action on any worker satisfying the requirements, so |
| // the client SHOULD ensure that running the action on any such worker will |
| // have the same result. A detailed lexicon for this can be found in the |
| // accompanying platform.md. |
| // DEPRECATED as of v2.2: platform properties are now specified directly in |
| // the action. See documentation note in the |
| // [Action][build.bazel.remote.execution.v2.Action] for migration. |
| Platform platform = 5; |
| |
| // The working directory, relative to the input root, for the command to run |
| // in. It must be a directory which exists in the input tree. If it is left |
| // empty, then the action is run in the input root. |
| string working_directory = 6; |
| |
| // A list of keys for node properties the client expects to retrieve for |
| // output files and directories. Keys are either names of string-based |
| // [NodeProperty][build.bazel.remote.execution.v2.NodeProperty] or |
| // names of fields in [NodeProperties][build.bazel.remote.execution.v2.NodeProperties]. |
| // In order to ensure that equivalent `Action`s always hash to the same |
| // value, the node properties MUST be lexicographically sorted by name. |
| // Sorting of strings is done by code point or, equivalently, by the UTF-8 bytes. |
| // |
| // The interpretation of string-based properties is server-dependent. If a |
| // property is not recognized by the server, the server will return an |
| // `INVALID_ARGUMENT` error. |
| repeated string output_node_properties = 8; |
| } |
| |
| // A `Platform` is a set of requirements, such as hardware, operating system, or |
| // compiler toolchain, for an |
| // [Action][build.bazel.remote.execution.v2.Action]'s execution |
| // environment. A `Platform` is represented as a series of key-value pairs |
| // representing the properties that are required of the platform. |
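| // |
| // As a purely illustrative example (property names and values hypothetical; |
| // the server defines which properties it accepts), a `Platform` might look |
| // like: |
| // |
| // ```json |
| // { |
| //   properties: [ |
| //     { name: "ISA", value: "x86-64" }, |
| //     { name: "OSFamily", value: "Linux" } |
| //   ] |
| // } |
| // ``` |
| // |
| // The properties are sorted by name (`ISA` before `OSFamily`), as required |
| // for consistent hashing. |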
| message Platform { |
| // A single property for the environment. The server is responsible for |
| // specifying the property `name`s that it accepts. If an unknown `name` is |
| // provided in the requirements for an |
| // [Action][build.bazel.remote.execution.v2.Action], the server SHOULD |
| // reject the execution request. If permitted by the server, the same `name` |
| // may occur multiple times. |
| // |
| // The server is also responsible for specifying the interpretation of |
| // property `value`s. For instance, a property describing how much RAM must be |
| // available may be interpreted as allowing a worker with 16GB to fulfill a |
| // request for 8GB, while a property describing the OS environment on which |
| // the action must be performed may require an exact match with the worker's |
| // OS. |
| // |
| // The server MAY use the `value` of one or more properties to determine how |
| // it sets up the execution environment, such as by making specific system |
| // files available to the worker. |
| // |
| // Both names and values are typically case-sensitive. Note that the platform |
| // is implicitly part of the action digest, so even tiny changes in the names |
| // or values (like changing case) may result in different action cache |
| // entries. |
| message Property { |
| // The property name. |
| string name = 1; |
| |
| // The property value. |
| string value = 2; |
| } |
| |
| // The properties that make up this platform. In order to ensure that |
| // equivalent `Platform`s always hash to the same value, the properties MUST |
| // be lexicographically sorted by name, and then by value. Sorting of strings |
| // is done by code point or, equivalently, by the UTF-8 bytes. |
| repeated Property properties = 1; |
| } |
| |
| // A `Directory` represents a directory node in a file tree, containing zero or |
| // more children [FileNodes][build.bazel.remote.execution.v2.FileNode], |
| // [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and |
| // [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. |
| // Each `Node` contains its name in the directory, either the digest of its |
| // content (either a file blob or a `Directory` proto) or a symlink target, as |
| // well as possibly some metadata about the file or directory. |
| // |
| // In order to ensure that two equivalent directory trees hash to the same |
| // value, the following restrictions MUST be obeyed when constructing a |
| // `Directory`: |
| // |
| // * Every child in the directory must have a path of exactly one segment. |
| // Multiple levels of directory hierarchy may not be collapsed. |
| // * Each child in the directory must have a unique path segment (file name). |
| // Note that while the API itself is case-sensitive, the environment where |
| // the Action is executed may or may not be case-sensitive. That is, it is |
| // legal to call the API with a Directory that has both "Foo" and "foo" as |
| // children, but the Action may be rejected by the remote system upon |
| // execution. |
| // * The files, directories and symlinks in the directory must each be sorted |
| // in lexicographical order by path. The path strings must be sorted by code |
| // point or, equivalently, by UTF-8 bytes. |
| // * The [NodeProperties][build.bazel.remote.execution.v2.NodeProperty] of files, |
| // directories, and symlinks must be sorted in lexicographical order by |
| // property name. |
| // |
| // A `Directory` that obeys the restrictions is said to be in canonical form. |
| // |
| // As an example, the following could be used for a file named `bar` and a |
| // directory named `foo` with an executable file named `baz` (hashes shortened |
| // for readability): |
| // |
| // ```json |
| // // (Directory proto) |
| // { |
| // files: [ |
| // { |
| // name: "bar", |
| // digest: { |
| // hash: "4a73bc9d03...", |
| // size: 65534 |
| // }, |
| // node_properties: [ |
| // { |
| // "name": "MTime", |
| // "value": "2017-01-15T01:30:15.01Z" |
| // } |
| // ] |
| // } |
| // ], |
| // directories: [ |
| // { |
| // name: "foo", |
| // digest: { |
| // hash: "4cf2eda940...", |
| // size: 43 |
| // } |
| // } |
| // ] |
| // } |
| // |
| // // (Directory proto with hash "4cf2eda940..." and size 43) |
| // { |
| // files: [ |
| // { |
| // name: "baz", |
| // digest: { |
| // hash: "b2c941073e...", |
| // size: 1294, |
| // }, |
| // is_executable: true |
| // } |
| // ] |
| // } |
| // ``` |
| message Directory { |
| // The files in the directory. |
| repeated FileNode files = 1; |
| |
| // The subdirectories in the directory. |
| repeated DirectoryNode directories = 2; |
| |
| // The symlinks in the directory. |
| repeated SymlinkNode symlinks = 3; |
| |
| // The node properties of the Directory. |
| reserved 4; |
| NodeProperties node_properties = 5; |
| } |
| |
| // A single property for [FileNodes][build.bazel.remote.execution.v2.FileNode], |
| // [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode], and |
| // [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. The server is |
| // responsible for specifying the property `name`s that it accepts. If |
| // permitted by the server, the same `name` may occur multiple times. |
| message NodeProperty { |
| // The property name. |
| string name = 1; |
| |
| // The property value. |
| string value = 2; |
| } |
| |
| // Node properties for [FileNodes][build.bazel.remote.execution.v2.FileNode], |
| // [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode], and |
| // [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. The server is |
| // responsible for specifying the properties that it accepts. |
| // |
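| // As a purely illustrative example (values hypothetical; `420` is the decimal |
| // form of octal mode `0644`), node properties carrying a modification time |
| // and a file mode might look like: |
| // |
| // ```json |
| // { |
| //   mtime: "2017-01-15T01:30:15.01Z", |
| //   unix_mode: 420 |
| // } |
| // ``` |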
| message NodeProperties { |
| // A list of string-based |
| // [NodeProperties][build.bazel.remote.execution.v2.NodeProperty]. |
| repeated NodeProperty properties = 1; |
| |
| // The file's last modification timestamp. |
| google.protobuf.Timestamp mtime = 2; |
| |
| // The UNIX file mode, e.g., 0755. |
| google.protobuf.UInt32Value unix_mode = 3; |
| } |
| |
| // A `FileNode` represents a single file and associated metadata. |
| message FileNode { |
| // The name of the file. |
| string name = 1; |
| |
| // The digest of the file's content. |
| Digest digest = 2; |
| |
| reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`. |
| |
| // True if file is executable, false otherwise. |
| bool is_executable = 4; |
| |
| // The node properties of the FileNode. |
| reserved 5; |
| NodeProperties node_properties = 6; |
| } |
| |
| // A `DirectoryNode` represents a child of a |
| // [Directory][build.bazel.remote.execution.v2.Directory] which is itself |
| // a `Directory` and its associated metadata. |
| message DirectoryNode { |
| // The name of the directory. |
| string name = 1; |
| |
| // The digest of the |
| // [Directory][build.bazel.remote.execution.v2.Directory] object |
| // represented. See [Digest][build.bazel.remote.execution.v2.Digest] |
| // for information about how to take the digest of a proto message. |
| Digest digest = 2; |
| } |
| |
| // A `SymlinkNode` represents a symbolic link. |
| message SymlinkNode { |
| // The name of the symlink. |
| string name = 1; |
| |
| // The target path of the symlink. The path separator is a forward slash `/`. |
| // The target path can be relative to the parent directory of the symlink or |
| // it can be an absolute path starting with `/`. Support for absolute paths |
| // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities] |
| // API. `..` components are allowed anywhere in the target path as logical |
| // canonicalization may lead to different behavior in the presence of |
| // directory symlinks (e.g. `foo/../bar` may not be the same as `bar`). |
| // To reduce potential cache misses, canonicalization is still recommended |
| // where this is possible without impacting correctness. |
| string target = 2; |
| |
| // The node properties of the SymlinkNode. |
| reserved 3; |
| NodeProperties node_properties = 4; |
| } |
| |
| // A content digest. A digest for a given blob consists of the size of the blob |
| // and its hash. The hash algorithm to use is defined by the server. |
| // |
| // The size is considered to be an integral part of the digest and cannot be |
| // separated. That is, even if the `hash` field is correctly specified but |
| // `size_bytes` is not, the server MUST reject the request. |
| // |
| // The reason for including the size in the digest is as follows: in a great |
| // many cases, the server needs to know the size of the blob it is about to work |
| // with prior to starting an operation with it, such as flattening Merkle tree |
| // structures or streaming it to a worker. Technically, the server could |
| // implement a separate metadata store, but this results in a significantly more |
| // complicated implementation as opposed to having the client specify the size |
| // up-front (or storing the size along with the digest in every message where |
| // digests are embedded). This does mean that the API leaks some implementation |
| // details of (what we consider to be) a reasonable server implementation, but |
| // we consider this to be a worthwhile tradeoff. |
| // |
| // When a `Digest` is used to refer to a proto message, it always refers to the |
| // message in binary encoded form. To ensure consistent hashing, clients and |
| // servers MUST ensure that they serialize messages according to the following |
| // rules, even if there are alternate valid encodings for the same message: |
| // |
| // * Fields are serialized in tag order. |
| // * There are no unknown fields. |
| // * There are no duplicate fields. |
| // * Fields are serialized according to the default semantics for their type. |
| // |
| // Most protocol buffer implementations will always follow these rules when |
| // serializing, but care should be taken to avoid shortcuts. For instance, |
| // concatenating two messages to merge them may produce duplicate fields. |
| message Digest { |
| // The hash. In the case of SHA-256, it will always be a lowercase hex string |
| // exactly 64 characters long. |
| string hash = 1; |
| |
| // The size of the blob, in bytes. |
| int64 size_bytes = 2; |
| } |
| |
| // ExecutedActionMetadata contains details about a completed execution. |
| message ExecutedActionMetadata { |
| // The name of the worker which ran the execution. |
| string worker = 1; |
| |
| // When the action was added to the queue. |
| google.protobuf.Timestamp queued_timestamp = 2; |
| |
| // When the worker received the action. |
| google.protobuf.Timestamp worker_start_timestamp = 3; |
| |
| // When the worker completed the action, including all stages. |
| google.protobuf.Timestamp worker_completed_timestamp = 4; |
| |
| // When the worker started fetching action inputs. |
| google.protobuf.Timestamp input_fetch_start_timestamp = 5; |
| |
| // When the worker finished fetching action inputs. |
| google.protobuf.Timestamp input_fetch_completed_timestamp = 6; |
| |
| // When the worker started executing the action command. |
| google.protobuf.Timestamp execution_start_timestamp = 7; |
| |
| // When the worker completed executing the action command. |
| google.protobuf.Timestamp execution_completed_timestamp = 8; |
| |
| // New in v2.3: the amount of time the worker spent executing the action |
| // command, potentially computed using a worker-specific virtual clock. |
| // |
| // The virtual execution duration is only intended to cover the "execution" of |
| // the specified action and not time in queue nor any overheads before or |
| // after execution such as marshalling inputs/outputs. The server SHOULD avoid |
| // including time that the client doesn't have control over, and MAY extend |
| // or reduce the execution duration to account for delays or speedups that |
| // occur during execution itself (e.g., lazily loading data from the Content |
| // Addressable Storage, live migration of virtual machines, emulation |
| // overhead). |
| // |
| // The method of timekeeping used to compute the virtual execution duration |
| // MUST be consistent with what is used to enforce the |
| // [Action][build.bazel.remote.execution.v2.Action]'s `timeout`. There is no |
| // relationship between the virtual execution duration and the values of |
| // `execution_start_timestamp` and `execution_completed_timestamp`. |
| google.protobuf.Duration virtual_execution_duration = 12; |
| |
| // When the worker started uploading action outputs. |
| google.protobuf.Timestamp output_upload_start_timestamp = 9; |
| |
| // When the worker finished uploading action outputs. |
| google.protobuf.Timestamp output_upload_completed_timestamp = 10; |
| |
| // Details that are specific to the kind of worker used. For example, |
| // on POSIX-like systems this could contain a message with |
| // getrusage(2) statistics. |
| repeated google.protobuf.Any auxiliary_metadata = 11; |
| } |
| |
| // An ActionResult represents the result of an |
| // [Action][build.bazel.remote.execution.v2.Action] being run. |
| // |
| // It is advised that at least one field (for example |
| // `ActionResult.execution_metadata.worker`) have a non-default value, to |
| // ensure that the serialized value is non-empty, which can then be used |
| // as a basic data sanity check. |
| message ActionResult { |
| reserved 1; // Reserved for use as the resource name. |
| |
| // The output files of the action. For each output file requested in the |
| // `output_files` or `output_paths` field of the Action, if the corresponding |
| // file existed after the action completed, a single entry will be present |
| // either in this field, or the `output_file_symlinks` field if the file was |
| // a symbolic link to another file (`output_symlinks` field after v2.1). |
| // |
| // If an output listed in `output_files` was found, but was a directory rather |
| // than a regular file, the server will return a FAILED_PRECONDITION. |
| // If the action does not produce the requested output, then that output |
| // will be omitted from the list. The server is free to arrange the output |
| // list as desired; clients MUST NOT assume that the output list is sorted. |
| repeated OutputFile output_files = 2; |
| |
| // The output files of the action that are symbolic links to other files. Those |
| // may be links to other output files, or input files, or even absolute paths |
| // outside of the working directory, if the server supports |
| // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy]. |
| // For each output file requested in the `output_files` or `output_paths` |
| // field of the Action, if the corresponding file existed after |
| // the action completed, a single entry will be present either in this field, |
| // or in the `output_files` field, if the file was not a symbolic link. |
| // |
| // If an output symbolic link of the same name as listed in `output_files` of |
| // the Command was found, but its target type was not a regular file, the |
| // server will return a FAILED_PRECONDITION. |
| // If the action does not produce the requested output, then that output |
| // will be omitted from the list. The server is free to arrange the output |
| // list as desired; clients MUST NOT assume that the output list is sorted. |
| // |
| // DEPRECATED as of v2.1. Servers that wish to be compatible with the v2.0 API |
| // should still populate this field in addition to `output_symlinks`. |
| repeated OutputSymlink output_file_symlinks = 10; |
| |
| // New in v2.1: this field will only be populated if the command |
| // `output_paths` field was used, and not the pre-v2.1 `output_files` or |
| // `output_directories` fields. |
| // The output paths of the action that are symbolic links to other paths. Those |
| // may be links to other outputs, or inputs, or even absolute paths |
| // outside of the working directory, if the server supports |
| // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy]. |
| // A single entry for each output requested in `output_paths` |
| // field of the Action, if the corresponding path existed after |
| // the action completed and was a symbolic link. |
| // |
| // If the action does not produce a requested output, then that output |
| // will be omitted from the list. The server is free to arrange the output |
| // list as desired; clients MUST NOT assume that the output list is sorted. |
| repeated OutputSymlink output_symlinks = 12; |
| |
| // The output directories of the action. For each output directory requested |
| // in the `output_directories` or `output_paths` field of the Action, if the |
| // corresponding directory existed after the action completed, a single entry |
| // will be present in the output list, which will contain the digest of a |
| // [Tree][build.bazel.remote.execution.v2.Tree] message containing the |
| // directory tree, and the path equal exactly to the corresponding Action |
| // output_directories member. |
| // |
| // As an example, suppose the Action had an output directory `a/b/dir` and the |
| // execution produced the following contents in `a/b/dir`: a file named `bar` |
| // and a directory named `foo` with an executable file named `baz`. Then, |
| // `output_directories` will contain (hashes shortened for readability): |
| // |
| // ```json |
| // // OutputDirectory proto: |
| // { |
| // path: "a/b/dir" |
| // tree_digest: { |
| // hash: "4a73bc9d03...", |
| // size: 55 |
| // } |
| // } |
| // // Tree proto with hash "4a73bc9d03..." and size 55: |
| // { |
| // root: { |
| // files: [ |
| // { |
| // name: "bar", |
| // digest: { |
| // hash: "4a73bc9d03...", |
| // size: 65534 |
| // } |
| // } |
| // ], |
| // directories: [ |
| // { |
| // name: "foo", |
| // digest: { |
| // hash: "4cf2eda940...", |
| // size: 43 |
| // } |
| // } |
| // ] |
| // } |
| // children: { |
| // // (Directory proto with hash "4cf2eda940..." and size 43) |
| // files: [ |
| // { |
| // name: "baz", |
| // digest: { |
| // hash: "b2c941073e...", |
| // size: 1294, |
| // }, |
| // is_executable: true |
| // } |
| // ] |
| // } |
| // } |
| // ``` |
| // If an output of the same name as listed in `output_files` of |
| // the Command was found in `output_directories`, but was not a directory, the |
| // server will return a FAILED_PRECONDITION. |
| repeated OutputDirectory output_directories = 3; |
| |
| // The output directories of the action that are symbolic links to other |
| // directories. Those may be links to other output directories, or input |
| // directories, or even absolute paths outside of the working directory, |
| // if the server supports |
| // [SymlinkAbsolutePathStrategy.ALLOWED][build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy]. |
| // For each output directory requested in the `output_directories` field of |
| // the Action, if the directory existed after the action completed, a |
| // single entry will be present either in this field, or in the |
| // `output_directories` field, if the directory was not a symbolic link. |
| // |
| // If an output of the same name was found, but was a symbolic link to a file |
| // instead of a directory, the server will return a FAILED_PRECONDITION. |
| // If the action does not produce the requested output, then that output |
| // will be omitted from the list. The server is free to arrange the output |
| // list as desired; clients MUST NOT assume that the output list is sorted. |
| // |
| // DEPRECATED as of v2.1. Servers that wish to be compatible with the v2.0 API |
| // should still populate this field in addition to `output_symlinks`. |
| repeated OutputSymlink output_directory_symlinks = 11; |
| |
| // The exit code of the command. |
| int32 exit_code = 4; |
| |
| // The standard output buffer of the action. The server SHOULD NOT inline |
| // stdout unless requested by the client in the |
| // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest] |
| // message. The server MAY omit inlining, even if requested, and MUST do so if inlining |
| // would cause the response to exceed message size limits. |
| // Clients SHOULD NOT populate this field when uploading to the cache. |
| bytes stdout_raw = 5; |
| |
| // The digest for a blob containing the standard output of the action, which |
| // can be retrieved from the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. |
| Digest stdout_digest = 6; |
| |
| // The standard error buffer of the action. The server SHOULD NOT inline |
| // stderr unless requested by the client in the |
| // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest] |
| // message. The server MAY omit inlining, even if requested, and MUST do so if inlining |
| // would cause the response to exceed message size limits. |
| // Clients SHOULD NOT populate this field when uploading to the cache. |
| bytes stderr_raw = 7; |
| |
| // The digest for a blob containing the standard error of the action, which |
| // can be retrieved from the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. |
| Digest stderr_digest = 8; |
| |
| // The details of the execution that originally produced this result. |
| ExecutedActionMetadata execution_metadata = 9; |
| } |
| |
| // An `OutputFile` is similar to a |
| // [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an |
| // output in an `ActionResult`. It allows a full file path rather than |
| // only a name. |
| message OutputFile { |
| // The full path of the file relative to the working directory, including the |
| // filename. The path separator is a forward slash `/`. Since this is a |
| // relative path, it MUST NOT begin with a leading forward slash. |
| string path = 1; |
| |
| // The digest of the file's content. |
| Digest digest = 2; |
| |
| reserved 3; // Used for a removed field in an earlier version of the API. |
| |
| // True if file is executable, false otherwise. |
| bool is_executable = 4; |
| |
| // The contents of the file if inlining was requested. The server SHOULD NOT inline |
| // file contents unless requested by the client in the |
| // [GetActionResultRequest][build.bazel.remote.execution.v2.GetActionResultRequest] |
| // message. The server MAY omit inlining, even if requested, and MUST do so if inlining |
| // would cause the response to exceed message size limits. |
| // Clients SHOULD NOT populate this field when uploading to the cache. |
| bytes contents = 5; |
| |
| reserved 6;
| 
| // The supported node properties of the OutputFile, if requested by the Action.
| NodeProperties node_properties = 7;
| } |
| |
| // A `Tree` contains all the |
| // [Directory][build.bazel.remote.execution.v2.Directory] protos in a |
| // single directory Merkle tree, compressed into one message. |
| message Tree { |
| // The root directory in the tree. |
| Directory root = 1; |
| |
| // All the child directories: the directories referred to by the root and, |
| // recursively, all its children. In order to reconstruct the directory tree, |
| // the client must take the digests of each of the child directories and then |
| // build up a tree starting from the `root`. |
| // Servers SHOULD ensure that these are ordered consistently such that two |
| // actions producing equivalent output directories on the same server |
| // implementation also produce Tree messages with matching digests. |
| repeated Directory children = 2; |
| } |
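| 
| // As a purely illustrative sketch (not part of the API), a client could
| // rebuild the path structure of a `Tree` by keying each child `Directory`
| // by the digest of its serialized bytes. The module name below assumes
| // Python bindings generated from this file, the digest function is assumed
| // to be SHA-256, and re-serialization of the parsed children is assumed to
| // be deterministic:
| //
| // ```python
| // import hashlib
| // from build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
| //
| // def _digest_key(directory):
| //     # Key a Directory by the digest of its serialized form.
| //     data = directory.SerializeToString()
| //     return (hashlib.sha256(data).hexdigest(), len(data))
| //
| // def directories_by_path(tree):
| //     # Returns a map from relative path ("" for the root) to Directory.
| //     children = {_digest_key(d): d for d in tree.children}
| //     result = {}
| //     def visit(directory, prefix):
| //         result[prefix] = directory
| //         for node in directory.directories:
| //             child = children[(node.digest.hash, node.digest.size_bytes)]
| //             visit(child, prefix + "/" + node.name if prefix else node.name)
| //     visit(tree.root, "")
| //     return result
| // ```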
| |
| // An `OutputDirectory` is the output in an `ActionResult` corresponding to a |
| // directory's full contents rather than a single file. |
| message OutputDirectory { |
| // The full path of the directory relative to the working directory. The path |
| // separator is a forward slash `/`. Since this is a relative path, it MUST |
| // NOT begin with a leading forward slash. The empty string value is allowed, |
| // and it denotes the entire working directory. |
| string path = 1; |
| |
| reserved 2; // Used for a removed field in an earlier version of the API. |
| |
| // The digest of the encoded |
| // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the |
| // directory's contents. |
| Digest tree_digest = 3; |
| |
| // If set, consumers MAY make the following assumptions about the |
| // directories contained in the Tree, so that it may be
| // instantiated on a local file system by scanning through it |
| // sequentially: |
| // |
| // - All directories with the same binary representation are stored |
| // exactly once. |
| // - All directories, apart from the root directory, are referenced by |
| // at least one parent directory. |
| // - Directories are stored in topological order, with parents being
| // stored before their children. The root directory is thus the first to
| // be stored. |
| // |
| // Additionally, the Tree MUST be encoded as a stream of records, |
| // where each record has the following format: |
| // |
| // - A tag byte, having one of the following two values: |
| // - (1 << 3) | 2 == 0x0a: First record (the root directory). |
| // - (2 << 3) | 2 == 0x12: Any subsequent records (child directories). |
| // - The size of the directory, encoded as a base 128 varint. |
| // - The contents of the directory, encoded as a binary serialized |
| // Protobuf message. |
| // |
| // This encoding is a subset of the Protobuf wire format of the Tree |
| // message. As it is only permitted to store data associated with |
| // field numbers 1 and 2, the tag MUST be encoded as a single byte. |
| // More details on the Protobuf wire format can be found here: |
| // https://developers.google.com/protocol-buffers/docs/encoding |
| // |
| // It is recommended that implementations using this feature construct |
| // Tree objects manually using the specification given above, as |
| // opposed to using a Protobuf library to marshal a full Tree message. |
| // As individual Directory messages already need to be marshaled to |
| // compute their digests, constructing the Tree object manually avoids |
| // redundant marshaling. |
| bool is_topologically_sorted = 4; |
| } |
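| 
| // As an illustrative sketch only, the record stream described by
| // `is_topologically_sorted` can be produced directly, without marshaling a
| // full `Tree` message. `root` and `children` below stand for already
| // constructed `Directory` protos, with `children` deduplicated and ordered
| // parents-first (an assumption of this sketch, not an API guarantee):
| //
| // ```python
| // def _varint(value):
| //     # Base 128 varint encoding used by the Protobuf wire format.
| //     out = bytearray()
| //     while True:
| //         byte = value & 0x7F
| //         value >>= 7
| //         out.append(byte | (0x80 if value else 0x00))
| //         if not value:
| //             return bytes(out)
| //
| // def encode_topologically_sorted_tree(root, children):
| //     records = []
| //     for tag, directory in [(b"\x0a", root)] + [(b"\x12", c) for c in children]:
| //         body = directory.SerializeToString()   # the serialized Directory
| //         records.append(tag + _varint(len(body)) + body)
| //     return b"".join(records)                   # valid Tree wire format
| // ```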
| |
| // An `OutputSymlink` is similar to a |
| // [Symlink][build.bazel.remote.execution.v2.SymlinkNode], but it is used as an |
| // output in an `ActionResult`. |
| // |
| // `OutputSymlink` is binary-compatible with `SymlinkNode`. |
| message OutputSymlink { |
| // The full path of the symlink relative to the working directory, including the |
| // filename. The path separator is a forward slash `/`. Since this is a |
| // relative path, it MUST NOT begin with a leading forward slash. |
| string path = 1; |
| |
| // The target path of the symlink. The path separator is a forward slash `/`. |
| // The target path can be relative to the parent directory of the symlink or |
| // it can be an absolute path starting with `/`. Support for absolute paths |
| // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities] |
| // API. `..` components are allowed anywhere in the target path. |
| string target = 2; |
| |
| reserved 3;
| 
| // The supported node properties of the OutputSymlink, if requested by the
| // Action.
| NodeProperties node_properties = 4;
| } |
| |
| // An `ExecutionPolicy` can be used to control the scheduling of the action. |
| message ExecutionPolicy { |
| // The priority (relative importance) of this action. Generally, a lower value |
| // means that the action should be run sooner than actions having a greater |
| // priority value, but the interpretation of a given value is server- |
| // dependent. A priority of 0 means the *default* priority. Priorities may be |
| // positive or negative, and such actions should run later or sooner than |
| // actions having the default priority, respectively. The particular semantics |
| // of this field are up to the server. In particular, each server has its own
| // supported range of priorities and decides how these map onto its
| // scheduling policy.
| int32 priority = 1; |
| } |
| |
| // A `ResultsCachePolicy` is used for fine-grained control over how action |
| // outputs are stored in the CAS and Action Cache. |
| message ResultsCachePolicy { |
| // The priority (relative importance) of this content in the overall cache. |
| // Generally, a lower value means a longer retention time or other advantage, |
| // but the interpretation of a given value is server-dependent. A priority of |
| // 0 means a *default* value, decided by the server. |
| // |
| // The particular semantics of this field are up to the server. In particular,
| // each server has its own supported range of priorities and decides how
| // these map onto its retention/eviction policy.
| int32 priority = 1; |
| } |
| |
| // A request message for |
| // [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute]. |
| message ExecuteRequest { |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| |
| // If true, the action will be executed even if its result is already |
| // present in the [ActionCache][build.bazel.remote.execution.v2.ActionCache]. |
| // The execution is still allowed to be merged with other in-flight executions |
| // of the same action, however - semantically, the service MUST only guarantee |
| // that the results of an execution with this field set were not visible |
| // before the corresponding execution request was sent. |
| // Note that actions from execution requests with this field set are still
| // eligible to be entered into the action cache upon completion, and services
| // SHOULD overwrite any existing entry. This allows
| // skip_cache_lookup requests to be used as a mechanism for replacing action |
| // cache entries that reference outputs no longer available or that are |
| // poisoned in any way. |
| // If false, the result may be served from the action cache. |
| bool skip_cache_lookup = 3; |
| |
| reserved 2, 4, 5; // Used for removed fields in an earlier version of the API. |
| |
| // The digest of the [Action][build.bazel.remote.execution.v2.Action] to |
| // execute. |
| Digest action_digest = 6; |
| |
| // An optional policy for execution of the action. |
| // The server will have a default policy if this is not provided. |
| ExecutionPolicy execution_policy = 7; |
| |
| // An optional policy for the results of this execution in the remote cache. |
| // The server will have a default policy if this is not provided. |
| // This may be applied to both the ActionResult and the associated blobs. |
| ResultsCachePolicy results_cache_policy = 8; |
| } |
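| 
| // A minimal illustrative sketch of populating an `ExecuteRequest` once the
| // `Action`, `Command`, and input tree are already in the CAS. The module
| // name assumes Python bindings generated from this file; the instance name
| // and digest values are placeholders:
| //
| // ```python
| // from build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
| //
| // request = re_pb2.ExecuteRequest(
| //     instance_name="my-instance",
| //     action_digest=re_pb2.Digest(hash="4a73bc9d03...", size_bytes=147),
| //     skip_cache_lookup=False,   # allow the result to be served from cache
| //     execution_policy=re_pb2.ExecutionPolicy(priority=0),
| // )
| // ```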
| |
| // A `LogFile` is a log stored in the CAS. |
| message LogFile { |
| // The digest of the log contents. |
| Digest digest = 1; |
| |
| // This is a hint as to the purpose of the log, and is set to true if the log |
| // is human-readable text that can be usefully displayed to a user, and false |
| // otherwise. For instance, if a command-line client wishes to print the |
| // server logs to the terminal for a failed action, this allows it to avoid |
| // displaying a binary file. |
| bool human_readable = 2; |
| } |
| |
| // The response message for |
| // [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute], |
| // which will be contained in the [response |
| // field][google.longrunning.Operation.response] of the |
| // [Operation][google.longrunning.Operation]. |
| message ExecuteResponse { |
| // The result of the action. |
| ActionResult result = 1; |
| |
| // True if the result was served from cache, false if it was executed. |
| bool cached_result = 2; |
| |
| // If the status has a code other than `OK`, it indicates that the action did |
| // not finish execution. For example, if the operation times out during |
| // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST |
| // use this field for errors in execution, rather than the error field on the |
| // `Operation` object. |
| // |
| // If the status code is other than `OK`, then the result MUST NOT be cached. |
| // For an error status, the `result` field is optional; the server may |
| // populate the output-, stdout-, and stderr-related fields if it has any |
| // information available, such as the stdout and stderr of a timed-out action. |
| google.rpc.Status status = 3; |
| |
| // An optional list of additional log outputs the server wishes to provide. A |
| // server can use this to return execution-specific logs however it wishes. |
| // This is intended primarily to make it easier for users to debug issues that |
| // may be outside of the actual job execution, such as by identifying the |
| // worker executing the action or by providing logs from the worker's setup |
| // phase. The keys SHOULD be human readable so that a client can display them |
| // to a user. |
| map<string, LogFile> server_logs = 4; |
| |
| // Freeform informational message with details on the execution of the action |
| // that may be displayed to the user upon failure or when requested explicitly. |
| string message = 5; |
| } |
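| 
| // An illustrative sketch of consuming the `Operation` stream returned by
| // `Execute` and unpacking the final `ExecuteResponse`. The generated module
| // and stub names (`remote_execution_pb2_grpc.ExecutionStub`) follow standard
| // protoc naming and are assumptions of this sketch:
| //
| // ```python
| // from build.bazel.remote.execution.v2 import (
| //     remote_execution_pb2 as re_pb2,
| //     remote_execution_pb2_grpc as re_grpc,
| // )
| //
| // def execute_and_wait(channel, request):
| //     # Returns (ActionResult, served_from_cache); raises on execution errors.
| //     stub = re_grpc.ExecutionStub(channel)
| //     for operation in stub.Execute(request):
| //         if not operation.done:
| //             continue                   # progress updates carry only metadata
| //         response = re_pb2.ExecuteResponse()
| //         operation.response.Unpack(response)
| //         if response.status.code != 0:  # google.rpc.Code.OK
| //             raise RuntimeError("execution failed: " + response.status.message)
| //         return response.result, response.cached_result
| //     raise RuntimeError("stream ended without a completed operation")
| // ```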
| |
| // The current stage of action execution. |
| // |
| // Even though these stages are numbered according to the order in which |
| // they generally occur, there is no requirement that the remote |
| // execution system report events in this order. For example, an
| // operation MAY transition from the EXECUTING stage back to QUEUED |
| // in case the hardware on which the operation executes fails. |
| // |
| // If and only if the remote execution system reports that an operation |
| // has reached the COMPLETED stage, it MUST set the [done |
| // field][google.longrunning.Operation.done] of the |
| // [Operation][google.longrunning.Operation] and terminate the stream. |
| message ExecutionStage { |
| enum Value { |
| // Invalid value. |
| UNKNOWN = 0; |
| |
| // Checking the result against the cache. |
| CACHE_CHECK = 1; |
| |
| // Currently idle, awaiting a free machine to execute. |
| QUEUED = 2; |
| |
| // Currently being executed by a worker. |
| EXECUTING = 3; |
| |
| // Finished execution. |
| COMPLETED = 4; |
| } |
| } |
| |
| // Metadata about an ongoing |
| // [execution][build.bazel.remote.execution.v2.Execution.Execute], which |
| // will be contained in the [metadata |
| // field][google.longrunning.Operation.metadata] of the
| // [Operation][google.longrunning.Operation]. |
| message ExecuteOperationMetadata { |
| // The current stage of execution. |
| ExecutionStage.Value stage = 1; |
| |
| // The digest of the [Action][build.bazel.remote.execution.v2.Action] |
| // being executed. |
| Digest action_digest = 2; |
| |
| // If set, the client can use this resource name with |
| // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the |
| // standard output from the endpoint hosting streamed responses. |
| string stdout_stream_name = 3; |
| |
| // If set, the client can use this resource name with |
| // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the |
| // standard error from the endpoint hosting streamed responses. |
| string stderr_stream_name = 4; |
| } |
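| 
| // An illustrative sketch of streaming live stdout while the action runs,
| // using the stream name above with the ByteStream API
| // (google/bytestream/bytestream.proto). The Python module path for the
| // ByteStream bindings is an assumption of this sketch:
| //
| // ```python
| // from google.bytestream import bytestream_pb2, bytestream_pb2_grpc
| //
| // def print_live_stdout(stream_channel, exec_metadata):
| //     # exec_metadata: an ExecuteOperationMetadata from the Operation stream.
| //     # stream_channel should target the endpoint hosting streamed responses,
| //     # which may differ from the execution endpoint.
| //     if not exec_metadata.stdout_stream_name:
| //         return
| //     stub = bytestream_pb2_grpc.ByteStreamStub(stream_channel)
| //     read = bytestream_pb2.ReadRequest(
| //         resource_name=exec_metadata.stdout_stream_name)
| //     for chunk in stub.Read(read):
| //         print(chunk.data.decode("utf-8", errors="replace"), end="")
| // ```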
| |
| // A request message for |
| // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]. |
| message WaitExecutionRequest { |
| // The name of the [Operation][google.longrunning.Operation] |
| // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute]. |
| string name = 1; |
| } |
| |
| // A request message for |
| // [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult]. |
| message GetActionResultRequest { |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| |
| // The digest of the [Action][build.bazel.remote.execution.v2.Action] |
| // whose result is requested. |
| Digest action_digest = 2; |
| |
| // A hint to the server to request inlining stdout in the |
| // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message. |
| bool inline_stdout = 3; |
| |
| // A hint to the server to request inlining stderr in the |
| // [ActionResult][build.bazel.remote.execution.v2.ActionResult] message. |
| bool inline_stderr = 4; |
| |
| // A hint to the server to inline the contents of the listed output files. |
| // Each path needs to exactly match one file path in either `output_paths` or |
| // `output_files` (DEPRECATED since v2.1) in the |
| // [Command][build.bazel.remote.execution.v2.Command] message. |
| repeated string inline_output_files = 5; |
| } |
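| 
| // An illustrative sketch of checking the action cache before executing,
| // asking the server to inline stdout. Module and stub names assume standard
| // protoc output for this file:
| //
| // ```python
| // import grpc
| // from build.bazel.remote.execution.v2 import (
| //     remote_execution_pb2 as re_pb2,
| //     remote_execution_pb2_grpc as re_grpc,
| // )
| //
| // def lookup_cached_result(channel, instance_name, action_digest):
| //     stub = re_grpc.ActionCacheStub(channel)
| //     request = re_pb2.GetActionResultRequest(
| //         instance_name=instance_name,
| //         action_digest=action_digest,
| //         inline_stdout=True,           # a hint; the server MAY ignore it
| //     )
| //     try:
| //         return stub.GetActionResult(request)  # ActionResult on a cache hit
| //     except grpc.RpcError as err:
| //         if err.code() == grpc.StatusCode.NOT_FOUND:
| //             return None                       # miss: fall back to Execute
| //         raise
| // ```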
| |
| // A request message for |
| // [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult]. |
| message UpdateActionResultRequest { |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| |
| // The digest of the [Action][build.bazel.remote.execution.v2.Action] |
| // whose result is being uploaded. |
| Digest action_digest = 2; |
| |
| // The [ActionResult][build.bazel.remote.execution.v2.ActionResult] |
| // to store in the cache. |
| ActionResult action_result = 3; |
| |
| // An optional policy for the results of this execution in the remote cache. |
| // The server will have a default policy if this is not provided. |
| // This may be applied to both the ActionResult and the associated blobs. |
| ResultsCachePolicy results_cache_policy = 4; |
| } |
| |
| // A request message for |
| // [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs]. |
| message FindMissingBlobsRequest { |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| |
| // A list of the blobs to check. |
| repeated Digest blob_digests = 2; |
| } |
| |
| // A response message for |
| // [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs]. |
| message FindMissingBlobsResponse { |
| // A list of the requested blobs that are *not* present in the storage.
| repeated Digest missing_blob_digests = 2; |
| } |
| |
| // A request message for |
| // [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]. |
| message BatchUpdateBlobsRequest { |
| // A request corresponding to a single blob that the client wants to upload. |
| message Request { |
| // The digest of the blob. This MUST be the digest of `data`. |
| Digest digest = 1; |
| |
| // The raw binary data. |
| bytes data = 2; |
| |
| // The format of `data`. Must be `IDENTITY`/unspecified, or one of the |
| // compressors advertised by the |
| // [CacheCapabilities.supported_batch_compressors][build.bazel.remote.execution.v2.CacheCapabilities.supported_batch_compressors] |
| // field. |
| Compressor.Value compressor = 3; |
| } |
| |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| |
| // The individual upload requests. |
| repeated Request requests = 2; |
| } |
| |
| // A response message for |
| // [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]. |
| message BatchUpdateBlobsResponse { |
| // A response corresponding to a single blob that the client tried to upload. |
| message Response { |
| // The blob digest to which this response corresponds. |
| Digest digest = 1; |
| |
| // The result of attempting to upload that blob. |
| google.rpc.Status status = 2; |
| } |
| |
| // The responses to the requests. |
| repeated Response responses = 1; |
| } |
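| 
| // An illustrative sketch of the usual upload flow: ask which blobs are
| // missing, batch-upload only those, and check the per-blob status. The
| // module/stub names and the use of SHA-256 are assumptions of this sketch,
| // and the blobs are assumed to fit within a single batch request:
| //
| // ```python
| // import hashlib
| // from build.bazel.remote.execution.v2 import (
| //     remote_execution_pb2 as re_pb2,
| //     remote_execution_pb2_grpc as re_grpc,
| // )
| //
| // def upload_missing(channel, instance_name, blobs):
| //     stub = re_grpc.ContentAddressableStorageStub(channel)
| //     by_key = {}
| //     for data in blobs:
| //         digest = re_pb2.Digest(hash=hashlib.sha256(data).hexdigest(),
| //                                size_bytes=len(data))
| //         by_key[(digest.hash, digest.size_bytes)] = (digest, data)
| //
| //     missing = stub.FindMissingBlobs(re_pb2.FindMissingBlobsRequest(
| //         instance_name=instance_name,
| //         blob_digests=[digest for digest, _ in by_key.values()]))
| //
| //     requests = [
| //         re_pb2.BatchUpdateBlobsRequest.Request(
| //             digest=d, data=by_key[(d.hash, d.size_bytes)][1])
| //         for d in missing.missing_blob_digests
| //     ]
| //     response = stub.BatchUpdateBlobs(re_pb2.BatchUpdateBlobsRequest(
| //         instance_name=instance_name, requests=requests))
| //     for r in response.responses:
| //         if r.status.code != 0:        # google.rpc.Code.OK
| //             raise RuntimeError("upload failed for " + r.digest.hash)
| // ```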
| |
| // A request message for |
| // [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs]. |
| message BatchReadBlobsRequest { |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| |
| // The individual blob digests. |
| repeated Digest digests = 2; |
| |
| // A list of acceptable encodings for the returned inlined data, in no |
| // particular order. `IDENTITY` is always allowed even if not specified here. |
| repeated Compressor.Value acceptable_compressors = 3; |
| } |
| |
| // A response message for |
| // [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs]. |
| message BatchReadBlobsResponse { |
| // A response corresponding to a single blob that the client tried to download. |
| message Response { |
| // The digest to which this response corresponds. |
| Digest digest = 1; |
| |
| // The raw binary data. |
| bytes data = 2; |
| |
| // The format the data is encoded in. MUST be `IDENTITY`/unspecified, |
| // or one of the acceptable compressors specified in the `BatchReadBlobsRequest`. |
| Compressor.Value compressor = 4; |
| |
| // The result of attempting to download that blob. |
| google.rpc.Status status = 3; |
| } |
| |
| // The responses to the requests. |
| repeated Response responses = 1; |
| } |
| |
| // A request message for |
| // [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree]. |
| message GetTreeRequest { |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| |
| // The digest of the root, which must be an encoded |
| // [Directory][build.bazel.remote.execution.v2.Directory] message |
| // stored in the |
| // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. |
| Digest root_digest = 2; |
| |
| // A maximum page size to request. If present, the server will request no more |
| // than this many items. Regardless of whether a page size is specified, the |
| // server may place its own limit on the number of items to be returned and |
| // require the client to retrieve more items using a subsequent request. |
| int32 page_size = 3; |
| |
| // A page token, which must be a value received in a previous |
| // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse]. |
| // If present, the server will use that token as an offset, returning only |
| // that page and the ones that succeed it. |
| string page_token = 4; |
| } |
| |
| // A response message for |
| // [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree]. |
| message GetTreeResponse { |
| // The directories descended from the requested root. |
| repeated Directory directories = 1; |
| |
| // If present, signifies that there are more results which the client can |
| // retrieve by passing this as the page_token in a subsequent |
| // [request][build.bazel.remote.execution.v2.GetTreeRequest]. |
| // If empty, signifies that this is the last page of results. |
| string next_page_token = 2; |
| } |
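| 
| // An illustrative sketch of fetching a whole tree, resuming from the last
| // page token if the call is interrupted. It assumes the server-streaming
| // GetTree method declared earlier in this file and the usual protoc-generated
| // module and stub names:
| //
| // ```python
| // import grpc
| // from build.bazel.remote.execution.v2 import (
| //     remote_execution_pb2 as re_pb2,
| //     remote_execution_pb2_grpc as re_grpc,
| // )
| //
| // def fetch_tree(channel, instance_name, root_digest):
| //     stub = re_grpc.ContentAddressableStorageStub(channel)
| //     directories, page_token = [], ""
| //     while True:
| //         stream = stub.GetTree(re_pb2.GetTreeRequest(
| //             instance_name=instance_name,
| //             root_digest=root_digest,
| //             page_token=page_token))
| //         try:
| //             for response in stream:
| //                 directories.extend(response.directories)
| //                 page_token = response.next_page_token
| //             return directories
| //         except grpc.RpcError as err:
| //             # Resume from the last token only on transient failures.
| //             if err.code() != grpc.StatusCode.UNAVAILABLE or not page_token:
| //                 raise
| // ```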
| |
| // A request message for |
| // [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities]. |
| message GetCapabilitiesRequest { |
| // The instance of the execution system to operate against. A server may |
| // support multiple instances of the execution system (with their own workers, |
| // storage, caches, etc.). The server MAY require use of this field to select |
| // between them in an implementation-defined fashion, otherwise it can be |
| // omitted. |
| string instance_name = 1; |
| } |
| |
| // A response message for |
| // [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities]. |
| message ServerCapabilities { |
| // Capabilities of the remote cache system. |
| CacheCapabilities cache_capabilities = 1; |
| |
| // Capabilities of the remote execution system. |
| ExecutionCapabilities execution_capabilities = 2; |
| |
| // Earliest RE API version supported, including deprecated versions. |
| build.bazel.semver.SemVer deprecated_api_version = 3; |
| |
| // Earliest non-deprecated RE API version supported. |
| build.bazel.semver.SemVer low_api_version = 4; |
| |
| // Latest RE API version supported. |
| build.bazel.semver.SemVer high_api_version = 5; |
| } |
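| 
| // An illustrative sketch of probing capabilities before deciding whether to
| // execute remotely and how large batch uploads may be. Module/stub names are
| // assumptions; the 4 MiB fallback is a client-side choice, not an API value:
| //
| // ```python
| // from build.bazel.remote.execution.v2 import (
| //     remote_execution_pb2 as re_pb2,
| //     remote_execution_pb2_grpc as re_grpc,
| // )
| //
| // def probe_server(channel, instance_name):
| //     stub = re_grpc.CapabilitiesStub(channel)
| //     caps = stub.GetCapabilities(
| //         re_pb2.GetCapabilitiesRequest(instance_name=instance_name))
| //     can_execute = caps.execution_capabilities.exec_enabled
| //     # 0 means the server sets no batch limit of its own.
| //     batch_limit = (caps.cache_capabilities.max_batch_total_size_bytes
| //                    or 4 * 1024 * 1024)
| //     return can_execute, batch_limit
| // ```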
| |
| // The digest function used for converting values into keys for CAS and Action |
| // Cache. |
| message DigestFunction { |
| enum Value { |
| // It is an error for the server to return this value. |
| UNKNOWN = 0; |
| |
| // The SHA-256 digest function. |
| SHA256 = 1; |
| |
| // The SHA-1 digest function. |
| SHA1 = 2; |
| |
| // The MD5 digest function. |
| MD5 = 3; |
| |
| // The Microsoft "VSO-Hash" paged SHA256 digest function. |
| // See https://github.com/microsoft/BuildXL/blob/master/Documentation/Specs/PagedHash.md . |
| VSO = 4; |
| |
| // The SHA-384 digest function. |
| SHA384 = 5; |
| |
| // The SHA-512 digest function. |
| SHA512 = 6; |
| |
| // Murmur3 128-bit digest function, x64 variant. Note that this is not a |
| // cryptographic hash function and its collision properties are not strongly guaranteed. |
| // See https://github.com/aappleby/smhasher/wiki/MurmurHash3 . |
| MURMUR3 = 7; |
| } |
| } |
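| 
| // As a small illustrative sketch, a SHA256 key for the CAS or Action Cache
| // is the lowercase hex hash of the bytes plus their length, carried in the
| // `Digest` message defined earlier in this file (module name assumed):
| //
| // ```python
| // import hashlib
| // from build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
| //
| // def sha256_digest(data):
| //     return re_pb2.Digest(hash=hashlib.sha256(data).hexdigest(),
| //                          size_bytes=len(data))
| //
| // # e.g. keying a serialized Action for the action cache:
| // # action_digest = sha256_digest(action.SerializeToString())
| // ```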
| |
| // Describes the server/instance capabilities for updating the action cache. |
| message ActionCacheUpdateCapabilities { |
| bool update_enabled = 1; |
| } |
| |
| // Allowed values for priority in
| // [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy] and
| // [ExecutionPolicy][build.bazel.remote.execution.v2.ExecutionPolicy].
| // Used for querying the valid priority ranges for both caching and execution.
| message PriorityCapabilities { |
| // Supported range of priorities, including boundaries. |
| message PriorityRange { |
| // The minimum numeric value for this priority range, which represents the |
| // most urgent task or longest retained item. |
| int32 min_priority = 1; |
| // The maximum numeric value for this priority range, which represents the |
| // least urgent task or shortest retained item. |
| int32 max_priority = 2; |
| } |
| repeated PriorityRange priorities = 1; |
| } |
| |
| // Describes how the server treats absolute symlink targets. |
| message SymlinkAbsolutePathStrategy { |
| enum Value { |
| // Invalid value. |
| UNKNOWN = 0; |
| |
| // Server will return an `INVALID_ARGUMENT` on input symlinks with absolute |
| // targets. |
| // If an action tries to create an output symlink with an absolute target, a |
| // `FAILED_PRECONDITION` will be returned. |
| DISALLOWED = 1; |
| |
| // Server will allow symlink targets to escape the input root tree, possibly |
| // resulting in non-hermetic builds. |
| ALLOWED = 2; |
| } |
| } |
| |
| // Compression formats which may be supported. |
| message Compressor { |
| enum Value { |
| // No compression. Servers and clients MUST always support this, and do |
| // not need to advertise it. |
| IDENTITY = 0; |
| |
| // Zstandard compression. |
| ZSTD = 1; |
| |
| // RFC 1951 Deflate. This format is identical to what is used by ZIP |
| // files. Headers such as the one generated by gzip are not |
| // included. |
| // |
| // It is advised to use algorithms such as Zstandard instead, as |
| // those are faster and/or provide a better compression ratio. |
| DEFLATE = 2; |
| } |
| } |
| |
| // Capabilities of the remote cache system. |
| message CacheCapabilities { |
| // All the digest functions supported by the remote cache. |
| // The remote cache may support multiple digest functions simultaneously.
| repeated DigestFunction.Value digest_functions = 1; |
| |
| // Capabilities for updating the action cache. |
| ActionCacheUpdateCapabilities action_cache_update_capabilities = 2; |
| |
| // Supported cache priority range for both CAS and ActionCache. |
| PriorityCapabilities cache_priority_capabilities = 3; |
| |
| // Maximum total size of blobs to be uploaded/downloaded using |
| // batch methods. A value of 0 means no limit is set, although |
| // in practice there will always be a message size limitation |
| // of the protocol in use, e.g. gRPC.
| int64 max_batch_total_size_bytes = 4; |
| |
| // Whether absolute symlink targets are supported. |
| SymlinkAbsolutePathStrategy.Value symlink_absolute_path_strategy = 5; |
| |
| // Compressors supported by the "compressed-blobs" bytestream resources. |
| // Servers MUST support identity/no-compression, even if it is not listed |
| // here. |
| // |
| // Note that this does not imply which compressors, if any, the server
| // supports at the gRPC level.
| repeated Compressor.Value supported_compressors = 6; |
| |
| // Compressors supported for inlined data in |
| // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs] |
| // requests. |
| repeated Compressor.Value supported_batch_update_compressors = 7; |
| } |
| |
| // Capabilities of the remote execution system. |
| message ExecutionCapabilities { |
| // Remote execution may only support a single digest function. |
| DigestFunction.Value digest_function = 1; |
| |
| // Whether remote execution is enabled for the particular server/instance. |
| bool exec_enabled = 2; |
| |
| // Supported execution priority range. |
| PriorityCapabilities execution_priority_capabilities = 3; |
| |
| // Supported node properties. |
| repeated string supported_node_properties = 4; |
| } |
| |
| // Details for the tool used to call the API. |
| message ToolDetails { |
| // Name of the tool, e.g. bazel. |
| string tool_name = 1; |
| |
| // Version of the tool used for the request, e.g. 5.0.3. |
| string tool_version = 2; |
| } |
| |
| // Optional metadata to attach to any RPC request to tell the server about the
| // external context of the request. The server may use this for logging or other |
| // purposes. To use it, the client attaches the header to the call using the |
| // canonical proto serialization: |
| // |
| // * name: `build.bazel.remote.execution.v2.requestmetadata-bin` |
| // * contents: the base64 encoded binary `RequestMetadata` message. |
| // Note: the gRPC library serializes binary headers encoded in base 64 by |
| // default (https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests). |
| // Therefore, if the gRPC library is used to pass/retrieve this |
| // metadata, the user may ignore the base64 encoding and assume it is simply |
| // serialized as a binary message. |
| message RequestMetadata { |
| // The details for the tool invoking the requests. |
| ToolDetails tool_details = 1; |
| |
| // An identifier that ties multiple requests to the same action. |
| // For example, multiple requests to the CAS, Action Cache, and Execution |
| // API are used in order to compile foo.cc. |
| string action_id = 2; |
| |
| // An identifier that ties multiple actions together to a final result. |
| // For example, multiple actions are required to build and run foo_test. |
| string tool_invocation_id = 3; |
| |
| // An identifier to tie multiple tool invocations together. For example, |
| // runs of foo_test, bar_test and baz_test on a post-submit of a given patch. |
| string correlated_invocations_id = 4; |
| |
| // A brief description of the kind of action, for example, CppCompile or GoLink. |
| // There is no standard agreed set of values for this, and they are expected to vary between different client tools. |
| string action_mnemonic = 5; |
| |
| // An identifier for the target which produced this action. |
| // No guarantees are made around how many actions may relate to a single target. |
| string target_id = 6; |
| |
| // An identifier for the configuration in which the target was built, |
| // e.g. for differentiating building host tools or different target platforms. |
| // There is no expectation that this value will have any particular structure, |
| // or equality across invocations, though some client tools may offer these guarantees. |
| string configuration_id = 7; |
| } |
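| 
| // An illustrative sketch of attaching `RequestMetadata` to a call with
| // grpc-python, where "-bin" metadata values are passed as raw bytes and the
| // library applies the base64 encoding described above. The identifier values
| // are placeholders:
| //
| // ```python
| // from build.bazel.remote.execution.v2 import remote_execution_pb2 as re_pb2
| //
| // HEADER = "build.bazel.remote.execution.v2.requestmetadata-bin"
| //
| // request_metadata = re_pb2.RequestMetadata(
| //     tool_details=re_pb2.ToolDetails(tool_name="bazel", tool_version="5.0.3"),
| //     action_id="compile-foo-cc",
| //     tool_invocation_id="build-1234",
| //     correlated_invocations_id="ci-run-42",
| // )
| // call_metadata = [(HEADER, request_metadata.SerializeToString())]
| // # e.g. stub.GetActionResult(request, metadata=call_metadata)
| // ```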