| // Copyright 2016 Google Inc. |
| // |
| // Licensed under the Apache License, Version 2.0 (the "License"); |
| // you may not use this file except in compliance with the License. |
| // You may obtain a copy of the License at |
| // |
| // http://www.apache.org/licenses/LICENSE-2.0 |
| // |
| // Unless required by applicable law or agreed to in writing, software |
| // distributed under the License is distributed on an "AS IS" BASIS, |
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| // See the License for the specific language governing permissions and |
| // limitations under the License. |
| |
| syntax = "proto3"; |
| |
| package google.bigtable.v1; |
| |
| import "google/bigtable/v1/bigtable_data.proto"; |
| import "google/rpc/status.proto"; |
| |
| option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; |
| option java_multiple_files = true; |
| option java_outer_classname = "BigtableServiceMessagesProto"; |
| option java_package = "com.google.bigtable.v1"; |
| |
| |
// Request message for BigtableService.ReadRows.
| message ReadRowsRequest { |
| // The unique name of the table from which to read. |
| string table_name = 1; |
| |
  // If neither row_key, row_range, nor row_set is set, reads from all rows.
| oneof target { |
| // The key of a single row from which to read. |
| bytes row_key = 2; |
| |
| // A range of rows from which to read. |
| RowRange row_range = 3; |
| |
| // A set of rows from which to read. Entries need not be in order, and will |
| // be deduplicated before reading. |
| // The total serialized size of the set must not exceed 1MB. |
| RowSet row_set = 8; |
| } |
| |
  // The filter to apply to the contents of the specified row(s). If unset,
  // reads the entirety of each row.
| RowFilter filter = 5; |
| |
| // By default, rows are read sequentially, producing results which are |
| // guaranteed to arrive in increasing row order. Setting |
| // "allow_row_interleaving" to true allows multiple rows to be interleaved in |
| // the response stream, which increases throughput but breaks this guarantee, |
| // and may force the client to use more memory to buffer partially-received |
| // rows. Cannot be set to true when specifying "num_rows_limit". |
| bool allow_row_interleaving = 6; |
| |
| // The read will terminate after committing to N rows' worth of results. The |
| // default (zero) is to return all results. |
| // Note that "allow_row_interleaving" cannot be set to true when this is set. |
| int64 num_rows_limit = 7; |
| } |
| |
| // Response message for BigtableService.ReadRows. |
| message ReadRowsResponse { |
| // Specifies a piece of a row's contents returned as part of the read |
| // response stream. |
| message Chunk { |
| oneof chunk { |
| // A subset of the data from a particular row. As long as no "reset_row" |
| // is received in between, multiple "row_contents" from the same row are |
| // from the same atomic view of that row, and will be received in the |
| // expected family/column/timestamp order. |
| Family row_contents = 1; |
| |
| // Indicates that the client should drop all previous chunks for |
| // "row_key", as it will be re-read from the beginning. |
| bool reset_row = 2; |
| |
| // Indicates that the client can safely process all previous chunks for |
| // "row_key", as its data has been fully read. |
| bool commit_row = 3; |
| } |
| } |
| |
| // The key of the row for which we're receiving data. |
| // Results will be received in increasing row key order, unless |
| // "allow_row_interleaving" was specified in the request. |
| bytes row_key = 1; |
| |
| // One or more chunks of the row specified by "row_key". |
| repeated Chunk chunks = 2; |
| } |
| |
| // Request message for BigtableService.SampleRowKeys. |
| message SampleRowKeysRequest { |
| // The unique name of the table from which to sample row keys. |
| string table_name = 1; |
| } |
| |
| // Response message for BigtableService.SampleRowKeys. |
| message SampleRowKeysResponse { |
| // Sorted streamed sequence of sample row keys in the table. The table might |
| // have contents before the first row key in the list and after the last one, |
| // but a key containing the empty string indicates "end of table" and will be |
| // the last response given, if present. |
  // Note that the row keys in this list may never have been written to or read
  // from, so users should not make any assumptions about the row key structure
  // that are specific to their use case.
| bytes row_key = 1; |
| |
| // Approximate total storage space used by all rows in the table which precede |
| // "row_key". Buffering the contents of all rows between two subsequent |
| // samples would require space roughly equal to the difference in their |
| // "offset_bytes" fields. |
| int64 offset_bytes = 2; |
| } |
| |
| // Request message for BigtableService.MutateRow. |
| message MutateRowRequest { |
| // The unique name of the table to which the mutation should be applied. |
| string table_name = 1; |
| |
| // The key of the row to which the mutation should be applied. |
| bytes row_key = 2; |
| |
| // Changes to be atomically applied to the specified row. Entries are applied |
| // in order, meaning that earlier mutations can be masked by later ones. |
| // Must contain at least one entry and at most 100000. |
| repeated Mutation mutations = 3; |
| } |
| |
| // Request message for BigtableService.MutateRows. |
| message MutateRowsRequest { |
  // A single row mutation within the bulk request: the key of the target row
  // and the mutations to apply to it.
  message Entry {
| // The key of the row to which the `mutations` should be applied. |
| bytes row_key = 1; |
| |
| // Changes to be atomically applied to the specified row. Mutations are |
| // applied in order, meaning that earlier mutations can be masked by |
| // later ones. |
| // At least one mutation must be specified. |
| repeated Mutation mutations = 2; |
| } |
| |
| // The unique name of the table to which the mutations should be applied. |
| string table_name = 1; |
| |
| // The row keys/mutations to be applied in bulk. |
| // Each entry is applied as an atomic mutation, but the entries may be |
| // applied in arbitrary order (even between entries for the same row). |
| // At least one entry must be specified, and in total the entries may |
| // contain at most 100000 mutations. |
| repeated Entry entries = 2; |
| } |
| |
| // Response message for BigtableService.MutateRows. |
| message MutateRowsResponse { |
| // The results for each Entry from the request, presented in the order |
| // in which the entries were originally given. |
| // Depending on how requests are batched during execution, it is possible |
| // for one Entry to fail due to an error with another Entry. In the event |
| // that this occurs, the same error will be reported for both entries. |
| repeated google.rpc.Status statuses = 1; |
| } |
| |
// Request message for BigtableService.CheckAndMutateRow.
| message CheckAndMutateRowRequest { |
| // The unique name of the table to which the conditional mutation should be |
| // applied. |
| string table_name = 1; |
| |
| // The key of the row to which the conditional mutation should be applied. |
| bytes row_key = 2; |
| |
| // The filter to be applied to the contents of the specified row. Depending |
| // on whether or not any results are yielded, either "true_mutations" or |
| // "false_mutations" will be executed. If unset, checks that the row contains |
| // any values at all. |
| RowFilter predicate_filter = 6; |
| |
| // Changes to be atomically applied to the specified row if "predicate_filter" |
| // yields at least one cell when applied to "row_key". Entries are applied in |
| // order, meaning that earlier mutations can be masked by later ones. |
| // Must contain at least one entry if "false_mutations" is empty, and at most |
| // 100000. |
| repeated Mutation true_mutations = 4; |
| |
| // Changes to be atomically applied to the specified row if "predicate_filter" |
| // does not yield any cells when applied to "row_key". Entries are applied in |
| // order, meaning that earlier mutations can be masked by later ones. |
| // Must contain at least one entry if "true_mutations" is empty, and at most |
| // 100000. |
| repeated Mutation false_mutations = 5; |
| } |
| |
// Response message for BigtableService.CheckAndMutateRow.
| message CheckAndMutateRowResponse { |
| // Whether or not the request's "predicate_filter" yielded any results for |
| // the specified row. |
| bool predicate_matched = 1; |
| } |
| |
// Request message for BigtableService.ReadModifyWriteRow.
| message ReadModifyWriteRowRequest { |
| // The unique name of the table to which the read/modify/write rules should be |
| // applied. |
| string table_name = 1; |
| |
| // The key of the row to which the read/modify/write rules should be applied. |
| bytes row_key = 2; |
| |
| // Rules specifying how the specified row's contents are to be transformed |
| // into writes. Entries are applied in order, meaning that earlier rules will |
| // affect the results of later ones. |
| repeated ReadModifyWriteRule rules = 3; |
| } |