Add hardcoded spaces to Rust output.

Normally, we don't need to worry about spaces as `rustfmt` will fix it all for us. Indeed, any added space would get removed/altered by `rustfmt`, so it would normally be pointless.

`__SPACE__` is for the abnormal case, inside a macro invocation: `rustfmt` will not touch the `...` inside `foo!(...)`. So this change allows one to produce nicely-formatted macro invocations, which would otherwise include only the strictly necessary spacing:

```
my_macro!(pub foo = bar)                     --> my_macro!(pub foo=bar)
my_macro!(pub foo __SPACE__ = __SPACE__ bar) --> my_macro!(pub foo = bar)
```

(In particular, this is exactly the structure used by the forward_declare macro...)

PiperOrigin-RevId: 442817418
diff --git a/common/token_stream_printer.rs b/common/token_stream_printer.rs
index 997d052..3bf8902 100644
--- a/common/token_stream_printer.rs
+++ b/common/token_stream_printer.rs
@@ -25,6 +25,8 @@
 ///   "foo/bar.h"`). We are also using explict newlines for making the generated
 ///   Rust/C++ source code more readable. Use the placeholder `__NEWLINE__` to
 ///   insert a newline character.
+/// * `TokenStream` cannot encode formatting whitespace, so we use the
+///   placeholder `__SPACE__`.
 /// * `TokenStream` cannot encode comments, so we use the placeholder
 ///   `__COMMENT__`, followed by a string literal.
 pub fn tokens_to_string(tokens: TokenStream) -> Result<String> {
@@ -38,6 +40,7 @@
     while let Some(tt) = it.next() {
         match tt {
             TokenTree::Ident(ref tt) if tt == "__NEWLINE__" => writeln!(result)?,
+            TokenTree::Ident(ref tt) if tt == "__SPACE__" => write!(result, " ")?,
             TokenTree::Ident(ref tt) if tt == "__HASH_TOKEN__" => write!(result, "#")?,
 
             TokenTree::Ident(ref tt) if tt == "__COMMENT__" => {
@@ -80,8 +83,8 @@
 
 fn is_ident_or_literal(tt: &TokenTree) -> bool {
     match tt {
-        TokenTree::Ident(id) if id == "__NEWLINE__" => false,
-        TokenTree::Ident(_) | TokenTree::Literal(_) => true,
+        TokenTree::Ident(id) => id != "__NEWLINE__" && id != "__SPACE__",
+        TokenTree::Literal(_) => true,
         _ => false,
     }
 }
@@ -167,6 +170,20 @@
     }
 
     #[test]
+    fn test_space_token() -> Result<()> {
+        let token_stream = quote! { a __SPACE__ = __SPACE__ b };
+        assert_eq!(tokens_to_string(token_stream)?, "a = b");
+        Ok(())
+    }
+
+    #[test]
+    fn test_redundant_space_token() -> Result<()> {
+        let token_stream = quote! { a __SPACE__ b };
+        assert_eq!(tokens_to_string(token_stream)?, "a b");
+        Ok(())
+    }
+
+    #[test]
     fn test_hash_token() -> Result<()> {
         let token_stream = quote! { a __HASH_TOKEN__ b };
         assert_eq!(tokens_to_string(token_stream)?, "a #b");
@@ -225,6 +242,7 @@
     #[test]
     fn test_special_tokens_in_groups() -> Result<()> {
         assert_eq!(tokens_to_string(quote! {{ a __NEWLINE__ b }})?, "{ a\nb }");
+        assert_eq!(tokens_to_string(quote! {{ a __SPACE__ b }})?, "{ a b }");
         assert_eq!(tokens_to_string(quote! {(a __COMMENT__ "b")})?, "(a // b\n)");
         assert_eq!(tokens_to_string(quote! {[__HASH_TOKEN__ a]})?, "[#a]");
         Ok(())
diff --git a/rs_bindings_from_cc/token_stream_matchers.rs b/rs_bindings_from_cc/token_stream_matchers.rs
index bdcb6c0..e89b730 100644
--- a/rs_bindings_from_cc/token_stream_matchers.rs
+++ b/rs_bindings_from_cc/token_stream_matchers.rs
@@ -302,7 +302,7 @@
             }
         };
         while let Some(actual_token) = input_iter.next() {
-            if is_newline_token(&actual_token) {
+            if is_whitespace_token(&actual_token) {
                 continue;
             }
 
@@ -392,8 +392,8 @@
         iter::once(token).chain(iter).collect::<TokenStream>()
     }
 
-    fn is_newline_token(token: &TokenTree) -> bool {
-        matches!(token, TokenTree::Ident(ref id) if id == "__NEWLINE__")
+    fn is_whitespace_token(token: &TokenTree) -> bool {
+        matches!(token, TokenTree::Ident(id) if id == "__NEWLINE__" || id == "__SPACE__")
     }
 
     fn is_wildcard(pattern: TokenStream) -> bool {
@@ -431,7 +431,7 @@
                         if input_suffix
                             .clone()
                             .into_iter()
-                            .filter(|token| !is_newline_token(token))
+                            .filter(|token| !is_whitespace_token(token))
                             .count()
                             != 0
                         {
@@ -690,7 +690,16 @@
     fn test_ignore_newlines() {
         assert_rs_cc_matches!(
             quote! {__NEWLINE__ fn __NEWLINE__ foo __NEWLINE__ (
-            __NEWLINE__ a: __NEWLINE__ usize) {}},
+            __NEWLINE__ a __NEWLINE__ : __NEWLINE__ usize __NEWLINE__) {}},
+            quote! {fn foo(a: usize) {}}
+        );
+    }
+
+    #[test]
+    fn test_ignore_space() {
+        assert_rs_cc_matches!(
+            quote! {__SPACE__ fn __SPACE__ foo __SPACE__ (
+            __SPACE__ a __SPACE__ : __SPACE__ usize __SPACE__) {}},
             quote! {fn foo(a: usize) {}}
         );
     }