diff --git a/.flake8 b/.flake8 index 2e4387498..87f6e408c 100644 --- a/.flake8 +++ b/.flake8 @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 98994f474..a3da1b0d4 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2d816f26f728ac8b24248741e7d4c461c09764ef9f7be3684d557c9632e46dbd -# created: 2023-06-28T17:03:33.371210701Z + digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 +# created: 2023-08-02T10:53:29.114535628Z diff --git a/.github/auto-label.yaml b/.github/auto-label.yaml index 41bff0b53..b2016d119 100644 --- a/.github/auto-label.yaml +++ b/.github/auto-label.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 35c57a936..1ff00f091 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile index f8137d0ae..8e39a2cc4 100644 --- a/.kokoro/docker/docs/Dockerfile +++ b/.kokoro/docker/docs/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh index f52514257..6f3972140 100755 --- a/.kokoro/populate-secrets.sh +++ b/.kokoro/populate-secrets.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC. +# Copyright 2023 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 1c4d62370..9eafe0be3 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/release.sh b/.kokoro/release.sh index 9202c2fdb..9e796d4c9 100755 --- a/.kokoro/release.sh +++ b/.kokoro/release.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index c7929db6d..029bd342d 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.2.0 \ --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db # via google-auth -certifi==2022.12.7 \ - --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ - --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 +certifi==2023.7.22 \ + --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ + --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests cffi==1.15.1 \ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ @@ -113,26 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.0 \ - --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \ - --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \ - --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \ - --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \ - --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \ - --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \ - --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \ - --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \ - --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \ - --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \ - --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \ - --hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \ - --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \ - --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \ - --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \ - --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \ - --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \ - --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \ - --hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be +cryptography==41.0.3 \ + --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ + --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ + --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ + --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ + --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ + --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ + --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ + --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ + --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ + --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ + 
--hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ + --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ + --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ + --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ + --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ + --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ + --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ + --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ + --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ + --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ + --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ + --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ + --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de # via # gcp-releasetool # secretstorage @@ -392,9 +396,9 @@ pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pygments==2.13.0 \ - --hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \ - --hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42 +pygments==2.15.0 \ + --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \ + --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500 # via # readme-renderer # rich diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh index ba3a707b0..63ac41dfa 100755 --- a/.kokoro/test-samples-against-head.sh +++ b/.kokoro/test-samples-against-head.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh index 2c6500cae..5a0f5fab6 100755 --- a/.kokoro/test-samples-impl.sh +++ b/.kokoro/test-samples-impl.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index 11c042d34..50b35a48c 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh index f39236e94..d85b1f267 100755 --- a/.kokoro/trampoline.sh +++ b/.kokoro/trampoline.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 Google Inc. +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
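The `.kokoro/requirements.txt` bumps above (certifi 2023.7.22, cryptography 41.0.3, pygments 2.15.0) keep pip's hash-checking mode intact: every pin carries its sha256 digests. A minimal sketch of how such a digest can be verified locally, assuming a downloaded wheel whose filename here is hypothetical:

```python
# Compute the sha256 digest that pip's hash-checking mode compares against
# a "--hash=sha256:..." pin in requirements.txt.
import hashlib


def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 16), b""):
            digest.update(chunk)
    return digest.hexdigest()


# Hypothetical local wheel; substitute the artifact you actually downloaded.
print(sha256_of("certifi-2023.7.22-py3-none-any.whl"))
```

If a computed digest disagrees with the pinned value, pip refuses to install the artifact, which is what makes these otherwise noisy hash churns worth reviewing.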
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh index 4af6cdc26..59a7cf3a9 100755 --- a/.kokoro/trampoline_v2.sh +++ b/.kokoro/trampoline_v2.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5405cc8ff..19409cbd3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,6 @@ repos: hooks: - id: black - repo: https://github.com/pycqa/flake8 - rev: 3.9.2 + rev: 6.1.0 hooks: - id: flake8 diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 23860f90c..2ef9a1c2e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.13.0" + ".": "1.14.0" } diff --git a/.trampolinerc b/.trampolinerc index 0eee72ab6..a7dfeb42c 100644 --- a/.trampolinerc +++ b/.trampolinerc @@ -1,4 +1,4 @@ -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Template for .trampolinerc - # Add required env vars here. required_envvars+=( ) diff --git a/CHANGELOG.md b/CHANGELOG.md index 952ac0f20..37368d11e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [1.14.0](https://github.com/googleapis/python-compute/compare/v1.13.0...v1.14.0) (2023-08-02) + + +### Features + +* Update Compute Engine API to revision 20230711 ([#836](https://github.com/googleapis/python-compute/issues/836)) ([#426](https://github.com/googleapis/python-compute/issues/426)) ([40f2de8](https://github.com/googleapis/python-compute/commit/40f2de88c12f28c68c55f1d250f51a561c2f195a)) + ## [1.13.0](https://github.com/googleapis/python-compute/compare/v1.12.1...v1.13.0) (2023-07-10) diff --git a/MANIFEST.in b/MANIFEST.in index e783f4c62..e0a667053 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/conf.py b/docs/conf.py index 1b2f7cf9b..98e969bcd 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
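The changelog entry above carries the substance of this release: updating the Compute Engine API to revision 20230711 adds a `Patch` method to `ResourcePolicies`. A minimal sketch of calling it, mirroring the generated sample that appears later in this diff (all field values are placeholders):

```python
from google.cloud import compute_v1

# Create a client and patch a resource policy; identifiers are placeholders.
client = compute_v1.ResourcePoliciesClient()

request = compute_v1.PatchResourcePolicyRequest(
    project="project_value",
    region="region_value",
    resource_policy="resource_policy_value",
)

# patch() returns an ExtendedOperation wrapping the regional operation.
response = client.patch(request=request)
print(response)
```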
diff --git a/google/cloud/compute/__init__.py b/google/cloud/compute/__init__.py index 646f9e1fc..86c395be0 100644 --- a/google/cloud/compute/__init__.py +++ b/google/cloud/compute/__init__.py @@ -1115,6 +1115,7 @@ PatchRegionSslPolicyRequest, PatchRegionTargetHttpsProxyRequest, PatchRegionUrlMapRequest, + PatchResourcePolicyRequest, PatchRouterRequest, PatchRuleFirewallPolicyRequest, PatchRuleNetworkFirewallPolicyRequest, @@ -2571,6 +2572,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", diff --git a/google/cloud/compute/gapic_version.py b/google/cloud/compute/gapic_version.py index 1fc1b24be..624a2e53d 100644 --- a/google/cloud/compute/gapic_version.py +++ b/google/cloud/compute/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.13.0" # {x-release-please-version} +__version__ = "1.14.0" # {x-release-please-version} diff --git a/google/cloud/compute_v1/__init__.py b/google/cloud/compute_v1/__init__.py index b3b495610..55da57749 100644 --- a/google/cloud/compute_v1/__init__.py +++ b/google/cloud/compute_v1/__init__.py @@ -1009,6 +1009,7 @@ PatchRegionSslPolicyRequest, PatchRegionTargetHttpsProxyRequest, PatchRegionUrlMapRequest, + PatchResourcePolicyRequest, PatchRouterRequest, PatchRuleFirewallPolicyRequest, PatchRuleNetworkFirewallPolicyRequest, @@ -2416,6 +2417,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", diff --git a/google/cloud/compute_v1/gapic_metadata.json b/google/cloud/compute_v1/gapic_metadata.json index d70dcf72e..0f722bc68 100644 --- a/google/cloud/compute_v1/gapic_metadata.json +++ b/google/cloud/compute_v1/gapic_metadata.json @@ -3317,6 +3317,11 @@ "list" ] }, + "Patch": { + "methods": [ + "patch" + ] + }, "SetIamPolicy": { "methods": [ "set_iam_policy" diff --git a/google/cloud/compute_v1/gapic_version.py b/google/cloud/compute_v1/gapic_version.py index 1fc1b24be..624a2e53d 100644 --- a/google/cloud/compute_v1/gapic_version.py +++ b/google/cloud/compute_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.13.0" # {x-release-please-version} +__version__ = "1.14.0" # {x-release-please-version} diff --git a/google/cloud/compute_v1/services/region_security_policies/client.py b/google/cloud/compute_v1/services/region_security_policies/client.py index d269c4dfc..0c306bab2 100644 --- a/google/cloud/compute_v1/services/region_security_policies/client.py +++ b/google/cloud/compute_v1/services/region_security_policies/client.py @@ -1267,7 +1267,7 @@ def patch_unary( metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. 
Please use the per rule methods like addRule, patchRule, @@ -1411,7 +1411,7 @@ def patch( metadata: Sequence[Tuple[str, str]] = (), ) -> extended_operation.ExtendedOperation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, diff --git a/google/cloud/compute_v1/services/resource_policies/client.py b/google/cloud/compute_v1/services/resource_policies/client.py index 65bfec0a3..22dc62206 100644 --- a/google/cloud/compute_v1/services/resource_policies/client.py +++ b/google/cloud/compute_v1/services/resource_policies/client.py @@ -1518,6 +1518,301 @@ def sample_list(): # Done; return the response. return response + def patch_unary( + self, + request: Optional[Union[compute.PatchResourcePolicyRequest, dict]] = None, + *, + project: Optional[str] = None, + region: Optional[str] = None, + resource_policy: Optional[str] = None, + resource_policy_resource: Optional[compute.ResourcePolicy] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Modify the specified resource policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1 + + def sample_patch(): + # Create a client + client = compute_v1.ResourcePoliciesClient() + + # Initialize request argument(s) + request = compute_v1.PatchResourcePolicyRequest( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + ) + + # Make the request + response = client.patch(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1.types.PatchResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.Patch. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy (str): + Id of the resource policy to patch. + This corresponds to the ``resource_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + This corresponds to the ``resource_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.extended_operation.ExtendedOperation: + An object representing an extended + long-running operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [project, region, resource_policy, resource_policy_resource] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchResourcePolicyRequest): + request = compute.PatchResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource_policy is not None: + request.resource_policy = resource_policy + if resource_policy_resource is not None: + request.resource_policy_resource = resource_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("region", request.region), + ("resource_policy", request.resource_policy), + ) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch( + self, + request: Optional[Union[compute.PatchResourcePolicyRequest, dict]] = None, + *, + project: Optional[str] = None, + region: Optional[str] = None, + resource_policy: Optional[str] = None, + resource_policy_resource: Optional[compute.ResourcePolicy] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extended_operation.ExtendedOperation: + r"""Modify the specified resource policy. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import compute_v1 + + def sample_patch(): + # Create a client + client = compute_v1.ResourcePoliciesClient() + + # Initialize request argument(s) + request = compute_v1.PatchResourcePolicyRequest( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + ) + + # Make the request + response = client.patch(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.compute_v1.types.PatchResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.Patch. See the method + description for details. + project (str): + Project ID for this request.
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy (str): + Id of the resource policy to patch. + This corresponds to the ``resource_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + This corresponds to the ``resource_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.extended_operation.ExtendedOperation: + An object representing an extended + long-running operation. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [project, region, resource_policy, resource_policy_resource] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchResourcePolicyRequest): + request = compute.PatchResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource_policy is not None: + request.resource_policy = resource_policy + if resource_policy_resource is not None: + request.resource_policy_resource = resource_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + ( + ("project", request.project), + ("region", request.region), + ("resource_policy", request.resource_policy), + ) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + operation_service = self._transport._region_operations_client + operation_request = compute.GetRegionOperationRequest() + operation_request.project = request.project + operation_request.region = request.region + operation_request.operation = response.name + + get_operation = functools.partial(operation_service.get, operation_request) + # Cancel is not part of extended operations yet. + cancel_operation = lambda: None + + # Note: this class is an implementation detail to provide a uniform + # set of names for certain fields in the extended operation proto message.
+ # See google.api_core.extended_operation.ExtendedOperation for details + # on these properties and the expected interface. + class _CustomOperation(extended_operation.ExtendedOperation): + @property + def error_message(self): + return self._extended_operation.http_error_message + + @property + def error_code(self): + return self._extended_operation.http_error_status_code + + response = _CustomOperation.make(get_operation, cancel_operation, response) + + # Done; return the response. + return response + def set_iam_policy( self, request: Optional[ diff --git a/google/cloud/compute_v1/services/resource_policies/transports/base.py b/google/cloud/compute_v1/services/resource_policies/transports/base.py index 984d169dd..5f1f23fab 100644 --- a/google/cloud/compute_v1/services/resource_policies/transports/base.py +++ b/google/cloud/compute_v1/services/resource_policies/transports/base.py @@ -157,6 +157,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), self.set_iam_policy: gapic_v1.method.wrap_method( self.set_iam_policy, default_timeout=None, @@ -235,6 +240,15 @@ def list( ]: raise NotImplementedError() + @property + def patch( + self, + ) -> Callable[ + [compute.PatchResourcePolicyRequest], + Union[compute.Operation, Awaitable[compute.Operation]], + ]: + raise NotImplementedError() + @property def set_iam_policy( self, diff --git a/google/cloud/compute_v1/services/resource_policies/transports/rest.py b/google/cloud/compute_v1/services/resource_policies/transports/rest.py index 8293719ce..90b2870fc 100644 --- a/google/cloud/compute_v1/services/resource_policies/transports/rest.py +++ b/google/cloud/compute_v1/services/resource_policies/transports/rest.py @@ -111,6 +111,14 @@ def post_list(self, response): logging.log(f"Received response: {response}") return response + def pre_patch(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_patch(self, response): + logging.log(f"Received response: {response}") + return response + def pre_set_iam_policy(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -265,6 +273,27 @@ def post_list( """ return response + def pre_patch( + self, + request: compute.PatchResourcePolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[compute.PatchResourcePolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for patch + + Override in a subclass to manipulate the request or metadata + before they are sent to the ResourcePolicies server. + """ + return request, metadata + + def post_patch(self, response: compute.Operation) -> compute.Operation: + """Post-rpc interceptor for patch + + Override in a subclass to manipulate the response + after it is returned by the ResourcePolicies server but before + it is returned to user code. 
+ """ + return response + def pre_set_iam_policy( self, request: compute.SetIamPolicyResourcePolicyRequest, @@ -1017,6 +1046,116 @@ def __call__( resp = self._interceptor.post_list(resp) return resp + class _Patch(ResourcePoliciesRestStub): + def __hash__(self): + return hash("Patch") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: compute.PatchResourcePolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.Patch. See the method + description for details. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}", + "body": "resource_policy_resource", + }, + ] + request, metadata = self._interceptor.pre_patch(request, metadata) + pb_request = compute.PatchResourcePolicyRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=False, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = compute.Operation() + pb_resp = compute.Operation.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_patch(resp) + return resp + class _SetIamPolicy(ResourcePoliciesRestStub): def __hash__(self): return hash("SetIamPolicy") @@ -1301,6 +1440,14 @@ def list( # In C++ this would require a dynamic_cast return self._List(self._session, self._host, self._interceptor) # type: ignore + @property + def patch( + self, + ) -> Callable[[compute.PatchResourcePolicyRequest], compute.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._Patch(self._session, self._host, self._interceptor) # type: ignore + @property def set_iam_policy( self, diff --git a/google/cloud/compute_v1/services/security_policies/client.py b/google/cloud/compute_v1/services/security_policies/client.py index 5acf15d6b..7f047d8ff 100644 --- a/google/cloud/compute_v1/services/security_policies/client.py +++ b/google/cloud/compute_v1/services/security_policies/client.py @@ -1792,7 +1792,7 @@ def patch_unary( metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, @@ -1922,7 +1922,7 @@ def patch( metadata: Sequence[Tuple[str, str]] = (), ) -> extended_operation.ExtendedOperation: r"""Patches the specified policy with the data included - in the request. To clear fields in the rule, leave the + in the request. To clear fields in the policy, leave the fields empty and specify them in the updateMask. This cannot be used to be update the rules in the policy. Please use the per rule methods like addRule, patchRule, @@ -2075,7 +2075,9 @@ def patch_rule_unary( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> compute.Operation: - r"""Patches a rule at the specified priority. + r"""Patches a rule at the specified priority. To clear + fields in the rule, leave the fields empty and specify + them in the updateMask. .. code-block:: python @@ -2202,7 +2204,9 @@ def patch_rule( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> extended_operation.ExtendedOperation: - r"""Patches a rule at the specified priority. + r"""Patches a rule at the specified priority. To clear + fields in the rule, leave the fields empty and specify + them in the updateMask. .. 
code-block:: python diff --git a/google/cloud/compute_v1/types/__init__.py b/google/cloud/compute_v1/types/__init__.py index 244dd63a7..e310d187d 100644 --- a/google/cloud/compute_v1/types/__init__.py +++ b/google/cloud/compute_v1/types/__init__.py @@ -910,6 +910,7 @@ PatchRegionSslPolicyRequest, PatchRegionTargetHttpsProxyRequest, PatchRegionUrlMapRequest, + PatchResourcePolicyRequest, PatchRouterRequest, PatchRuleFirewallPolicyRequest, PatchRuleNetworkFirewallPolicyRequest, @@ -2276,6 +2277,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", diff --git a/google/cloud/compute_v1/types/compute.py b/google/cloud/compute_v1/types/compute.py index 12d7cf057..43d989f20 100644 --- a/google/cloud/compute_v1/types/compute.py +++ b/google/cloud/compute_v1/types/compute.py @@ -918,6 +918,7 @@ "PatchRegionSslPolicyRequest", "PatchRegionTargetHttpsProxyRequest", "PatchRegionUrlMapRequest", + "PatchResourcePolicyRequest", "PatchRouterRequest", "PatchRuleFirewallPolicyRequest", "PatchRuleNetworkFirewallPolicyRequest", @@ -12334,8 +12335,8 @@ class BackendBucket(proto.Message): This field is a member of `oneof`_ ``_creation_timestamp``. custom_response_headers (MutableSequence[str]): - Headers that the HTTP/S load balancer should - add to proxied responses. + Headers that the Application Load Balancer + should add to proxied responses. description (str): An optional textual description of the resource; provided by the client when the @@ -23807,6 +23808,13 @@ class TargetShape(proto.Enum): maximize utilization of unused zonal reservations. Recommended for batch workloads that do not require high availability. + ANY_SINGLE_ZONE (61100880): + The group creates all VM instances within a + single zone. The zone is selected based on the + present resource constraints and to maximize + utilization of unused zonal reservations. + Recommended for batch workloads with heavy + interprocess communication. BALANCED (468409608): The group prioritizes acquisition of resources, scheduling VMs in zones where @@ -23826,6 +23834,7 @@ class TargetShape(proto.Enum): """ UNDEFINED_TARGET_SHAPE = 0 ANY = 64972 + ANY_SINGLE_ZONE = 61100880 BALANCED = 468409608 EVEN = 2140442 @@ -40553,14 +40562,11 @@ class InstanceGroupManagerUpdatePolicy(proto.Message): This field is a member of `oneof`_ ``_replacement_method``. type_ (str): The type of update process. You can specify - either PROACTIVE so that the instance group - manager proactively executes actions in order to - bring instances to their target versions or - OPPORTUNISTIC so that no action is proactively - executed but the update will be performed as - part of other actions (for example, resizes or - recreateInstances calls). Check the Type enum - for the list of possible values. + either PROACTIVE so that the MIG automatically + updates VMs to the latest configurations or + OPPORTUNISTIC so that you can select the VMs + that you want to update. Check the Type enum for + the list of possible values. This field is a member of `oneof`_ ``_type``. """ @@ -40572,7 +40578,9 @@ class InstanceRedistributionType(proto.Enum): across zones in the region. - NONE: For non-autoscaled groups, proactive redistribution is disabled. 
Additional supported values which may be not listed in the enum directly due to - technical reasons: NONE + technical reasons: + + NONE PROACTIVE Values: @@ -40596,6 +40604,7 @@ class MinimalAction(proto.Enum): restarted, you can set the minimal action to RESTART in order to pick up metadata changes. Additional supported values which may be not listed in the enum directly due to technical reasons: + NONE REFRESH REPLACE @@ -40618,7 +40627,9 @@ class MostDisruptiveAllowedAction(proto.Enum): action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all. Additional supported values which may be not listed in the enum - directly due to technical reasons: NONE + directly due to technical reasons: + + NONE REFRESH REPLACE RESTART @@ -40651,24 +40662,21 @@ class ReplacementMethod(proto.Enum): class Type(proto.Enum): r"""The type of update process. You can specify either PROACTIVE - so that the instance group manager proactively executes actions - in order to bring instances to their target versions or - OPPORTUNISTIC so that no action is proactively executed but the - update will be performed as part of other actions (for example, - resizes or recreateInstances calls). Additional supported values - which may be not listed in the enum directly due to technical - reasons: PROACTIVE + so that the MIG automatically updates VMs to the latest + configurations or OPPORTUNISTIC so that you can select the VMs + that you want to update. Additional supported values which may + be not listed in the enum directly due to technical reasons: + + PROACTIVE Values: UNDEFINED_TYPE (0): A value indicating that the enum field is not set. OPPORTUNISTIC (429530089): - No action is being proactively performed in - order to bring this IGM to its target version - distribution (regardless of whether this - distribution is expressed using instanceTemplate - or versions field). + MIG will apply new configurations to existing + VMs only when you selectively target specific or + all VMs to be updated. """ UNDEFINED_TYPE = 0 OPPORTUNISTIC = 429530089 @@ -40844,7 +40852,9 @@ class MinimalAction(proto.Enum): a more disruptive action than you set with this flag, the necessary action is performed to execute the update. Additional supported values which may be not listed in the enum directly - due to technical reasons: NONE + due to technical reasons: + + NONE REFRESH REPLACE RESTART @@ -40866,7 +40876,9 @@ class MostDisruptiveAllowedAction(proto.Enum): your update requires a more disruptive action than you set with this flag, the update request will fail. Additional supported values which may be not listed in the enum directly due to - technical reasons: NONE + technical reasons: + + NONE REFRESH REPLACE RESTART @@ -69497,6 +69509,76 @@ class PatchRegionUrlMapRequest(proto.Message): ) +class PatchResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.Patch. See the method + description for details. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource_policy (str): + Id of the resource policy to patch. + resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + update_mask (str): + update_mask indicates fields to be updated as part of this + request. + + This field is a member of `oneof`_ ``_update_mask``. + """ + + project: str = proto.Field( + proto.STRING, + number=227560217, + ) + region: str = proto.Field( + proto.STRING, + number=138946292, + ) + request_id: str = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource_policy: str = proto.Field( + proto.STRING, + number=159240835, + ) + resource_policy_resource: "ResourcePolicy" = proto.Field( + proto.MESSAGE, + number=76826186, + message="ResourcePolicy", + ) + update_mask: str = proto.Field( + proto.STRING, + number=500079778, + optional=True, + ) + + class PatchRouterRequest(proto.Message): r"""A request message for Routers.Patch. See the method description for details. @@ -72618,6 +72700,12 @@ class QuotaExceededInfo(proto.Message): Attributes: dimensions (MutableMapping[str, str]): The map holding related quota dimensions. + future_limit (float): + Future quota limit being rolled out. The + limit's unit depends on the quota type or + metric. + + This field is a member of `oneof`_ ``_future_limit``. limit (float): Current effective quota limit. The limit's unit depends on the quota type or metric. @@ -72631,13 +72719,42 @@ class QuotaExceededInfo(proto.Message): The Compute Engine quota metric name. This field is a member of `oneof`_ ``_metric_name``. + rollout_status (str): + Rollout status of the future quota limit. + Check the RolloutStatus enum for the list of + possible values. + + This field is a member of `oneof`_ ``_rollout_status``. """ + class RolloutStatus(proto.Enum): + r"""Rollout status of the future quota limit. + + Values: + UNDEFINED_ROLLOUT_STATUS (0): + A value indicating that the enum field is not + set. + IN_PROGRESS (469193735): + IN_PROGRESS - A rollout is in process which will change the + limit value to future limit. + ROLLOUT_STATUS_UNSPECIFIED (26864568): + ROLLOUT_STATUS_UNSPECIFIED - Rollout status is not + specified. The default value. + """ + UNDEFINED_ROLLOUT_STATUS = 0 + IN_PROGRESS = 469193735 + ROLLOUT_STATUS_UNSPECIFIED = 26864568 + dimensions: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=414334925, ) + future_limit: float = proto.Field( + proto.DOUBLE, + number=456564287, + optional=True, + ) limit: float = proto.Field( proto.DOUBLE, number=102976443, @@ -72653,6 +72770,11 @@ class QuotaExceededInfo(proto.Message): number=409881530, optional=True, ) + rollout_status: str = proto.Field( + proto.STRING, + number=476426816, + optional=True, + ) class RawDisk(proto.Message): @@ -73561,7 +73683,9 @@ class MinimalAction(proto.Enum): a more disruptive action than you set with this flag, the necessary action is performed to execute the update. 
Additional supported values which may be not listed in the enum directly - due to technical reasons: NONE + due to technical reasons: + + NONE REFRESH REPLACE RESTART @@ -73583,7 +73707,9 @@ class MostDisruptiveAllowedAction(proto.Enum): your update requires a more disruptive action than you set with this flag, the update request will fail. Additional supported values which may be not listed in the enum directly due to - technical reasons: NONE + technical reasons: + + NONE REFRESH REPLACE RESTART @@ -80373,6 +80499,14 @@ class Scheduling(proto.Message): enum for the list of possible values. This field is a member of `oneof`_ ``_instance_termination_action``. + local_ssd_recovery_timeout (google.cloud.compute_v1.types.Duration): + Specifies the maximum amount of time a Local + Ssd Vm should wait while recovery of the Local + Ssd state is attempted. Its value should be in + between 0 and 168 hours with hour granularity + and the default value being 1 hour. + + This field is a member of `oneof`_ ``_local_ssd_recovery_timeout``. location_hint (str): An opaque location hint used to place the instance close to other resources. This field is @@ -80492,6 +80626,12 @@ class ProvisioningModel(proto.Enum): number=107380667, optional=True, ) + local_ssd_recovery_timeout: "Duration" = proto.Field( + proto.MESSAGE, + number=268015590, + optional=True, + message="Duration", + ) location_hint: str = proto.Field( proto.STRING, number=350519505, @@ -82742,6 +82882,10 @@ class ServiceAttachmentConnectedEndpoint(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + consumer_network (str): + The url of the consumer network. + + This field is a member of `oneof`_ ``_consumer_network``. endpoint (str): The url of a connected endpoint. @@ -82795,6 +82939,11 @@ class Status(proto.Enum): REJECTED = 174130302 STATUS_UNSPECIFIED = 42133066 + consumer_network: str = proto.Field( + proto.STRING, + number=254357221, + optional=True, + ) endpoint: str = proto.Field( proto.STRING, number=130489749, diff --git a/noxfile.py b/noxfile.py index d53c9d05a..b8312a2c9 100644 --- a/noxfile.py +++ b/noxfile.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ import nox +FLAKE8_VERSION = "flake8==6.1.0" BLACK_VERSION = "black==22.3.0" ISORT_VERSION = "isort==5.10.1" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] @@ -82,7 +83,7 @@ def lint(session): Returns a failure if the linters find linting errors or sufficiently serious code quality issues. 
""" - session.install("flake8", BLACK_VERSION) + session.install(FLAKE8_VERSION, BLACK_VERSION) session.run( "black", "--check", @@ -400,6 +401,7 @@ def prerelease_deps(session): "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" ) session.run("python", "-c", "import grpc; print(grpc.__version__)") + session.run("python", "-c", "import google.auth; print(google.auth.__version__)") session.run("py.test", "tests/unit") diff --git a/samples/generated_samples/compute_v1_generated_resource_policies_patch_sync.py b/samples/generated_samples/compute_v1_generated_resource_policies_patch_sync.py new file mode 100644 index 000000000..c889d2989 --- /dev/null +++ b/samples/generated_samples/compute_v1_generated_resource_policies_patch_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for Patch +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-compute + + +# [START compute_v1_generated_ResourcePolicies_Patch_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import compute_v1 + + +def sample_patch(): + # Create a client + client = compute_v1.ResourcePoliciesClient() + + # Initialize request argument(s) + request = compute_v1.PatchResourcePolicyRequest( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + ) + + # Make the request + response = client.patch(request=request) + + # Handle the response + print(response) + +# [END compute_v1_generated_ResourcePolicies_Patch_sync] diff --git a/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json index e75a0d5a4..ebc934990 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.compute.v1.json @@ -46911,6 +46911,98 @@ ], "title": "compute_v1_generated_resource_policies_list_sync.py" }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.compute_v1.ResourcePoliciesClient", + "shortName": "ResourcePoliciesClient" + }, + "fullName": "google.cloud.compute_v1.ResourcePoliciesClient.patch", + "method": { + "fullName": "google.cloud.compute.v1.ResourcePolicies.Patch", + "service": { + "fullName": "google.cloud.compute.v1.ResourcePolicies", + "shortName": "ResourcePolicies" + }, + "shortName": "Patch" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.compute_v1.types.PatchResourcePolicyRequest" + }, + { + "name": "project", + "type": "str" + }, + { + "name": "region", + "type": "str" + }, + { + "name": "resource_policy", + "type": "str" + }, + { + "name": "resource_policy_resource", + "type": "google.cloud.compute_v1.types.ResourcePolicy" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.extended_operation.ExtendedOperation", + "shortName": "patch" + }, + "description": "Sample for Patch", + "file": "compute_v1_generated_resource_policies_patch_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "compute_v1_generated_ResourcePolicies_Patch_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "compute_v1_generated_resource_policies_patch_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh index 21f6d2a26..0018b421d 100755 --- a/scripts/decrypt-secrets.sh +++ b/scripts/decrypt-secrets.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2015 Google Inc. All rights reserved. +# Copyright 2023 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
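The `PatchResourcePolicyRequest` message added earlier in this diff carries an optional `update_mask`, and the Patch docstrings state that clearing a field means leaving it empty in the body while naming it in the mask. A sketch of that pattern under those assumptions; the identifiers and the masked field name are hypothetical:

```python
from google.cloud import compute_v1

client = compute_v1.ResourcePoliciesClient()

# To clear a field, leave it empty in the body resource and list it in
# update_mask (the field name below is illustrative).
request = compute_v1.PatchResourcePolicyRequest(
    project="project_value",
    region="region_value",
    resource_policy="resource_policy_value",
    resource_policy_resource=compute_v1.ResourcePolicy(),
    update_mask="description",
)

operation = client.patch(request=request)
operation.result()  # block until the regional operation completes
```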
diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py index 91b59676b..1acc11983 100644 --- a/scripts/readme-gen/readme_gen.py +++ b/scripts/readme-gen/readme_gen.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2016 Google Inc +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,17 +33,17 @@ autoescape=True, ) -README_TMPL = jinja_env.get_template('README.tmpl.rst') +README_TMPL = jinja_env.get_template("README.tmpl.rst") def get_help(file): - return subprocess.check_output(['python', file, '--help']).decode() + return subprocess.check_output(["python", file, "--help"]).decode() def main(): parser = argparse.ArgumentParser() - parser.add_argument('source') - parser.add_argument('--destination', default='README.rst') + parser.add_argument("source") + parser.add_argument("--destination", default="README.rst") args = parser.parse_args() @@ -51,9 +51,9 @@ def main(): root = os.path.dirname(source) destination = os.path.join(root, args.destination) - jinja_env.globals['get_help'] = get_help + jinja_env.globals["get_help"] = get_help - with io.open(source, 'r') as f: + with io.open(source, "r") as f: config = yaml.load(f) # This allows get_help to execute in the right directory. @@ -61,9 +61,9 @@ def main(): output = README_TMPL.render(config) - with io.open(destination, 'w') as f: + with io.open(destination, "w") as f: f.write(output) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/setup.cfg b/setup.cfg index c3a2b39f6..052350089 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
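The test updates that follow exercise the new `Scheduling.local_ssd_recovery_timeout` field introduced in `compute.py` above, a `Duration` documented as hour-granular between 0 and 168 hours with a 1-hour default. A sketch of setting it, assuming `Duration` is exported from `compute_v1` as the proto's `message="Duration"` reference implies; the 2-hour value is an arbitrary illustration:

```python
from google.cloud import compute_v1

# Scheduling block with the new Local SSD recovery timeout (2 hours here).
scheduling = compute_v1.Scheduling(
    automatic_restart=True,
    local_ssd_recovery_timeout=compute_v1.Duration(seconds=2 * 3600),
)
print(scheduling)
```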
diff --git a/tests/unit/gapic/compute_v1/test_instance_templates.py b/tests/unit/gapic/compute_v1/test_instance_templates.py index 9f8ca9503..e36fcb894 100644 --- a/tests/unit/gapic/compute_v1/test_instance_templates.py +++ b/tests/unit/gapic/compute_v1/test_instance_templates.py @@ -2309,6 +2309,7 @@ def test_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2721,6 +2722,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3002,6 +3004,7 @@ def test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3392,6 +3395,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/tests/unit/gapic/compute_v1/test_instances.py b/tests/unit/gapic/compute_v1/test_instances.py index c15735cf3..267b76c39 100644 --- a/tests/unit/gapic/compute_v1/test_instances.py +++ b/tests/unit/gapic/compute_v1/test_instances.py @@ -3375,6 +3375,7 @@ def test_bulk_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3778,6 +3779,7 @@ def test_bulk_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -4049,6 +4051,7 @@ def test_bulk_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -4430,6 +4433,7 @@ def test_bulk_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -8912,6 +8916,7 @@ def test_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -9330,6 +9335,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": 
"instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -9612,6 +9618,7 @@ def test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -10008,6 +10015,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19000,6 +19008,7 @@ def test_set_scheduling_rest(request_type): request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19252,6 +19261,7 @@ def test_set_scheduling_rest_bad_request( request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19369,6 +19379,7 @@ def test_set_scheduling_unary_rest(request_type): request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -19599,6 +19610,7 @@ def test_set_scheduling_unary_rest_bad_request( request_init["scheduling_resource"] = { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -25533,6 +25545,7 @@ def test_update_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -25956,6 +25969,7 @@ def test_update_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -26244,6 +26258,7 @@ def test_update_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -26645,6 +26660,7 @@ def test_update_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git 
a/tests/unit/gapic/compute_v1/test_machine_images.py b/tests/unit/gapic/compute_v1/test_machine_images.py index 44ed4b485..b0e034587 100644 --- a/tests/unit/gapic/compute_v1/test_machine_images.py +++ b/tests/unit/gapic/compute_v1/test_machine_images.py @@ -1924,6 +1924,7 @@ def test_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2386,6 +2387,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2709,6 +2711,7 @@ def test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -3151,6 +3154,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/tests/unit/gapic/compute_v1/test_region_instance_templates.py b/tests/unit/gapic/compute_v1/test_region_instance_templates.py index 49f1fd7dc..40f9ddd46 100644 --- a/tests/unit/gapic/compute_v1/test_region_instance_templates.py +++ b/tests/unit/gapic/compute_v1/test_region_instance_templates.py @@ -1726,6 +1726,7 @@ def test_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2143,6 +2144,7 @@ def test_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2426,6 +2428,7 @@ def test_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -2821,6 +2824,7 @@ def test_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/tests/unit/gapic/compute_v1/test_region_instances.py b/tests/unit/gapic/compute_v1/test_region_instances.py index 88c18a466..ccec113ad 100644 --- a/tests/unit/gapic/compute_v1/test_region_instances.py +++ b/tests/unit/gapic/compute_v1/test_region_instances.py @@ -726,6 +726,7 @@ def test_bulk_insert_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": 
"instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -1131,6 +1132,7 @@ def test_bulk_insert_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -1402,6 +1404,7 @@ def test_bulk_insert_unary_rest(request_type): "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ @@ -1785,6 +1788,7 @@ def test_bulk_insert_unary_rest_bad_request( "scheduling": { "automatic_restart": True, "instance_termination_action": "instance_termination_action_value", + "local_ssd_recovery_timeout": {"nanos": 543, "seconds": 751}, "location_hint": "location_hint_value", "min_node_cpus": 1379, "node_affinities": [ diff --git a/tests/unit/gapic/compute_v1/test_resource_policies.py b/tests/unit/gapic/compute_v1/test_resource_policies.py index ca97652f3..d6d7e3759 100644 --- a/tests/unit/gapic/compute_v1/test_resource_policies.py +++ b/tests/unit/gapic/compute_v1/test_resource_policies.py @@ -3481,6 +3481,964 @@ def test_list_rest_pager(transport: str = "rest"): assert page_.raw_page.next_page_token == token +@pytest.mark.parametrize( + "request_type", + [ + compute.PatchResourcePolicyRequest, + dict, + ], +) +def test_patch_rest(request_type): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + "expiration_time": "expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": "start_time_value", + }, + "weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, 
+ }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id="client_operation_id_value", + creation_timestamp="creation_timestamp_value", + description="description_value", + end_time="end_time_value", + http_error_message="http_error_message_value", + http_error_status_code=2374, + id=205, + insert_time="insert_time_value", + kind="kind_value", + name="name_value", + operation_group_id="operation_group_id_value", + operation_type="operation_type_value", + progress=885, + region="region_value", + self_link="self_link_value", + start_time="start_time_value", + status=compute.Operation.Status.DONE, + status_message="status_message_value", + target_id=947, + target_link="target_link_value", + user="user_value", + zone="zone_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, extended_operation.ExtendedOperation) + assert response.client_operation_id == "client_operation_id_value" + assert response.creation_timestamp == "creation_timestamp_value" + assert response.description == "description_value" + assert response.end_time == "end_time_value" + assert response.http_error_message == "http_error_message_value" + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == "insert_time_value" + assert response.kind == "kind_value" + assert response.name == "name_value" + assert response.operation_group_id == "operation_group_id_value" + assert response.operation_type == "operation_type_value" + assert response.progress == 885 + assert response.region == "region_value" + assert response.self_link == "self_link_value" + assert response.start_time == "start_time_value" + assert response.status == compute.Operation.Status.DONE + assert response.status_message == "status_message_value" + assert response.target_id == 947 + assert response.target_link == "target_link_value" + assert response.user == "user_value" + assert response.zone == "zone_value" + + +def test_patch_rest_required_fields(request_type=compute.PatchResourcePolicyRequest): + transport_class = transports.ResourcePoliciesRestTransport + + request_init = {} + request_init["project"] = "" + request_init["region"] = "" + request_init["resource_policy"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["region"] = "region_value" + 
jsonified_request["resourcePolicy"] = "resource_policy_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "request_id", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "region" in jsonified_request + assert jsonified_request["region"] == "region_value" + assert "resourcePolicy" in jsonified_request + assert jsonified_request["resourcePolicy"] == "resource_policy_value" + + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.patch(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_patch_rest_unset_required_fields(): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.patch._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "requestId", + "updateMask", + ) + ) + & set( + ( + "project", + "region", + "resourcePolicy", + "resourcePolicyResource", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_patch_rest_interceptors(null_interceptor): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ResourcePoliciesRestInterceptor(), + ) + client = ResourcePoliciesClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "post_patch" + ) as post, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "pre_patch" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = compute.PatchResourcePolicyRequest.pb( + compute.PatchResourcePolicyRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + 
"query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = compute.Operation.to_json(compute.Operation()) + + request = compute.PatchResourcePolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.Operation() + + client.patch( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_patch_rest_bad_request( + transport: str = "rest", request_type=compute.PatchResourcePolicyRequest +): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + "expiration_time": "expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": "start_time_value", + }, + "weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, + }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_flattened(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation() + + # get arguments that satisfy an http rule for this method + sample_request = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}" + % client.transport._host, + args[1], + ) + + +def test_patch_rest_flattened_error(transport: str = "rest"): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchResourcePolicyRequest(), + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + + +def test_patch_rest_error(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + compute.PatchResourcePolicyRequest, + dict, + ], +) +def test_patch_unary_rest(request_type): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + "expiration_time": "expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": 
"start_time_value", + }, + "weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, + }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id="client_operation_id_value", + creation_timestamp="creation_timestamp_value", + description="description_value", + end_time="end_time_value", + http_error_message="http_error_message_value", + http_error_status_code=2374, + id=205, + insert_time="insert_time_value", + kind="kind_value", + name="name_value", + operation_group_id="operation_group_id_value", + operation_type="operation_type_value", + progress=885, + region="region_value", + self_link="self_link_value", + start_time="start_time_value", + status=compute.Operation.Status.DONE, + status_message="status_message_value", + target_id=947, + target_link="target_link_value", + user="user_value", + zone="zone_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.patch_unary(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + + +def test_patch_unary_rest_required_fields( + request_type=compute.PatchResourcePolicyRequest, +): + transport_class = transports.ResourcePoliciesRestTransport + + request_init = {} + request_init["project"] = "" + request_init["region"] = "" + request_init["resource_policy"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["project"] = "project_value" + jsonified_request["region"] = "region_value" + jsonified_request["resourcePolicy"] = "resource_policy_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).patch._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "request_id", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "project" in jsonified_request + assert jsonified_request["project"] == "project_value" + assert "region" in jsonified_request + assert jsonified_request["region"] == "region_value" + assert "resourcePolicy" in jsonified_request + assert jsonified_request["resourcePolicy"] == "resource_policy_value" + + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.patch_unary(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_patch_unary_rest_unset_required_fields(): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.patch._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "requestId", + "updateMask", + ) + ) + & set( + ( + "project", + "region", + "resourcePolicy", + "resourcePolicyResource", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_patch_unary_rest_interceptors(null_interceptor): + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ResourcePoliciesRestInterceptor(), + ) + client = ResourcePoliciesClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "post_patch" + ) as post, mock.patch.object( + transports.ResourcePoliciesRestInterceptor, "pre_patch" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = compute.PatchResourcePolicyRequest.pb( + compute.PatchResourcePolicyRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = compute.Operation.to_json(compute.Operation()) + + request = 
compute.PatchResourcePolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = compute.Operation() + + client.patch_unary( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_patch_unary_rest_bad_request( + transport: str = "rest", request_type=compute.PatchResourcePolicyRequest +): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + request_init["resource_policy_resource"] = { + "creation_timestamp": "creation_timestamp_value", + "description": "description_value", + "disk_consistency_group_policy": {}, + "group_placement_policy": { + "availability_domain_count": 2650, + "collocation": "collocation_value", + "vm_count": 875, + }, + "id": 205, + "instance_schedule_policy": { + "expiration_time": "expiration_time_value", + "start_time": "start_time_value", + "time_zone": "time_zone_value", + "vm_start_schedule": {"schedule": "schedule_value"}, + "vm_stop_schedule": {}, + }, + "kind": "kind_value", + "name": "name_value", + "region": "region_value", + "resource_status": { + "instance_schedule_policy": { + "last_run_start_time": "last_run_start_time_value", + "next_run_start_time": "next_run_start_time_value", + } + }, + "self_link": "self_link_value", + "snapshot_schedule_policy": { + "retention_policy": { + "max_retention_days": 1933, + "on_source_disk_delete": "on_source_disk_delete_value", + }, + "schedule": { + "daily_schedule": { + "days_in_cycle": 1366, + "duration": "duration_value", + "start_time": "start_time_value", + }, + "hourly_schedule": { + "duration": "duration_value", + "hours_in_cycle": 1494, + "start_time": "start_time_value", + }, + "weekly_schedule": { + "day_of_weeks": [ + { + "day": "day_value", + "duration": "duration_value", + "start_time": "start_time_value", + } + ] + }, + }, + "snapshot_properties": { + "chain_name": "chain_name_value", + "guest_flush": True, + "labels": {}, + "storage_locations": [ + "storage_locations_value1", + "storage_locations_value2", + ], + }, + }, + "status": "status_value", + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch_unary(request) + + +def test_patch_unary_rest_flattened(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation() + + # get arguments that satisfy an http rule for this method + sample_request = { + "project": "sample1", + "region": "sample2", + "resource_policy": "sample3", + } + + # get truthy value for each flattened field + mock_args = dict( + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = compute.Operation.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.patch_unary(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}" + % client.transport._host, + args[1], + ) + + +def test_patch_unary_rest_flattened_error(transport: str = "rest"): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch_unary( + compute.PatchResourcePolicyRequest(), + project="project_value", + region="region_value", + resource_policy="resource_policy_value", + resource_policy_resource=compute.ResourcePolicy( + creation_timestamp="creation_timestamp_value" + ), + ) + + +def test_patch_unary_rest_error(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ @@ -4362,6 +5320,7 @@ def test_resource_policies_base_transport(): "get_iam_policy", "insert", "list", + "patch", "set_iam_policy", "test_iam_permissions", ) @@ -4519,6 +5478,9 @@ def test_resource_policies_client_transport_session_collision(transport_name): session1 = client1.transport.list._session session2 = client2.transport.list._session assert session1 != session2 + session1 = client1.transport.patch._session + session2 = client2.transport.patch._session + assert session1 != session2 session1 = client1.transport.set_iam_policy._session session2 = client2.transport.set_iam_policy._session assert session1 != session2 diff --git a/tests/unit/gapic/compute_v1/test_service_attachments.py b/tests/unit/gapic/compute_v1/test_service_attachments.py index 83b59ab40..074f6b74e 100644 --- a/tests/unit/gapic/compute_v1/test_service_attachments.py +++ b/tests/unit/gapic/compute_v1/test_service_attachments.py @@ -2255,6 +2255,7 @@ def test_insert_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2524,6 +2525,7 @@ def test_insert_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2592,7 +2594,7 @@ def test_insert_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( 
connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -2636,7 +2638,7 @@ def test_insert_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -2667,6 +2669,7 @@ def test_insert_unary_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2914,6 +2917,7 @@ def test_insert_unary_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -2982,7 +2986,7 @@ def test_insert_unary_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3026,7 +3030,7 @@ def test_insert_unary_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3420,6 +3424,7 @@ def test_patch_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -3696,6 +3701,7 @@ def test_patch_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -3769,7 +3775,7 @@ def test_patch_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3814,7 +3820,7 @@ def test_patch_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -3849,6 +3855,7 @@ def test_patch_unary_rest(request_type): request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -4105,6 +4112,7 @@ def test_patch_unary_rest_bad_request( request_init["service_attachment_resource"] = { "connected_endpoints": [ { + "consumer_network": "consumer_network_value", "endpoint": "endpoint_value", "psc_connection_id": 1793, "status": "status_value", @@ -4178,7 +4186,7 @@ def test_patch_unary_rest_flattened(): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ), @@ -4223,7 +4231,7 @@ def test_patch_unary_rest_flattened_error(transport: str = "rest"): service_attachment_resource=compute.ServiceAttachment( connected_endpoints=[ 
compute.ServiceAttachmentConnectedEndpoint( - endpoint="endpoint_value" + consumer_network="consumer_network_value" ) ] ),
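
Two of the API-surface changes the regenerated tests above exercise are the new Scheduling.local_ssd_recovery_timeout field and the new ResourcePolicies patch method. A minimal sketch of calling them as a client (resource names and the one-hour timeout are placeholder values, and this assumes application-default credentials rather than the anonymous credentials used in the tests):

    from google.cloud import compute_v1

    # Scheduling.local_ssd_recovery_timeout is a Duration-style message with
    # "seconds" and "nanos" fields, as seen in the request dicts above;
    # proto-plus accepts a plain dict for the nested message.
    scheduling = compute_v1.Scheduling(
        automatic_restart=True,
        local_ssd_recovery_timeout={"seconds": 3600, "nanos": 0},
    )

    # The new ResourcePolicies patch method, mirroring the flattened call in
    # test_patch_rest_flattened. All names here are placeholders.
    client = compute_v1.ResourcePoliciesClient()
    operation = client.patch(
        project="my-project",
        region="us-central1",
        resource_policy="my-policy",
        resource_policy_resource=compute_v1.ResourcePolicy(
            description="updated description",
        ),
    )
    # patch returns an extended operation; result() blocks until it completes.
    operation.result()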