diff --git a/.gitignore b/.gitignore
index 97367a64c..aa280469d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ example.tf
terraform.tfvars
terraform.tfplan
terraform.tfstate
+.terraform.lock.hcl
bin/
terraform-provider-google
modules-dev/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3ca18c618..b14ab957f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,12 +1,41 @@
-## 0.76.0 (Unreleased)
+## 0.77.0 (Unreleased)
+BUG FIXES:
+* iam: fix access bindings deletion
+* ydb: fix panic when a database was removed outside of Terraform
+* storage: fix issue where an error returned while reading extended bucket settings was treated as critical.
+  It is now written to the log output instead of interrupting plan execution.
+* greenplum: fix bug where `security_group_ids` was not set in `resource_yandex_mdb_greenplum_cluster`
+* greenplum: fix bug where `access` was not set in `resource_yandex_mdb_greenplum_cluster`
+
+ENHANCEMENTS:
+* mdb: add `sqlcollation` attribute to `yandex_mdb_sqlserver_cluster` resource and data source
+* serverless: increase operation timeouts in `yandex_function` resource
+
+FEATURES:
+* k8s: add `instance_template.name` attribute to `node group` resource and data source
+* k8s: add `instance_template.ipv4_dns_records`, `instance_template.ipv6_dns_records` attributes to `node group` resource and data source
+* k8s: add `instance_template.labels` attribute to `node group` resource and data source
+* greenplum: add fields `greenplum_config` and `pooler_config` in `resource_yandex_mdb_greenplum_cluster`
+* **New Resource:** `yandex_serverless_container_iam_binding`
+* **New Resource:** `yandex_ydb_database_iam_binding`
+
+## 0.76.0 (July 01, 2022)
BUG FIXES:
* alb: `send` and `receive` attributes can be empty now in `yandex_alb_backend_group` resource and data source
* alb: fixed `yandex_alb_load_balancer` resource and data source documentation
* alb: support `mode` attribute in `load_balancing_config` entity in `yandex_alb_backend_group` resource and data source
* mysql: `mysql_config.sql_mode` is setting now when creating a cluster
-
+* mdb: fix updating `maintenance_window` when it is the only changed attribute in `yandex_mdb_redis_cluster`
+* clickhouse: fix issue with `shard_group` update
+* serverless: fix issue with `retention_period` update in `yandex_logging_group` resource
+
ENHANCEMENTS:
* alb: refactoring `yandex_alb_backend_group` resource and data source
+* alb: extra validation added to `yandex_alb_load_balancer` and `yandex_alb_virtual_host` resources and data sources
+* mdb: add `client_output_buffer_limit_normal` and `client_output_buffer_limit_pubsub` attributes to `yandex_mdb_redis_cluster` resource and data source
+* mdb: add `replica_priority` attribute to `yandex_mdb_redis_cluster` resource and data source
+* mdb: add `assign_public_ip` attribute to `yandex_mdb_redis_cluster` resource and data source
+* ymq: add `region_id` attribute to resource
FEATURES:
* compute: support for creating `yandex_compute_instance` with local disks
diff --git a/go.mod b/go.mod
index ec3072a46..0f30a847d 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
github.com/client9/misspell v0.3.4
github.com/fatih/structs v1.1.0
github.com/frankban/quicktest v1.14.0 // indirect
- github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
+ github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.2
github.com/golang/snappy v0.0.4
@@ -30,12 +30,12 @@ require (
github.com/pierrec/lz4 v2.6.1+incompatible
github.com/stretchr/objx v0.1.1
github.com/stretchr/testify v1.7.0
- github.com/yandex-cloud/go-genproto v0.0.0-20220427145821-09b0e1a69c0c
- github.com/yandex-cloud/go-sdk v0.0.0-20220427150059-eb82b5db2827
- golang.org/x/net v0.0.0-20220412020605-290c469a71a5
- golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
- google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9
- google.golang.org/grpc v1.45.0
+ github.com/yandex-cloud/go-genproto v0.0.0-20220718095639-7971ba320057
+ github.com/yandex-cloud/go-sdk v0.0.0-20220718100739-50070cd9746e
+ golang.org/x/net v0.0.0-20220630215102-69896b714898
+ golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e // indirect
+ google.golang.org/genproto v0.0.0-20220630174209-ad1d48641aa7
+ google.golang.org/grpc v1.47.0
google.golang.org/protobuf v1.28.0
)
diff --git a/go.sum b/go.sum
index 31d9d4feb..c97c32c13 100644
--- a/go.sum
+++ b/go.sum
@@ -167,6 +167,7 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -204,6 +205,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/esimonov/ifshort v1.0.3 h1:JD6x035opqGec5fZ0TLjXeROD2p5H7oLGn8MKfy9HTM=
@@ -290,8 +292,8 @@ github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ=
-github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -925,10 +927,14 @@ github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6e
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yandex-cloud/go-genproto v0.0.0-20220427145821-09b0e1a69c0c h1:LYGfLFlyGt/Q8Ik+m9ggoYwpHAKMhIbebM0VHwDt6F4=
-github.com/yandex-cloud/go-genproto v0.0.0-20220427145821-09b0e1a69c0c/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
-github.com/yandex-cloud/go-sdk v0.0.0-20220427150059-eb82b5db2827 h1:GIr8xGRoNq7JsJ1GbRVR5rIBJQIYjDjr5uNZM9kD324=
-github.com/yandex-cloud/go-sdk v0.0.0-20220427150059-eb82b5db2827/go.mod h1:KbdvVhd2QWGD+tUMEL4k7kai2LbbSGJ+YhkteUVpoP0=
+github.com/yandex-cloud/go-genproto v0.0.0-20220704123856-8e873fc548ca h1:cwUthmZSUaOEwDWEMspkay/NNgfSjl2KrNGGKve8gww=
+github.com/yandex-cloud/go-genproto v0.0.0-20220704123856-8e873fc548ca/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
+github.com/yandex-cloud/go-genproto v0.0.0-20220718095639-7971ba320057 h1:E7Hb3za9EHWx/AiXNGjBxOPudLFSelaRtyVsvxturM4=
+github.com/yandex-cloud/go-genproto v0.0.0-20220718095639-7971ba320057/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
+github.com/yandex-cloud/go-sdk v0.0.0-20220704124340-b9137a069154 h1:QPDUAzgN8SfMy2mo2a6ZVFMftST6I95kuFFwlbcWBSQ=
+github.com/yandex-cloud/go-sdk v0.0.0-20220704124340-b9137a069154/go.mod h1:XeEIr+Nx2+v6zlNaeRGQufLWGs6Btg4uOaHnl0b4VyE=
+github.com/yandex-cloud/go-sdk v0.0.0-20220718100739-50070cd9746e h1:p/APVZzglWfiQJY+rIPH94SgAqD7PQzoAuAvvVU0iY0=
+github.com/yandex-cloud/go-sdk v0.0.0-20220718100739-50070cd9746e/go.mod h1:NszadvtI2oAulirw9kCjAXjSL31EPqe35CbLi0g5Se8=
github.com/yeya24/promlinter v0.1.0 h1:goWULN0jH5Yajmu/K+v1xCqIREeB+48OiJ2uu2ssc7U=
github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc=
github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
@@ -1085,8 +1091,8 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220630215102-69896b714898 h1:K7wO6V1IrczY9QOQ2WkVpw4JQSwCd52UsxVEirZUfiw=
+golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1196,9 +1202,9 @@ golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e h1:CsOuNlbOuf0mzxJIefr6Q4uAUetRUwZE4qt7VfzP+xo=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
@@ -1425,8 +1431,8 @@ google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwy
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9 h1:XGQ6tc+EnM35IAazg4y6AHmUg4oK8NXsXaILte1vRlk=
-google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220630174209-ad1d48641aa7 h1:q4zUJDd0+knPFB9x20S3vnxzlYNBbt8Yd7zBMVMteeM=
+google.golang.org/genproto v0.0.0-20220630174209-ad1d48641aa7/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1458,8 +1464,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
diff --git a/website/docs/d/datasource_kubernetes_node_group.html.markdown b/website/docs/d/datasource_kubernetes_node_group.html.markdown
index 6021f1674..6823b1444 100644
--- a/website/docs/d/datasource_kubernetes_node_group.html.markdown
+++ b/website/docs/d/datasource_kubernetes_node_group.html.markdown
@@ -85,6 +85,9 @@ The `instance_template` block supports:
* `placement_policy` - (Optional) The placement policy configuration. The structure is documented below.
* `container_runtime` - Container runtime configuration. The structure is documented below.
+
+* `name` - Name template of the instance.
+* `labels` - Labels assigned to compute nodes (instances) created by the Node Group.
---
The `network_interface` block supports:
@@ -94,6 +97,26 @@ The `network_interface` block supports:
* `security_group_ids` - Security group ids for network interface.
* `ipv4` - Indicates whether the IPv4 address has been assigned.
* `ipv6` - Indicates whether the IPv6 address has been assigned.
+* `ipv4_dns_records` - List of configurations for creating ipv4 DNS records. The structure is documented below.
+* `ipv6_dns_records` - List of configurations for creating ipv6 DNS records. The structure is documented below.
+
+---
+
+The `ipv4_dns_records` block supports:
+
+* `fqdn` - DNS record FQDN.
+* `dns_zone_id` - DNS zone ID (if not set, private zone is used).
+* `ttl` - DNS record TTL (in seconds).
+* `ptr` - When set to true, also create a PTR DNS record.
+
+---
+
+The `ipv6_dns_records` block supports:
+
+* `fqdn` - DNS record FQDN.
+* `dns_zone_id` - DNS zone ID (if not set, private zone is used).
+* `ttl` - DNS record TTL (in seconds).
+* `ptr` - When set to true, also create a PTR DNS record.
---
diff --git a/website/docs/d/datasource_mdb_greenplum_cluster.html.markdown b/website/docs/d/datasource_mdb_greenplum_cluster.html.markdown
index 6a3215be2..4ad3d86c6 100644
--- a/website/docs/d/datasource_mdb_greenplum_cluster.html.markdown
+++ b/website/docs/d/datasource_mdb_greenplum_cluster.html.markdown
@@ -66,6 +66,9 @@ exported:
* `access` - Access policy to the Greenplum cluster. The structure is documented below.
* `backup_window_start` - Time to start the daily backup, in the UTC timezone. The structure is documented below.
+* `pooler_config` - Configuration of the connection pooler. The structure is documented below.
+* `greenplum_config` - Greenplum cluster config.
+
* `user_name` - Greenplum cluster admin user name.
* `security_group_ids` - A set of ids of security groups assigned to hosts of the cluster.
* `deletion_protection` - Flag to protect the cluster from deletion.
@@ -98,3 +101,11 @@ The `access` block supports:
* `data_lens` - Allow access for [Yandex DataLens](https://cloud.yandex.com/services/datalens).
* `web_sql` - Allows access for SQL queries in the management console.
+
+
+The `pooler_config` block supports:
+
+* `pooling_mode` - Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string).
+* `pool_size` - Value for `pool_size` [parameter in Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_size-integer).
+* `pool_client_idle_timeout` - Value for `pool_client_idle_timeout` [parameter in Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_ttl-integer).
+
diff --git a/website/docs/d/datasource_mdb_redis_cluster.html.markdown b/website/docs/d/datasource_mdb_redis_cluster.html.markdown
index bae96bb06..1bccd1568 100644
--- a/website/docs/d/datasource_mdb_redis_cluster.html.markdown
+++ b/website/docs/d/datasource_mdb_redis_cluster.html.markdown
@@ -63,6 +63,8 @@ The `config` block supports:
* `slowlog_max_len` - Slow queries log length.
* `databases` - Number of databases (changing requires redis-server restart).
* `version` - Version of Redis (5.0, 6.0 or 6.2).
+* `client_output_buffer_limit_normal` - Normal clients output buffer limits.
+* `client_output_buffer_limit_pubsub` - Pubsub clients output buffer limits.
The `resources` block supports:
@@ -78,6 +80,8 @@ The `host` block supports:
be a part of the network to which the cluster belongs.
* `shard_name` - The name of the shard to which the host belongs.
* `fqdn` - The fully qualified domain name of the host.
+* `replica_priority` - Replica priority of the current replica (applies to non-sharded clusters only).
+* `assign_public_ip` - Sets whether the host should get a public IP address or not.
The `maintenance_window` block supports:
diff --git a/website/docs/d/datasource_mdb_sqlserver_cluster.html.markdown b/website/docs/d/datasource_mdb_sqlserver_cluster.html.markdown
index 65e2ee65e..8363253eb 100644
--- a/website/docs/d/datasource_mdb_sqlserver_cluster.html.markdown
+++ b/website/docs/d/datasource_mdb_sqlserver_cluster.html.markdown
@@ -55,6 +55,8 @@ exported:
* `sqlserver_config` - SQLServer cluster config.
* `security_group_ids` - A set of ids of security groups assigned to hosts of the cluster.
* `host_group_ids` - A list of IDs of the host groups hosting VMs of the cluster.
+* `deletion_protection` - Inhibits deletion of the cluster. Can be either `true` or `false`.
+* `sqlcollation` - SQL collation that the cluster was created with. This attribute cannot be changed after the cluster is created.
The `resources` block supports:
diff --git a/website/docs/d/datasource_message_queue.html.markdown b/website/docs/d/datasource_message_queue.html.markdown
index 7695da118..876c578c3 100644
--- a/website/docs/d/datasource_message_queue.html.markdown
+++ b/website/docs/d/datasource_message_queue.html.markdown
@@ -22,6 +22,7 @@ data "yandex_message_queue" "example_queue" {
## Argument Reference
* `name` - (Required) Queue name.
+* `region_id` - (Optional) The region ID where the message queue is located.
## Attributes Reference
diff --git a/website/docs/r/kubernetes_node_group.html.markdown b/website/docs/r/kubernetes_node_group.html.markdown
index 7505a07ff..e4a322d3f 100644
--- a/website/docs/r/kubernetes_node_group.html.markdown
+++ b/website/docs/r/kubernetes_node_group.html.markdown
@@ -135,6 +135,17 @@ The `instance_template` block supports:
* `network_acceleration_type` - (Optional) Type of network acceleration. Values: `standard`, `software_accelerated`.
* `container_runtime` - (Optional) Container runtime configuration. The structure is documented below.
+
+* `name` - (Optional) Name template of the instance.
+To be unique it must contain at least one of the instance-unique placeholders:
+`{instance.short_id}`, `{instance.index}`, or a combination of `{instance.zone_id}` and `{instance.index_in_zone}`.
+Example: `my-instance-{instance.index}`.
+If not set, the default `{instance_group.id}-{instance.short_id}` is used.
+The template may also contain other placeholders; see the [Compute Instance group metadata doc](https://cloud.yandex.com/en-ru/docs/compute/api-ref/grpc/instance_group_service) for the full list.
+
+* `labels` - (Optional) Labels that will be assigned to compute nodes (instances) created by the Node Group. A sketch combining `name` and `labels` is shown below.
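+
+A minimal sketch of these attributes inside an `instance_template` block (label values are illustrative placeholders):
+
+```hcl
+  instance_template {
+    name = "my-instance-{instance.index}"
+
+    labels = {
+      environment = "dev"
+      team        = "platform"
+    }
+  }
+```
+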
---
The `boot_disk` block supports:
@@ -162,6 +173,26 @@ The `network_interface` block supports:
* `ipv6` - (Optional) If true, allocate an IPv6 address for the interface. The address will be automatically assigned from the specified subnet.
* `nat` - A public address that can be used to access the internet over NAT.
* `security_group_ids` - (Optional) Security group ids for network interface.
+* `ipv4_dns_records` - (Optional) List of configurations for creating ipv4 DNS records. The structure is documented below.
+* `ipv6_dns_records` - (Optional) List of configurations for creating ipv6 DNS records. The structure is documented below.
+
+---
+
+The `ipv4_dns_records` block supports:
+
+* `fqdn` - (Required) DNS record FQDN.
+* `dns_zone_id` - (Optional) DNS zone ID (if not set, private zone is used).
+* `ttl` - (Optional) DNS record TTL (in seconds).
+* `ptr` - (Optional) When set to true, also create a PTR DNS record.
+
+---
+
+The `ipv6_dns_records` block supports:
+
+* `fqdn` - (Required) DNS record FQDN.
+* `dns_zone_id` - (Optional) DNS zone ID (if not set, private zone is used).
+* `ttl` - (Optional) DNS record TTL (in seconds).
+* `ptr` - (Optional) When set to true, also create a PTR DNS record.
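+
+A minimal sketch of a `network_interface` block that requests IPv4 DNS records for the nodes (FQDN and zone ID values are placeholders):
+
+```hcl
+  network_interface {
+    nat        = true
+    subnet_ids = [yandex_vpc_subnet.my-subnet.id]
+
+    ipv4_dns_records {
+      fqdn        = "my-node-{instance.index}.example-zone."
+      dns_zone_id = "my-dns-zone-id"
+      ttl         = 300
+      ptr         = true
+    }
+  }
+```
+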
---
diff --git a/website/docs/r/mdb_greenplum_cluster.html.markdown b/website/docs/r/mdb_greenplum_cluster.html.markdown
index bb1861133..ca076f631 100644
--- a/website/docs/r/mdb_greenplum_cluster.html.markdown
+++ b/website/docs/r/mdb_greenplum_cluster.html.markdown
@@ -51,6 +51,11 @@ resource "yandex_mdb_greenplum_cluster" "foo" {
web_sql = true
}
+ greenplum_config = {
+ max_connections = 395
+ gp_workfile_compression = "false"
+ }
+
user_name = "admin_user"
user_password = "your_super_secret_password"
@@ -119,6 +124,10 @@ The following arguments are supported:
* `backup_window_start` - (Optional) Time to start the daily backup, in the UTC timezone. The structure is documented below.
+* `pooler_config` - (Optional) Configuration of the connection pooler. The structure is documented below.
+
+* `greenplum_config` - (Optional) Greenplum cluster configuration. For details, see the "Greenplum cluster settings" section below.
+
- - -
* `user_name` - (Required) Greenplum cluster admin user name.
@@ -157,6 +166,14 @@ The `access` block supports:
* `web_sql` - Allows access for SQL queries in the management console
+The `pooler_config` block supports:
+
+* `pooling_mode` - (Optional) Mode that the connection pooler is working in. See descriptions of all modes in the [documentation for Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool-string).
+
+* `pool_size` - (Optional) Value for `pool_size` [parameter in Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_size-integer).
+
+* `pool_client_idle_timeout` - (Optional) Value for `pool_client_idle_timeout` [parameter in Odyssey](https://github.com/yandex/odyssey/blob/master/documentation/configuration.md#pool_ttl-integer).
+
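+A minimal sketch of a `pooler_config` block (the `pooling_mode` value is an assumption here; consult the Odyssey pool modes for the accepted values):
+
+```hcl
+  pooler_config {
+    pooling_mode             = "TRANSACTION"
+    pool_size                = 30
+    pool_client_idle_timeout = 0
+  }
+```
+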
## Attributes Reference
In addition to the arguments listed above, the following computed attributes are exported:
@@ -187,3 +204,17 @@ A cluster can be imported using the `id` of the resource, e.g.
```
$ terraform import yandex_mdb_greenplum_cluster.foo cluster_id
```
+
+## Greenplum cluster settings
+
+| Setting name and type \ Greenplum version | 6.17 | 6.19 |
+| ------------------------------------------| ---- | ---- |
+| max_connections : integer | supported | supported |
+| max_prepared_transactions : integer | supported | supported |
+| gp_workfile_limit_per_query : integer | supported | supported |
+| gp_workfile_limit_files_per_query : integer | supported | supported |
+| max_slot_wal_keep_size : integer | supported | supported |
+| gp_workfile_limit_per_segment : integer | supported | supported |
+| gp_workfile_compression : boolean | supported | supported |
+| max_statement_mem : integer | - | supported |
+| log_statement : one of "LOG_STATEMENT_UNSPECIFIED" (0), "LOG_STATEMENT_NONE" (1), "LOG_STATEMENT_DDL" (2), "LOG_STATEMENT_MOD" (3), "LOG_STATEMENT_ALL" (4) | - | supported |
\ No newline at end of file
diff --git a/website/docs/r/mdb_redis_cluster.html.markdown b/website/docs/r/mdb_redis_cluster.html.markdown
index f5135735b..d0983c94b 100644
--- a/website/docs/r/mdb_redis_cluster.html.markdown
+++ b/website/docs/r/mdb_redis_cluster.html.markdown
@@ -165,6 +165,12 @@ The `config` block supports:
* `version` - (Required) Version of Redis (5.0, 6.0 or 6.2).
+* `client_output_buffer_limit_normal` - (Optional) Normal clients output buffer limits.
+ See [redis config file](https://github.com/redis/redis/blob/6.2/redis.conf#L1841).
+
+* `client_output_buffer_limit_pubsub` - (Optional) Pubsub clients output buffer limits.
+ See [redis config file](https://github.com/redis/redis/blob/6.2/redis.conf#L1843).
+
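+A minimal sketch of a `config` block with these limits (the values are placeholders and assume the redis.conf-style `<hard limit> <soft limit> <soft seconds>` string format):
+
+```hcl
+  config {
+    password = "your_password"
+    version  = "6.2"
+
+    client_output_buffer_limit_normal = "16777216 8388608 60"
+    client_output_buffer_limit_pubsub = "16777216 8388608 60"
+  }
+```
+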
The `resources` block supports:
* `resources_preset_id` - (Required) The ID of the preset for computational resources available to a host (CPU, memory etc.).
@@ -186,6 +192,10 @@ The `host` block supports:
* `shard_name` (Optional) - The name of the shard to which the host belongs.
+* `replica_priority` - (Optional) Replica priority of the current replica (applies to non-sharded clusters only). See the sketch below.
+
+* `assign_public_ip` - (Optional) Sets whether the host should get a public IP address or not.
+
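+A minimal sketch of a `host` block using these attributes (zone and subnet references are placeholders):
+
+```hcl
+  host {
+    zone             = "ru-central1-a"
+    subnet_id        = yandex_vpc_subnet.foo.id
+    replica_priority = 100
+    assign_public_ip = false
+  }
+```
+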
The `maintenance_window` block supports:
* `type` - (Required) Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. A day and hour of window need to be specified with weekly window.
diff --git a/website/docs/r/mdb_sqlserver_cluster.html.markdown b/website/docs/r/mdb_sqlserver_cluster.html.markdown
index 9b4b2f050..8a98f52da 100644
--- a/website/docs/r/mdb_sqlserver_cluster.html.markdown
+++ b/website/docs/r/mdb_sqlserver_cluster.html.markdown
@@ -162,6 +162,8 @@ The following arguments are supported:
* `host_group_ids` - (Optional) A list of IDs of the host groups hosting VMs of the cluster.
+* `sqlcollation` - (Optional) SQL collation that the cluster will be created with. This attribute cannot be changed after the cluster is created. See the sketch below.
+
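+A minimal sketch (the collation name below is only an illustrative SQL Server collation):
+
+```hcl
+resource "yandex_mdb_sqlserver_cluster" "example" {
+  # ... other required arguments ...
+
+  sqlcollation = "Cyrillic_General_CI_AI"
+}
+```
+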
- - -
The `resources` block supports:
diff --git a/website/docs/r/message_queue.html.markdown b/website/docs/r/message_queue.html.markdown
index e4ee0931d..a87e132c4 100644
--- a/website/docs/r/message_queue.html.markdown
+++ b/website/docs/r/message_queue.html.markdown
@@ -67,6 +67,9 @@ The following arguments are supported:
* `secret_key` - (Optional) The [secret key](https://cloud.yandex.com/docs/iam/operations/sa/create-access-key) to use when applying changes. If omitted, `ymq_secret_key` specified in provider config is used. For more information see [documentation](https://cloud.yandex.com/docs/message-queue/quickstart).
+* `region_id` - (Optional, forces new resource) ID of the region where the message queue is located.
+The default is `ru-central1`. A usage sketch is shown below.
+
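+A minimal sketch of pinning a queue to a region (the queue name is a placeholder):
+
+```hcl
+resource "yandex_message_queue" "example_queue" {
+  name      = "ymq-terraform-example"
+  region_id = "ru-central1"
+}
+```
+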
## Attributes Reference
Message Queue also has the following attributes:
diff --git a/website/docs/r/serverless_container_iam_binding.html.markdown b/website/docs/r/serverless_container_iam_binding.html.markdown
new file mode 100644
index 000000000..8002a5a56
--- /dev/null
+++ b/website/docs/r/serverless_container_iam_binding.html.markdown
@@ -0,0 +1,34 @@
+---
+layout: "yandex"
+page_title: "Yandex: yandex_serverless_container_iam_binding"
+sidebar_current: "docs-yandex-serverless-container-iam-binding"
+description: |-
+Allows management of a single IAM binding for a [Yandex Serverless Container](https://cloud.yandex.com/docs/serverless-containers/).
+---
+
+## yandex\_serverless\_container\_iam\_binding
+
+Allows creation and management of a single IAM binding for an existing
+[Yandex Serverless Container](https://cloud.yandex.com/docs/serverless-containers/).
+
+## Example Usage
+
+```hcl
+resource "yandex_serverless_container_iam_binding" "container-iam" {
+ container_id = "your-container-id"
+ role = "serverless.containers.invoker"
+
+ members = [
+ "system:allUsers",
+ ]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `container_id` - (Required) The [Yandex Serverless Container](https://cloud.yandex.com/docs/serverless-containers/) ID to apply a binding to.
+
+* `role` - (Required) The role that should be applied.
+
+* `members` - (Required) Identities that will be granted the privilege in `role`.
+ Each entry can have one of the following values:
+ * **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
+ * **serviceAccount:{service_account_id}**: A unique service account ID.
+ * **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
diff --git a/website/docs/r/ydb_database_iam_binding.html.markdown b/website/docs/r/ydb_database_iam_binding.html.markdown
new file mode 100644
index 000000000..3cf18fab6
--- /dev/null
+++ b/website/docs/r/ydb_database_iam_binding.html.markdown
@@ -0,0 +1,53 @@
+---
+layout: "yandex"
+page_title: "Yandex: yandex_ydb_database_iam_binding"
+sidebar_current: "docs-yandex-ydb-database-iam-binding"
+description: |-
+Allows management of a single IAM binding for a [Managed service for YDB](https://cloud.yandex.com/docs/ydb/).
+---
+
+## yandex\_ydb\_database\_iam\_binding
+
+Allows creation and management of a single binding within IAM policy for
+an existing Managed YDB Database instance.
+
+## Example Usage
+
+```hcl
+resource "yandex_ydb_database_serverless" "database1" {
+ name = "test-ydb-serverless"
+ folder_id = data.yandex_resourcemanager_folder.test_folder.id
+}
+
+resource "yandex_ydb_database_iam_binding" "viewer" {
+ database_id = yandex_ydb_database_serverless.database1.id
+ role = "ydb.viewer"
+
+ members = [
+ "userAccount:foo_user_id",
+ ]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `database_id` - (Required) The [Managed Service YDB instance](https://cloud.yandex.com/docs/ydb/) Database ID to apply a binding to.
+
+* `role` - (Required) The role that should be applied. See [roles](https://cloud.yandex.com/docs/ydb/security/).
+
+* `members` - (Required) Identities that will be granted the privilege in `role`.
+ Each entry can have one of the following values:
+ * **userAccount:{user_id}**: A unique user ID that represents a specific Yandex account.
+ * **serviceAccount:{service_account_id}**: A unique service account ID.
+ * **system:{allUsers|allAuthenticatedUsers}**: see [system groups](https://cloud.yandex.com/docs/iam/concepts/access-control/system-group)
+
+## Import
+
+IAM binding imports use space-delimited identifiers; first the resource in question and then the role.
+These bindings can be imported using the `database_id` and role, e.g.
+
+```
+$ terraform import yandex_ydb_database_iam_binding.viewer "database_id ydb.viewer"
+```
diff --git a/website/yandex.erb b/website/yandex.erb
index d428210b4..83e66104e 100644
--- a/website/yandex.erb
+++ b/website/yandex.erb
@@ -175,6 +175,9 @@
>
yandex_vpc_subnet
+ >
+ yandex_ydb_database_iam_binding
+
>
yandex_ydb_database_dedicated
diff --git a/yandex/alb_structures.go b/yandex/alb_structures.go
index ea1c3dfc7..a76393834 100644
--- a/yandex/alb_structures.go
+++ b/yandex/alb_structures.go
@@ -57,33 +57,43 @@ func expandALBHeaderModification(d *schema.ResourceData, key string) ([]*appload
var modifications []*apploadbalancer.HeaderModification
for _, currentKey := range IterateKeys(d, key) {
- modification := expandALBModification(d, currentKey)
+ modification, err := expandALBModification(d, currentKey)
+ if err != nil {
+ return nil, err
+ }
modifications = append(modifications, modification)
}
return modifications, nil
}
-func expandALBModification(d *schema.ResourceData, key string) *apploadbalancer.HeaderModification {
+func expandALBModification(d *schema.ResourceData, path string) (*apploadbalancer.HeaderModification, error) {
modification := &apploadbalancer.HeaderModification{}
- if v, ok := d.GetOk(key + "name"); ok {
+ if v, ok := d.GetOk(path + "name"); ok {
modification.SetName(v.(string))
}
- if v, ok := d.GetOk(key + "replace"); ok {
- modification.SetReplace(v.(string))
+ replace, gotReplace := d.GetOk(path + "replace")
+ remove, gotRemove := d.GetOk(path + "remove")
+ appendValue, gotAppend := d.GetOk(path + "append")
+
+ if isPlural(gotReplace, gotRemove, gotAppend) {
+ return nil, fmt.Errorf("Cannot specify more than one of replace and remove and append operation for the header modification at the same time")
+ }
+ if gotReplace {
+ modification.SetReplace(replace.(string))
}
- if v, ok := d.GetOk(key + "append"); ok {
- modification.SetAppend(v.(string))
+ if gotRemove {
+ modification.SetRemove(remove.(bool))
}
- if v, ok := d.GetOk(key + "remove"); ok {
- modification.SetRemove(v.(bool))
+ if gotAppend {
+ modification.SetAppend(appendValue.(string))
}
- return modification
+ return modification, nil
}
func expandALBRoutes(d *schema.ResourceData) ([]*apploadbalancer.Route, error) {
@@ -111,7 +121,7 @@ func expandALBRoute(d *schema.ResourceData, path string) (*apploadbalancer.Route
_, gotHTTPRoute := d.GetOk(path + "http_route")
_, gotGRPCRoute := d.GetOk(path + "grpc_route")
- if gotHTTPRoute && gotGRPCRoute {
+ if isPlural(gotHTTPRoute, gotGRPCRoute) {
return nil, fmt.Errorf("Cannot specify both HTTP route and gRPC route for the route")
}
if !gotHTTPRoute && !gotGRPCRoute {
@@ -150,8 +160,8 @@ func expandALBHTTPRoute(d *schema.ResourceData, path string) (*apploadbalancer.H
_, gotRedirectAction := d.GetOk(path + "redirect_action")
_, gotDirectResponseAction := d.GetOk(path + "direct_response_action")
- if gotHTTPRouteAction && gotRedirectAction && gotDirectResponseAction {
- return nil, fmt.Errorf("Cannot specify HTTP route action and redirect action and direct response action for the HTTP route at the same time")
+ if isPlural(gotHTTPRouteAction, gotRedirectAction, gotDirectResponseAction) {
+ return nil, fmt.Errorf("Cannot specify more than one of HTTP route action and redirect action and direct response action for the HTTP route at the same time")
}
if !gotHTTPRouteAction && !gotRedirectAction && !gotDirectResponseAction {
return nil, fmt.Errorf("Either HTTP route action or redirect action or direct response action should be specified for the HTTP route")
@@ -164,7 +174,11 @@ func expandALBHTTPRoute(d *schema.ResourceData, path string) (*apploadbalancer.H
httpRoute.SetRoute(action)
}
if gotRedirectAction {
- httpRoute.SetRedirect(expandALBRedirectAction(d, path+"redirect_action.0."))
+ action, err := expandALBRedirectAction(d, path+"redirect_action.0.")
+ if err != nil {
+ return nil, err
+ }
+ httpRoute.SetRedirect(action)
}
if gotDirectResponseAction {
httpRoute.SetDirectResponse(expandALBDirectResponseAction(d, path+"direct_response_action.0."))
@@ -188,7 +202,7 @@ func expandALBDirectResponseAction(d *schema.ResourceData, path string) *appload
return directResponseAction
}
-func expandALBRedirectAction(d *schema.ResourceData, path string) *apploadbalancer.RedirectAction {
+func expandALBRedirectAction(d *schema.ResourceData, path string) (*apploadbalancer.RedirectAction, error) {
readStr := func(field string) (string, bool) {
s, ok := d.GetOk(path + field)
if ok {
@@ -216,12 +230,18 @@ func expandALBRedirectAction(d *schema.ResourceData, path string) *apploadbalanc
redirectAction.RemoveQuery = val.(bool)
}
- if val, ok := readStr("replace_path"); ok {
- redirectAction.SetReplacePath(val)
+ replacePath, gotReplacePath := readStr("replace_path")
+ replacePrefix, gotReplacePrefix := readStr("replace_prefix")
+
+ if isPlural(gotReplacePrefix, gotReplacePath) {
+ return nil, fmt.Errorf("Cannot specify both replace path and replace prefix for the redirect action")
+ }
+ if gotReplacePath {
+ redirectAction.SetReplacePath(replacePath)
}
- if val, ok := readStr("replace_prefix"); ok {
- redirectAction.SetReplacePrefix(val)
+ if gotReplacePrefix {
+ redirectAction.SetReplacePrefix(replacePrefix)
}
if val, ok := readStr("response_code"); ok {
@@ -229,7 +249,7 @@ func expandALBRedirectAction(d *schema.ResourceData, path string) *apploadbalanc
redirectAction.ResponseCode = apploadbalancer.RedirectAction_RedirectResponseCode(code)
}
- return redirectAction
+ return redirectAction, nil
}
func expandALBHTTPRouteAction(d *schema.ResourceData, path string) (*apploadbalancer.HttpRouteAction, error) {
@@ -273,13 +293,19 @@ func expandALBHTTPRouteAction(d *schema.ResourceData, path string) (*apploadbala
}
routeAction.UpgradeTypes = upgradeTypes
}
+ hostRewrite, gotHostRewrite := readStr("host_rewrite")
+ autoHostRewrite, gotAutoHostRewrite := d.GetOk(path + "auto_host_rewrite")
- if val, ok := readStr("host_rewrite"); ok {
- routeAction.SetHostRewrite(val)
+ if isPlural(gotHostRewrite, gotAutoHostRewrite) {
+ return nil, fmt.Errorf("Cannot specify both host rewrite and auto host rewrite for the HTTP route action")
}
- if val, ok := d.GetOk(path + "auto_host_rewrite"); ok {
- routeAction.SetAutoHostRewrite(val.(bool))
+ if gotHostRewrite {
+ routeAction.SetHostRewrite(hostRewrite)
+ }
+
+ if gotAutoHostRewrite {
+ routeAction.SetAutoHostRewrite(autoHostRewrite.(bool))
}
return routeAction, nil
@@ -351,7 +377,7 @@ func expandALBGRPCRoute(d *schema.ResourceData, path string) (*apploadbalancer.G
_, gotGRPCRouteAction := d.GetOk(path + "grpc_route_action")
gRPCStatusResponseAction, gotGRPCStatusResponseAction := d.GetOk(path + "grpc_status_response_action")
- if gotGRPCRouteAction && gotGRPCStatusResponseAction {
+ if isPlural(gotGRPCRouteAction, gotGRPCStatusResponseAction) {
return nil, fmt.Errorf("Cannot specify both gRPC route action and gRPC status response action for the gRPC route")
}
if !gotGRPCRouteAction && !gotGRPCStatusResponseAction {
@@ -396,7 +422,7 @@ func expandALBStringMatch(d *schema.ResourceData, path string) (*apploadbalancer
exactMatch, gotExactMatch := d.GetOk(path + "exact")
prefixMatch, gotPrefixMatch := d.GetOk(path + "prefix")
- if gotExactMatch && gotPrefixMatch {
+ if isPlural(gotExactMatch, gotPrefixMatch) {
return nil, fmt.Errorf("Cannot specify both exact match and prefix match for the string match")
}
if !gotExactMatch && !gotPrefixMatch {
@@ -466,258 +492,320 @@ func expandALBListeners(d *schema.ResourceData) ([]*apploadbalancer.ListenerSpec
return listeners, nil
}
-func expandALBListener(d *schema.ResourceData, key string) (*apploadbalancer.ListenerSpec, error) {
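+// isPlural reports whether more than one of the given boolean flags is true.
+// It is used throughout this file to reject configurations that set mutually exclusive blocks or attributes.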
+func isPlural(values ...bool) bool {
+ n := 0
+ for _, value := range values {
+ if value {
+ n++
+ }
+ }
+ return n > 1
+}
+
+func expandALBListener(d *schema.ResourceData, path string) (*apploadbalancer.ListenerSpec, error) {
listener := &apploadbalancer.ListenerSpec{}
- if v, ok := d.GetOk(key + "name"); ok {
+ if v, ok := d.GetOk(path + "name"); ok {
listener.Name = v.(string)
}
- if v, ok := d.GetOk(key + "endpoint"); ok {
- listener.EndpointSpecs = expandALBEndpoints(v)
+ if _, ok := d.GetOk(path + "endpoint"); ok {
+ endpoints, err := expandALBEndpoints(d, path+"endpoint")
+ if err != nil {
+ return nil, err
+ }
+ listener.SetEndpointSpecs(endpoints)
}
- if conf, ok := getFirstElementConfigIfExists(d, key+"http"); ok {
- listener.SetHttp(expandALBHTTPListener(conf))
- }
+ _, gotHTTPListener := d.GetOk(path + "http.0")
+ _, gotStreamListener := d.GetOk(path + "stream.0")
+ _, gotTLSListener := d.GetOk(path + "tls.0")
- if conf, ok := getFirstElementConfigIfExists(d, key+"tls"); ok {
- listener.SetTls(expandALBTLSListener(conf))
+ if isPlural(gotHTTPListener, gotStreamListener, gotTLSListener) {
+ return nil, fmt.Errorf("Cannot specify more than one of HTTP listener and Stream listener and TLS listener for the ALB listener at the same time")
}
-
- if conf, ok := getFirstElementConfigIfExists(d, key+"stream"); ok {
- listener.SetStream(expandALBStreamListener(conf))
+ if !gotHTTPListener && !gotStreamListener && !gotTLSListener {
+ return nil, fmt.Errorf("Either HTTP listener or Stream listener or TLS listener should be specified for the ALB listener")
}
- return listener, nil
-}
-
-func getFirstElementConfigIfExists(d *schema.ResourceData, key string) (map[string]interface{}, bool) {
- if v, ok := d.GetOk(key); ok {
- arr := v.([]interface{})
- if len(arr) > 0 {
- var resultConfig map[string]interface{}
- if result := arr[0]; result != nil {
- resultConfig = result.(map[string]interface{})
- } else {
- resultConfig = map[string]interface{}{}
- }
- return resultConfig, true
+ if gotHTTPListener {
+ http, err := expandALBHTTPListener(d, path+"http.0.")
+ if err != nil {
+ return nil, err
}
+ listener.SetHttp(http)
}
- return nil, false
-}
-func getFirstElementConfig(config map[string]interface{}, key string) (map[string]interface{}, bool) {
- if v, ok := config[key]; ok {
- switch v := v.(type) {
- case []interface{}:
- if len(v) > 0 {
- var resultConfig map[string]interface{}
- if result := v[0]; result != nil {
- resultConfig = result.(map[string]interface{})
- } else {
- resultConfig = map[string]interface{}{}
- }
- return resultConfig, true
- }
- case []map[string]interface{}:
- if len(v) > 0 {
- if result := v[0]; result != nil {
- return result, true
- }
- }
+ if gotTLSListener {
+ tls, err := expandALBTLSListener(d, path+"tls.0.")
+ if err != nil {
+ return nil, err
}
+ listener.SetTls(tls)
}
- return nil, false
+
+ if gotStreamListener {
+ listener.SetStream(expandALBStreamListener(d, path+"stream.0."))
+ }
+
+ return listener, nil
}
-func expandALBTLSListener(config map[string]interface{}) *apploadbalancer.TlsListener {
+func expandALBTLSListener(d *schema.ResourceData, path string) (*apploadbalancer.TlsListener, error) {
tlsListener := &apploadbalancer.TlsListener{}
- if conf, ok := getFirstElementConfig(config, "default_handler"); ok {
- tlsListener.SetDefaultHandler(expandALBTLSHandler(conf))
+ if _, ok := d.GetOk(path + "default_handler.0"); ok {
+ handler, err := expandALBTLSHandler(d, path+"default_handler.0.")
+ if err != nil {
+ return nil, err
+ }
+ tlsListener.SetDefaultHandler(handler)
}
- if v, ok := config["sni_handler"]; ok {
- tlsListener.SniHandlers = expandALBSNIMatches(v)
+ if _, ok := d.GetOk(path + "sni_handler"); ok {
+ sniHandlers, err := expandALBSNIMatches(d, path+"sni_handler")
+ if err != nil {
+ return nil, err
+ }
+ tlsListener.SetSniHandlers(sniHandlers)
}
- return tlsListener
+ return tlsListener, nil
}
-func expandALBSNIMatches(v interface{}) []*apploadbalancer.SniMatch {
- var matches []*apploadbalancer.SniMatch
+func expandALBSNIMatch(d *schema.ResourceData, path string) (*apploadbalancer.SniMatch, error) {
+ match := &apploadbalancer.SniMatch{}
- if v != nil {
- matchSet := v.([]interface{})
+ if val, ok := d.GetOk(path + "name"); ok {
+ match.Name = val.(string)
+ }
- for _, h := range matchSet {
- match := &apploadbalancer.SniMatch{}
- config := h.(map[string]interface{})
+ if val, ok := d.GetOk(path + "server_names"); ok {
+ if serverNames, err := expandALBStringListFromSchemaSet(val); err == nil {
+ match.ServerNames = serverNames
+ }
+ }
- if val, ok := config["name"]; ok {
- match.Name = val.(string)
- }
+ if _, ok := d.GetOk(path + "handler.0"); ok {
+ handler, err := expandALBTLSHandler(d, path+"handler.0.")
+ if err != nil {
+ return nil, err
+ }
+ match.SetHandler(handler)
+ }
- if val, ok := config["server_names"]; ok {
- if serverNames, err := expandALBStringListFromSchemaSet(val); err == nil {
- match.ServerNames = serverNames
- }
- }
+ return match, nil
+}
- if val, ok := config["handler"]; ok {
- handlerConfig := val.([]interface{})
- if len(handlerConfig) == 1 {
- match.Handler = expandALBTLSHandler(handlerConfig[0].(map[string]interface{}))
- }
- }
+func expandALBSNIMatches(d *schema.ResourceData, path string) ([]*apploadbalancer.SniMatch, error) {
+ var matches []*apploadbalancer.SniMatch
- matches = append(matches, match)
+ for _, key := range IterateKeys(d, path) {
+ match, err := expandALBSNIMatch(d, key)
+ if err != nil {
+ return nil, err
}
+ matches = append(matches, match)
}
- return matches
+
+ return matches, nil
}
-func expandALBStreamListener(config map[string]interface{}) *apploadbalancer.StreamListener {
+func expandALBStreamListener(d *schema.ResourceData, path string) *apploadbalancer.StreamListener {
streamListener := &apploadbalancer.StreamListener{}
- if conf, ok := getFirstElementConfig(config, "handler"); ok {
- streamListener.Handler = expandALBStreamHandler(conf)
+ if _, ok := d.GetOk(path + "handler.0"); ok {
+ streamListener.Handler = expandALBStreamHandler(d, path+"handler.0.")
}
return streamListener
}
-func expandALBHTTPListener(config map[string]interface{}) *apploadbalancer.HttpListener {
+func expandALBHTTPListener(d *schema.ResourceData, path string) (*apploadbalancer.HttpListener, error) {
httpListener := &apploadbalancer.HttpListener{}
- if conf, ok := getFirstElementConfig(config, "handler"); ok {
- httpListener.Handler = expandALBHTTPHandler(conf)
+ if _, ok := d.GetOk(path + "handler.0"); ok {
+ handler, err := expandALBHTTPHandler(d, path+"handler.0.")
+ if err != nil {
+ return nil, err
+ }
+ httpListener.SetHandler(handler)
}
- if conf, ok := getFirstElementConfig(config, "redirects"); ok {
- if v, ok := conf["http_to_https"]; ok {
+ if _, ok := d.GetOk(path + "redirects.0"); ok {
+ currentKey := path + "redirects.0." + "http_to_https"
+ if v, ok := d.GetOk(currentKey); ok {
httpListener.Redirects = &apploadbalancer.Redirects{HttpToHttps: v.(bool)}
}
}
- return httpListener
+ return httpListener, nil
}
-func expandALBStreamHandler(config map[string]interface{}) *apploadbalancer.StreamHandler {
+func expandALBStreamHandler(d *schema.ResourceData, path string) *apploadbalancer.StreamHandler {
streamHandler := &apploadbalancer.StreamHandler{}
- if v, ok := config["backend_group_id"]; ok {
+ if v, ok := d.GetOk(path + "backend_group_id"); ok {
streamHandler.BackendGroupId = v.(string)
}
return streamHandler
}
-func expandALBHTTPHandler(config map[string]interface{}) *apploadbalancer.HttpHandler {
+func expandALBHTTPHandler(d *schema.ResourceData, path string) (*apploadbalancer.HttpHandler, error) {
httpHandler := &apploadbalancer.HttpHandler{}
- if v, ok := config["allow_http10"]; ok {
- httpHandler.SetAllowHttp10(v.(bool))
+ if v, ok := d.GetOk(path + "http_router_id"); ok {
+ httpHandler.HttpRouterId = v.(string)
}
- if v, ok := config["http_router_id"]; ok {
- httpHandler.HttpRouterId = v.(string)
+ allowHTTP10, gotAllowHTTP10 := d.GetOk(path + "allow_http10")
+ _, gotHTTP2Options := d.GetOk(path + "http2_options.0")
+
+ if isPlural(gotAllowHTTP10, gotHTTP2Options) {
+ return nil, fmt.Errorf("Cannot specify both allow HTTP 1.0 and HTTP 2 options for the HTTP Handler")
}
- if conf, ok := getFirstElementConfig(config, "http2_options"); ok {
+ if gotAllowHTTP10 {
+ httpHandler.SetAllowHttp10(allowHTTP10.(bool))
+ }
+
+ if gotHTTP2Options {
+ currentKey := path + "http2_options.0." + "max_concurrent_streams"
http2Options := &apploadbalancer.Http2Options{}
- if val, ok := conf["max_concurrent_streams"]; ok {
+ if val, ok := d.GetOk(currentKey); ok {
http2Options.MaxConcurrentStreams = int64(val.(int))
}
httpHandler.SetHttp2Options(http2Options)
}
- return httpHandler
+ return httpHandler, nil
}
-func expandALBTLSHandler(config map[string]interface{}) *apploadbalancer.TlsHandler {
+func expandALBTLSHandler(d *schema.ResourceData, path string) (*apploadbalancer.TlsHandler, error) {
tlsHandler := &apploadbalancer.TlsHandler{}
- if conf, ok := getFirstElementConfig(config, "http_handler"); ok {
- tlsHandler.SetHttpHandler(expandALBHTTPHandler(conf))
+ _, gotHTTPHandler := d.GetOk(path + "http_handler.0")
+ _, gotStreamHandler := d.GetOk(path + "stream_handler.0")
+
+ if isPlural(gotHTTPHandler, gotStreamHandler) {
+ return nil, fmt.Errorf("Cannot specify both HTTP handler and Stream handler for the TLS Handler")
+ }
+ if !gotHTTPHandler && !gotStreamHandler {
+ return nil, fmt.Errorf("Either HTTP handler or Stream handler should be specified for the TLS Handler")
+ }
+
+ if gotHTTPHandler {
+ handler, err := expandALBHTTPHandler(d, path+"http_handler.0.")
+ if err != nil {
+ return nil, err
+ }
+ tlsHandler.SetHttpHandler(handler)
}
- if conf, ok := getFirstElementConfig(config, "stream_handler"); ok {
- tlsHandler.SetStreamHandler(expandALBStreamHandler(conf))
+ if gotStreamHandler {
+ tlsHandler.SetStreamHandler(expandALBStreamHandler(d, path+"stream_handler.0."))
}
- if v, ok := config["certificate_ids"]; ok {
+ if v, ok := d.GetOk(path + "certificate_ids"); ok {
if certificateIDs, err := expandALBStringListFromSchemaSet(v); err == nil {
tlsHandler.CertificateIds = certificateIDs
}
}
- return tlsHandler
+ return tlsHandler, nil
}
+func expandALBEndpoint(d *schema.ResourceData, path string) (*apploadbalancer.EndpointSpec, error) {
+ endpoint := &apploadbalancer.EndpointSpec{}
-func expandALBEndpoints(v interface{}) []*apploadbalancer.EndpointSpec {
- var endpoints []*apploadbalancer.EndpointSpec
- if v != nil {
+ if _, ok := d.GetOk(path + "address"); ok {
+ address, err := expandALBEndpointAddresses(d, path+"address")
+ if err != nil {
+ return nil, err
+ }
+ endpoint.SetAddressSpecs(address)
+ }
- for _, h := range v.([]interface{}) {
- endpoint := &apploadbalancer.EndpointSpec{}
- config := h.(map[string]interface{})
+ if val, ok := d.GetOk(path + "ports"); ok {
+ if ports, err := expandALBInt64ListFromList(val); err == nil {
+ endpoint.Ports = ports
+ }
+ }
- if val, ok := config["address"]; ok {
- endpoint.AddressSpecs = expandALBEndpointAddresses(val)
- }
+ return endpoint, nil
+}
- if val, ok := config["ports"]; ok {
- if ports, err := expandALBInt64ListFromList(val); err == nil {
- endpoint.Ports = ports
- }
- }
+func expandALBEndpoints(d *schema.ResourceData, path string) ([]*apploadbalancer.EndpointSpec, error) {
+ var endpoints []*apploadbalancer.EndpointSpec
- endpoints = append(endpoints, endpoint)
+ for _, key := range IterateKeys(d, path) {
+ endpoint, err := expandALBEndpoint(d, key)
+ if err != nil {
+ return nil, err
}
+ endpoints = append(endpoints, endpoint)
}
- return endpoints
+
+ return endpoints, nil
}
-func expandALBEndpointAddresses(v interface{}) []*apploadbalancer.AddressSpec {
- var addresses []*apploadbalancer.AddressSpec
- if v != nil {
+func expandALBEndpointAddress(d *schema.ResourceData, path string) (*apploadbalancer.AddressSpec, error) {
+ endpointAddress := &apploadbalancer.AddressSpec{}
- for _, h := range v.([]interface{}) {
- elem := &apploadbalancer.AddressSpec{}
- elemConfig := h.(map[string]interface{})
+ _, gotExternalIPV4Address := d.GetOk(path + "external_ipv4_address.0")
+ _, gotInternalIPV4Address := d.GetOk(path + "internal_ipv4_address.0")
+ _, gotExternalIPV6Address := d.GetOk(path + "external_ipv6_address.0")
- if config, ok := getFirstElementConfig(elemConfig, "external_ipv4_address"); ok {
- address := &apploadbalancer.ExternalIpv4AddressSpec{}
- if value, ok := config["address"]; ok {
- address.Address = value.(string)
- }
- elem.SetExternalIpv4AddressSpec(address)
- }
+ if isPlural(gotExternalIPV4Address, gotInternalIPV4Address, gotExternalIPV6Address) {
+ return nil, fmt.Errorf("Cannot specify more than one of external ipv4 address and internal ipv4 address and external ipv6 address for the endpoint address at the same time")
+ }
+ if !gotExternalIPV4Address && !gotInternalIPV4Address && !gotExternalIPV6Address {
+		return nil, fmt.Errorf("Either external ipv4 address or internal ipv4 address or external ipv6 address should be specified for the endpoint address")
+ }
- if config, ok := getFirstElementConfig(elemConfig, "internal_ipv4_address"); ok {
- address := &apploadbalancer.InternalIpv4AddressSpec{}
- if value, ok := config["address"]; ok {
- address.Address = value.(string)
- }
- if value, ok := config["subnet_id"]; ok {
- address.SubnetId = value.(string)
- }
- elem.SetInternalIpv4AddressSpec(address)
- }
+ if gotExternalIPV4Address {
+ currentKey := path + "external_ipv4_address.0." + "address"
+ address := &apploadbalancer.ExternalIpv4AddressSpec{}
+ if value, ok := d.GetOk(currentKey); ok {
+ address.Address = value.(string)
+ }
+ endpointAddress.SetExternalIpv4AddressSpec(address)
+ }
- if config, ok := getFirstElementConfig(elemConfig, "external_ipv6_address"); ok {
- address := &apploadbalancer.ExternalIpv6AddressSpec{}
- if value, ok := config["address"]; ok {
- address.Address = value.(string)
- }
- elem.SetExternalIpv6AddressSpec(address)
- }
+ if gotInternalIPV4Address {
+ currentPath := path + "internal_ipv4_address.0."
+ address := &apploadbalancer.InternalIpv4AddressSpec{}
+ if value, ok := d.GetOk(currentPath + "address"); ok {
+ address.Address = value.(string)
+ }
+ if value, ok := d.GetOk(currentPath + "subnet_id"); ok {
+ address.SubnetId = value.(string)
+ }
+ endpointAddress.SetInternalIpv4AddressSpec(address)
+ }
- addresses = append(addresses, elem)
+ if gotExternalIPV6Address {
+ currentKey := path + "external_ipv6_address.0." + "address"
+ address := &apploadbalancer.ExternalIpv6AddressSpec{}
+ if value, ok := d.GetOk(currentKey); ok {
+ address.Address = value.(string)
}
+ endpointAddress.SetExternalIpv6AddressSpec(address)
}
- return addresses
+
+ return endpointAddress, nil
+}
+
+func expandALBEndpointAddresses(d *schema.ResourceData, path string) ([]*apploadbalancer.AddressSpec, error) {
+ var addresses []*apploadbalancer.AddressSpec
+
+ for _, key := range IterateKeys(d, path) {
+ address, err := expandALBEndpointAddress(d, key)
+ if err != nil {
+ return nil, err
+ }
+ addresses = append(addresses, address)
+ }
+
+ return addresses, nil
}
func expandALBHTTPBackends(d *schema.ResourceData) (*apploadbalancer.HttpBackendGroup, error) {
@@ -958,7 +1046,7 @@ func expandALBHTTPBackend(d *schema.ResourceData, key string) (*apploadbalancer.
switch {
case !haveTargetGroups && !haveStorageBucket:
return nil, fmt.Errorf("Either target_group_ids or storage_bucket should be specified for http backend")
- case haveTargetGroups && haveStorageBucket:
+ case isPlural(haveTargetGroups, haveStorageBucket):
return nil, fmt.Errorf("Cannot specify both target_group_ids and storage_bucket for http backend")
}
return backend, nil
@@ -1236,7 +1324,7 @@ func expandALBTarget(d *schema.ResourceData, key string) (*apploadbalancer.Targe
subnet, gotSubnet := d.GetOk(key + "subnet_id")
privateAddr, gotPrivateAddr := d.GetOk(key + "private_ipv4_address")
- if gotSubnet && gotPrivateAddr {
+ if isPlural(gotSubnet, gotPrivateAddr) {
return nil, fmt.Errorf("Cannot specify both subnet_id and private_ipv4_address for a target")
}
diff --git a/yandex/base_alb_test.go b/yandex/base_alb_test.go
index 598a35688..fac0d7203 100644
--- a/yandex/base_alb_test.go
+++ b/yandex/base_alb_test.go
@@ -486,6 +486,7 @@ resource "yandex_alb_load_balancer" "test-balancer" {
handler {
http_handler {
http_router_id = yandex_alb_http_router.test-router.id
+ allow_http10 = true
}
certificate_ids = ["{{.CertificateID}}"]
}
diff --git a/yandex/config.go b/yandex/config.go
index 7bf9e7027..51f94f0d3 100644
--- a/yandex/config.go
+++ b/yandex/config.go
@@ -48,6 +48,7 @@ type Config struct {
MaxRetries int
StorageEndpoint string
YMQEndpoint string
+ Region string
// These storage access keys are optional and only used when
// storage data/resource doesn't have own access keys explicitly specified.
diff --git a/yandex/data_source_yandex_kubernetes_node_group.go b/yandex/data_source_yandex_kubernetes_node_group.go
index 46eece2f5..538e18ae8 100644
--- a/yandex/data_source_yandex_kubernetes_node_group.go
+++ b/yandex/data_source_yandex_kubernetes_node_group.go
@@ -145,6 +145,54 @@ func dataSourceYandexKubernetesNodeGroup() *schema.Resource {
Set: schema.HashString,
Computed: true,
},
+ "ipv4_dns_records": {
+ Type: schema.TypeList,
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "dns_zone_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "ttl": {
+ Type: schema.TypeInt,
+ Computed: true,
+ },
+ "ptr": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "ipv6_dns_records": {
+ Type: schema.TypeList,
+ Computed: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "dns_zone_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "ttl": {
+ Type: schema.TypeInt,
+ Computed: true,
+ },
+ "ptr": {
+ Type: schema.TypeBool,
+ Computed: true,
+ },
+ },
+ },
+ },
},
},
},
@@ -182,6 +230,16 @@ func dataSourceYandexKubernetesNodeGroup() *schema.Resource {
},
},
},
+ "name": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "labels": {
+ Type: schema.TypeMap,
+ Computed: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
},
},
},
diff --git a/yandex/data_source_yandex_kubernetes_node_group_test.go b/yandex/data_source_yandex_kubernetes_node_group_test.go
index d212d7690..6ecc4be2c 100644
--- a/yandex/data_source_yandex_kubernetes_node_group_test.go
+++ b/yandex/data_source_yandex_kubernetes_node_group_test.go
@@ -63,7 +63,7 @@ func TestAccDataSourceKubernetesNodeGroupNetworkInterfaces_basic(t *testing.T) {
clusterResource := clusterInfoWithSecurityGroups("TestAccDataSourceKubernetesNodeGroupNetworkInterfaces_basic", true)
nodeResource := nodeGroupInfo(clusterResource.ClusterResourceName)
nodeResource.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, clusterResource.SecurityGroupName)
- nodeResourceFullName := nodeResource.ResourceFullName(true)
+ nodeResourceFullName := nodeResource.ResourceFullName(false)
var ng k8s.NodeGroup
@@ -216,6 +216,30 @@ func TestAccDataSourceKubernetesNodeGroup_containerRuntimeContainerd(t *testing.
})
}
+func TestAccDataSourceKubernetesNodeGroupIPv4DNSFQDN_basic(t *testing.T) {
+ clusterResource := clusterInfoWithSecurityGroups("TestAccDataSourceKubernetesNodeGroupIPv4DNSFQDN_basic", true)
+ nodeResource := nodeGroupInfoIPv4DNSFQDN(clusterResource.ClusterResourceName)
+ nodeResource.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, clusterResource.SecurityGroupName)
+ nodeResourceFullName := nodeResource.ResourceFullName(false)
+
+ var ng k8s.NodeGroup
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ CheckDestroy: testAccCheckKubernetesNodeGroupDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccDataSourceKubernetesNodeGroupConfig_basic(clusterResource, nodeResource),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckKubernetesNodeGroupExists(nodeResourceFullName, &ng),
+ checkNodeGroupAttributes(&ng, &nodeResource, false, false),
+ ),
+ },
+ },
+ })
+}
+
const dataNodeGroupConfigTemplate = `
data "yandex_kubernetes_node_group" "{{.NodeGroupResourceName}}" {
name = "${yandex_kubernetes_node_group.{{.NodeGroupResourceName}}.name}"
diff --git a/yandex/data_source_yandex_mdb_greenplum_cluster.go b/yandex/data_source_yandex_mdb_greenplum_cluster.go
index 9c05b9c28..006f4c61e 100644
--- a/yandex/data_source_yandex_mdb_greenplum_cluster.go
+++ b/yandex/data_source_yandex_mdb_greenplum_cluster.go
@@ -223,6 +223,37 @@ func dataSourceYandexMDBGreenplumCluster() *schema.Resource {
},
},
},
+ "pooler_config": {
+ Type: schema.TypeList,
+ Optional: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "pooling_mode": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "pool_size": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "pool_client_idle_timeout": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ },
+ },
+ },
+ "greenplum_config": {
+ Type: schema.TypeMap,
+ Optional: true,
+ Computed: true,
+ DiffSuppressFunc: generateMapSchemaDiffSuppressFunc(mdbGreenplumSettingsFieldsInfo),
+ ValidateFunc: generateMapSchemaValidateFunc(mdbGreenplumSettingsFieldsInfo),
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
},
}
}
@@ -244,7 +275,10 @@ func dataSourceYandexMDBGreenplumClusterRead(d *schema.ResourceData, meta interf
if err != nil {
return fmt.Errorf("failed to resolve data source Greenplum Cluster by name: %v", err)
}
+
+ d.Set("cluster_id", clusterID)
}
+
cluster, err := config.sdk.MDB().Greenplum().Cluster().Get(ctx, &greenplum.GetClusterRequest{
ClusterId: clusterID,
})
@@ -252,101 +286,6 @@ func dataSourceYandexMDBGreenplumClusterRead(d *schema.ResourceData, meta interf
return handleNotFoundError(err, d, fmt.Sprintf("Cluster %q", d.Get("name").(string)))
}
- d.Set("folder_id", cluster.GetFolderId())
- d.Set("cluster_id", cluster.Id)
- d.Set("name", cluster.GetName())
- d.Set("description", cluster.GetDescription())
- d.Set("environment", cluster.GetEnvironment().String())
- d.Set("network_id", cluster.GetNetworkId())
- d.Set("health", cluster.GetHealth().String())
- d.Set("status", cluster.GetStatus().String())
- d.Set("version", cluster.GetConfig().GetVersion())
-
- d.Set("zone", cluster.GetConfig().ZoneId)
- d.Set("subnet_id", cluster.GetConfig().SubnetId)
- d.Set("assign_public_ip", cluster.GetConfig().AssignPublicIp)
- d.Set("version", cluster.GetConfig().Version)
-
- d.Set("master_host_count", cluster.GetMasterHostCount())
- d.Set("segment_host_count", cluster.GetSegmentHostCount())
- d.Set("segment_in_host", cluster.GetSegmentInHost())
-
- d.Set("user_name", cluster.GetUserName())
-
- masterSubcluster := map[string]interface{}{}
- masterResources := map[string]interface{}{}
- masterResources["resource_preset_id"] = cluster.GetMasterConfig().Resources.ResourcePresetId
- masterResources["disk_type_id"] = cluster.GetMasterConfig().Resources.DiskTypeId
- masterResources["disk_size"] = toGigabytes(cluster.GetMasterConfig().Resources.DiskSize)
- masterSubcluster["resources"] = []map[string]interface{}{masterResources}
- d.Set("master_subcluster", []map[string]interface{}{masterSubcluster})
-
- segmentSubcluster := map[string]interface{}{}
- segmentResources := map[string]interface{}{}
- segmentResources["resource_preset_id"] = cluster.GetMasterConfig().Resources.ResourcePresetId
- segmentResources["disk_type_id"] = cluster.GetMasterConfig().Resources.DiskTypeId
- segmentResources["disk_size"] = toGigabytes(cluster.GetMasterConfig().Resources.DiskSize)
- segmentSubcluster["resources"] = []map[string]interface{}{segmentResources}
- d.Set("segment_subcluster", []map[string]interface{}{segmentSubcluster})
-
- if cluster.Labels == nil {
- if err = d.Set("labels", make(map[string]string)); err != nil {
- return err
- }
- } else if err = d.Set("labels", cluster.Labels); err != nil {
- return err
- }
-
- if cluster.SecurityGroupIds == nil {
- if err = d.Set("security_group_ids", make([]string, 0)); err != nil {
- return err
- }
- } else if err = d.Set("security_group_ids", cluster.SecurityGroupIds); err != nil {
- return err
- }
-
- masterHosts, err := listGreenplumMasterHosts(ctx, config, cluster.GetId())
- if err != nil {
- return err
- }
- mHost := make([]map[string]interface{}, 0, len(masterHosts))
- for _, h := range masterHosts {
- mHost = append(mHost, map[string]interface{}{"fqdn": h.Name, "assign_public_ip": h.AssignPublicIp})
- }
- if err = d.Set("master_hosts", mHost); err != nil {
- return err
- }
-
- segmentHosts, err := listGreenplumSegmentHosts(ctx, config, cluster.GetId())
- if err != nil {
- return err
- }
- sHost := make([]map[string]interface{}, 0, len(segmentHosts))
- for _, h := range segmentHosts {
- sHost = append(sHost, map[string]interface{}{"fqdn": h.Name})
- }
- if err = d.Set("segment_hosts", sHost); err != nil {
- return err
- }
-
- d.Set("deletion_protection", cluster.DeletionProtection)
-
- accessElement := map[string]interface{}{}
- if cluster.Config != nil && cluster.Config.Access != nil {
- accessElement["data_lens"] = cluster.Config.Access.DataLens
- accessElement["web_sql"] = cluster.Config.Access.WebSql
- }
- d.Set("access", []map[string]interface{}{accessElement})
-
- bwsElement := map[string]interface{}{}
- if cluster.Config != nil && cluster.Config.BackupWindowStart != nil {
- bwsElement["hours"] = cluster.Config.BackupWindowStart.Hours
- bwsElement["minutes"] = cluster.Config.BackupWindowStart.Minutes
- }
- d.Set("backup_window_start", []map[string]interface{}{bwsElement})
-
- d.Set("created_at", getTimestamp(cluster.CreatedAt))
-
d.SetId(cluster.Id)
- return nil
+ return resourceYandexMDBGreenplumClusterRead(d, meta)
}
diff --git a/yandex/data_source_yandex_mdb_greenplum_cluster_test.go b/yandex/data_source_yandex_mdb_greenplum_cluster_test.go
index 7e5a4b0c3..b7d6e7f7e 100644
--- a/yandex/data_source_yandex_mdb_greenplum_cluster_test.go
+++ b/yandex/data_source_yandex_mdb_greenplum_cluster_test.go
@@ -13,7 +13,7 @@ func TestAccDataSourceMDBGreenplumCluster_byID(t *testing.T) {
t.Parallel()
greenplumName := acctest.RandomWithPrefix("ds-greenplum-by-id")
- greenplumDesc := "Greenplum Cluster Terraform Datasource Test"
+ greenplumDescription := "Greenplum Cluster Terraform Datasource Test"
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -21,10 +21,10 @@ func TestAccDataSourceMDBGreenplumCluster_byID(t *testing.T) {
CheckDestroy: testAccCheckMDBGreenplumClusterDestroy,
Steps: []resource.TestStep{
{
- Config: testAccDataSourceMDBGreenplumClusterConfig(greenplumName, greenplumDesc, true),
+ Config: testAccDataSourceMDBGreenplumClusterConfig(greenplumName, greenplumDescription, true),
Check: testAccDataSourceMDBGreenplumClusterCheck(
"data.yandex_mdb_greenplum_cluster.bar",
- "yandex_mdb_greenplum_cluster.foo", greenplumName, greenplumDesc),
+ "yandex_mdb_greenplum_cluster.foo", greenplumName, greenplumDescription),
},
},
})
@@ -130,6 +130,26 @@ func testAccDataSourceMDBGreenplumClusterAttributesCheck(datasourceName string,
"deletion_protection",
"deletion_protection",
},
+ {
+ "pooler_config.0.pooling_mode",
+ "pooler_config.0.pooling_mode",
+ },
+ {
+ "pooler_config.0.pool_size",
+ "pooler_config.0.pool_size",
+ },
+ {
+ "pooler_config.0.pool_client_idle_timeout",
+ "pooler_config.0.pool_client_idle_timeout",
+ },
+ {
+ "access.#",
+ "access.#",
+ },
+ {
+ "access.0.data_lens",
+ "access.0.data_lens",
+ },
}
for _, attrToCheck := range instanceAttrsToTest {
@@ -183,10 +203,10 @@ data "yandex_mdb_greenplum_cluster" "bar" {
}
`
-func testAccDataSourceMDBGreenplumClusterConfig(greenplumName, greenplumDesc string, useDataID bool) string {
+func testAccDataSourceMDBGreenplumClusterConfig(greenplumName, greenplumDescription string, useDataID bool) string {
if useDataID {
- return testAccMDBGreenplumClusterConfigMain(greenplumName, greenplumDesc, "PRESTABLE", false) + mdbGreenplumClusterByIDConfig
+ return testAccMDBGreenplumClusterConfigStep1(greenplumName, greenplumDescription) + mdbGreenplumClusterByIDConfig
}
- return testAccMDBGreenplumClusterConfigMain(greenplumName, greenplumDesc, "PRESTABLE", false) + mdbGreenplumClusterByNameConfig
+ return testAccMDBGreenplumClusterConfigStep1(greenplumName, greenplumDescription) + mdbGreenplumClusterByNameConfig
}
diff --git a/yandex/data_source_yandex_mdb_kafka_cluster_test.go b/yandex/data_source_yandex_mdb_kafka_cluster_test.go
index 45698f7ac..b0c74a249 100644
--- a/yandex/data_source_yandex_mdb_kafka_cluster_test.go
+++ b/yandex/data_source_yandex_mdb_kafka_cluster_test.go
@@ -64,7 +64,7 @@ func TestAccDataSourceMDBKafkaClusterAndTopic(t *testing.T) {
resource.TestCheckResourceAttr(topicDatasource, "partitions", "1"),
resource.TestCheckResourceAttr(topicDatasource, "replication_factor", "1"),
resource.TestCheckResourceAttr(topicDatasource, "topic_config.0.cleanup_policy", "CLEANUP_POLICY_COMPACT_AND_DELETE"),
- resource.TestCheckResourceAttr(topicDatasource, "topic_config.0.max_message_bytes", "16777216"),
+ resource.TestCheckResourceAttr(topicDatasource, "topic_config.0.max_message_bytes", "777216"),
resource.TestCheckResourceAttr(topicDatasource, "topic_config.0.segment_bytes", "134217728"),
resource.TestCheckResourceAttr(topicDatasource, "topic_config.0.flush_ms", "9223372036854775807"),
),
diff --git a/yandex/data_source_yandex_mdb_redis_cluster.go b/yandex/data_source_yandex_mdb_redis_cluster.go
index 8e7095ba4..35caa2e1b 100644
--- a/yandex/data_source_yandex_mdb_redis_cluster.go
+++ b/yandex/data_source_yandex_mdb_redis_cluster.go
@@ -62,6 +62,14 @@ func dataSourceYandexMDBRedisCluster() *schema.Resource {
Type: schema.TypeInt,
Computed: true,
},
+ "client_output_buffer_limit_normal": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "client_output_buffer_limit_pubsub": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
"databases": {
Type: schema.TypeInt,
Computed: true,
@@ -115,6 +123,15 @@ func dataSourceYandexMDBRedisCluster() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
+ "replica_priority": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: defaultReplicaPriority,
+ },
+ "assign_public_ip": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
},
},
},
@@ -250,13 +267,15 @@ func dataSourceYandexMDBRedisClusterRead(d *schema.ResourceData, meta interface{
conf := extractRedisConfig(cluster.Config)
err = d.Set("config", []map[string]interface{}{
{
- "timeout": conf.timeout,
- "maxmemory_policy": conf.maxmemoryPolicy,
- "version": conf.version,
- "notify_keyspace_events": conf.notifyKeyspaceEvents,
- "slowlog_log_slower_than": conf.slowlogLogSlowerThan,
- "slowlog_max_len": conf.slowlogMaxLen,
- "databases": conf.databases,
+ "timeout": conf.timeout,
+ "maxmemory_policy": conf.maxmemoryPolicy,
+ "version": conf.version,
+ "notify_keyspace_events": conf.notifyKeyspaceEvents,
+ "slowlog_log_slower_than": conf.slowlogLogSlowerThan,
+ "slowlog_max_len": conf.slowlogMaxLen,
+ "databases": conf.databases,
+ "client_output_buffer_limit_normal": conf.clientOutputBufferLimitNormal,
+ "client_output_buffer_limit_pubsub": conf.clientOutputBufferLimitPubsub,
},
})
if err != nil {
@@ -268,7 +287,7 @@ func dataSourceYandexMDBRedisClusterRead(d *schema.ResourceData, meta interface{
return err
}
- hs, err := flattenRedisHosts(hosts)
+ hs, err := flattenRedisHosts(cluster.Sharded, hosts)
if err != nil {
return err
}
diff --git a/yandex/data_source_yandex_mdb_redis_cluster_test.go b/yandex/data_source_yandex_mdb_redis_cluster_test.go
index 4b8af9456..a40287b9a 100644
--- a/yandex/data_source_yandex_mdb_redis_cluster_test.go
+++ b/yandex/data_source_yandex_mdb_redis_cluster_test.go
@@ -93,6 +93,8 @@ func testAccDataSourceMDBRedisClusterAttributesCheck(datasourceName string, reso
"config.0.notify_keyspace_events",
"config.0.slowlog_log_slower_than",
"config.0.slowlog_max_len",
+ "config.0.client_output_buffer_limit_normal",
+ "config.0.client_output_buffer_limit_pubsub",
"config.0.databases",
"config.0.version",
"security_group_ids",
@@ -143,6 +145,8 @@ func testAccDataSourceMDBRedisClusterCheck(datasourceName string, resourceName s
resource.TestCheckResourceAttr(datasourceName, "persistence_mode", persistenceModeStr),
resource.TestCheckResourceAttr(datasourceName, "host.#", "1"),
resource.TestCheckResourceAttrSet(datasourceName, "host.0.fqdn"),
+ resource.TestCheckResourceAttr(datasourceName, "host.0.replica_priority", fmt.Sprintf("%d", defaultReplicaPriority)),
+ resource.TestCheckResourceAttr(datasourceName, "host.0.assign_public_ip", "false"),
testAccCheckCreatedAtAttr(datasourceName),
resource.TestCheckResourceAttr(datasourceName, "security_group_ids.#", "1"),
resource.TestCheckResourceAttr(datasourceName, "maintenance_window.0.type", "WEEKLY"),
@@ -168,9 +172,11 @@ func testAccDataSourceMDBRedisClusterConfig(redisName, redisDesc string, tlsEnab
useDataID bool) string {
if useDataID {
return testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRESTABLE", false,
- tlsEnabled, persistenceMode, version, "hm1.nano", 16, "") + mdbRedisClusterByIDConfig
+ tlsEnabled, persistenceMode, version, "hm1.nano", 16, "", "", "",
+ []*bool{nil}, []*int{nil}) + mdbRedisClusterByIDConfig
}
return testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRESTABLE", false,
- tlsEnabled, persistenceMode, version, "hm1.nano", 16, "") + mdbRedisClusterByNameConfig
+ tlsEnabled, persistenceMode, version, "hm1.nano", 16, "", "", "",
+ []*bool{nil}, []*int{nil}) + mdbRedisClusterByNameConfig
}
diff --git a/yandex/data_source_yandex_mdb_sqlserver_cluster.go b/yandex/data_source_yandex_mdb_sqlserver_cluster.go
index 475990d97..881631890 100644
--- a/yandex/data_source_yandex_mdb_sqlserver_cluster.go
+++ b/yandex/data_source_yandex_mdb_sqlserver_cluster.go
@@ -200,6 +200,11 @@ func dataSourceYandexMDBSQLServerCluster() *schema.Resource {
Computed: true,
Optional: true,
},
+ "sqlcollation": {
+ Type: schema.TypeString,
+ Computed: true,
+ Optional: true,
+ },
},
}
}
@@ -312,6 +317,7 @@ func dataSourceYandexMDBSQLServerClusterRead(d *schema.ResourceData, meta interf
}
d.Set("deletion_protection", cluster.DeletionProtection)
+ d.Set("sqlcollation", cluster.Sqlcollation)
d.Set("created_at", getTimestamp(cluster.CreatedAt))
d.SetId(cluster.Id)
diff --git a/yandex/data_source_yandex_mdb_sqlserver_cluster_test.go b/yandex/data_source_yandex_mdb_sqlserver_cluster_test.go
index de1a20b58..9a9d7a7ee 100644
--- a/yandex/data_source_yandex_mdb_sqlserver_cluster_test.go
+++ b/yandex/data_source_yandex_mdb_sqlserver_cluster_test.go
@@ -181,6 +181,10 @@ func testAccDataSourceMDBSQLServerClusterAttributesCheck(datasourceName string,
"deletion_protection",
"deletion_protection",
},
+ {
+ "sqlcollation",
+ "sqlcollation",
+ },
}
for _, attrToCheck := range instanceAttrsToTest {
@@ -223,6 +227,7 @@ func testAccDataSourceMDBSQLServerClusterCheck(datasourceName string, resourceNa
testAccCheckCreatedAtAttr(datasourceName),
resource.TestCheckResourceAttr(datasourceName, "security_group_ids.#", "1"),
resource.TestCheckResourceAttr(datasourceName, "deletion_protection", "false"),
+ resource.TestCheckResourceAttr(datasourceName, "sqlcollation", "Cyrillic_General_CI_AI"),
)
}
diff --git a/yandex/data_source_yandex_message_queue.go b/yandex/data_source_yandex_message_queue.go
index 42167a293..d89b7cff6 100644
--- a/yandex/data_source_yandex_message_queue.go
+++ b/yandex/data_source_yandex_message_queue.go
@@ -30,6 +30,10 @@ func dataSourceYandexMessageQueue() *schema.Resource {
Optional: true,
Sensitive: true,
},
+ "region_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
// Computed
"arn": {
diff --git a/yandex/iam_binding.go b/yandex/iam_binding.go
index 7400b614f..81c2be396 100644
--- a/yandex/iam_binding.go
+++ b/yandex/iam_binding.go
@@ -37,7 +37,7 @@ var accessBindingSchema = map[string]*schema.Schema{
func resourceAccessBinding(parentSpecificSchema map[string]*schema.Schema, newUpdaterFunc newResourceIamUpdaterFunc) *schema.Resource {
return &schema.Resource{
Create: resourceAccessBindingCreate(newUpdaterFunc),
- Read: resourceAccessBindingRead(newUpdaterFunc),
+ Read: resourceAccessBindingRead(newUpdaterFunc, true),
Update: resourceAccessBindingUpdate(newUpdaterFunc),
Delete: resourceAccessBindingDelete(newUpdaterFunc),
Schema: mergeSchemas(accessBindingSchema, parentSpecificSchema),
@@ -80,11 +80,11 @@ func resourceAccessBindingCreate(newUpdaterFunc newResourceIamUpdaterFunc) schem
time.Sleep(time.Second * time.Duration(v.(int)))
}
- return resourceAccessBindingRead(newUpdaterFunc)(d, meta)
+ return resourceAccessBindingRead(newUpdaterFunc, true)(d, meta)
}
}
-func resourceAccessBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.ReadFunc {
+func resourceAccessBindingRead(newUpdaterFunc newResourceIamUpdaterFunc, check bool) schema.ReadFunc {
return func(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
updater, err := newUpdaterFunc(d, config)
@@ -122,14 +122,13 @@ func resourceAccessBindingRead(newUpdaterFunc newResourceIamUpdaterFunc) schema.
}
}
- if len(mBindings) == 0 {
+ if check && len(mBindings) == 0 {
return fmt.Errorf("Binding for role %q not found in policy for %s.", role, updater.DescribeResource())
}
if err := d.Set("members", roleToMembersList(role, mBindings)); err != nil {
return err
}
- d.Set("role", role)
return nil
}
}
@@ -154,7 +153,7 @@ func resourceAccessBindingUpdate(newUpdaterFunc newResourceIamUpdaterFunc) schem
return err
}
- return resourceAccessBindingRead(newUpdaterFunc)(d, meta)
+ return resourceAccessBindingRead(newUpdaterFunc, true)(d, meta)
}
}
@@ -182,7 +181,7 @@ func resourceAccessBindingDelete(newUpdaterFunc newResourceIamUpdaterFunc) schem
return err
}
- return resourceAccessBindingRead(newUpdaterFunc)(d, meta)
+ return resourceAccessBindingRead(newUpdaterFunc, false)(d, meta)
}
}
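For context, the generic resourceAccessBinding helper above is what concrete IAM binding resources are built from. A minimal sketch, using the schema and updater introduced in the new serverless container file that follows; the provider's actual constructor wiring may differ:

func resourceYandexServerlessContainerIAMBindingSketch() *schema.Resource {
	// Wire the container-specific schema and updater into the generic access
	// binding resource; illustration only.
	return resourceAccessBinding(IamServerlessContainerSchema, newServerlessContainerIamUpdater)
}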
diff --git a/yandex/iam_serverless_container.go b/yandex/iam_serverless_container.go
new file mode 100644
index 000000000..02fee5bac
--- /dev/null
+++ b/yandex/iam_serverless_container.go
@@ -0,0 +1,107 @@
+package yandex
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/yandex-cloud/go-genproto/yandex/cloud/access"
+)
+
+const yandexIAMServerlessContainerDefaultTimeout = 1 * time.Minute
+
+var IamServerlessContainerSchema = map[string]*schema.Schema{
+ "container_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+}
+
+type ServerlessContainerIamUpdater struct {
+ containerID string
+ Config *Config
+}
+
+func newServerlessContainerIamUpdater(d *schema.ResourceData, config *Config) (ResourceIamUpdater, error) {
+ return &ServerlessContainerIamUpdater{
+ containerID: d.Get("container_id").(string),
+ Config: config,
+ }, nil
+}
+
+func serverlessContainerIDParseFunc(d *schema.ResourceData, _ *Config) error {
+ d.Set("container_id", d.Id())
+ return nil
+}
+
+func (u *ServerlessContainerIamUpdater) GetResourceIamPolicy() (*Policy, error) {
+ bindings, err := getServerlessContainerAccessBindings(u.Config, u.GetResourceID())
+ if err != nil {
+ return nil, err
+ }
+ return &Policy{bindings}, nil
+}
+
+func (u *ServerlessContainerIamUpdater) SetResourceIamPolicy(policy *Policy) error {
+ req := &access.SetAccessBindingsRequest{
+ ResourceId: u.containerID,
+ AccessBindings: policy.Bindings,
+ }
+
+ ctx, cancel := context.WithTimeout(u.Config.Context(), yandexIAMServerlessContainerDefaultTimeout)
+ defer cancel()
+
+ op, err := u.Config.sdk.WrapOperation(u.Config.sdk.Serverless().Containers().Container().SetAccessBindings(ctx, req))
+ if err != nil {
+ return fmt.Errorf("Error setting IAM policy for %s: %s", u.DescribeResource(), err)
+ }
+
+ err = op.Wait(ctx)
+ if err != nil {
+ return fmt.Errorf("Error setting IAM policy for %s: %s", u.DescribeResource(), err)
+ }
+
+ return nil
+}
+
+func (u *ServerlessContainerIamUpdater) GetResourceID() string {
+ return u.containerID
+}
+
+func (u *ServerlessContainerIamUpdater) GetMutexKey() string {
+ return fmt.Sprintf("iam-container-%s", u.containerID)
+}
+
+func (u *ServerlessContainerIamUpdater) DescribeResource() string {
+ return fmt.Sprintf("container '%s'", u.containerID)
+}
+
+func getServerlessContainerAccessBindings(config *Config, containerID string) ([]*access.AccessBinding, error) {
+ bindings := []*access.AccessBinding{}
+ pageToken := ""
+ ctx := config.Context()
+
+ for {
+ resp, err := config.sdk.Serverless().Containers().Container().ListAccessBindings(ctx, &access.ListAccessBindingsRequest{
+ ResourceId: containerID,
+ PageSize: defaultListSize,
+ PageToken: pageToken,
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("error retrieving IAM access bindings for container %s: %s", containerID, err)
+ }
+
+ bindings = append(bindings, resp.AccessBindings...)
+
+ if resp.NextPageToken == "" {
+ break
+ }
+
+ pageToken = resp.NextPageToken
+ }
+ return bindings, nil
+}
diff --git a/yandex/iam_ydb_database.go b/yandex/iam_ydb_database.go
new file mode 100644
index 000000000..459162ed1
--- /dev/null
+++ b/yandex/iam_ydb_database.go
@@ -0,0 +1,107 @@
+package yandex
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ "github.com/yandex-cloud/go-genproto/yandex/cloud/access"
+)
+
+const yandexIAMYDBDefaultTimeout = 1 * time.Minute
+
+var IamYDBDatabaseSchema = map[string]*schema.Schema{
+ "database_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+}
+
+type YDBDatabaseIamUpdater struct {
+ databaseID string
+ Config *Config
+}
+
+func newYDBDatabaseIamUpdater(d *schema.ResourceData, config *Config) (ResourceIamUpdater, error) {
+ return &YDBDatabaseIamUpdater{
+ databaseID: d.Get("database_id").(string),
+ Config: config,
+ }, nil
+}
+
+func ydbDatabaseIDParseFunc(d *schema.ResourceData, _ *Config) error {
+ d.Set("database_id", d.Id())
+ return nil
+}
+
+func (u *YDBDatabaseIamUpdater) GetResourceIamPolicy() (*Policy, error) {
+ bindings, err := getYDBDatabaseAccessBindings(u.Config, u.GetResourceID())
+ if err != nil {
+ return nil, err
+ }
+ return &Policy{bindings}, nil
+}
+
+func (u *YDBDatabaseIamUpdater) SetResourceIamPolicy(policy *Policy) error {
+ req := &access.SetAccessBindingsRequest{
+ ResourceId: u.databaseID,
+ AccessBindings: policy.Bindings,
+ }
+
+ ctx, cancel := context.WithTimeout(u.Config.Context(), yandexIAMYDBDefaultTimeout)
+ defer cancel()
+
+ op, err := u.Config.sdk.WrapOperation(u.Config.sdk.YDB().Database().SetAccessBindings(ctx, req))
+ if err != nil {
+ return fmt.Errorf("Error setting IAM policy for %s: %s", u.DescribeResource(), err)
+ }
+
+ err = op.Wait(ctx)
+ if err != nil {
+ return fmt.Errorf("Error setting IAM policy for %s: %s", u.DescribeResource(), err)
+ }
+
+ return nil
+}
+
+func (u *YDBDatabaseIamUpdater) GetResourceID() string {
+ return u.databaseID
+}
+
+func (u *YDBDatabaseIamUpdater) GetMutexKey() string {
+ return fmt.Sprintf("iam-ydb-database-%s", u.databaseID)
+}
+
+func (u *YDBDatabaseIamUpdater) DescribeResource() string {
+ return fmt.Sprintf("YDB Database '%s'", u.databaseID)
+}
+
+func getYDBDatabaseAccessBindings(config *Config, databaseID string) ([]*access.AccessBinding, error) {
+ bindings := []*access.AccessBinding{}
+ pageToken := ""
+ ctx := config.Context()
+
+ for {
+ resp, err := config.sdk.YDB().Database().ListAccessBindings(ctx, &access.ListAccessBindingsRequest{
+ ResourceId: databaseID,
+ PageSize: defaultListSize,
+ PageToken: pageToken,
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving IAM access bindings for YDB Database %s: %s", databaseID, err)
+ }
+
+ bindings = append(bindings, resp.AccessBindings...)
+
+ if resp.NextPageToken == "" {
+ break
+ }
+
+ pageToken = resp.NextPageToken
+ }
+ return bindings, nil
+}
diff --git a/yandex/mdb_greenplum_structures.go b/yandex/mdb_greenplum_structures.go
index aab47339b..4278ecd23 100644
--- a/yandex/mdb_greenplum_structures.go
+++ b/yandex/mdb_greenplum_structures.go
@@ -2,8 +2,12 @@ package yandex
import (
"fmt"
+ "log"
+ "github.com/golang/protobuf/ptypes/wrappers"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1"
+ "google.golang.org/genproto/googleapis/type/timeofday"
)
func parseGreenplumEnv(e string) (greenplum.Cluster_Environment, error) {
@@ -14,3 +18,248 @@ func parseGreenplumEnv(e string) (greenplum.Cluster_Environment, error) {
}
return greenplum.Cluster_Environment(v), nil
}
+
+func getGreenplumConfigFieldName(version string) string {
+ if version == "6.17" {
+ return "greenplum_config_6_17"
+ }
+ return "greenplum_config_6_19"
+}
+
+func flattenGreenplumMasterSubcluster(r *greenplum.Resources) []map[string]interface{} {
+ subcluster := map[string]interface{}{}
+ resources := map[string]interface{}{}
+ resources["resource_preset_id"] = r.ResourcePresetId
+ resources["disk_type_id"] = r.DiskTypeId
+ resources["disk_size"] = toGigabytes(r.DiskSize)
+ subcluster["resources"] = []map[string]interface{}{resources}
+ return []map[string]interface{}{subcluster}
+}
+
+func flattenGreenplumSegmentSubcluster(r *greenplum.Resources) []map[string]interface{} {
+ subcluster := map[string]interface{}{}
+ resources := map[string]interface{}{}
+ resources["resource_preset_id"] = r.ResourcePresetId
+ resources["disk_type_id"] = r.DiskTypeId
+ resources["disk_size"] = toGigabytes(r.DiskSize)
+ subcluster["resources"] = []map[string]interface{}{resources}
+ return []map[string]interface{}{subcluster}
+}
+
+func flattenGreenplumHosts(masterHosts, segmentHosts []*greenplum.Host) ([]map[string]interface{}, []map[string]interface{}) {
+ mHost := make([]map[string]interface{}, 0, len(masterHosts))
+ for _, h := range masterHosts {
+ mHost = append(mHost, map[string]interface{}{"fqdn": h.Name, "assign_public_ip": h.AssignPublicIp})
+ }
+
+ sHost := make([]map[string]interface{}, 0, len(segmentHosts))
+ for _, h := range segmentHosts {
+ sHost = append(sHost, map[string]interface{}{"fqdn": h.Name})
+ }
+
+ return mHost, sHost
+}
+
+func flattenGreenplumAccess(c *greenplum.GreenplumConfig) []map[string]interface{} {
+ out := map[string]interface{}{}
+ if c != nil && c.Access != nil {
+ out["data_lens"] = c.Access.DataLens
+ out["web_sql"] = c.Access.WebSql
+ }
+ return []map[string]interface{}{out}
+}
+
+func flattenBackupWindowsStart(c *greenplum.GreenplumConfig) []map[string]interface{} {
+ out := map[string]interface{}{}
+ if c != nil && c.BackupWindowStart != nil {
+ out["hours"] = c.BackupWindowStart.Hours
+ out["minutes"] = c.BackupWindowStart.Minutes
+ }
+ return []map[string]interface{}{out}
+}
+
+func flattenGreenplumClusterConfig(c *greenplum.ClusterConfigSet) (map[string]string, error) {
+ var gpConfig interface{}
+
+ if cf, ok := c.GreenplumConfig.(*greenplum.ClusterConfigSet_GreenplumConfigSet_6_17); ok {
+ gpConfig = cf.GreenplumConfigSet_6_17.UserConfig
+ }
+ if cf, ok := c.GreenplumConfig.(*greenplum.ClusterConfigSet_GreenplumConfigSet_6_19); ok {
+ gpConfig = cf.GreenplumConfigSet_6_19.UserConfig
+ }
+
+ settings, err := flattenResourceGenerateMapS(gpConfig, false, mdbGreenplumSettingsFieldsInfo, false, true, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return settings, nil
+}
+
+func flattenGreenplumPoolerConfig(c *greenplum.ConnectionPoolerConfigSet) ([]interface{}, error) {
+ if c == nil {
+ return nil, nil
+ }
+
+ out := map[string]interface{}{}
+
+ out["pooling_mode"] = c.EffectiveConfig.GetMode().String()
+ out["pool_size"] = c.EffectiveConfig.GetSize().GetValue()
+ out["pool_client_idle_timeout"] = c.EffectiveConfig.GetClientIdleTimeout().GetValue()
+
+ return []interface{}{out}, nil
+}
+
+func expandGreenplumBackupWindowStart(d *schema.ResourceData) *timeofday.TimeOfDay {
+ out := &timeofday.TimeOfDay{}
+
+ if v, ok := d.GetOk("backup_window_start.0.hours"); ok {
+ out.Hours = int32(v.(int))
+ }
+
+ if v, ok := d.GetOk("backup_window_start.0.minutes"); ok {
+ out.Minutes = int32(v.(int))
+ }
+
+ return out
+}
+
+func expandGreenplumAccess(d *schema.ResourceData) *greenplum.Access {
+ if _, ok := d.GetOkExists("access"); !ok {
+ return nil
+ }
+
+ out := &greenplum.Access{}
+
+ if v, ok := d.GetOk("access.0.data_lens"); ok {
+ out.DataLens = v.(bool)
+ }
+
+ if v, ok := d.GetOk("access.0.web_sql"); ok {
+ out.WebSql = v.(bool)
+ }
+
+ return out
+}
+
+func expandGreenplumUpdatePath(d *schema.ResourceData, settingNames []string) []string {
+ mdbGreenplumUpdateFieldsMap := map[string]string{
+ "name": "name",
+ "description": "description",
+ "labels": "labels",
+ "access.0.data_lens": "config.access.data_lens",
+ "access.0.web_sql": "config.access.web_sql",
+ "backup_window_start": "config.backup_window_start",
+ "deletion_protection": "deletion_protection",
+ "security_group_ids": "security_group_ids",
+ "pooler_config.0.pooling_mode": "config_spec.pool.mode",
+ "pooler_config.0.pool_size": "config_spec.pool.size",
+ "pooler_config.0.pool_client_idle_timeout": "config_spec.pool.client_idle_timeout",
+ }
+
+ updatePath := []string{}
+ for field, path := range mdbGreenplumUpdateFieldsMap {
+ if d.HasChange(field) {
+ updatePath = append(updatePath, path)
+ }
+ }
+
+ version := d.Get("version").(string)
+ gpFieldName := getGreenplumConfigFieldName(version)
+
+ for _, setting := range settingNames {
+ field := fmt.Sprintf("greenplum_config.%s", setting)
+ if d.HasChange(field) {
+ path := fmt.Sprintf("config_spec.%s.%s", gpFieldName, setting)
+ updatePath = append(updatePath, path)
+ }
+ }
+
+ return updatePath
+}
+
+func expandGreenplumConfigSpec(d *schema.ResourceData) (*greenplum.ConfigSpec, []string, error) {
+ poolerConfig, err := expandGreenplumPoolerConfig(d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ gpConfig617, gpConfig619, settingNames, err := expandGreenplumConfigSpecGreenplumConfig(d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ configSpec := &greenplum.ConfigSpec{Pool: poolerConfig}
+ if gpConfig617 != nil {
+ configSpec.GreenplumConfig = gpConfig617
+ } else {
+ configSpec.GreenplumConfig = gpConfig619
+ }
+
+ return configSpec, settingNames, nil
+}
+
+func expandGreenplumConfigSpecGreenplumConfig(d *schema.ResourceData) (*greenplum.ConfigSpec_GreenplumConfig_6_17, *greenplum.ConfigSpec_GreenplumConfig_6_19, []string, error) {
+ version := d.Get("version").(string)
+ if version == "6.17" {
+ cfg := &greenplum.ConfigSpec_GreenplumConfig_6_17{
+ GreenplumConfig_6_17: &greenplum.GreenplumConfig6_17{},
+ }
+ fields, err := expandResourceGenerateNonSkippedFields(mdbGreenplumSettingsFieldsInfo, d, cfg.GreenplumConfig_6_17, "greenplum_config.", true)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ return cfg, nil, fields, nil
+ } else if version == "6.19" {
+ cfg := &greenplum.ConfigSpec_GreenplumConfig_6_19{
+ GreenplumConfig_6_19: &greenplum.GreenplumConfig6_19{},
+ }
+
+ settingNames, err := expandResourceGenerateNonSkippedFields(mdbGreenplumSettingsFieldsInfo, d, cfg.GreenplumConfig_6_19, "greenplum_config.", true)
+ if err != nil {
+ return nil, nil, []string{}, err
+ }
+ log.Printf("[SPECIAL DEBUG] %v", cfg.GreenplumConfig_6_19)
+ log.Printf("[SPECIAL DEBUG] %v", settingNames)
+ return nil, cfg, settingNames, nil
+ }
+
+ return nil, nil, nil, fmt.Errorf("unknown Greenplum version '%s': only '6.17' and '6.19' are supported", version)
+}
+
+func expandGreenplumPoolerConfig(d *schema.ResourceData) (*greenplum.ConnectionPoolerConfig, error) {
+ pc := &greenplum.ConnectionPoolerConfig{}
+
+ if v, ok := d.GetOk("pooler_config.0.pooling_mode"); ok {
+ pm, err := parseGreenplumPoolingMode(v.(string))
+ if err != nil {
+ return nil, err
+ }
+
+ pc.Mode = pm
+ }
+
+ if v, ok := d.GetOk("pooler_config.0.pool_size"); ok {
+ pc.Size = &wrappers.Int64Value{Value: int64(v.(int))}
+ }
+
+ if v, ok := d.GetOk("pooler_config.0.pool_client_idle_timeout"); ok {
+ pc.ClientIdleTimeout = &wrappers.Int64Value{Value: int64(v.(int))}
+ }
+
+ return pc, nil
+}
+
+func parseGreenplumPoolingMode(s string) (greenplum.ConnectionPoolerConfig_PoolMode, error) {
+ v, ok := greenplum.ConnectionPoolerConfig_PoolMode_value[s]
+ if !ok {
+ return 0, fmt.Errorf("value for 'pooling_mode' must be one of %s, not `%s`",
+ getJoinedKeys(getEnumValueMapKeys(greenplum.ConnectionPoolerConfig_PoolMode_value)), s)
+ }
+
+ return greenplum.ConnectionPoolerConfig_PoolMode(v), nil
+}
+
+var mdbGreenplumSettingsFieldsInfo = newObjectFieldsInfo().
+ addType(greenplum.GreenplumConfig6_17{}).
+ addType(greenplum.GreenplumConfig6_19{})
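A brief illustration of the version-dependent helpers added above (illustrative values; assumes SESSION is one of the generated ConnectionPoolerConfig_PoolMode enum names):

func greenplumHelpersExample() {
	// The version string selects which oneof field receives the user config.
	_ = getGreenplumConfigFieldName("6.17") // "greenplum_config_6_17"
	_ = getGreenplumConfigFieldName("6.19") // "greenplum_config_6_19"

	// Pooling mode strings are validated against the generated enum map;
	// unknown values return an error listing the accepted names.
	if mode, err := parseGreenplumPoolingMode("SESSION"); err == nil {
		_ = mode
	}
}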
diff --git a/yandex/mdb_mysql_structures_test.go b/yandex/mdb_mysql_structures_test.go
index 5f3f41f59..9f2e8aad5 100644
--- a/yandex/mdb_mysql_structures_test.go
+++ b/yandex/mdb_mysql_structures_test.go
@@ -59,13 +59,14 @@ func TestFlattenMySQLSettings_5_7(t *testing.T) {
}
ethalon := map[string]string{
- "max_connections": "555",
- "sql_mode": "NO_BACKSLASH_ESCAPES,STRICT_ALL_TABLES",
- "innodb_print_all_deadlocks": "true",
- "log_slow_rate_type": "0",
+ "binlog_transaction_dependency_tracking": "0",
+ "max_connections": "555",
+ "sql_mode": "NO_BACKSLASH_ESCAPES,STRICT_ALL_TABLES",
+ "innodb_print_all_deadlocks": "true",
+ "log_slow_rate_type": "0",
}
- if !reflect.DeepEqual(m, ethalon) {
+ if !reflect.DeepEqual(ethalon, m) {
t.Errorf("FlattenMySQLSettings fail: flatten 5_7 should return %v map but map is: %v", ethalon, m)
}
}
@@ -101,13 +102,14 @@ func TestFlattenMySQLSettings_8_0(t *testing.T) {
}
ethalon := map[string]string{
- "max_connections": "555",
- "sql_mode": "NO_BACKSLASH_ESCAPES,STRICT_ALL_TABLES",
- "innodb_print_all_deadlocks": "true",
- "log_slow_rate_type": "0",
+ "binlog_transaction_dependency_tracking": "0",
+ "max_connections": "555",
+ "sql_mode": "NO_BACKSLASH_ESCAPES,STRICT_ALL_TABLES",
+ "innodb_print_all_deadlocks": "true",
+ "log_slow_rate_type": "0",
}
- if !reflect.DeepEqual(m, ethalon) {
+ if !reflect.DeepEqual(ethalon, m) {
t.Errorf("FlattenMySQLSettings fail: flatten 8_0 should return %v map but map is: %v", ethalon, m)
}
}
diff --git a/yandex/mdb_postgresql_structures.go b/yandex/mdb_postgresql_structures.go
index 395ed937d..73b24b7ed 100644
--- a/yandex/mdb_postgresql_structures.go
+++ b/yandex/mdb_postgresql_structures.go
@@ -92,19 +92,6 @@ func flattenPGResources(r *postgresql.Resources) ([]interface{}, error) {
return []interface{}{out}, nil
}
-func flattenPGBackupWindowStart(t *timeofday.TimeOfDay) ([]interface{}, error) {
- if t == nil {
- return nil, nil
- }
-
- out := map[string]interface{}{}
-
- out["hours"] = int(t.Hours)
- out["minutes"] = int(t.Minutes)
-
- return []interface{}{out}, nil
-}
-
func flattenPGPerformanceDiagnostics(p *postgresql.PerformanceDiagnostics) ([]interface{}, error) {
if p == nil {
return nil, nil
diff --git a/yandex/mdb_redis_structures.go b/yandex/mdb_redis_structures.go
index 031293a78..460b52bd3 100644
--- a/yandex/mdb_redis_structures.go
+++ b/yandex/mdb_redis_structures.go
@@ -2,6 +2,9 @@ package yandex
import (
"fmt"
+ "sort"
+ "strconv"
+ "strings"
wrappers "github.com/golang/protobuf/ptypes/wrappers"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -10,61 +13,175 @@ import (
)
type redisConfig struct {
- timeout int64
- maxmemoryPolicy string
- notifyKeyspaceEvents string
- slowlogLogSlowerThan int64
- slowlogMaxLen int64
- databases int64
- version string
+ timeout int64
+ maxmemoryPolicy string
+ notifyKeyspaceEvents string
+ slowlogLogSlowerThan int64
+ slowlogMaxLen int64
+ databases int64
+ version string
+ clientOutputBufferLimitNormal string
+ clientOutputBufferLimitPubsub string
+}
+
+const defaultReplicaPriority = 100
+
+func weightFunc(zone, shard, subnet string, priority *wrappers.Int64Value, ipFlag bool) int {
+ weight := 0
+ if zone != "" {
+ weight += 10000
+ }
+ if shard != "" {
+ weight += 1000
+ }
+ if subnet != "" {
+ weight += 100
+ }
+ if priority != nil {
+ weight += 10
+ }
+ if ipFlag {
+ weight += 1
+ }
+ return weight
+}
+
+func getHostWeight(spec *redis.Host) int {
+ return weightFunc(spec.ZoneId, spec.ShardName, spec.SubnetId, spec.ReplicaPriority, spec.AssignPublicIp)
}
// Sorts the list of hosts in accordance with the order in the config.
// We need to keep the original order so that no diff appears on each apply.
-func sortRedisHosts(hosts []*redis.Host, specs []*redis.HostSpec) {
- for i, h := range specs {
- for j := i + 1; j < len(hosts); j++ {
- if h.ZoneId == hosts[j].ZoneId && (h.ShardName == "" || h.ShardName == hosts[j].ShardName) {
- hosts[i], hosts[j] = hosts[j], hosts[i]
- break
+func sortRedisHosts(sharded bool, hosts []*redis.Host, specs []*redis.HostSpec) {
+ for i, hs := range specs {
+ switched := false
+ for j := i; j < len(hosts); j++ {
+ if (hs.ZoneId == hosts[j].ZoneId) &&
+ (hs.ShardName == "" || hs.ShardName == hosts[j].ShardName) &&
+ (hs.SubnetId == "" || hs.SubnetId == hosts[j].SubnetId) &&
+ (sharded || hosts[j].ReplicaPriority != nil && (hs.ReplicaPriority == nil && hosts[j].ReplicaPriority.GetValue() == defaultReplicaPriority ||
+ hs.ReplicaPriority.GetValue() == hosts[j].ReplicaPriority.GetValue())) &&
+ (hs.AssignPublicIp == hosts[j].AssignPublicIp) {
+ if !switched || getHostWeight(hosts[j]) > getHostWeight(hosts[i]) {
+ hosts[i], hosts[j] = hosts[j], hosts[i]
+ switched = true
+ }
}
}
}
}
-// Takes the current list of hosts and the desirable list of hosts.
-// Returns the map of hostnames to delete grouped by shard,
-// and the map of hosts to add grouped by shard as well.
-func redisHostsDiff(currHosts []*redis.Host, targetHosts []*redis.HostSpec) (map[string][]string, map[string][]*redis.HostSpec) {
- m := map[string][]*redis.HostSpec{}
+func keyFunc(zone, shard, subnet string) string {
+ return fmt.Sprintf("zone:%s;shard:%s;subnet:%s",
+ zone, shard, subnet,
+ )
+}
- for _, h := range targetHosts {
- key := h.ZoneId + h.ShardName
- m[key] = append(m[key], h)
+func getHostSpecBaseKey(h *redis.HostSpec) string {
+ return keyFunc(h.ZoneId, h.ShardName, h.SubnetId)
+}
+
+func getHostBaseKey(h *redis.Host) string {
+ return keyFunc(h.ZoneId, h.ShardName, h.SubnetId)
+}
+
+func getHostSpecWeight(spec *redis.HostSpec) int {
+ return weightFunc(spec.ZoneId, spec.ShardName, spec.SubnetId, spec.ReplicaPriority, spec.AssignPublicIp)
+}
+
+// Used to detect which host specs to update, add, or delete.
+func sortHostSpecs(targetHosts []*redis.HostSpec) []*redis.HostSpec {
+ weightedHosts := make(map[int][]*redis.HostSpec)
+ for _, spec := range targetHosts {
+ weight := getHostSpecWeight(spec)
+ weightedHosts[weight] = append(weightedHosts[weight], spec)
+ }
+
+ keys := make([]int, 0, len(weightedHosts))
+ for k := range weightedHosts {
+ keys = append(keys, k)
+ }
+ sort.Slice(keys, func(i, j int) bool {
+ return keys[i] > keys[j]
+ })
+
+ res := []*redis.HostSpec{}
+ for _, k := range keys {
+ res = append(res, weightedHosts[k]...)
+ }
+
+ return res
+}
+
+func separateHostsToUpdateAndDelete(sharded bool, sortedHosts []*redis.HostSpec, currHosts []*redis.Host) (
+ []*redis.HostSpec, map[string][]*HostUpdateInfo, map[string][]string, error) {
+ targetHostsBaseMap := map[string][]*redis.HostSpec{}
+ for _, h := range sortedHosts {
+ key := getHostSpecBaseKey(h)
+ targetHostsBaseMap[key] = append(targetHostsBaseMap[key], h)
}
toDelete := map[string][]string{}
+ toUpdate := map[string][]*HostUpdateInfo{}
for _, h := range currHosts {
- key := h.ZoneId + h.ShardName
- hs, ok := m[key]
- if !ok {
- toDelete[h.ShardName] = append(toDelete[h.ShardName], h.Name)
- }
- if len(hs) > 1 {
- m[key] = hs[1:]
+ key := getHostBaseKey(h)
+ hs, ok := targetHostsBaseMap[key]
+ if ok {
+ newSpec := hs[0]
+ hostInfo, err := getHostUpdateInfo(sharded, h.Name, h.ReplicaPriority, h.AssignPublicIp,
+ newSpec.ReplicaPriority, newSpec.AssignPublicIp)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if hostInfo != nil {
+ toUpdate[h.ShardName] = append(toUpdate[h.ShardName], hostInfo)
+ }
+ if len(hs) > 1 {
+ targetHostsBaseMap[key] = hs[1:]
+ } else {
+ delete(targetHostsBaseMap, key)
+ }
} else {
- delete(m, key)
+ toDelete[h.ShardName] = append(toDelete[h.ShardName], h.Name)
}
}
+ hostsLeft := []*redis.HostSpec{}
+ for _, specs := range targetHostsBaseMap {
+ hostsLeft = append(hostsLeft, specs...)
+ }
+
+ return hostsLeft, toUpdate, toDelete, nil
+}
+
+// Takes the current list of hosts and the desired list of hosts.
+// Returns three maps keyed by shard name:
+// hostnames to delete,
+// host updates to apply,
+// host specs to add.
+func redisHostsDiff(sharded bool, currHosts []*redis.Host, targetHosts []*redis.HostSpec) (map[string][]string,
+ map[string][]*HostUpdateInfo, map[string][]*redis.HostSpec, error) {
+ sortedHosts := sortHostSpecs(targetHosts)
+ hostsLeft, toUpdate, toDelete, err := separateHostsToUpdateAndDelete(sharded, sortedHosts, currHosts)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
toAdd := map[string][]*redis.HostSpec{}
- for _, hs := range m {
- for _, h := range hs {
- toAdd[h.ShardName] = append(toAdd[h.ShardName], h)
- }
+ for _, h := range hostsLeft {
+ toAdd[h.ShardName] = append(toAdd[h.ShardName], h)
}
- return toDelete, toAdd
+ return toDelete, toUpdate, toAdd, nil
+}
+
+func limitToStr(hard, soft, secs *wrappers.Int64Value) string {
+ vals := []string{
+ strconv.FormatInt(hard.GetValue(), 10),
+ strconv.FormatInt(soft.GetValue(), 10),
+ strconv.FormatInt(secs.GetValue(), 10),
+ }
+ return strings.Join(vals, " ")
}
func extractRedisConfig(cc *redis.ClusterConfig) redisConfig {
@@ -80,6 +197,16 @@ func extractRedisConfig(cc *redis.ClusterConfig) redisConfig {
res.slowlogLogSlowerThan = c.GetSlowlogLogSlowerThan().GetValue()
res.slowlogMaxLen = c.GetSlowlogMaxLen().GetValue()
res.databases = c.GetDatabases().GetValue()
+ res.clientOutputBufferLimitNormal = limitToStr(
+ c.ClientOutputBufferLimitNormal.HardLimit,
+ c.ClientOutputBufferLimitNormal.SoftLimit,
+ c.ClientOutputBufferLimitNormal.SoftSeconds,
+ )
+ res.clientOutputBufferLimitPubsub = limitToStr(
+ c.ClientOutputBufferLimitPubsub.HardLimit,
+ c.ClientOutputBufferLimitPubsub.SoftLimit,
+ c.ClientOutputBufferLimitPubsub.SoftSeconds,
+ )
case *redis.ClusterConfig_RedisConfig_6_0:
c := rc.RedisConfig_6_0.EffectiveConfig
res.maxmemoryPolicy = c.GetMaxmemoryPolicy().String()
@@ -88,6 +215,16 @@ func extractRedisConfig(cc *redis.ClusterConfig) redisConfig {
res.slowlogLogSlowerThan = c.GetSlowlogLogSlowerThan().GetValue()
res.slowlogMaxLen = c.GetSlowlogMaxLen().GetValue()
res.databases = c.GetDatabases().GetValue()
+ res.clientOutputBufferLimitNormal = limitToStr(
+ c.ClientOutputBufferLimitNormal.HardLimit,
+ c.ClientOutputBufferLimitNormal.SoftLimit,
+ c.ClientOutputBufferLimitNormal.SoftSeconds,
+ )
+ res.clientOutputBufferLimitPubsub = limitToStr(
+ c.ClientOutputBufferLimitPubsub.HardLimit,
+ c.ClientOutputBufferLimitPubsub.SoftLimit,
+ c.ClientOutputBufferLimitPubsub.SoftSeconds,
+ )
case *redis.ClusterConfig_RedisConfig_6_2:
c := rc.RedisConfig_6_2.EffectiveConfig
res.maxmemoryPolicy = c.GetMaxmemoryPolicy().String()
@@ -96,11 +233,37 @@ func extractRedisConfig(cc *redis.ClusterConfig) redisConfig {
res.slowlogLogSlowerThan = c.GetSlowlogLogSlowerThan().GetValue()
res.slowlogMaxLen = c.GetSlowlogMaxLen().GetValue()
res.databases = c.GetDatabases().GetValue()
+ res.clientOutputBufferLimitNormal = limitToStr(
+ c.ClientOutputBufferLimitNormal.HardLimit,
+ c.ClientOutputBufferLimitNormal.SoftLimit,
+ c.ClientOutputBufferLimitNormal.SoftSeconds,
+ )
+ res.clientOutputBufferLimitPubsub = limitToStr(
+ c.ClientOutputBufferLimitPubsub.HardLimit,
+ c.ClientOutputBufferLimitPubsub.SoftLimit,
+ c.ClientOutputBufferLimitPubsub.SoftSeconds,
+ )
}
return res
}
+func expandLimit(limit string) ([]*wrappers.Int64Value, error) {
+ vals := strings.Split(limit, " ")
+ if len(vals) != 3 {
+ return nil, fmt.Errorf("%s should be space-separated 3-values string", limit)
+ }
+ res := []*wrappers.Int64Value{}
+ for _, val := range vals {
+ parsed, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ res = append(res, &wrappers.Int64Value{Value: parsed})
+ }
+ return res, nil
+}
+
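// Illustrative round trip of limitToStr/expandLimit above (not part of the change):
// the API's three wrapped int64 limits are flattened to a "hard soft seconds"
// string for the schema and parsed back on expand.
func clientOutputBufferLimitExample() error {
	s := limitToStr(
		&wrappers.Int64Value{Value: 16777216}, // hard limit
		&wrappers.Int64Value{Value: 8388608},  // soft limit
		&wrappers.Int64Value{Value: 60},       // soft seconds
	) // "16777216 8388608 60"

	vals, err := expandLimit(s)
	if err != nil {
		return err
	}
	_ = vals // [hard, soft, seconds] as *wrappers.Int64Value
	return nil
}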
func expandRedisConfig(d *schema.ResourceData) (*redis.ConfigSpec_RedisSpec, string, error) {
var cs redis.ConfigSpec_RedisSpec
@@ -138,6 +301,23 @@ func expandRedisConfig(d *schema.ResourceData) (*redis.ConfigSpec_RedisSpec, str
if v, ok := d.GetOk("config.0.version"); ok {
version = v.(string)
}
+
+ var expandedNormal []*wrappers.Int64Value
+ var err error
+ if v, ok := d.GetOk("config.0.client_output_buffer_limit_normal"); ok {
+ expandedNormal, err = expandLimit(v.(string))
+ if err != nil {
+ return nil, "", err
+ }
+ }
+ var expandedPubsub []*wrappers.Int64Value
+ if v, ok := d.GetOk("config.0.client_output_buffer_limit_pubsub"); ok {
+ expandedPubsub, err = expandLimit(v.(string))
+ if err != nil {
+ return nil, "", err
+ }
+ }
+
switch version {
case "5.0":
c := config.RedisConfig5_0{
@@ -148,10 +328,30 @@ func expandRedisConfig(d *schema.ResourceData) (*redis.ConfigSpec_RedisSpec, str
SlowlogMaxLen: slowlogMaxLen,
Databases: databases,
}
+
+ if len(expandedNormal) != 0 {
+ normalLimit := &config.RedisConfig5_0_ClientOutputBufferLimit{
+ HardLimit: expandedNormal[0],
+ SoftLimit: expandedNormal[1],
+ SoftSeconds: expandedNormal[2],
+ }
+ c.SetClientOutputBufferLimitNormal(normalLimit)
+ }
+
+ if len(expandedPubsub) != 0 {
+ pubsubLimit := &config.RedisConfig5_0_ClientOutputBufferLimit{
+ HardLimit: expandedPubsub[0],
+ SoftLimit: expandedPubsub[1],
+ SoftSeconds: expandedPubsub[2],
+ }
+ c.SetClientOutputBufferLimitPubsub(pubsubLimit)
+ }
+
err := setMaxMemory5_0(&c, d)
if err != nil {
return nil, version, err
}
+
cs = &redis.ConfigSpec_RedisConfig_5_0{
RedisConfig_5_0: &c,
}
@@ -164,10 +364,30 @@ func expandRedisConfig(d *schema.ResourceData) (*redis.ConfigSpec_RedisSpec, str
SlowlogMaxLen: slowlogMaxLen,
Databases: databases,
}
+
+ if len(expandedNormal) != 0 {
+ normalLimit := &config.RedisConfig6_0_ClientOutputBufferLimit{
+ HardLimit: expandedNormal[0],
+ SoftLimit: expandedNormal[1],
+ SoftSeconds: expandedNormal[2],
+ }
+ c.SetClientOutputBufferLimitNormal(normalLimit)
+ }
+
+ if len(expandedPubsub) != 0 {
+ pubsubLimit := &config.RedisConfig6_0_ClientOutputBufferLimit{
+ HardLimit: expandedPubsub[0],
+ SoftLimit: expandedPubsub[1],
+ SoftSeconds: expandedPubsub[2],
+ }
+ c.SetClientOutputBufferLimitPubsub(pubsubLimit)
+ }
+
err := setMaxMemory6_0(&c, d)
if err != nil {
return nil, version, err
}
+
cs = &redis.ConfigSpec_RedisConfig_6_0{
RedisConfig_6_0: &c,
}
@@ -180,10 +400,30 @@ func expandRedisConfig(d *schema.ResourceData) (*redis.ConfigSpec_RedisSpec, str
SlowlogMaxLen: slowlogMaxLen,
Databases: databases,
}
+
+ if len(expandedNormal) != 0 {
+ normalLimit := &config.RedisConfig6_2_ClientOutputBufferLimit{
+ HardLimit: expandedNormal[0],
+ SoftLimit: expandedNormal[1],
+ SoftSeconds: expandedNormal[2],
+ }
+ c.SetClientOutputBufferLimitNormal(normalLimit)
+ }
+
+ if len(expandedPubsub) != 0 {
+ pubsubLimit := &config.RedisConfig6_2_ClientOutputBufferLimit{
+ HardLimit: expandedPubsub[0],
+ SoftLimit: expandedPubsub[1],
+ SoftSeconds: expandedPubsub[2],
+ }
+ c.SetClientOutputBufferLimitPubsub(pubsubLimit)
+ }
+
err := setMaxMemory6_2(&c, d)
if err != nil {
return nil, version, err
}
+
cs = &redis.ConfigSpec_RedisConfig_6_2{
RedisConfig_6_2: &c,
}
@@ -322,7 +562,7 @@ func flattenRedisMaintenanceWindow(mw *redis.MaintenanceWindow) []map[string]int
return []map[string]interface{}{result}
}
-func flattenRedisHosts(hs []*redis.Host) ([]map[string]interface{}, error) {
+func flattenRedisHosts(sharded bool, hs []*redis.Host) ([]map[string]interface{}, error) {
res := []map[string]interface{}{}
for _, h := range hs {
@@ -331,6 +571,12 @@ func flattenRedisHosts(hs []*redis.Host) ([]map[string]interface{}, error) {
m["subnet_id"] = h.SubnetId
m["shard_name"] = h.ShardName
m["fqdn"] = h.Name
+ if sharded {
+ m["replica_priority"] = defaultReplicaPriority
+ } else {
+ m["replica_priority"] = h.ReplicaPriority.GetValue()
+ }
+ m["assign_public_ip"] = h.AssignPublicIp
res = append(res, m)
}
@@ -340,17 +586,18 @@ func flattenRedisHosts(hs []*redis.Host) ([]map[string]interface{}, error) {
func expandRedisHosts(d *schema.ResourceData) ([]*redis.HostSpec, error) {
var result []*redis.HostSpec
hosts := d.Get("host").([]interface{})
+ sharded := d.Get("sharded").(bool)
for _, v := range hosts {
config := v.(map[string]interface{})
- host := expandRedisHost(config)
+ host := expandRedisHost(sharded, config)
result = append(result, host)
}
return result, nil
}
-func expandRedisHost(config map[string]interface{}) *redis.HostSpec {
+func expandRedisHost(sharded bool, config map[string]interface{}) *redis.HostSpec {
host := &redis.HostSpec{}
if v, ok := config["zone"]; ok {
host.ZoneId = v.(string)
@@ -363,6 +610,15 @@ func expandRedisHost(config map[string]interface{}) *redis.HostSpec {
if v, ok := config["shard_name"]; ok {
host.ShardName = v.(string)
}
+
+ if v, ok := config["replica_priority"]; ok && !sharded {
+ priority := v.(int)
+ host.ReplicaPriority = &wrappers.Int64Value{Value: int64(priority)}
+ }
+
+ if v, ok := config["assign_public_ip"]; ok {
+ host.AssignPublicIp = v.(bool)
+ }
return host
}
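A quick sketch of how the new sharded flag changes host expansion (illustrative only; the map keys mirror the schema fields referenced above):

func expandRedisHostExample() {
	cfg := map[string]interface{}{
		"zone":             "ru-central1-a",
		"subnet_id":        "subnet1",
		"shard_name":       "shard1",
		"replica_priority": 50,
		"assign_public_ip": true,
	}
	nonSharded := expandRedisHost(false, cfg) // ReplicaPriority set to 50
	sharded := expandRedisHost(true, cfg)     // ReplicaPriority left unset for sharded clusters
	_, _ = nonSharded, sharded
}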
diff --git a/yandex/mdb_redis_structures_test.go b/yandex/mdb_redis_structures_test.go
new file mode 100644
index 000000000..12eb7b7c2
--- /dev/null
+++ b/yandex/mdb_redis_structures_test.go
@@ -0,0 +1,731 @@
+package yandex
+
+import (
+ "github.com/golang/protobuf/ptypes/wrappers"
+ "github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/redis/v1"
+ "google.golang.org/genproto/protobuf/field_mask"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetSentinelHosts(t *testing.T) {
+ diskTypeId := ""
+ publicIPFlags := []*bool{nil}
+ replicaPriorities := []*int{nil}
+ expected := `
+ host {
+ zone = "ru-central1-c"
+ subnet_id = "${yandex_vpc_subnet.foo.id}"
+
+
+ }
+`
+
+ actual := getSentinelHosts(diskTypeId, publicIPFlags, replicaPriorities)
+ require.Equal(t, expected, actual)
+}
+
+func TestRedisHostsDiff(t *testing.T) {
+ cases := []struct {
+ sharded bool
+ name string
+ currHosts []*redis.Host
+ targetHosts []*redis.HostSpec
+ expectedError string
+ expectedToDelete map[string][]string
+ expectedtoUpdate map[string][]*HostUpdateInfo
+ expectedToAdd map[string][]*redis.HostSpec
+ }{
+ {
+ name: "0 add, 0 update, 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{},
+ expectedtoUpdate: map[string][]*HostUpdateInfo{},
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "0 add, 1 update (ip), 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{},
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: true,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"assign_public_ip"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "0 add, 1 update (priority), 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ AssignPublicIp: false,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{},
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: false,
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"replica_priority"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "0 add, 1 update (ip), 0 delete - works in sharded",
+ sharded: true,
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{},
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: true,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"assign_public_ip"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "0 add, 1 update (priority), 0 delete - fails in sharded",
+ sharded: true,
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ AssignPublicIp: false,
+ },
+ },
+ expectedError: "modifying replica priority in hosts of sharded clusters is not supported: fqdn1",
+ },
+ {
+ name: "0 add, 1 update (ip and priority), 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ AssignPublicIp: true,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{},
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: true,
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"replica_priority", "assign_public_ip"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "1 add, 0 update, 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{
+ "shard1": {
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ AssignPublicIp: false,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ ZoneId: "",
+ },
+ },
+ },
+ expectedtoUpdate: map[string][]*HostUpdateInfo{},
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "1 add, 1 update (priority), 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ AssignPublicIp: false,
+ },
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 101},
+ AssignPublicIp: false,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{
+ "shard1": {
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ AssignPublicIp: false,
+ ReplicaPriority: &wrappers.Int64Value{Value: 101},
+ ZoneId: "",
+ },
+ },
+ },
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: false,
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"replica_priority"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "1 add, 1 update (ip), 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ },
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{
+ "shard1": {
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ AssignPublicIp: true,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ ZoneId: "",
+ },
+ },
+ },
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: true,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"assign_public_ip"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "1 add, 1 update (ip and priority), 0 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ AssignPublicIp: true,
+ },
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{
+ "shard1": {
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ AssignPublicIp: false,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ ZoneId: "",
+ },
+ },
+ },
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: true,
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"replica_priority", "assign_public_ip"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{},
+ },
+ {
+ name: "1 add, 0 update, 1 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{
+ "shard1": {
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ AssignPublicIp: false,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ ZoneId: "",
+ },
+ },
+ },
+ expectedtoUpdate: map[string][]*HostUpdateInfo{},
+ expectedToDelete: map[string][]string{
+ "shard1": {
+ "fqdn1",
+ },
+ },
+ },
+ {
+ name: "1 add, 1 update (ip and priority), 1 delete",
+ currHosts: []*redis.Host{
+ {
+ Name: "fqdn1",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ {
+ Name: "fqdn2",
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ targetHosts: []*redis.HostSpec{
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ AssignPublicIp: true,
+ },
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ },
+ expectedToAdd: map[string][]*redis.HostSpec{
+ "shard1": {
+ {
+ ShardName: "shard1",
+ SubnetId: "subnet2",
+ AssignPublicIp: false,
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ ZoneId: "",
+ },
+ },
+ },
+ expectedtoUpdate: map[string][]*HostUpdateInfo{
+ "shard1": {
+ {
+ HostName: "fqdn1",
+ AssignPublicIp: true,
+ ReplicaPriority: &wrappers.Int64Value{Value: 99},
+ UpdateMask: &field_mask.FieldMask{
+ Paths: []string{"replica_priority", "assign_public_ip"},
+ },
+ },
+ },
+ },
+ expectedToDelete: map[string][]string{
+ "shard1": {
+ "fqdn2",
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ actualToDelete, actualToUpdate, actualToAdd, err := redisHostsDiff(tc.sharded, tc.currHosts, tc.targetHosts)
+ if tc.expectedError == "" {
+ require.Nil(t, err)
+ } else {
+ require.NotNil(t, err)
+ require.Equal(t, tc.expectedError, err.Error())
+ }
+ require.Equal(t, tc.expectedToAdd, actualToAdd, "unexpected ADD")
+ require.Equal(t, tc.expectedtoUpdate, actualToUpdate, "unexpected UPDATE")
+ require.Equal(t, tc.expectedToDelete, actualToDelete, "unexpected DELETE")
+ })
+ }
+}
+
+func TestSortRedisHostsNonsharded(t *testing.T) {
+ h1 := &redis.Host{
+ Name: "fqdn1",
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ }
+ h2 := &redis.Host{
+ Name: "fqdn2",
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 101},
+ AssignPublicIp: true,
+ }
+ h3 := &redis.Host{
+ Name: "fqdn3",
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ }
+ specs := []*redis.HostSpec{
+ {
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ {
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 101},
+ AssignPublicIp: true,
+ },
+ {
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ },
+ }
+ expectedHosts := []*redis.Host{h1, h2, h3}
+
+ cases := []struct {
+ name string
+ hosts []*redis.Host
+ specs []*redis.HostSpec
+ expectedHosts []*redis.Host
+ }{
+ {
+ name: "same order",
+ hosts: []*redis.Host{h1, h2, h3},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (1 3 2)",
+ hosts: []*redis.Host{h1, h3, h2},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (2 3 1)",
+ hosts: []*redis.Host{h2, h3, h1},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+
+ {
+ name: "mixed order (2 1 3)",
+ hosts: []*redis.Host{h2, h1, h3},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (3 2 1)",
+ hosts: []*redis.Host{h3, h2, h1},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (3 1 2)",
+ hosts: []*redis.Host{h3, h1, h2},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ sortRedisHosts(false, tc.hosts, tc.specs)
+ require.Equal(t, tc.expectedHosts, tc.hosts)
+ })
+ }
+}
+
+func TestSortRedisHostsSharded(t *testing.T) {
+ h1 := &redis.Host{
+ Name: "fqdn1",
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ }
+ h2 := &redis.Host{
+ Name: "fqdn2",
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 101},
+ AssignPublicIp: true,
+ }
+ h3 := &redis.Host{
+ Name: "fqdn3",
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard3",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ }
+ specs := []*redis.HostSpec{
+ {
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard1",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: false,
+ },
+ {
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard2",
+ ReplicaPriority: &wrappers.Int64Value{Value: 101},
+ AssignPublicIp: true,
+ },
+ {
+ ZoneId: "zone1",
+ SubnetId: "subnet1",
+ ShardName: "shard3",
+ ReplicaPriority: &wrappers.Int64Value{Value: 100},
+ AssignPublicIp: true,
+ },
+ }
+ expectedHosts := []*redis.Host{h1, h2, h3}
+
+ cases := []struct {
+ name string
+ hosts []*redis.Host
+ specs []*redis.HostSpec
+ expectedHosts []*redis.Host
+ }{
+ {
+ name: "same order",
+ hosts: []*redis.Host{h1, h2, h3},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (1 3 2)",
+ hosts: []*redis.Host{h1, h3, h2},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (2 3 1)",
+ hosts: []*redis.Host{h2, h3, h1},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+
+ {
+ name: "mixed order (2 1 3)",
+ hosts: []*redis.Host{h2, h1, h3},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (3 2 1)",
+ hosts: []*redis.Host{h3, h2, h1},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ {
+ name: "mixed order (3 1 2)",
+ hosts: []*redis.Host{h3, h1, h2},
+ specs: specs,
+ expectedHosts: expectedHosts,
+ },
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.name, func(t *testing.T) {
+ sortRedisHosts(true, tc.hosts, tc.specs)
+ require.Equal(t, tc.expectedHosts, tc.hosts)
+ })
+ }
+}
diff --git a/yandex/mdb_structures.go b/yandex/mdb_structures.go
new file mode 100644
index 000000000..107edb87f
--- /dev/null
+++ b/yandex/mdb_structures.go
@@ -0,0 +1,24 @@
+package yandex
+
+import (
+ "google.golang.org/genproto/googleapis/type/timeofday"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
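+// MdbConnectionPoolerConfig is a common read-only view over MDB connection pooler configs: the pooling mode plus the pool discard flag.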
+type MdbConnectionPoolerConfig interface {
+ GetPoolingMode() int32
+ GetPoolDiscard() *wrapperspb.BoolValue
+}
+
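+// flattenPGBackupWindowStart converts a TimeOfDay value into the single-element hours/minutes list used for backup window start attributes.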
+func flattenPGBackupWindowStart(t *timeofday.TimeOfDay) ([]interface{}, error) {
+ if t == nil {
+ return nil, nil
+ }
+
+ out := map[string]interface{}{}
+
+ out["hours"] = int(t.Hours)
+ out["minutes"] = int(t.Minutes)
+
+ return []interface{}{out}, nil
+}
diff --git a/yandex/provider.go b/yandex/provider.go
index e9572b9a4..a773066a4 100644
--- a/yandex/provider.go
+++ b/yandex/provider.go
@@ -18,6 +18,7 @@ const (
defaultEndpoint = "api.cloud.yandex.net:443"
defaultStorageEndpoint = "storage.yandexcloud.net"
defaultYMQEndpoint = "message-queue.api.cloud.yandex.net"
+ defaultRegion = "ru-central1"
)
// Global MutexKV
@@ -58,6 +59,12 @@ func provider(emptyFolder bool) *schema.Provider {
DefaultFunc: schema.EnvDefaultFunc("YC_ORGANIZATION_ID", nil),
Description: descriptions["organization_id"],
},
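+ // region_id defaults to "ru-central1" and can be overridden via the YC_REGION environment variable.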
+ "region_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ DefaultFunc: schema.EnvDefaultFunc("YC_REGION", defaultRegion),
+ Description: descriptions["region_id"],
+ },
"zone": {
Type: schema.TypeString,
Optional: true,
@@ -284,6 +291,7 @@ func provider(emptyFolder bool) *schema.Provider {
"yandex_resourcemanager_folder_iam_member": resourceYandexResourceManagerFolderIAMMember(),
"yandex_resourcemanager_folder_iam_policy": resourceYandexResourceManagerFolderIAMPolicy(),
"yandex_serverless_container": resourceYandexServerlessContainer(),
+ "yandex_serverless_container_iam_binding": resourceYandexServerlessContainerIAMBinding(),
"yandex_storage_bucket": resourceYandexStorageBucket(),
"yandex_storage_object": resourceYandexStorageObject(),
"yandex_vpc_address": resourceYandexVPCAddress(),
@@ -293,6 +301,7 @@ func provider(emptyFolder bool) *schema.Provider {
"yandex_vpc_security_group": resourceYandexVPCSecurityGroup(),
"yandex_vpc_security_group_rule": resourceYandexVpcSecurityGroupRule(),
"yandex_vpc_subnet": resourceYandexVPCSubnet(),
+ "yandex_ydb_database_iam_binding": resourceYandexYDBDatabaseIAMBinding(),
"yandex_ydb_database_dedicated": resourceYandexYDBDatabaseDedicated(),
"yandex_ydb_database_serverless": resourceYandexYDBDatabaseServerless(),
},
@@ -344,6 +353,9 @@ var descriptions = map[string]string{
"cloud_id": "ID of Yandex.Cloud tenant.",
+ "region_id": "The region where operations will take place. Examples\n" +
+ "are ru-central1",
+
"zone": "The zone where operations will take place. Examples\n" +
"are ru-central1-a, ru-central2-c, etc.",
@@ -380,6 +392,7 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr
config := Config{
Token: d.Get("token").(string),
ServiceAccountKeyFileOrContent: d.Get("service_account_key_file").(string),
+ Region: d.Get("region_id").(string),
Zone: d.Get("zone").(string),
FolderID: d.Get("folder_id").(string),
CloudID: d.Get("cloud_id").(string),
diff --git a/yandex/resource_yandex_compute_instance.go b/yandex/resource_yandex_compute_instance.go
index b79a03fb7..fa598ea17 100644
--- a/yandex/resource_yandex_compute_instance.go
+++ b/yandex/resource_yandex_compute_instance.go
@@ -368,10 +368,11 @@ func resourceYandexComputeInstance() *schema.Resource {
},
"hostname": {
- Type: schema.TypeString,
- Optional: true,
- Computed: true,
- ForceNew: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ DiffSuppressFunc: hostnameDiffSuppressFunc,
},
"metadata": {
@@ -1284,12 +1285,18 @@ func prepareCreateInstanceRequest(d *schema.ResourceData, meta *Config) (*comput
}
func parseHostnameFromFQDN(fqdn string) (string, error) {
- p := strings.Split(fqdn, ".")
- if len(p) < 1 {
- return "", fmt.Errorf("failed to get instance hostname from its fqdn")
+ if !strings.Contains(fqdn, ".") {
+ return fqdn + ".", nil
+ }
+ if strings.HasSuffix(fqdn, ".auto.internal") {
+ return "", nil
+ }
+ if strings.HasSuffix(fqdn, ".internal") {
+ p := strings.Split(fqdn, ".")
+ return p[0], nil
}
- return p[0], nil
+ return fqdn, nil
}
func wantChangeAddressSpec(old *compute.PrimaryAddressSpec, new *compute.PrimaryAddressSpec) bool {
@@ -1560,3 +1567,7 @@ func ensureAllowStoppingForUpdate(d *schema.ResourceData, propNames ...string) e
}
return nil
}
+
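+// hostnameDiffSuppressFunc treats hostnames that differ only by trailing dots as equal, so "host" and "host." do not produce a diff.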
+func hostnameDiffSuppressFunc(_, oldValue, newValue string, _ *schema.ResourceData) bool {
+ return strings.TrimRight(oldValue, ".") == strings.TrimRight(newValue, ".")
+}
diff --git a/yandex/resource_yandex_function.go b/yandex/resource_yandex_function.go
index eb1c04736..6a6c779fd 100644
--- a/yandex/resource_yandex_function.go
+++ b/yandex/resource_yandex_function.go
@@ -20,7 +20,7 @@ import (
"github.com/yandex-cloud/go-genproto/yandex/cloud/serverless/functions/v1"
)
-const yandexFunctionDefaultTimeout = 5 * time.Minute
+const yandexFunctionDefaultTimeout = 10 * time.Minute
const versionCreateSourceContentMaxBytes = 3670016
func resourceYandexFunction() *schema.Resource {
diff --git a/yandex/resource_yandex_kubernetes_node_group.go b/yandex/resource_yandex_kubernetes_node_group.go
index aa3a0e3e4..b2db3afcf 100644
--- a/yandex/resource_yandex_kubernetes_node_group.go
+++ b/yandex/resource_yandex_kubernetes_node_group.go
@@ -159,6 +159,54 @@ func resourceYandexKubernetesNodeGroup() *schema.Resource {
Set: schema.HashString,
Optional: true,
},
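+ // ipv4_dns_records / ipv6_dns_records describe DNS records attached to the primary IPv4/IPv6 addresses of the node group instances.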
+ "ipv4_dns_records": {
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "dns_zone_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "ttl": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "ptr": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ },
+ },
+ },
+ "ipv6_dns_records": {
+ Type: schema.TypeList,
+ Optional: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "dns_zone_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "ttl": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "ptr": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ },
+ },
+ },
},
},
},
@@ -203,6 +251,16 @@ func resourceYandexKubernetesNodeGroup() *schema.Resource {
},
},
},
+ "name": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "labels": {
+ Type: schema.TypeMap,
+ Optional: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ Set: schema.HashString,
+ },
},
},
},
@@ -733,6 +791,11 @@ func getNodeGroupTemplate(d *schema.ResourceData) (*k8s.NodeTemplate, error) {
return nil, fmt.Errorf("error expanding metadata while creating Kubernetes node group: %s", err)
}
+ labels, err := expandLabels(h.Get("labels"))
+ if err != nil {
+ return nil, fmt.Errorf("error expanding template labels while creating Kubernetes node group: %s", err)
+ }
+
ns, err := getNodeGroupTemplateNetworkSettings(d)
if err != nil {
return nil, fmt.Errorf("error expanding metadata while creating Kubernetes node group: %s", err)
@@ -754,6 +817,8 @@ func getNodeGroupTemplate(d *schema.ResourceData) (*k8s.NodeTemplate, error) {
PlacementPolicy: getNodeGroupTemplatePlacementPolicy(d),
NetworkSettings: ns,
ContainerRuntimeSettings: crs,
+ Name: h.GetString("name"),
+ Labels: labels,
}
return tpl, nil
@@ -823,6 +888,17 @@ func getNodeGroupNetworkInterfaceSpecs(d *schema.ResourceData) []*k8s.NetworkInt
nifSpec.SubnetIds = expandSubnetIds(subnets)
}
+ if rec, ok := nif["ipv4_dns_records"]; ok {
+ if nifSpec.PrimaryV4AddressSpec != nil {
+ nifSpec.PrimaryV4AddressSpec.DnsRecordSpecs = expandK8SNodeGroupDNSRecords(rec.([]interface{}))
+ }
+ }
+ if rec, ok := nif["ipv6_dns_records"]; ok {
+ if nifSpec.PrimaryV6AddressSpec != nil {
+ nifSpec.PrimaryV6AddressSpec.DnsRecordSpecs = expandK8SNodeGroupDNSRecords(rec.([]interface{}))
+ }
+ }
+
nifs = append(nifs, nifSpec)
}
return nifs
@@ -975,6 +1051,8 @@ var nodeGroupUpdateFieldsMap = map[string]string{
"instance_template.0.network_interface": "node_template.network_interface_specs",
"instance_template.0.network_acceleration_type": "node_template.network_settings",
"instance_template.0.container_runtime.0.type": "node_template.container_runtime_settings.type",
+ "instance_template.0.name": "node_template.name",
+ "instance_template.0.labels": "node_template.labels",
"scale_policy.0.fixed_scale.0.size": "scale_policy.fixed_scale.size",
"scale_policy.0.auto_scale.0.min": "scale_policy.auto_scale.min_size",
"scale_policy.0.auto_scale.0.max": "scale_policy.auto_scale.max_size",
@@ -1114,6 +1192,8 @@ func flattenKubernetesNodeGroupTemplate(ngTpl *k8s.NodeTemplate) []map[string]in
"placement_policy": flattenKubernetesNodeGroupTemplatePlacementPolicy(ngTpl.GetPlacementPolicy()),
"network_acceleration_type": strings.ToLower(ngTpl.GetNetworkSettings().GetType().String()),
"container_runtime": flattenKubernetesNodeGroupTemplateContainerRuntime(ngTpl.GetContainerRuntimeSettings()),
+ "name": ngTpl.GetName(),
+ "labels": ngTpl.GetLabels(),
}
return []map[string]interface{}{tpl}
@@ -1129,13 +1209,21 @@ func flattenKubernetesNodeGroupNetworkInterfaces(ifs []*k8s.NetworkInterfaceSpec
}
func flattenKubernetesNodeGroupNetworkInterface(nif *k8s.NetworkInterfaceSpec) map[string]interface{} {
- return map[string]interface{}{
+ res := map[string]interface{}{
"subnet_ids": nif.SubnetIds,
"security_group_ids": nif.SecurityGroupIds,
"nat": flattenKubernetesNodeGroupNat(nif),
"ipv4": nif.PrimaryV4AddressSpec != nil,
"ipv6": nif.PrimaryV6AddressSpec != nil,
}
+ if nif.PrimaryV4AddressSpec != nil {
+ res["ipv4_dns_records"] = flattenK8SNodeGroupDNSRecords(nif.GetPrimaryV4AddressSpec().GetDnsRecordSpecs())
+ }
+ if nif.PrimaryV6AddressSpec != nil {
+ res["ipv6_dns_records"] = flattenK8SNodeGroupDNSRecords(nif.GetPrimaryV6AddressSpec().GetDnsRecordSpecs())
+ }
+
+ return res
}
func flattenKubernetesNodeGroupNat(nif *k8s.NetworkInterfaceSpec) bool {
diff --git a/yandex/resource_yandex_kubernetes_node_group_test.go b/yandex/resource_yandex_kubernetes_node_group_test.go
index 4a6793c42..fbe8003ef 100644
--- a/yandex/resource_yandex_kubernetes_node_group_test.go
+++ b/yandex/resource_yandex_kubernetes_node_group_test.go
@@ -199,6 +199,9 @@ func TestAccKubernetesNodeGroup_update(t *testing.T) {
nodeUpdatedResource.Cores = "2"
nodeUpdatedResource.DiskSize = "65"
nodeUpdatedResource.Preemptible = "false"
+ nodeUpdatedResource.NodeName = "new-{instance.short_id}"
+ nodeUpdatedResource.TemplateLabelKey = "two"
+ nodeUpdatedResource.TemplateLabelValue = "2"
// update maintenance policy
nodeUpdatedResource.constructMaintenancePolicyField(false, false, dailyMaintenancePolicy)
@@ -206,6 +209,9 @@ func TestAccKubernetesNodeGroup_update(t *testing.T) {
//nodeUpdatedResource.FixedScale = "2"
nodeUpdatedResource2 := nodeUpdatedResource
+ nodeUpdatedResource2.NodeName = ""
+ // clearing node group template labels
+ nodeUpdatedResource2.TemplateLabelKey = ""
nodeUpdatedResource2.constructMaintenancePolicyField(true, true, weeklyMaintenancePolicy)
nodeUpdatedResource3 := nodeUpdatedResource2
@@ -272,7 +278,7 @@ func TestAccKubernetesNodeGroup_update(t *testing.T) {
func TestAccKubernetesNodeGroupNetworkInterfaces_update(t *testing.T) {
clusterResource := clusterInfoWithSecurityGroups("TestAccKubernetesNodeGroupNetworkInterfaces_update", true)
- nodeResource := nodeGroupInfo(clusterResource.ClusterResourceName)
+ nodeResource := nodeGroupInfoIPv4DNSFQDN(clusterResource.ClusterResourceName)
nodeResource.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, clusterResource.SecurityGroupName)
nodeResourceFullName := nodeResource.ResourceFullName(true)
@@ -280,8 +286,13 @@ func TestAccKubernetesNodeGroupNetworkInterfaces_update(t *testing.T) {
nodeUpdatedResource.NetworkInterfaces = enableNAT
nodeUpdatedResource2 := nodeUpdatedResource
+ nodeUpdatedResource2.IPv4DNSFQDN = "new-{instance.short_id}.ipv4.internal."
nodeUpdatedResource2.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, "")
+ nodeUpdatedResource3 := nodeUpdatedResource2
+ nodeUpdatedResource3.IPv4DNSFQDN = ""
+ nodeUpdatedResource3.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, "")
+
var ng k8s.NodeGroup
resource.Test(t, resource.TestCase{
@@ -310,6 +321,13 @@ func TestAccKubernetesNodeGroupNetworkInterfaces_update(t *testing.T) {
checkNodeGroupAttributes(&ng, &nodeUpdatedResource2, true, false),
),
},
+ {
+ Config: testAccKubernetesNodeGroupConfig_basic(clusterResource, nodeUpdatedResource3),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckKubernetesNodeGroupExists(nodeResourceFullName, &ng),
+ checkNodeGroupAttributes(&ng, &nodeUpdatedResource3, true, false),
+ ),
+ },
},
})
}
@@ -365,10 +383,22 @@ func TestAccKubernetesNodeGroup_createPlacementGroup(t *testing.T) {
func TestAccKubernetesNodeGroup_dualStack(t *testing.T) {
clusterResource := clusterInfoDualStack("TestAccKubernetesNodeGroup_dualStack", true)
nodeResource := nodeGroupInfoDualStack(clusterResource.ClusterResourceName)
-
nodeResource.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, clusterResource.SecurityGroupName)
nodeResourceFullName := nodeResource.ResourceFullName(true)
+ nodeUpdatedResource := nodeResource
+ nodeUpdatedResource.IPv4DNSFQDN = "new-{instance.short_id}.ipv4.internal."
+ nodeUpdatedResource.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, clusterResource.SecurityGroupName)
+
+ nodeUpdatedResource2 := nodeUpdatedResource
+ nodeUpdatedResource2.IPv6DNSFQDN = "new-{instance.short_id}.ipv6.internal."
+ nodeUpdatedResource2.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, clusterResource.SecurityGroupName)
+
+ nodeUpdatedResource3 := nodeUpdatedResource2
+ nodeUpdatedResource3.IPv4DNSFQDN = ""
+ nodeUpdatedResource3.IPv6DNSFQDN = ""
+ nodeUpdatedResource3.constructNetworkInterfaces(clusterResource.SubnetResourceNameA, clusterResource.SecurityGroupName)
+
var ng k8s.NodeGroup
// All dual stack tests share the same subnet. Disallow concurrent execution.
@@ -387,6 +417,27 @@ func TestAccKubernetesNodeGroup_dualStack(t *testing.T) {
checkNodeGroupAttributes(&ng, &nodeResource, true, false),
),
},
+ {
+ Config: testAccKubernetesNodeGroupConfig_basic(clusterResource, nodeUpdatedResource),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckKubernetesNodeGroupExists(nodeResourceFullName, &ng),
+ checkNodeGroupAttributes(&ng, &nodeUpdatedResource, true, false),
+ ),
+ },
+ {
+ Config: testAccKubernetesNodeGroupConfig_basic(clusterResource, nodeUpdatedResource2),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckKubernetesNodeGroupExists(nodeResourceFullName, &ng),
+ checkNodeGroupAttributes(&ng, &nodeUpdatedResource2, true, false),
+ ),
+ },
+ {
+ Config: testAccKubernetesNodeGroupConfig_basic(clusterResource, nodeUpdatedResource3),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckKubernetesNodeGroupExists(nodeResourceFullName, &ng),
+ checkNodeGroupAttributes(&ng, &nodeUpdatedResource3, true, false),
+ ),
+ },
},
})
}
@@ -510,6 +561,8 @@ type resourceNodeGroupInfo struct {
MaintenancePolicy string
NetworkInterfaces string
+ IPv4DNSFQDN string
+ IPv6DNSFQDN string
autoUpgrade bool
autoRepair bool
@@ -520,6 +573,11 @@ type resourceNodeGroupInfo struct {
DualStack bool
NetworkAccelerationType string
+
+ NodeName string
+
+ TemplateLabelKey string
+ TemplateLabelValue string
}
func nodeGroupInfo(clusterResourceName string) resourceNodeGroupInfo {
@@ -529,6 +587,20 @@ func nodeGroupInfo(clusterResourceName string) resourceNodeGroupInfo {
func nodeGroupInfoDualStack(clusterResourceName string) resourceNodeGroupInfo {
ng := nodeGroupInfoWithMaintenance(clusterResourceName, true, true, anyMaintenancePolicy)
ng.DualStack = true
+ ng.IPv4DNSFQDN = "{instance.short_id}.{instance_group.id}.ipv4.internal."
+ ng.IPv6DNSFQDN = "{instance.short_id}.{instance_group.id}.ipv6.internal."
+ return ng
+}
+
+func nodeGroupInfoAutoscaled(clusterResourceName string) resourceNodeGroupInfo {
+ ng := nodeGroupInfo(clusterResourceName)
+ ng.ScalePolicy = autoscaledScalePolicy
+ return ng
+}
+
+func nodeGroupInfoIPv4DNSFQDN(clusterResourceName string) resourceNodeGroupInfo {
+ ng := nodeGroupInfo(clusterResourceName)
+ ng.IPv4DNSFQDN = "{instance.short_id}.{instance_group.id}.ipv4.internal."
return ng
}
@@ -549,18 +621,15 @@ func nodeGroupInfoWithMaintenance(clusterResourceName string, autoUpgrade, autoR
NodeLabelValue: "node_label_value",
ScalePolicy: fixedScalePolicy,
NetworkInterfaces: enableNAT,
+ NodeName: "node-{instance.short_id}",
+ TemplateLabelKey: "one",
+ TemplateLabelValue: "1",
}
info.constructMaintenancePolicyField(autoUpgrade, autoRepair, policyType)
return info
}
-func nodeGroupInfoAutoscaled(clusterResourceName string) resourceNodeGroupInfo {
- info := nodeGroupInfo(clusterResourceName)
- info.ScalePolicy = autoscaledScalePolicy
- return info
-}
-
func (i *resourceNodeGroupInfo) Map() map[string]interface{} {
return structs.Map(i)
}
@@ -597,11 +666,25 @@ func (i *resourceNodeGroupInfo) constructMaintenancePolicyField(autoUpgrade, aut
}
}
-func (i *resourceNodeGroupInfo) constructNetworkInterfaces(subnetName, securityGroupName string) {
+func (i *resourceNodeGroupInfo) constructNetworkInterfaces(subnetName string, securityGroupName string) {
i.SubnetName = subnetName
i.SecurityGroupName = securityGroupName
+
if i.DualStack {
- i.NetworkInterfaces = fmt.Sprintf(networkInterfacesTemplateDualStack, subnetName, securityGroupName)
+ if i.IPv4DNSFQDN != "" && i.IPv6DNSFQDN != "" {
+ i.NetworkInterfaces = fmt.Sprintf(networkInterfacesTemplateDualStackWithDNSRecords,
+ subnetName,
+ securityGroupName,
+ i.IPv4DNSFQDN,
+ i.IPv6DNSFQDN,
+ )
+ return
+ }
+
+ i.NetworkInterfaces = fmt.Sprintf(networkInterfacesTemplateDualStack,
+ subnetName,
+ securityGroupName,
+ )
return
}
@@ -611,6 +694,15 @@ func (i *resourceNodeGroupInfo) constructNetworkInterfaces(subnetName, securityG
securityGroupIDGetter = fmt.Sprintf("\"${yandex_vpc_security_group.%s.id}\"", i.SecurityGroupName)
}
+ if i.IPv4DNSFQDN != "" {
+ i.NetworkInterfaces = fmt.Sprintf(networkInterfacesTemplateWithDNSRecords,
+ subnetNameGetter,
+ securityGroupIDGetter,
+ i.IPv4DNSFQDN,
+ )
+ return
+ }
+
i.NetworkInterfaces = fmt.Sprintf(networkInterfacesTemplate,
subnetNameGetter,
securityGroupIDGetter,
@@ -754,6 +846,14 @@ resource "yandex_kubernetes_node_group" "{{.NodeGroupResourceName}}" {
{{if .NetworkAccelerationType}}
network_acceleration_type = "{{.NetworkAccelerationType}}"
{{end}}
+
+ name = "{{.NodeName}}"
+
+ {{if .TemplateLabelKey}}
+ labels = {
+ {{.TemplateLabelKey}} = "{{.TemplateLabelValue}}"
+ }
+ {{end}}
}
{{.ScalePolicy}}
@@ -788,6 +888,17 @@ var networkInterfacesTemplate = `
}
`
+var networkInterfacesTemplateWithDNSRecords = `
+ network_interface {
+ nat = true
+ subnet_ids = [%s]
+ security_group_ids = [%s]
+ ipv4_dns_records {
+ fqdn = "%s"
+ }
+ }
+`
+
var networkInterfacesTemplateDualStack = `
network_interface {
ipv4 = true
@@ -797,6 +908,21 @@ var networkInterfacesTemplateDualStack = `
}
`
+var networkInterfacesTemplateDualStackWithDNSRecords = `
+ network_interface {
+ ipv4 = true
+ ipv6 = true
+ subnet_ids = ["%s"]
+ security_group_ids = ["%s"]
+ ipv4_dns_records {
+ fqdn = "%s"
+ }
+ ipv6_dns_records {
+ fqdn = "%s"
+ }
+ }
+`
+
// language=tf
const constPlacementGroupResource = `
resource yandex_compute_placement_group pg {
@@ -823,6 +949,10 @@ func checkNodeGroupAttributes(ng *k8s.NodeGroup, info *resourceNodeGroupInfo, rs
return fmt.Errorf("failed to get kubernetes node group specs info")
}
+ expectedTemplateLabels := map[string]string{}
+ if info.TemplateLabelKey != "" {
+ expectedTemplateLabels[info.TemplateLabelKey] = info.TemplateLabelValue
+ }
resourceFullName := info.ResourceFullName(rs)
checkFuncsAr := []resource.TestCheckFunc{
resource.TestCheckResourceAttr(resourceFullName, "cluster_id", ng.ClusterId),
@@ -854,6 +984,9 @@ func checkNodeGroupAttributes(ng *k8s.NodeGroup, info *resourceNodeGroupInfo, rs
strconv.FormatBool(tpl.GetSchedulingPolicy().GetPreemptible())),
resource.TestCheckResourceAttr(resourceFullName, "instance_template.0.network_acceleration_type",
strings.ToLower(tpl.NetworkSettings.Type.String())),
+ resource.TestCheckResourceAttr(resourceFullName, "instance_template.0.name", info.NodeName),
+ testCheckResourceMap(resourceFullName, "instance_template.0.labels", tpl.GetLabels()),
+ testCheckResourceMap(resourceFullName, "instance_template.0.labels", expectedTemplateLabels),
resource.TestCheckResourceAttr(resourceFullName, "version_info.0.current_version",
versionInfo.GetCurrentVersion()),
@@ -948,6 +1081,19 @@ func checkNodeGroupAttributes(ng *k8s.NodeGroup, info *resourceNodeGroupInfo, rs
)
}
+ if info.IPv4DNSFQDN != "" {
+ checkFuncsAr = append(checkFuncsAr,
+ resource.TestCheckResourceAttr(resourceFullName, "instance_template.0.network_interface.0.ipv4_dns_records.#", "1"),
+ resource.TestCheckResourceAttr(resourceFullName, "instance_template.0.network_interface.0.ipv4_dns_records.0.fqdn", info.IPv4DNSFQDN),
+ )
+ }
+ if info.IPv6DNSFQDN != "" && info.DualStack {
+ checkFuncsAr = append(checkFuncsAr,
+ resource.TestCheckResourceAttr(resourceFullName, "instance_template.0.network_interface.0.ipv6_dns_records.#", "1"),
+ resource.TestCheckResourceAttr(resourceFullName, "instance_template.0.network_interface.0.ipv6_dns_records.0.fqdn", info.IPv6DNSFQDN),
+ )
+ }
+
if info.policy != emptyMaintenancePolicy {
checkFuncsAr = append(checkFuncsAr,
resource.TestCheckResourceAttr(resourceFullName, "maintenance_policy.0.auto_upgrade", strconv.FormatBool(info.autoUpgrade)),
diff --git a/yandex/resource_yandex_logging_group.go b/yandex/resource_yandex_logging_group.go
index 83e7aef6b..636721b5b 100644
--- a/yandex/resource_yandex_logging_group.go
+++ b/yandex/resource_yandex_logging_group.go
@@ -46,10 +46,11 @@ func resourceYandexLoggingGroup() *schema.Resource {
},
"retention_period": {
- Type: schema.TypeString,
- Computed: true,
- Optional: true,
- ValidateFunc: validateParsableValue(parseDuration),
+ Type: schema.TypeString,
+ Computed: true,
+ Optional: true,
+ ValidateFunc: validateParsableValue(parseDuration),
+ DiffSuppressFunc: shouldSuppressDiffForTimeDuration,
},
"description": {
diff --git a/yandex/resource_yandex_mdb_clickhouse_cluster.go b/yandex/resource_yandex_mdb_clickhouse_cluster.go
index 01880a9e0..d4ae8bfe3 100644
--- a/yandex/resource_yandex_mdb_clickhouse_cluster.go
+++ b/yandex/resource_yandex_mdb_clickhouse_cluster.go
@@ -1998,7 +1998,7 @@ func listClickHouseShards(ctx context.Context, config *Config, id string) ([]*cl
return nil, fmt.Errorf("error while getting list of shards for '%s': %s", id, err)
}
shards = append(shards, resp.Shards...)
- if resp.NextPageToken == "" {
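+ // Treat a repeated page token the same as an empty one so shard listing cannot loop forever.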
+ if resp.NextPageToken == "" || resp.NextPageToken == pageToken {
break
}
pageToken = resp.NextPageToken
diff --git a/yandex/resource_yandex_mdb_greenplum_cluster.go b/yandex/resource_yandex_mdb_greenplum_cluster.go
index e648d7c61..41dbe5822 100644
--- a/yandex/resource_yandex_mdb_greenplum_cluster.go
+++ b/yandex/resource_yandex_mdb_greenplum_cluster.go
@@ -9,7 +9,6 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1"
- "google.golang.org/genproto/googleapis/type/timeofday"
"google.golang.org/genproto/protobuf/field_mask"
)
@@ -100,7 +99,6 @@ func resourceYandexMDBGreenplumCluster() *schema.Resource {
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
-
"master_subcluster": {
Type: schema.TypeList,
Required: true,
@@ -161,7 +159,6 @@ func resourceYandexMDBGreenplumCluster() *schema.Resource {
},
},
},
-
"master_hosts": {
Type: schema.TypeList,
Computed: true,
@@ -190,7 +187,6 @@ func resourceYandexMDBGreenplumCluster() *schema.Resource {
},
},
},
-
"user_name": {
Type: schema.TypeString,
Required: true,
@@ -200,7 +196,6 @@ func resourceYandexMDBGreenplumCluster() *schema.Resource {
Required: true,
Sensitive: true,
},
-
"created_at": {
Type: schema.TypeString,
Computed: true,
@@ -224,7 +219,6 @@ func resourceYandexMDBGreenplumCluster() *schema.Resource {
Optional: true,
Computed: true,
},
-
"backup_window_start": {
Type: schema.TypeList,
MaxItems: 1,
@@ -247,7 +241,6 @@ func resourceYandexMDBGreenplumCluster() *schema.Resource {
},
},
},
-
"access": {
Type: schema.TypeList,
MaxItems: 1,
@@ -268,13 +261,44 @@ func resourceYandexMDBGreenplumCluster() *schema.Resource {
},
},
},
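+ // pooler_config exposes the cluster connection pooler settings: pooling mode, pool size and client idle timeout.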
+ "pooler_config": {
+ Type: schema.TypeList,
+ Optional: true,
+ MaxItems: 1,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "pooling_mode": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "pool_size": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ "pool_client_idle_timeout": {
+ Type: schema.TypeInt,
+ Optional: true,
+ },
+ },
+ },
+ },
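+ // greenplum_config holds raw Greenplum settings as strings; values are validated and diff-suppressed through mdbGreenplumSettingsFieldsInfo.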
+ "greenplum_config": {
+ Type: schema.TypeMap,
+ Optional: true,
+ Computed: true,
+ DiffSuppressFunc: generateMapSchemaDiffSuppressFunc(mdbGreenplumSettingsFieldsInfo),
+ ValidateFunc: generateMapSchemaValidateFunc(mdbGreenplumSettingsFieldsInfo),
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
},
}
}
func resourceYandexMDBGreenplumClusterCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
- req, err := prepareCreateGreenplumRequest(d, config)
+ req, err := prepareCreateGreenplumClusterRequest(d, config)
if err != nil {
return err
}
@@ -283,71 +307,75 @@ func resourceYandexMDBGreenplumClusterCreate(d *schema.ResourceData, meta interf
defer cancel()
op, err := config.sdk.WrapOperation(config.sdk.MDB().Greenplum().Cluster().Create(ctx, req))
if err != nil {
- return fmt.Errorf("Error while requesting API to create Greenplum Cluster: %s", err)
+ return fmt.Errorf("error while requesting API to create Greenplum Cluster: %s", err)
}
protoMetadata, err := op.Metadata()
if err != nil {
- return fmt.Errorf("Error while get Greenplum create operation metadata: %s", err)
+ return fmt.Errorf("error while get Greenplum create operation metadata: %s", err)
}
md, ok := protoMetadata.(*greenplum.CreateClusterMetadata)
if !ok {
- return fmt.Errorf("Could not get Greenplum Cluster ID from create operation metadata")
+ return fmt.Errorf("could not get Greenplum Cluster ID from create operation metadata")
}
d.SetId(md.ClusterId)
err = op.Wait(ctx)
if err != nil {
- return fmt.Errorf("Error while waiting for operation to create Greenplum Cluster: %s", err)
+ return fmt.Errorf("error while waiting for operation to create Greenplum Cluster: %s", err)
}
if _, err := op.Response(); err != nil {
- return fmt.Errorf("Greenplum Cluster creation failed: %s", err)
+ return fmt.Errorf("failed to create Greenplum Cluster: %s", err)
}
return resourceYandexMDBGreenplumClusterRead(d, meta)
}
-func prepareCreateGreenplumRequest(d *schema.ResourceData, meta *Config) (*greenplum.CreateClusterRequest, error) {
+func prepareCreateGreenplumClusterRequest(d *schema.ResourceData, meta *Config) (*greenplum.CreateClusterRequest, error) {
labels, err := expandLabels(d.Get("labels"))
-
if err != nil {
- return nil, fmt.Errorf("Error while expanding labels on Greenplum Cluster create: %s", err)
+ return nil, fmt.Errorf("error while expanding labels on Greenplum Cluster create: %s", err)
}
folderID, err := getFolderID(d, meta)
if err != nil {
- return nil, fmt.Errorf("Error getting folder ID while creating Greenplum Cluster: %s", err)
+ return nil, fmt.Errorf("error getting folder ID while creating Greenplum Cluster: %s", err)
}
e := d.Get("environment").(string)
env, err := parseGreenplumEnv(e)
if err != nil {
- return nil, fmt.Errorf("Error resolving environment while creating Greenplum Cluster: %s", err)
+ return nil, fmt.Errorf("error resolving environment while creating Greenplum Cluster: %s", err)
}
- securityGroupIds := expandSecurityGroupIds(d.Get("security_group_ids"))
-
networkID, err := expandAndValidateNetworkId(d, meta)
if err != nil {
- return nil, fmt.Errorf("Error while expanding network id on Greenplum Cluster create: %s", err)
+ return nil, fmt.Errorf("error while expanding network id on Greenplum Cluster create: %s", err)
+ }
+
+ configSpec, _, err := expandGreenplumConfigSpec(d)
+ if err != nil {
+ return nil, fmt.Errorf("error while expanding config spec on Greenplum Cluster create: %s", err)
}
- req := greenplum.CreateClusterRequest{
+ return &greenplum.CreateClusterRequest{
FolderId: folderID,
Name: d.Get("name").(string),
Description: d.Get("description").(string),
NetworkId: networkID,
Environment: env,
Labels: labels,
- SecurityGroupIds: securityGroupIds,
+ SecurityGroupIds: expandSecurityGroupIds(d.Get("security_group_ids")),
MasterHostCount: int64(d.Get("master_host_count").(int)),
SegmentInHost: int64(d.Get("segment_in_host").(int)),
SegmentHostCount: int64(d.Get("segment_host_count").(int)),
Config: &greenplum.GreenplumConfig{
- Version: d.Get("version").(string),
- ZoneId: d.Get("zone").(string),
- SubnetId: d.Get("subnet_id").(string),
- AssignPublicIp: d.Get("assign_public_ip").(bool),
+ Version: d.Get("version").(string),
+ BackupWindowStart: expandGreenplumBackupWindowStart(d),
+ Access: expandGreenplumAccess(d),
+ ZoneId: d.Get("zone").(string),
+ SubnetId: d.Get("subnet_id").(string),
+ AssignPublicIp: d.Get("assign_public_ip").(bool),
},
MasterConfig: &greenplum.MasterSubclusterConfigSpec{
Resources: &greenplum.Resources{
@@ -366,8 +394,9 @@ func prepareCreateGreenplumRequest(d *schema.ResourceData, meta *Config) (*green
UserName: d.Get("user_name").(string),
UserPassword: d.Get("user_password").(string),
- }
- return &req, nil
+
+ ConfigSpec: configSpec,
+ }, nil
}
func resourceYandexMDBGreenplumClusterRead(d *schema.ResourceData, meta interface{}) error {
@@ -391,6 +420,7 @@ func resourceYandexMDBGreenplumClusterRead(d *schema.ResourceData, meta interfac
d.Set("health", cluster.GetHealth().String())
d.Set("status", cluster.GetStatus().String())
d.Set("version", cluster.GetConfig().GetVersion())
+ d.Set("deletion_protection", cluster.DeletionProtection)
d.Set("zone", cluster.GetConfig().ZoneId)
d.Set("subnet_id", cluster.GetConfig().SubnetId)
@@ -403,79 +433,62 @@ func resourceYandexMDBGreenplumClusterRead(d *schema.ResourceData, meta interfac
d.Set("user_name", cluster.GetUserName())
- masterSubcluster := map[string]interface{}{}
- masterResources := map[string]interface{}{}
- masterResources["resource_preset_id"] = cluster.GetMasterConfig().Resources.ResourcePresetId
- masterResources["disk_type_id"] = cluster.GetMasterConfig().Resources.DiskTypeId
- masterResources["disk_size"] = toGigabytes(cluster.GetMasterConfig().Resources.DiskSize)
- masterSubcluster["resources"] = []map[string]interface{}{masterResources}
- d.Set("master_subcluster", []map[string]interface{}{masterSubcluster})
-
- segmentSubcluster := map[string]interface{}{}
- segmentResources := map[string]interface{}{}
- segmentResources["resource_preset_id"] = cluster.GetMasterConfig().Resources.ResourcePresetId
- segmentResources["disk_type_id"] = cluster.GetMasterConfig().Resources.DiskTypeId
- segmentResources["disk_size"] = toGigabytes(cluster.GetMasterConfig().Resources.DiskSize)
- segmentSubcluster["resources"] = []map[string]interface{}{segmentResources}
- d.Set("segment_subcluster", []map[string]interface{}{segmentSubcluster})
-
- if cluster.Labels == nil {
- if err = d.Set("labels", make(map[string]string)); err != nil {
- return err
- }
- } else if err = d.Set("labels", cluster.Labels); err != nil {
+ d.Set("master_subcluster", flattenGreenplumMasterSubcluster(cluster.GetMasterConfig().Resources))
+ d.Set("segment_subcluster", flattenGreenplumSegmentSubcluster(cluster.GetSegmentConfig().Resources))
+
+ poolConfig, err := flattenGreenplumPoolerConfig(cluster.GetClusterConfig().GetPool())
+ if err != nil {
return err
}
-
- if cluster.SecurityGroupIds == nil {
- if err = d.Set("security_group_ids", make([]string, 0)); err != nil {
- return err
- }
- } else if err = d.Set("security_group_ids", cluster.SecurityGroupIds); err != nil {
+ if err := d.Set("pooler_config", poolConfig); err != nil {
return err
}
- masterHosts, err := listGreenplumMasterHosts(ctx, config, d.Id())
+ gpConfig, err := flattenGreenplumClusterConfig(cluster.ClusterConfig)
if err != nil {
return err
}
- mHost := make([]map[string]interface{}, 0, len(masterHosts))
- for _, h := range masterHosts {
- mHost = append(mHost, map[string]interface{}{"fqdn": h.Name, "assign_public_ip": h.AssignPublicIp})
+ if err := d.Set("greenplum_config", gpConfig); err != nil {
+ return err
}
- if err = d.Set("master_hosts", mHost); err != nil {
+
+ if err := d.Set("labels", cluster.Labels); err != nil {
return err
}
+ if err := d.Set("security_group_ids", cluster.SecurityGroupIds); err != nil {
+ return err
+ }
+
+ masterHosts, err := listGreenplumMasterHosts(ctx, config, d.Id())
+ if err != nil {
+ return err
+ }
segmentHosts, err := listGreenplumSegmentHosts(ctx, config, d.Id())
if err != nil {
return err
}
- sHost := make([]map[string]interface{}, 0, len(segmentHosts))
- for _, h := range segmentHosts {
- sHost = append(sHost, map[string]interface{}{"fqdn": h.Name})
+ mHost, sHost := flattenGreenplumHosts(masterHosts, segmentHosts)
+ if err := d.Set("master_hosts", mHost); err != nil {
+ return err
}
- if err = d.Set("segment_hosts", sHost); err != nil {
+ if err := d.Set("segment_hosts", sHost); err != nil {
return err
}
- d.Set("deletion_protection", cluster.DeletionProtection)
+ if err := d.Set("access", flattenGreenplumAccess(cluster.Config)); err != nil {
+ return err
+ }
- accessElement := map[string]interface{}{}
- if cluster.Config != nil && cluster.Config.Access != nil {
- accessElement["data_lens"] = cluster.Config.Access.DataLens
- accessElement["web_sql"] = cluster.Config.Access.WebSql
+ if err := d.Set("backup_window_start", flattenBackupWindowsStart(cluster.Config)); err != nil {
+ return err
}
- d.Set("access", []map[string]interface{}{accessElement})
- bwsElement := map[string]interface{}{}
- if cluster.Config != nil && cluster.Config.BackupWindowStart != nil {
- bwsElement["hours"] = cluster.Config.BackupWindowStart.Hours
- bwsElement["minutes"] = cluster.Config.BackupWindowStart.Minutes
+ if err := d.Set("created_at", getTimestamp(cluster.CreatedAt)); err != nil {
+ return err
}
- d.Set("backup_window_start", []map[string]interface{}{bwsElement})
- return d.Set("created_at", getTimestamp(cluster.CreatedAt))
+ return nil
}
func listGreenplumMasterHosts(ctx context.Context, config *Config, id string) ([]*greenplum.Host, error) {
@@ -502,6 +515,7 @@ func listGreenplumMasterHosts(ctx context.Context, config *Config, id string) ([
return hosts, nil
}
+
func listGreenplumSegmentHosts(ctx context.Context, config *Config, id string) ([]*greenplum.Host, error) {
hosts := []*greenplum.Host{}
pageToken := ""
@@ -527,45 +541,19 @@ func listGreenplumSegmentHosts(ctx context.Context, config *Config, id string) (
return hosts, nil
}
-var mdbGreenplumUpdateFieldsMap = map[string]string{
- "name": "name",
- "description": "description",
- "labels": "labels",
- "access.0.data_lens": "config.access.data_lens",
- "access.0.web_sql": "config.access.web_sql",
- "backup_window_start": "config.backup_window_start",
- "deletion_protection": "deletion_protection",
-}
-
func resourceYandexMDBGreenplumClusterUpdate(d *schema.ResourceData, meta interface{}) error {
d.Partial(true)
config := meta.(*Config)
- req, err := getGreenplumlusterUpdateRequest(d)
+ req, err := prepareUpdateGreenplumClusterRequest(d)
if err != nil {
return err
}
- backupWindowStart := expandGreenplumBackupWindowStart(d)
- req.Config = &greenplum.GreenplumConfig{
- Version: d.Get("version").(string),
- BackupWindowStart: backupWindowStart,
- Access: expandGreenplumAccess(d),
- }
-
- updatePath := []string{}
- for field, path := range mdbGreenplumUpdateFieldsMap {
- if d.HasChange(field) {
- updatePath = append(updatePath, path)
- }
- }
-
- if len(updatePath) == 0 {
+ if len(req.UpdateMask.Paths) == 0 {
return nil
}
- req.UpdateMask = &field_mask.FieldMask{Paths: updatePath}
-
ctx, cancel := config.ContextWithTimeout(d.Timeout(schema.TimeoutUpdate))
defer cancel()
@@ -583,53 +571,35 @@ func resourceYandexMDBGreenplumClusterUpdate(d *schema.ResourceData, meta interf
return resourceYandexMDBGreenplumClusterRead(d, meta)
}
-func getGreenplumlusterUpdateRequest(d *schema.ResourceData) (*greenplum.UpdateClusterRequest, error) {
+func prepareUpdateGreenplumClusterRequest(d *schema.ResourceData) (*greenplum.UpdateClusterRequest, error) {
+ if d.HasChange("security_group_ids") {
+ return nil, fmt.Errorf("changing of 'security_group_ids' is not implemented yet")
+ }
labels, err := expandLabels(d.Get("labels"))
if err != nil {
return nil, fmt.Errorf("error expanding labels while updating Greenplum cluster: %s", err)
}
- req := &greenplum.UpdateClusterRequest{
+ configSpec, settingNames, err := expandGreenplumConfigSpec(d)
+ if err != nil {
+ return nil, fmt.Errorf("error while expanding config spec on Greenplum Cluster create: %s", err)
+ }
+
+ return &greenplum.UpdateClusterRequest{
ClusterId: d.Id(),
Name: d.Get("name").(string),
Description: d.Get("description").(string),
Labels: labels,
DeletionProtection: d.Get("deletion_protection").(bool),
- }
-
- return req, nil
-}
-
-func expandGreenplumBackupWindowStart(d *schema.ResourceData) *timeofday.TimeOfDay {
- out := &timeofday.TimeOfDay{}
-
- if v, ok := d.GetOk("backup_window_start.0.hours"); ok {
- out.Hours = int32(v.(int))
- }
-
- if v, ok := d.GetOk("backup_window_start.0.minutes"); ok {
- out.Minutes = int32(v.(int))
- }
-
- return out
-}
-
-func expandGreenplumAccess(d *schema.ResourceData) *greenplum.Access {
- if _, ok := d.GetOkExists("access"); !ok {
- return nil
- }
-
- out := &greenplum.Access{}
-
- if v, ok := d.GetOk("access.0.data_lens"); ok {
- out.DataLens = v.(bool)
- }
-
- if v, ok := d.GetOk("access.0.web_sql"); ok {
- out.WebSql = v.(bool)
- }
-
- return out
+ Config: &greenplum.GreenplumConfig{
+ Version: d.Get("version").(string),
+ BackupWindowStart: expandGreenplumBackupWindowStart(d),
+ Access: expandGreenplumAccess(d),
+ },
+ SecurityGroupIds: expandSecurityGroupIds(d.Get("security_group_ids")),
+ UpdateMask: &field_mask.FieldMask{Paths: expandGreenplumUpdatePath(d, settingNames)},
+ ConfigSpec: configSpec,
+ }, nil
}
func resourceYandexMDBGreenplumClusterDelete(d *schema.ResourceData, meta interface{}) error {
diff --git a/yandex/resource_yandex_mdb_greenplum_cluster_test.go b/yandex/resource_yandex_mdb_greenplum_cluster_test.go
index 16a914a5a..e81b48605 100644
--- a/yandex/resource_yandex_mdb_greenplum_cluster_test.go
+++ b/yandex/resource_yandex_mdb_greenplum_cluster_test.go
@@ -88,10 +88,10 @@ func mdbGreenplumClusterImportStep(name string) resource.TestStep {
func TestAccMDBGreenplumCluster_full(t *testing.T) {
t.Parallel()
- GreenplumName := acctest.RandomWithPrefix("tf-greenplum")
- greenplumNameMod := GreenplumName + "_mod"
- GreenplumDesc := "Greenplum Cluster Terraform Test"
- greenplumDescMod := GreenplumDesc + "_mod"
+ clusterName := acctest.RandomWithPrefix("tf-greenplum")
+ clusterNameUpdated := clusterName + "_updated"
+ clusterDescription := "Greenplum Cluster Terraform Test"
+ clusterDescriptionUpdated := clusterDescription + " Updated"
folderID := getExampleFolderID()
resource.Test(t, resource.TestCase{
@@ -99,29 +99,73 @@ func TestAccMDBGreenplumCluster_full(t *testing.T) {
Providers: testAccProviders,
CheckDestroy: testAccCheckMDBGreenplumClusterDestroy,
Steps: []resource.TestStep{
- //Create Greenplum Cluster
+ // Create Greenplum Cluster
{
- Config: testAccMDBGreenplumClusterConfigMain(GreenplumName, GreenplumDesc, "PRESTABLE", false),
+ Config: testAccMDBGreenplumClusterConfigStep1(clusterName, clusterDescription),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBGreenplumClusterExists(greenplumResource, 2, 5),
- resource.TestCheckResourceAttr(greenplumResource, "name", GreenplumName),
+ resource.TestCheckResourceAttr(greenplumResource, "name", clusterName),
resource.TestCheckResourceAttr(greenplumResource, "folder_id", folderID),
- resource.TestCheckResourceAttr(greenplumResource, "description", GreenplumDesc),
+ resource.TestCheckResourceAttr(greenplumResource, "description", clusterDescription),
testAccCheckCreatedAtAttr(greenplumResource),
resource.TestCheckResourceAttr(greenplumResource, "security_group_ids.#", "1"),
resource.TestCheckResourceAttr(greenplumResource, "deletion_protection", "false"),
+
+ resource.TestCheckResourceAttr(greenplumResource, "pooler_config.0.pooling_mode", "TRANSACTION"),
+ resource.TestCheckResourceAttr(greenplumResource, "pooler_config.0.pool_size", "10"),
+ resource.TestCheckResourceAttr(greenplumResource, "pooler_config.0.pool_client_idle_timeout", "0"),
+
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.max_connections", "395"),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.max_slot_wal_keep_size", "1048576"),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.gp_workfile_limit_per_segment", "0"),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.gp_workfile_limit_per_query", "0"),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.gp_workfile_limit_files_per_query", "100000"),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.max_prepared_transactions", "500"),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.gp_workfile_compression", "false"),
),
},
mdbGreenplumClusterImportStep(greenplumResource),
- // Change some options
+ // Update name and description of the cluster
{
- Config: testAccMDBGreenplumClusterConfigUpdate(greenplumNameMod, greenplumDescMod, "PRESTABLE", false),
+ Config: testAccMDBGreenplumClusterConfigStep2(clusterNameUpdated, clusterDescriptionUpdated),
Check: resource.ComposeTestCheckFunc(
- resource.TestCheckResourceAttr(greenplumResource, "name", greenplumNameMod),
- resource.TestCheckResourceAttr(greenplumResource, "description", greenplumDescMod),
- resource.TestCheckResourceAttr(greenplumResource, "security_group_ids.#", "1"),
+ resource.TestCheckResourceAttr(greenplumResource, "name", clusterNameUpdated),
+ resource.TestCheckResourceAttr(greenplumResource, "description", clusterDescriptionUpdated),
+ ),
+ },
+ mdbGreenplumClusterImportStep(greenplumResource),
+ // Update pooler_config and greenplum_config
+ {
+ Config: testAccMDBGreenplumClusterConfigStep3(clusterNameUpdated, clusterDescriptionUpdated),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckMDBGreenplumClusterExists(greenplumResource, 2, 5),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.max_connections", "400"),
+ resource.TestCheckResourceAttr(greenplumResource, "greenplum_config.gp_workfile_compression", "true"),
+ resource.TestCheckResourceAttr(greenplumResource, "pooler_config.0.pooling_mode", "SESSION"),
+ resource.TestCheckResourceAttr(greenplumResource, "pooler_config.0.pool_size", "10"),
+ resource.TestCheckResourceAttr(greenplumResource, "pooler_config.0.pool_client_idle_timeout", "0"),
+ ),
+ },
+ mdbGreenplumClusterImportStep(greenplumResource),
+ // Update deletion_protection
+ {
+ Config: testAccMDBGreenplumClusterConfigStep4(clusterNameUpdated, clusterDescriptionUpdated),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckMDBGreenplumClusterExists(greenplumResource, 2, 5),
+ testAccCheckCreatedAtAttr(greenplumResource),
+ resource.TestCheckResourceAttr(greenplumResource, "deletion_protection", "true"),
+ ),
+ },
+ mdbGreenplumClusterImportStep(greenplumResource),
+ // Add access and backup_window_start fields
+ {
+ Config: testAccMDBGreenplumClusterConfigStep5(clusterNameUpdated, clusterDescriptionUpdated),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckMDBGreenplumClusterExists(greenplumResource, 2, 5),
+
resource.TestCheckResourceAttr(greenplumResource, "access.0.data_lens", "true"),
resource.TestCheckResourceAttr(greenplumResource, "backup_window_start.0.minutes", "15"),
+ resource.TestCheckResourceAttr(greenplumResource, "deletion_protection", "false"),
),
},
mdbGreenplumClusterImportStep(greenplumResource),
@@ -204,8 +248,6 @@ func testAccCheckMDBGreenplumClusterExists(n string, masterHosts int, segmentHos
const greenplumVPCDependencies = `
resource "yandex_vpc_network" "mdb-greenplum-test-net" {}
-
-
resource "yandex_vpc_subnet" "mdb-greenplum-test-subnet-b" {
zone = "ru-central1-b"
network_id = yandex_vpc_network.mdb-greenplum-test-net.id
@@ -229,33 +271,14 @@ resource "yandex_vpc_security_group" "mdb-greenplum-test-sg-x" {
v4_cidr_blocks = ["0.0.0.0/0"]
}
}
-
-resource "yandex_vpc_security_group" "mdb-greenplum-test-sg-y" {
- network_id = yandex_vpc_network.mdb-greenplum-test-net.id
-
- ingress {
- protocol = "ANY"
- description = "Allow incoming traffic from members of the same security group"
- from_port = 0
- to_port = 65535
- v4_cidr_blocks = ["0.0.0.0/0"]
- }
- egress {
- protocol = "ANY"
- description = "Allow outgoing traffic to members of the same security group"
- from_port = 0
- to_port = 65535
- v4_cidr_blocks = ["0.0.0.0/0"]
- }
-}
`
-func testAccMDBGreenplumClusterConfigMain(name, desc, environment string, deletionProtection bool) string {
+func testAccMDBGreenplumClusterConfigStep0(name string, description string) string {
return fmt.Sprintf(greenplumVPCDependencies+`
resource "yandex_mdb_greenplum_cluster" "foo" {
name = "%s"
description = "%s"
- environment = "%s"
+ environment = "PRESTABLE"
network_id = yandex_vpc_network.mdb-greenplum-test-net.id
zone = "ru-central1-b"
subnet_id = yandex_vpc_subnet.mdb-greenplum-test-subnet-b.id
@@ -285,46 +308,96 @@ resource "yandex_mdb_greenplum_cluster" "foo" {
user_name = "user1"
user_password = "mysecurepassword"
-
security_group_ids = [yandex_vpc_security_group.mdb-greenplum-test-sg-x.id]
- deletion_protection = %t
+`, name, description)
}
-`, name, desc, environment, deletionProtection)
+
+func testAccMDBGreenplumClusterConfigStep1(name string, description string) string {
+ return testAccMDBGreenplumClusterConfigStep0(name, description) + `
+ pooler_config {
+ pooling_mode = "TRANSACTION"
+ pool_size = 10
+ pool_client_idle_timeout = 0
+ }
+
+ greenplum_config = {
+ max_connections = 395
+ max_slot_wal_keep_size = 1048576
+ gp_workfile_limit_per_segment = 0
+ gp_workfile_limit_per_query = 0
+ gp_workfile_limit_files_per_query = 100000
+ max_prepared_transactions = 500
+ gp_workfile_compression = "false"
+ }
+}`
}
-func testAccMDBGreenplumClusterConfigUpdate(name, desc, environment string, deletionProtection bool) string {
- return fmt.Sprintf(greenplumVPCDependencies+`
-resource "yandex_mdb_greenplum_cluster" "foo" {
- name = "%s"
- description = "%s"
- environment = "%s"
- network_id = yandex_vpc_network.mdb-greenplum-test-net.id
- zone = "ru-central1-b"
- subnet_id = yandex_vpc_subnet.mdb-greenplum-test-subnet-b.id
- assign_public_ip = false
- version = "6.19"
+func testAccMDBGreenplumClusterConfigStep2(name string, description string) string {
+ return testAccMDBGreenplumClusterConfigStep1(name, description)
+}
- labels = { test_key_create2 : "test_value_create2" }
+func testAccMDBGreenplumClusterConfigStep3(name string, description string) string {
+ return testAccMDBGreenplumClusterConfigStep0(name, description) + `
+ pooler_config {
+ pooling_mode = "SESSION"
+ pool_size = 10
+ pool_client_idle_timeout = 0
+ }
- master_host_count = 2
- segment_host_count = 5
- segment_in_host = 1
+ greenplum_config = {
+ max_connections = 400
+ max_slot_wal_keep_size = 1048576
+ gp_workfile_limit_per_segment = 0
+ gp_workfile_limit_per_query = 0
+ gp_workfile_limit_files_per_query = 100000
+ max_prepared_transactions = 500
+ gp_workfile_compression = "true"
+ }
+}`
+}
- master_subcluster {
- resources {
- resource_preset_id = "s2.micro"
- disk_size = 24
- disk_type_id = "network-ssd"
- }
+func testAccMDBGreenplumClusterConfigStep4(name string, description string) string {
+ return testAccMDBGreenplumClusterConfigStep0(name, description) + `
+ pooler_config {
+ pooling_mode = "SESSION"
+ pool_size = 10
+ pool_client_idle_timeout = 0
}
- segment_subcluster {
- resources {
- resource_preset_id = "s2.micro"
- disk_size = 24
- disk_type_id = "network-ssd"
- }
+
+ greenplum_config = {
+ max_connections = 400
+ max_slot_wal_keep_size = 1048576
+ gp_workfile_limit_per_segment = 0
+ gp_workfile_limit_per_query = 0
+ gp_workfile_limit_files_per_query = 100000
+ max_prepared_transactions = 500
+ gp_workfile_compression = "true"
+ }
+
+ deletion_protection = true
+}`
+}
+
+func testAccMDBGreenplumClusterConfigStep5(name string, description string) string {
+ return testAccMDBGreenplumClusterConfigStep0(name, description) + `
+ pooler_config {
+ pooling_mode = "SESSION"
+ pool_size = 10
+ pool_client_idle_timeout = 0
+ }
+
+ greenplum_config = {
+ max_connections = 400
+ max_slot_wal_keep_size = 1048576
+ gp_workfile_limit_per_segment = 0
+ gp_workfile_limit_per_query = 0
+ gp_workfile_limit_files_per_query = 100000
+ max_prepared_transactions = 500
+ gp_workfile_compression = "true"
}
+
+ deletion_protection = false
access {
data_lens = true
@@ -334,13 +407,5 @@ resource "yandex_mdb_greenplum_cluster" "foo" {
hours = 22
minutes = 15
}
-
- user_name = "user1"
- user_password = "mysecurepassword"
-
- security_group_ids = [yandex_vpc_security_group.mdb-greenplum-test-sg-x.id]
-
- deletion_protection = %t
-}
-`, name, desc, environment, deletionProtection)
+}`
}
diff --git a/yandex/resource_yandex_mdb_kafka_cluster_test.go b/yandex/resource_yandex_mdb_kafka_cluster_test.go
index 9c244bb28..8eb8d95b3 100644
--- a/yandex/resource_yandex_mdb_kafka_cluster_test.go
+++ b/yandex/resource_yandex_mdb_kafka_cluster_test.go
@@ -859,10 +859,10 @@ func TestAccMDBKafkaCluster_single(t *testing.T) {
testAccCheckMDBKafkaClusterHasUsers(kfResource, map[string][]string{"alice": {"raw_events"}, "bob": {"raw_events", "final"}}),
testAccCheckMDBKafkaClusterCompressionType(&r, kafka.CompressionType_COMPRESSION_TYPE_ZSTD),
testAccCheckMDBKafkaClusterLogRetentionBytes(&r, 1073741824),
- testAccCheckMDBKafkaTopicMaxMessageBytes(kfResource, "raw_events", 16777216),
+ testAccCheckMDBKafkaTopicMaxMessageBytes(kfResource, "raw_events", 777216),
testAccCheckMDBKafkaTopicConfig(kfResource, "raw_events", &kafka.TopicConfig2_8{
CleanupPolicy: kafka.TopicConfig2_8_CLEANUP_POLICY_COMPACT_AND_DELETE,
- MaxMessageBytes: &wrappers.Int64Value{Value: 16777216},
+ MaxMessageBytes: &wrappers.Int64Value{Value: 777216},
SegmentBytes: &wrappers.Int64Value{Value: 134217728},
FlushMs: &wrappers.Int64Value{Value: 9223372036854775807},
}),
@@ -886,7 +886,7 @@ func TestAccMDBKafkaCluster_single(t *testing.T) {
testAccCheckMDBKafkaClusterLogSegmentBytes(&r, 268435456),
testAccCheckMDBKafkaTopicConfig(kfResource, "raw_events", &kafka.TopicConfig2_8{
CleanupPolicy: kafka.TopicConfig2_8_CLEANUP_POLICY_DELETE,
- MaxMessageBytes: &wrappers.Int64Value{Value: 33554432},
+ MaxMessageBytes: &wrappers.Int64Value{Value: 554432},
SegmentBytes: &wrappers.Int64Value{Value: 268435456},
FlushMs: &wrappers.Int64Value{Value: 9223372036854775807},
}),
@@ -928,7 +928,7 @@ func TestAccMDBKafkaCluster_HA(t *testing.T) {
testAccCheckMDBKafkaConfigBrokersCount(&r, 1),
testAccCheckMDBKafkaClusterCompressionType(&r, kafka.CompressionType_COMPRESSION_TYPE_ZSTD),
testAccCheckMDBKafkaClusterLogRetentionBytes(&r, 1073741824),
- testAccCheckMDBKafkaTopicConfig(kfResource, "raw_events", &kafka.TopicConfig2_8{MaxMessageBytes: &wrappers.Int64Value{Value: 16777216}, SegmentBytes: &wrappers.Int64Value{Value: 134217728}}),
+ testAccCheckMDBKafkaTopicConfig(kfResource, "raw_events", &kafka.TopicConfig2_8{MaxMessageBytes: &wrappers.Int64Value{Value: 777216}, SegmentBytes: &wrappers.Int64Value{Value: 134217728}}),
testAccCheckCreatedAtAttr(kfResource),
),
},
@@ -949,7 +949,7 @@ func TestAccMDBKafkaCluster_HA(t *testing.T) {
testAccCheckMDBKafkaClusterCompressionType(&r, kafka.CompressionType_COMPRESSION_TYPE_ZSTD),
testAccCheckMDBKafkaClusterLogRetentionBytes(&r, 2147483648),
testAccCheckMDBKafkaClusterLogSegmentBytes(&r, 268435456),
- testAccCheckMDBKafkaTopicConfig(kfResource, "raw_events", &kafka.TopicConfig2_8{MaxMessageBytes: &wrappers.Int64Value{Value: 33554432}, SegmentBytes: &wrappers.Int64Value{Value: 268435456}, RetentionBytes: &wrappers.Int64Value{Value: 1073741824}}),
+ testAccCheckMDBKafkaTopicConfig(kfResource, "raw_events", &kafka.TopicConfig2_8{MaxMessageBytes: &wrappers.Int64Value{Value: 554432}, SegmentBytes: &wrappers.Int64Value{Value: 268435456}, RetentionBytes: &wrappers.Int64Value{Value: 1073741824}}),
testAccCheckCreatedAtAttr(kfResource),
),
},
@@ -1047,7 +1047,7 @@ resource "yandex_mdb_kafka_cluster" "foo" {
replication_factor = 1
topic_config {
cleanup_policy = "CLEANUP_POLICY_COMPACT_AND_DELETE"
- max_message_bytes = 16777216
+ max_message_bytes = 777216
segment_bytes = 134217728
flush_ms = 9223372036854775807
}
@@ -1129,7 +1129,7 @@ resource "yandex_mdb_kafka_cluster" "foo" {
topic_config {
cleanup_policy = "CLEANUP_POLICY_DELETE"
- max_message_bytes = 33554432
+ max_message_bytes = 554432
segment_bytes = 268435456
flush_ms = 9223372036854775807
}
@@ -1443,7 +1443,7 @@ resource "yandex_mdb_kafka_cluster" "foo" {
replication_factor = 1
topic_config {
- max_message_bytes = 16777216
+ max_message_bytes = 777216
segment_bytes = 134217728
}
}
@@ -1522,7 +1522,7 @@ resource "yandex_mdb_kafka_cluster" "foo" {
partitions = 2
replication_factor = 1
topic_config {
- max_message_bytes = 33554432
+ max_message_bytes = 554432
segment_bytes = 268435456
retention_bytes = 1073741824
}
diff --git a/yandex/resource_yandex_mdb_redis_cluster.go b/yandex/resource_yandex_mdb_redis_cluster.go
index e3144fb42..946924268 100644
--- a/yandex/resource_yandex_mdb_redis_cluster.go
+++ b/yandex/resource_yandex_mdb_redis_cluster.go
@@ -3,6 +3,8 @@ package yandex
import (
"context"
"fmt"
+ "github.com/yandex-cloud/go-genproto/yandex/cloud/operation"
+ "google.golang.org/protobuf/types/known/wrapperspb"
"log"
"time"
@@ -96,6 +98,16 @@ func resourceYandexMDBRedisCluster() *schema.Resource {
Optional: true,
Computed: true,
},
+ "client_output_buffer_limit_normal": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
+ "client_output_buffer_limit_pubsub": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
"version": {
Type: schema.TypeString,
Required: true,
@@ -148,6 +160,16 @@ func resourceYandexMDBRedisCluster() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
+ "replica_priority": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Default: defaultReplicaPriority,
+ },
+ "assign_public_ip": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ },
},
},
},
@@ -265,7 +287,7 @@ func resourceYandexMDBRedisClusterCreate(d *schema.ResourceData, meta interface{
}
d.SetId(md.ClusterId)
- log.Printf("[DEBUG] Created Redis Cluster %q", md.ClusterId)
+ log.Printf("[DEBUG] Creating Redis Cluster %q", md.ClusterId)
err = op.Wait(ctx)
if err != nil {
@@ -292,6 +314,7 @@ func resourceYandexMDBRedisClusterCreate(d *schema.ResourceData, meta interface{
func prepareCreateRedisRequest(d *schema.ResourceData, meta *Config) (*redis.CreateClusterRequest, error) {
labels, err := expandLabels(d.Get("labels"))
+ sharded := d.Get("sharded").(bool)
if err != nil {
return nil, fmt.Errorf("Error while expanding labels on Redis Cluster create: %s", err)
@@ -350,7 +373,7 @@ func prepareCreateRedisRequest(d *schema.ResourceData, meta *Config) (*redis.Cre
ConfigSpec: configSpec,
HostSpecs: hosts,
Labels: labels,
- Sharded: d.Get("sharded").(bool),
+ Sharded: sharded,
TlsEnabled: &wrappers.BoolValue{Value: d.Get("tls_enabled").(bool)},
PersistenceMode: persistenceMode,
SecurityGroupIds: securityGroupIds,
@@ -402,14 +425,16 @@ func resourceYandexMDBRedisClusterRead(d *schema.ResourceData, meta interface{})
err = d.Set("config", []map[string]interface{}{
{
- "timeout": conf.timeout,
- "maxmemory_policy": conf.maxmemoryPolicy,
- "notify_keyspace_events": conf.notifyKeyspaceEvents,
- "slowlog_log_slower_than": conf.slowlogLogSlowerThan,
- "slowlog_max_len": conf.slowlogMaxLen,
- "databases": conf.databases,
- "version": conf.version,
- "password": password,
+ "timeout": conf.timeout,
+ "maxmemory_policy": conf.maxmemoryPolicy,
+ "notify_keyspace_events": conf.notifyKeyspaceEvents,
+ "slowlog_log_slower_than": conf.slowlogLogSlowerThan,
+ "slowlog_max_len": conf.slowlogMaxLen,
+ "databases": conf.databases,
+ "version": conf.version,
+ "password": password,
+ "client_output_buffer_limit_normal": conf.clientOutputBufferLimitNormal,
+ "client_output_buffer_limit_pubsub": conf.clientOutputBufferLimitPubsub,
},
})
if err != nil {
@@ -426,9 +451,9 @@ func resourceYandexMDBRedisClusterRead(d *schema.ResourceData, meta interface{})
return err
}
- sortRedisHosts(hosts, dHosts)
+ sortRedisHosts(cluster.Sharded, hosts, dHosts)
- hs, err := flattenRedisHosts(hosts)
+ hs, err := flattenRedisHosts(cluster.Sharded, hosts)
if err != nil {
return err
}
@@ -458,18 +483,12 @@ func resourceYandexMDBRedisClusterUpdate(d *schema.ResourceData, meta interface{
return fmt.Errorf("Changing disk_type_id is not supported for Redis Cluster. Id: %v", d.Id())
}
- if d.HasChange("name") || d.HasChange("labels") || d.HasChange("description") || d.HasChange("resources") ||
- d.HasChange("config") || d.HasChange("security_group_ids") || d.HasChange("deletion_protection") ||
- d.HasChange("persistence_mode") {
- if err := updateRedisClusterParams(d, meta); err != nil {
- return err
- }
+ if err := updateRedisClusterParams(d, meta); err != nil {
+ return err
}
- if d.HasChange("host") {
- if err := updateRedisClusterHosts(d, meta); err != nil {
- return err
- }
+ if err := updateRedisClusterHosts(d, meta); err != nil {
+ return err
}
d.Partial(false)
@@ -581,6 +600,8 @@ func updateRedisClusterParams(d *schema.ResourceData, meta interface{}) error {
"slowlog_max_len",
"databases",
"version",
+ "client_output_buffer_limit_normal",
+ "client_output_buffer_limit_pubsub",
}
for _, field := range fields {
fullPath := "config_spec." + updateFieldConfigName + "." + field
@@ -625,6 +646,10 @@ func updateRedisClusterParams(d *schema.ResourceData, meta interface{}) error {
})
}
+ if len(req.UpdateMask.Paths) == 0 {
+ return nil
+ }
+
err := makeRedisClusterUpdateRequest(req, d, meta)
if err != nil {
return err
@@ -636,38 +661,15 @@ func updateRedisClusterParams(d *schema.ResourceData, meta interface{}) error {
return nil
}
-func updateRedisClusterHosts(d *schema.ResourceData, meta interface{}) error {
- config := meta.(*Config)
- ctx, cancel := context.WithTimeout(context.Background(), d.Timeout(schema.TimeoutRead))
- defer cancel()
-
- sharded := d.Get("sharded").(bool)
-
- currHosts, err := listRedisHosts(ctx, config, d)
- if err != nil {
- return err
- }
-
- targetHosts, err := expandRedisHosts(d)
- if err != nil {
- return fmt.Errorf("Error while expanding hosts on Redis Cluster create: %s", err)
- }
-
- currShards, err := listRedisShards(ctx, config, d)
- if err != nil {
- return err
- }
-
- toDelete, toAdd := redisHostsDiff(currHosts, targetHosts)
-
- ctx, cancel = context.WithTimeout(context.Background(), d.Timeout(schema.TimeoutUpdate))
- defer cancel()
-
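+// addHosts adds the hosts from toAdd, grouped by shard; in a sharded cluster a shard
+// that does not exist yet is created before its hosts are added.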
+func addHosts(ctx context.Context, d *schema.ResourceData, config *Config, sharded bool, currShards []*redis.Shard,
+ toAdd map[string][]*redis.HostSpec) error {
+ var err error
for shardName, specs := range toAdd {
shardExists := false
for _, s := range currShards {
if s.Name == shardName {
shardExists = true
+ break
}
}
if sharded && !shardExists {
@@ -682,12 +684,18 @@ func updateRedisClusterHosts(d *schema.ResourceData, meta interface{}) error {
}
}
}
+ return nil
+}
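+// deleteHosts removes the hosts from toDelete; in a sharded cluster the whole shard is
+// deleted once no target host belongs to it anymore.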
+func deleteHosts(ctx context.Context, d *schema.ResourceData, config *Config, sharded bool, targetHosts []*redis.HostSpec,
+ toDelete map[string][]string) error {
+ var err error
for shardName, fqdns := range toDelete {
deleteShard := true
for _, th := range targetHosts {
if th.ShardName == shardName {
deleteShard = false
+ break
}
}
if sharded && deleteShard {
@@ -702,6 +710,57 @@ func updateRedisClusterHosts(d *schema.ResourceData, meta interface{}) error {
}
}
}
+ return nil
+}
+
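+// updateRedisClusterHosts reconciles the cluster host set: it diffs current and target
+// hosts, then adds, updates and deletes hosts in that order.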
+func updateRedisClusterHosts(d *schema.ResourceData, meta interface{}) error {
+ if !d.HasChange("host") {
+ return nil
+ }
+
+ config := meta.(*Config)
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout(schema.TimeoutRead))
+ defer cancel()
+
+ sharded := d.Get("sharded").(bool)
+
+ currHosts, err := listRedisHosts(ctx, config, d)
+ if err != nil {
+ return err
+ }
+
+ targetHosts, err := expandRedisHosts(d)
+ if err != nil {
+		return fmt.Errorf("Error while expanding hosts on Redis Cluster update: %s", err)
+ }
+
+ currShards, err := listRedisShards(ctx, config, d)
+ if err != nil {
+ return err
+ }
+
+ toDelete, toUpdate, toAdd, err := redisHostsDiff(sharded, currHosts, targetHosts)
+ if err != nil {
+ return err
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), d.Timeout(schema.TimeoutUpdate))
+ defer cancel()
+
+ err = addHosts(ctx, d, config, sharded, currShards, toAdd)
+ if err != nil {
+ return err
+ }
+
+ err = updateHosts(ctx, d, config, toUpdate)
+ if err != nil {
+ return err
+ }
+
+ err = deleteHosts(ctx, d, config, sharded, targetHosts, toDelete)
+ if err != nil {
+ return err
+ }
return nil
}
@@ -815,6 +874,82 @@ func createRedisHosts(ctx context.Context, config *Config, d *schema.ResourceDat
return nil
}
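+// HostUpdateInfo describes an in-place change of a single host together with the
+// field mask that is sent to the API.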
+type HostUpdateInfo struct {
+ HostName string
+ ReplicaPriority *wrappers.Int64Value
+ AssignPublicIp bool
+ UpdateMask *field_mask.FieldMask
+}
+
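+// getHostUpdateInfo compares the old and new host attributes and returns the pending
+// update, or nil when nothing changed; replica priority changes are rejected for
+// sharded clusters.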
+func getHostUpdateInfo(sharded bool, fqdn string, oldPriority *wrapperspb.Int64Value, oldAssignPublicIp bool,
+ newPriority *wrapperspb.Int64Value, newAssignPublicIp bool) (*HostUpdateInfo, error) {
+ var maskPaths []string
+ if newPriority != nil && oldPriority != nil && oldPriority.Value != newPriority.Value {
+ if sharded {
+ return nil, fmt.Errorf("modifying replica priority in hosts of sharded clusters is not supported: %s", fqdn)
+ }
+ maskPaths = append(maskPaths, "replica_priority")
+ }
+ if oldAssignPublicIp != newAssignPublicIp {
+ maskPaths = append(maskPaths, "assign_public_ip")
+ }
+
+ if len(maskPaths) == 0 {
+ return nil, nil
+ }
+ res := &HostUpdateInfo{
+ HostName: fqdn,
+ ReplicaPriority: newPriority,
+ AssignPublicIp: newAssignPublicIp,
+ UpdateMask: &field_mask.FieldMask{Paths: maskPaths},
+ }
+ return res, nil
+}
+
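+// updateRedisHost sends an UpdateHosts request for one host and waits for the
+// operation to complete.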
+func updateRedisHost(ctx context.Context, config *Config, d *schema.ResourceData, host *HostUpdateInfo) error {
+ request := &redis.UpdateClusterHostsRequest{
+ ClusterId: d.Id(),
+ UpdateHostSpecs: []*redis.UpdateHostSpec{
+ {
+ HostName: host.HostName,
+ AssignPublicIp: host.AssignPublicIp,
+ ReplicaPriority: host.ReplicaPriority,
+ UpdateMask: host.UpdateMask,
+ },
+ },
+ }
+ op, err := retryConflictingOperation(ctx, config, func() (*operation.Operation, error) {
+ log.Printf("[DEBUG] Sending Redis cluster update hosts request: %+v", request)
+ return config.sdk.MDB().Redis().Cluster().UpdateHosts(ctx, request)
+ })
+ if err != nil {
+ return fmt.Errorf("error while requesting API to update host for Redis Cluster %q - host %v: %s", d.Id(), host.HostName, err)
+ }
+
+ err = op.Wait(ctx)
+ if err != nil {
+ return fmt.Errorf("error while updating host for Redis Cluster %q - host %v: %s", d.Id(), host.HostName, err)
+ }
+
+ if _, err := op.Response(); err != nil {
+ return fmt.Errorf("updating host for Redis Cluster %q - host %v failed: %s", d.Id(), host.HostName, err)
+ }
+
+ return nil
+}
+
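+// updateHosts applies the per-host updates produced by redisHostsDiff, one host at a time.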
+func updateHosts(ctx context.Context, d *schema.ResourceData, config *Config, specs map[string][]*HostUpdateInfo) error {
+ for _, hostInfos := range specs {
+ for _, hostInfo := range hostInfos {
+ if err := updateRedisHost(ctx, config, d, hostInfo); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
func deleteRedisShard(ctx context.Context, config *Config, d *schema.ResourceData, shardName string) error {
op, err := config.sdk.WrapOperation(
config.sdk.MDB().Redis().Cluster().DeleteShard(ctx, &redis.DeleteClusterShardRequest{
diff --git a/yandex/resource_yandex_mdb_redis_cluster_test.go b/yandex/resource_yandex_mdb_redis_cluster_test.go
index 8d0b41f14..c450e8684 100644
--- a/yandex/resource_yandex_mdb_redis_cluster_test.go
+++ b/yandex/resource_yandex_mdb_redis_cluster_test.go
@@ -106,6 +106,14 @@ func TestAccMDBRedisCluster_full_networkssd(t *testing.T) {
updatedFlavor := "hm1.micro"
tlsEnabled := false
persistenceMode := "ON"
+ normalLimits := "16777215 8388607 61"
+ pubsubLimits := "16777214 8388606 62"
+ normalUpdatedLimits := "16777212 8388605 63"
+ pubsubUpdatedLimits := "33554432 16777216 60"
+ pubIpSet := true
+ pubIpUnset := false
+ baseReplicaPriority := 100
+ updatedReplicaPriority := 61
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -115,15 +123,19 @@ func TestAccMDBRedisCluster_full_networkssd(t *testing.T) {
// Create Redis Cluster
{
Config: testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRESTABLE", true,
- nil, "", version, baseFlavor, baseDiskSize, ""),
+ nil, "", version, baseFlavor, baseDiskSize, "", normalLimits, pubsubLimits,
+ []*bool{nil}, []*int{nil}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 1, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "name", redisName),
resource.TestCheckResourceAttr(redisResource, "folder_id", folderID),
resource.TestCheckResourceAttr(redisResource, "description", redisDesc),
resource.TestCheckResourceAttrSet(redisResource, "host.0.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.0.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.0.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
testAccCheckMDBRedisClusterHasConfig(&r, "ALLKEYS_LRU", 100,
- "Elg", 5000, 10, 15, version),
+ "Elg", 5000, 10, 15, version,
+ normalLimits, pubsubLimits),
testAccCheckMDBRedisClusterHasResources(&r, baseFlavor, baseDiskSize, diskTypeId),
testAccCheckMDBRedisClusterContainsLabel(&r, "test_key", "test_value"),
testAccCheckCreatedAtAttr(redisResource),
@@ -138,7 +150,8 @@ func TestAccMDBRedisCluster_full_networkssd(t *testing.T) {
// uncheck 'deletion_protection'
{
Config: testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRESTABLE", false,
- nil, "", version, baseFlavor, baseDiskSize, ""),
+ nil, "", version, baseFlavor, baseDiskSize, "", normalLimits, pubsubLimits,
+ []*bool{nil}, []*int{nil}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 1, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "deletion_protection", "false"),
@@ -148,7 +161,8 @@ func TestAccMDBRedisCluster_full_networkssd(t *testing.T) {
// check 'deletion_protection'
{
Config: testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRESTABLE", true,
- nil, "", version, baseFlavor, baseDiskSize, ""),
+ nil, "", version, baseFlavor, baseDiskSize, "", normalLimits, pubsubLimits,
+ []*bool{nil}, []*int{nil}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 1, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "deletion_protection", "true"),
@@ -158,13 +172,15 @@ func TestAccMDBRedisCluster_full_networkssd(t *testing.T) {
// check 'deletion_protection
{
Config: testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRODUCTION", true,
- nil, "", version, baseFlavor, baseDiskSize, ""),
+ nil, "", version, baseFlavor, baseDiskSize, "", normalLimits, pubsubLimits,
+ []*bool{nil}, []*int{nil}),
ExpectError: regexp.MustCompile(".*The operation was rejected because cluster has 'deletion_protection' = ON.*"),
},
// uncheck 'deletion_protection'
{
Config: testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRESTABLE", false,
- nil, "", version, baseFlavor, baseDiskSize, ""),
+ nil, "", version, baseFlavor, baseDiskSize, "", normalLimits, pubsubLimits,
+ []*bool{nil}, []*int{nil}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 1, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "deletion_protection", "false"),
@@ -174,15 +190,19 @@ func TestAccMDBRedisCluster_full_networkssd(t *testing.T) {
// Change some options
{
Config: testAccMDBRedisClusterConfigUpdated(redisName, redisDesc2, &tlsEnabled, persistenceMode,
- version, updatedFlavor, updatedDiskSize, diskTypeId),
+ version, updatedFlavor, updatedDiskSize, diskTypeId, normalUpdatedLimits, pubsubUpdatedLimits,
+ []*bool{&pubIpSet}, []*int{&updatedReplicaPriority}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 1, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "name", redisName),
resource.TestCheckResourceAttr(redisResource, "folder_id", folderID),
resource.TestCheckResourceAttr(redisResource, "description", redisDesc2),
resource.TestCheckResourceAttrSet(redisResource, "host.0.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.0.assign_public_ip", fmt.Sprintf("%t", pubIpSet)),
+ resource.TestCheckResourceAttr(redisResource, "host.0.replica_priority", fmt.Sprintf("%d", updatedReplicaPriority)),
testAccCheckMDBRedisClusterHasConfig(&r, "VOLATILE_LFU", 200,
- "Ex", 6000, 12, 17, version),
+ "Ex", 6000, 12, 17, version,
+ normalUpdatedLimits, pubsubUpdatedLimits),
testAccCheckMDBRedisClusterHasResources(&r, updatedFlavor, updatedDiskSize, diskTypeId),
testAccCheckMDBRedisClusterContainsLabel(&r, "new_key", "new_value"),
testAccCheckCreatedAtAttr(redisResource),
@@ -194,16 +214,22 @@ func TestAccMDBRedisCluster_full_networkssd(t *testing.T) {
// Add new host
{
Config: testAccMDBRedisClusterConfigAddedHost(redisName, redisDesc2, nil, persistenceMode,
- version, updatedFlavor, updatedDiskSize, ""),
+ version, updatedFlavor, updatedDiskSize, "",
+ []*bool{&pubIpUnset, &pubIpSet}, []*int{nil, &updatedReplicaPriority}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 2, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "name", redisName),
resource.TestCheckResourceAttr(redisResource, "folder_id", folderID),
resource.TestCheckResourceAttr(redisResource, "description", redisDesc2),
resource.TestCheckResourceAttrSet(redisResource, "host.0.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.0.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.0.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
resource.TestCheckResourceAttrSet(redisResource, "host.1.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.1.assign_public_ip", fmt.Sprintf("%t", pubIpSet)),
+ resource.TestCheckResourceAttr(redisResource, "host.1.replica_priority", fmt.Sprintf("%d", updatedReplicaPriority)),
testAccCheckMDBRedisClusterHasConfig(&r, "VOLATILE_LFU", 200,
- "Ex", 6000, 12, 17, version),
+ "Ex", 6000, 12, 17, version,
+ normalUpdatedLimits, pubsubUpdatedLimits),
testAccCheckMDBRedisClusterHasResources(&r, updatedFlavor, updatedDiskSize, diskTypeId),
testAccCheckMDBRedisClusterContainsLabel(&r, "new_key", "new_value"),
testAccCheckCreatedAtAttr(redisResource),
@@ -230,6 +256,14 @@ func TestAccMDBRedisCluster_full_localssd(t *testing.T) {
tlsEnabled := true
persistenceMode := "OFF"
persistenceModeChanged := "ON"
+ normalLimits := "16777215 8388607 61"
+ pubsubLimits := "16777214 8388606 62"
+ normalUpdatedLimits := "16777212 8388605 63"
+ pubsubUpdatedLimits := "33554432 16777216 60"
+ pubIpSet := true
+ pubIpUnset := false
+ baseReplicaPriority := 100
+ updatedReplicaPriority := 51
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -239,15 +273,25 @@ func TestAccMDBRedisCluster_full_localssd(t *testing.T) {
// Create Redis Cluster
{
Config: testAccMDBRedisClusterConfigMain(redisName, redisDesc, "PRESTABLE", false,
- &tlsEnabled, persistenceMode, version, baseFlavor, baseDiskSize, diskTypeId),
+ &tlsEnabled, persistenceMode, version, baseFlavor, baseDiskSize, diskTypeId, normalLimits, pubsubLimits,
+ []*bool{&pubIpUnset, &pubIpSet, &pubIpUnset}, []*int{&baseReplicaPriority, &updatedReplicaPriority, &baseReplicaPriority}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 3, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "name", redisName),
resource.TestCheckResourceAttr(redisResource, "folder_id", folderID),
resource.TestCheckResourceAttr(redisResource, "description", redisDesc),
resource.TestCheckResourceAttrSet(redisResource, "host.0.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.0.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.0.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
+ resource.TestCheckResourceAttrSet(redisResource, "host.1.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.1.assign_public_ip", fmt.Sprintf("%t", pubIpSet)),
+ resource.TestCheckResourceAttr(redisResource, "host.1.replica_priority", fmt.Sprintf("%d", updatedReplicaPriority)),
+ resource.TestCheckResourceAttrSet(redisResource, "host.2.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.2.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.2.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
testAccCheckMDBRedisClusterHasConfig(&r, "ALLKEYS_LRU", 100,
- "Elg", 5000, 10, 15, version),
+ "Elg", 5000, 10, 15, version,
+ normalLimits, pubsubLimits),
testAccCheckMDBRedisClusterHasResources(&r, baseFlavor, baseDiskSize, diskTypeId),
testAccCheckMDBRedisClusterContainsLabel(&r, "test_key", "test_value"),
resource.TestCheckResourceAttr(redisResource, "maintenance_window.0.type", "WEEKLY"),
@@ -260,15 +304,25 @@ func TestAccMDBRedisCluster_full_localssd(t *testing.T) {
// Change some options
{
Config: testAccMDBRedisClusterConfigUpdated(redisName, redisDesc2, &tlsEnabled, persistenceModeChanged,
- version, baseFlavor, baseDiskSize, diskTypeId),
+ version, baseFlavor, baseDiskSize, diskTypeId, normalUpdatedLimits, pubsubUpdatedLimits,
+ []*bool{&pubIpUnset, &pubIpSet, &pubIpSet}, []*int{nil, &baseReplicaPriority, &updatedReplicaPriority}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 3, tlsEnabled, persistenceModeChanged),
resource.TestCheckResourceAttr(redisResource, "name", redisName),
resource.TestCheckResourceAttr(redisResource, "folder_id", folderID),
resource.TestCheckResourceAttr(redisResource, "description", redisDesc2),
resource.TestCheckResourceAttrSet(redisResource, "host.0.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.0.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.0.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
+ resource.TestCheckResourceAttrSet(redisResource, "host.1.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.1.assign_public_ip", fmt.Sprintf("%t", pubIpSet)),
+ resource.TestCheckResourceAttr(redisResource, "host.1.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
+ resource.TestCheckResourceAttrSet(redisResource, "host.2.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.2.assign_public_ip", fmt.Sprintf("%t", pubIpSet)),
+ resource.TestCheckResourceAttr(redisResource, "host.2.replica_priority", fmt.Sprintf("%d", updatedReplicaPriority)),
testAccCheckMDBRedisClusterHasConfig(&r, "VOLATILE_LFU", 200,
- "Ex", 6000, 12, 17, version),
+ "Ex", 6000, 12, 17, version,
+ normalUpdatedLimits, pubsubUpdatedLimits),
testAccCheckMDBRedisClusterHasResources(&r, baseFlavor, baseDiskSize, diskTypeId),
testAccCheckMDBRedisClusterContainsLabel(&r, "new_key", "new_value"),
testAccCheckCreatedAtAttr(redisResource),
@@ -279,16 +333,28 @@ func TestAccMDBRedisCluster_full_localssd(t *testing.T) {
// Add new host
{
Config: testAccMDBRedisClusterConfigAddedHost(redisName, redisDesc2, &tlsEnabled, persistenceMode,
- version, baseFlavor, baseDiskSize, diskTypeId),
+ version, baseFlavor, baseDiskSize, diskTypeId,
+ []*bool{&pubIpSet, nil, nil, nil}, []*int{&baseReplicaPriority, &updatedReplicaPriority, nil, &updatedReplicaPriority}),
Check: resource.ComposeTestCheckFunc(
testAccCheckMDBRedisClusterExists(redisResource, &r, 4, tlsEnabled, persistenceMode),
resource.TestCheckResourceAttr(redisResource, "name", redisName),
resource.TestCheckResourceAttr(redisResource, "folder_id", folderID),
resource.TestCheckResourceAttr(redisResource, "description", redisDesc2),
resource.TestCheckResourceAttrSet(redisResource, "host.0.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.0.assign_public_ip", fmt.Sprintf("%t", pubIpSet)),
+ resource.TestCheckResourceAttr(redisResource, "host.0.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
resource.TestCheckResourceAttrSet(redisResource, "host.1.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.1.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.1.replica_priority", fmt.Sprintf("%d", updatedReplicaPriority)),
+ resource.TestCheckResourceAttrSet(redisResource, "host.2.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.2.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.2.replica_priority", fmt.Sprintf("%d", baseReplicaPriority)),
+ resource.TestCheckResourceAttrSet(redisResource, "host.3.fqdn"),
+ resource.TestCheckResourceAttr(redisResource, "host.3.assign_public_ip", fmt.Sprintf("%t", pubIpUnset)),
+ resource.TestCheckResourceAttr(redisResource, "host.3.replica_priority", fmt.Sprintf("%d", updatedReplicaPriority)),
testAccCheckMDBRedisClusterHasConfig(&r, "VOLATILE_LFU", 200,
- "Ex", 6000, 12, 17, version),
+ "Ex", 6000, 12, 17, version,
+ normalUpdatedLimits, pubsubUpdatedLimits),
testAccCheckMDBRedisClusterHasResources(&r, baseFlavor, baseDiskSize, diskTypeId),
testAccCheckMDBRedisClusterContainsLabel(&r, "new_key", "new_value"),
testAccCheckCreatedAtAttr(redisResource),
@@ -350,7 +416,6 @@ func TestAccMDBRedisCluster_sharded(t *testing.T) {
testAccCheckCreatedAtAttr(redisResourceSharded),
),
},
- mdbRedisClusterImportStep(redisResourceSharded),
},
})
}
@@ -457,30 +522,38 @@ func testAccCheckMDBRedisClusterHasShards(r *redis.Cluster, shards []string) res
}
func testAccCheckMDBRedisClusterHasConfig(r *redis.Cluster, maxmemoryPolicy string, timeout int64,
- notifyKeyspaceEvents string, slowlogLogSlowerThan int64, slowlogMaxLen int64, databases int64,
- version string) resource.TestCheckFunc {
+ notifyKeyspaceEvents string, slowlogLogSlowerThan, slowlogMaxLen, databases int64,
+ version, clientOutputBufferLimitNormal, clientOutputBufferLimitPubsub string) resource.TestCheckFunc {
return func(s *terraform.State) error {
c := extractRedisConfig(r.Config)
if c.maxmemoryPolicy != maxmemoryPolicy {
- return fmt.Errorf("Expected config.maxmemory_policy '%s', got '%s'", maxmemoryPolicy, c.maxmemoryPolicy)
+ return fmt.Errorf("expected config.maxmemory_policy '%s', got '%s'", maxmemoryPolicy, c.maxmemoryPolicy)
}
if c.timeout != timeout {
- return fmt.Errorf("Expected config.timeout '%d', got '%d'", timeout, c.timeout)
+ return fmt.Errorf("expected config.timeout '%d', got '%d'", timeout, c.timeout)
}
if c.notifyKeyspaceEvents != notifyKeyspaceEvents {
- return fmt.Errorf("Expected config.notify_keyspace_events '%s', got '%s'", notifyKeyspaceEvents, c.notifyKeyspaceEvents)
+ return fmt.Errorf("expected config.notify_keyspace_events '%s', got '%s'", notifyKeyspaceEvents, c.notifyKeyspaceEvents)
}
if c.slowlogLogSlowerThan != slowlogLogSlowerThan {
- return fmt.Errorf("Expected config.slowlog_log_slower_than '%d', got '%d'", slowlogLogSlowerThan, c.slowlogLogSlowerThan)
+ return fmt.Errorf("expected config.slowlog_log_slower_than '%d', got '%d'", slowlogLogSlowerThan, c.slowlogLogSlowerThan)
}
if c.slowlogMaxLen != slowlogMaxLen {
- return fmt.Errorf("Expected config.slowlog_max_len '%d', got '%d'", slowlogMaxLen, c.slowlogMaxLen)
+ return fmt.Errorf("expected config.slowlog_max_len '%d', got '%d'", slowlogMaxLen, c.slowlogMaxLen)
}
if c.databases != databases {
- return fmt.Errorf("Expected config.databases '%d', got '%d'", databases, c.databases)
+ return fmt.Errorf("expected config.databases '%d', got '%d'", databases, c.databases)
}
if c.version != version {
- return fmt.Errorf("Expected config.version '%s', got '%s'", version, c.version)
+ return fmt.Errorf("expected config.version '%s', got '%s'", version, c.version)
+ }
+	if c.clientOutputBufferLimitNormal != clientOutputBufferLimitNormal {
+		return fmt.Errorf("expected config.client_output_buffer_limit_normal '%s', got '%s'",
+			clientOutputBufferLimitNormal, c.clientOutputBufferLimitNormal)
+	}
+	if c.clientOutputBufferLimitPubsub != clientOutputBufferLimitPubsub {
+		return fmt.Errorf("expected config.client_output_buffer_limit_pubsub '%s', got '%s'",
+			clientOutputBufferLimitPubsub, c.clientOutputBufferLimitPubsub)
}
return nil
}
@@ -517,6 +590,17 @@ func testAccCheckMDBRedisClusterContainsLabel(r *redis.Cluster, key string, valu
}
}
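+// getPublicIPStr renders an optional assign_public_ip attribute for the generated test HCL;
+// a nil flag omits the attribute.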
+func getPublicIPStr(ipFlag *bool) string {
+ if ipFlag == nil {
+ return ""
+ }
+ ipFlagStr := "false"
+ if *ipFlag {
+ ipFlagStr = "true"
+ }
+ return fmt.Sprintf("assign_public_ip = %s", ipFlagStr)
+}
+
// TODO: add more zones when v2 platform becomes available.
const redisVPCDependencies = `
resource "yandex_vpc_network" "foo" {}
@@ -565,33 +649,25 @@ resource "yandex_vpc_security_group" "sg-y" {
}
`
-func getSentinelHosts(diskTypeId string) string {
- res := ""
- if diskTypeId == "local-ssd" {
- res = `
- host {
- zone = "ru-central1-c"
- subnet_id = "${yandex_vpc_subnet.foo.id}"
- }
-
- host {
- zone = "ru-central1-c"
- subnet_id = "${yandex_vpc_subnet.foo.id}"
- }
-
- host {
- zone = "ru-central1-c"
- subnet_id = "${yandex_vpc_subnet.foo.id}"
- }
-`
- } else {
- res = `
+func getSentinelHosts(diskTypeId string, publicIPFlags []*bool, replicaPriorities []*int) string {
+ host := `
host {
zone = "ru-central1-c"
subnet_id = "${yandex_vpc_subnet.foo.id}"
+ %s
+ %s
}
`
+ hosts := []string{host}
+ res := ""
+ if diskTypeId == "local-ssd" {
+ hosts = append(hosts, host, host)
+ }
+
+ for i, h := range hosts {
+ res += fmt.Sprintf(h, getPublicIPStr(publicIPFlags[i]), getReplicaPriorityStr(replicaPriorities[i]))
}
+
return res
}
@@ -683,8 +759,33 @@ func getPersistenceMode(persistenceMode string) string {
return res
}
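+// getNormalLimitStr, getPubsubLimitStr and getReplicaPriorityStr render optional config
+// attributes for the generated test HCL; empty or nil values omit the attribute.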
+func getNormalLimitStr(limit string) string {
+ res := ""
+ if limit != "" {
+ res = fmt.Sprintf(`client_output_buffer_limit_normal = "%s"`, limit)
+ }
+ return res
+}
+
+func getPubsubLimitStr(limit string) string {
+ res := ""
+ if limit != "" {
+ res = fmt.Sprintf(`client_output_buffer_limit_pubsub = "%s"`, limit)
+ }
+ return res
+}
+
+func getReplicaPriorityStr(priority *int) string {
+ res := ""
+ if priority != nil {
+ res = fmt.Sprintf(`replica_priority = "%d"`, *priority)
+ }
+ return res
+}
+
func testAccMDBRedisClusterConfigMain(name, desc, environment string, deletionProtection bool, tlsEnabled *bool,
- persistenceMode, version, flavor string, diskSize int, diskTypeId string) string {
+ persistenceMode, version, flavor string, diskSize int, diskTypeId, normalLimits, pubsubLimits string,
+ publicIPFlags []*bool, replicaPriorities []*int) string {
return fmt.Sprintf(redisVPCDependencies+`
resource "yandex_mdb_redis_cluster" "foo" {
name = "%s"
@@ -707,6 +808,8 @@ resource "yandex_mdb_redis_cluster" "foo" {
slowlog_max_len = 10
databases = 15
version = "%s"
+ %s
+ %s
}
resources {
@@ -727,12 +830,13 @@ resource "yandex_mdb_redis_cluster" "foo" {
deletion_protection = %t
}
-`, name, desc, environment, getTlsEnabled(tlsEnabled), getPersistenceMode(persistenceMode), version, flavor, diskSize, getDiskTypeStr(diskTypeId),
- getSentinelHosts(diskTypeId), deletionProtection)
+`, name, desc, environment, getTlsEnabled(tlsEnabled), getPersistenceMode(persistenceMode), version,
+ getNormalLimitStr(normalLimits), getPubsubLimitStr(pubsubLimits), flavor, diskSize, getDiskTypeStr(diskTypeId),
+ getSentinelHosts(diskTypeId, publicIPFlags, replicaPriorities), deletionProtection)
}
func testAccMDBRedisClusterConfigUpdated(name, desc string, tlsEnabled *bool, persistenceMode, version, flavor string,
- diskSize int, diskTypeId string) string {
+ diskSize int, diskTypeId, normalLimits, pubsubLimits string, publicIPFlags []*bool, replicaPriorities []*int) string {
return fmt.Sprintf(redisVPCDependencies+`
resource "yandex_mdb_redis_cluster" "foo" {
name = "%s"
@@ -755,6 +859,8 @@ resource "yandex_mdb_redis_cluster" "foo" {
slowlog_max_len = 12
databases = 17
version = "%s"
+ %s
+ %s
}
resources {
@@ -771,12 +877,18 @@ resource "yandex_mdb_redis_cluster" "foo" {
type = "ANYTIME"
}
}
-`, name, desc, getTlsEnabled(tlsEnabled), getPersistenceMode(persistenceMode), version, flavor, diskSize,
- getDiskTypeStr(diskTypeId), getSentinelHosts(diskTypeId))
+`, name, desc, getTlsEnabled(tlsEnabled), getPersistenceMode(persistenceMode), version,
+ getNormalLimitStr(normalLimits), getPubsubLimitStr(pubsubLimits),
+ flavor, diskSize, getDiskTypeStr(diskTypeId), getSentinelHosts(diskTypeId, publicIPFlags, replicaPriorities))
}
func testAccMDBRedisClusterConfigAddedHost(name, desc string, tlsEnabled *bool, persistenceMode, version, flavor string,
- diskSize int, diskTypeId string) string {
+ diskSize int, diskTypeId string, publicIPFlags []*bool, replicaPriorities []*int) string {
+ ipCount := len(publicIPFlags)
+ newPublicIPFlag := publicIPFlags[ipCount-1]
+ oldPublicIPFlags := publicIPFlags[:ipCount-1]
+ newReplicaPriority := replicaPriorities[ipCount-1]
+ oldReplicaPriorities := replicaPriorities[:ipCount-1]
return fmt.Sprintf(redisVPCDependencies+`
resource "yandex_mdb_redis_cluster" "foo" {
name = "%s"
@@ -812,12 +924,15 @@ resource "yandex_mdb_redis_cluster" "foo" {
host {
zone = "ru-central1-c"
subnet_id = "${yandex_vpc_subnet.foo.id}"
+ %s
+ %s
}
security_group_ids = ["${yandex_vpc_security_group.sg-y.id}"]
}
`, name, desc, getTlsEnabled(tlsEnabled), getPersistenceMode(persistenceMode), version, flavor, diskSize,
- getDiskTypeStr(diskTypeId), getSentinelHosts(diskTypeId))
+ getDiskTypeStr(diskTypeId), getSentinelHosts(diskTypeId, oldPublicIPFlags, oldReplicaPriorities),
+ getPublicIPStr(newPublicIPFlag), getReplicaPriorityStr(newReplicaPriority))
}
func testAccMDBRedisShardedClusterConfig(name, desc, persistenceMode, version string, diskSize int, diskTypeId string) string {
@@ -843,7 +958,8 @@ resource "yandex_mdb_redis_cluster" "bar" {
%s
}
-`, name, desc, getPersistenceMode(persistenceMode), version, diskSize, getDiskTypeStr(diskTypeId), getShardedHosts(diskTypeId, "third"))
+`, name, desc, getPersistenceMode(persistenceMode), version, diskSize, getDiskTypeStr(diskTypeId),
+ getShardedHosts(diskTypeId, "third"))
}
func testAccMDBRedisShardedClusterConfigUpdated(name, desc, persistenceMode string, version string, diskSize int,
@@ -870,5 +986,6 @@ resource "yandex_mdb_redis_cluster" "bar" {
%s
}
-`, name, desc, getPersistenceMode(persistenceMode), version, diskSize, getDiskTypeStr(diskTypeId), getShardedHosts(diskTypeId, "new"))
+`, name, desc, getPersistenceMode(persistenceMode), version, diskSize, getDiskTypeStr(diskTypeId),
+ getShardedHosts(diskTypeId, "new"))
}
diff --git a/yandex/resource_yandex_mdb_sqlserver_cluster.go b/yandex/resource_yandex_mdb_sqlserver_cluster.go
index abe082d75..ddc9fb359 100644
--- a/yandex/resource_yandex_mdb_sqlserver_cluster.go
+++ b/yandex/resource_yandex_mdb_sqlserver_cluster.go
@@ -231,6 +231,11 @@ func resourceYandexMDBSQLServerCluster() *schema.Resource {
Optional: true,
Computed: true,
},
+ "sqlcollation": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ },
},
}
}
@@ -334,6 +339,7 @@ func prepareCreateSQLServerRequest(d *schema.ResourceData, meta *Config) (*sqlse
Labels: labels,
SecurityGroupIds: securityGroupIds,
DeletionProtection: d.Get("deletion_protection").(bool),
+ Sqlcollation: d.Get("sqlcollation").(string),
HostGroupIds: hostGroupIds,
}
return &req, nil
@@ -437,6 +443,7 @@ func resourceYandexMDBSQLServerClusterRead(d *schema.ResourceData, meta interfac
}
d.Set("deletion_protection", cluster.DeletionProtection)
+ d.Set("sqlcollation", cluster.Sqlcollation)
return d.Set("created_at", getTimestamp(cluster.CreatedAt))
}
@@ -567,6 +574,11 @@ func sqlserverClusterUpdate(ctx context.Context, config *Config, d *schema.Resou
return fmt.Errorf("error expanding labels while updating SQLServer cluster: %s", err)
}
+	if d.HasChange("sqlcollation") {
+		return fmt.Errorf("sqlcollation cannot be changed after cluster creation")
+	}
+
securityGroupIds := expandSecurityGroupIds(d.Get("security_group_ids"))
req := &sqlserver.UpdateClusterRequest{
diff --git a/yandex/resource_yandex_mdb_sqlserver_cluster_test.go b/yandex/resource_yandex_mdb_sqlserver_cluster_test.go
index 7603c8e59..d37d9916c 100644
--- a/yandex/resource_yandex_mdb_sqlserver_cluster_test.go
+++ b/yandex/resource_yandex_mdb_sqlserver_cluster_test.go
@@ -119,6 +119,7 @@ func TestAccMDBSQLServerCluster_full(t *testing.T) {
testAccCheckCreatedAtAttr(sqlserverResource),
resource.TestCheckResourceAttr(sqlserverResource, "security_group_ids.#", "1"),
resource.TestCheckResourceAttr(sqlserverResource, "deletion_protection", "true"),
+ resource.TestCheckResourceAttr(sqlserverResource, "sqlcollation", "Cyrillic_General_CI_AI"),
),
},
mdbSQLServerClusterImportStep(sqlserverResource),
@@ -180,6 +181,7 @@ func TestAccMDBSQLServerCluster_full(t *testing.T) {
resource.TestCheckResourceAttr(sqlserverResource, "database.2.name", "testdb-b"),
testAccCheckCreatedAtAttr(sqlserverResource),
resource.TestCheckResourceAttr(sqlserverResource, "security_group_ids.#", "2"),
+ resource.TestCheckResourceAttr(sqlserverResource, "sqlcollation", "Cyrillic_General_CI_AI"),
),
},
mdbSQLServerClusterImportStep(sqlserverResource),
@@ -409,6 +411,8 @@ resource "yandex_mdb_sqlserver_cluster" "foo" {
security_group_ids = [yandex_vpc_security_group.mdb-sqlserver-test-sg-x.id]
+ sqlcollation = "Cyrillic_General_CI_AI"
+
deletion_protection = %t
}
`, name, desc, environment, deletionProtection)
@@ -497,6 +501,8 @@ resource "yandex_mdb_sqlserver_cluster" "foo" {
}
security_group_ids = [yandex_vpc_security_group.mdb-sqlserver-test-sg-x.id, yandex_vpc_security_group.mdb-sqlserver-test-sg-y.id]
+
+ sqlcollation = "Cyrillic_General_CS_AS"
}
`, name, desc)
}
diff --git a/yandex/resource_yandex_message_queue.go b/yandex/resource_yandex_message_queue.go
index d0486de09..9bffc6724 100644
--- a/yandex/resource_yandex_message_queue.go
+++ b/yandex/resource_yandex_message_queue.go
@@ -109,6 +109,12 @@ func resourceYandexMessageQueue() *schema.Resource {
Default: false,
Optional: true,
},
+ "region_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: defaultYMQRegion,
+ ForceNew: true,
+ },
// Credentials
"access_key": {
@@ -340,12 +346,18 @@ func resourceYandexMessageQueueReadImpl(d *schema.ResourceData, meta interface{}
d.Set("receive_wait_time_seconds", 0)
d.Set("redrive_policy", "")
d.Set("visibility_timeout_seconds", 30)
+ d.Set("region_id", defaultYMQRegion)
if attributeOutput != nil {
queueAttributes := aws.StringValueMap(attributeOutput.Attributes)
if v, ok := queueAttributes[sqs.QueueAttributeNameQueueArn]; ok {
d.Set("arn", v)
+ region, err := regionFromYRN(v)
+ if err != nil {
+ return err
+ }
+ d.Set("region_id", region)
}
if v, ok := queueAttributes[sqs.QueueAttributeNameContentBasedDeduplication]; ok && v != "" {
@@ -548,7 +560,7 @@ func newYMQClientConfigFromKeys(accessKey, secretKey string, providerConfig *Con
return &aws.Config{
Credentials: credentials.NewStaticCredentials(accessKey, secretKey, ""),
Endpoint: aws.String(providerConfig.YMQEndpoint),
- Region: aws.String(defaultYMQRegion),
+ Region: aws.String(providerConfig.Region),
}
}
@@ -559,6 +571,10 @@ func newYMQClientConfig(d *schema.ResourceData, meta interface{}) (config *aws.C
return
}
config = newYMQClientConfigFromKeys(accessKey, secretKey, providerConfig)
+ if v, ok := d.GetOk("region_id"); ok {
+ log.Printf("[DEBUG] Use custom region: %s", v.(string))
+ config.WithRegion(v.(string))
+ }
return
}
@@ -577,6 +593,16 @@ func newYMQClient(d *schema.ResourceData, meta interface{}) (*sqs.SQS, error) {
if err != nil {
return nil, err
}
+ log.Printf("[DEBUG] YMQ config: %v", config)
return newYMQClientFromConfig(config)
}
+
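+// regionFromYRN extracts the region (fourth colon-separated component) from a queue YRN.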
+func regionFromYRN(yrn string) (string, error) {
+ // yrn:yc:ymq:ru-central1:21i6v06sqmsaoeon7nus:event-queue
+ parts := strings.Split(yrn, ":")
+ if len(parts) > 4 {
+ return parts[3], nil
+ }
+ return "", fmt.Errorf("YRN was not parsed correctly: %s", yrn)
+}
diff --git a/yandex/resource_yandex_message_queue_test.go b/yandex/resource_yandex_message_queue_test.go
index 9e78ac809..701c6bcd4 100644
--- a/yandex/resource_yandex_message_queue_test.go
+++ b/yandex/resource_yandex_message_queue_test.go
@@ -248,6 +248,9 @@ func testAccNewYMQClientForResource(rs *terraform.ResourceState) (ymqClient *sqs
}
ymqClient, err = newYMQClientFromConfig(newYMQClientConfigFromKeys(accessKey, secretKey,
testAccProvider.Meta().(*Config)))
+ if region, ok := rs.Primary.Attributes["region_id"]; err == nil && ok && region != "" {
+ ymqClient.Config.WithRegion(region)
+ }
return
}
@@ -507,7 +510,8 @@ func testAccMessageQueueSetTmpKeysForProvider() (cleanupFunc func(), err error)
func testAccMessageQueueConfigWithDefaults(randInt int) string {
return fmt.Sprintf(`
resource "yandex_message_queue" "queue" {
- name = "message-queue-%d"
+ region_id = "ru-central1"
+ name = "message-queue-%d"
access_key = yandex_iam_service_account_static_access_key.sa-key.access_key
secret_key = yandex_iam_service_account_static_access_key.sa-key.secret_key
@@ -518,7 +522,8 @@ resource "yandex_message_queue" "queue" {
func testAccMessageQueueConfigForImport(randInt int) string {
return fmt.Sprintf(`
resource "yandex_message_queue" "queue" {
- name = "message-queue-%d"
+ region_id = "ru-central1"
+ name = "message-queue-%d"
delay_seconds = 303
max_message_size = 2049
@@ -532,6 +537,7 @@ resource "yandex_message_queue" "queue" {
func testAccMessageQueueConfigWithNamePrefix(prefix string, randInt int) string {
return fmt.Sprintf(`
resource "yandex_message_queue" "queue" {
+ region_id = "ru-central1"
name_prefix = "%s"
access_key = yandex_iam_service_account_static_access_key.sa-key.access_key
diff --git a/yandex/resource_yandex_serverless_container_iam_binding.go b/yandex/resource_yandex_serverless_container_iam_binding.go
new file mode 100644
index 000000000..5971601d6
--- /dev/null
+++ b/yandex/resource_yandex_serverless_container_iam_binding.go
@@ -0,0 +1,7 @@
+package yandex
+
+import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
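+// resourceYandexServerlessContainerIAMBinding manages the members of a single IAM role
+// on a serverless container.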
+func resourceYandexServerlessContainerIAMBinding() *schema.Resource {
+ return resourceIamBindingWithImport(IamServerlessContainerSchema, newServerlessContainerIamUpdater, serverlessContainerIDParseFunc)
+}
diff --git a/yandex/resource_yandex_serverless_container_iam_binding_test.go b/yandex/resource_yandex_serverless_container_iam_binding_test.go
new file mode 100644
index 000000000..957049a8e
--- /dev/null
+++ b/yandex/resource_yandex_serverless_container_iam_binding_test.go
@@ -0,0 +1,66 @@
+package yandex
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+
+ "github.com/yandex-cloud/go-genproto/yandex/cloud/serverless/containers/v1"
+)
+
+func importServerlessContainerIDFunc(container *containers.Container, role string) func(*terraform.State) (string, error) {
+ return func(s *terraform.State) (string, error) {
+ return container.Id + " " + role, nil
+ }
+}
+
+func TestAccServerlessContainerIamBinding(t *testing.T) {
+ var container containers.Container
+ containerName := acctest.RandomWithPrefix("tf-container")
+ memory := (1 + acctest.RandIntRange(1, 4)) * 128
+
+ userID := "allUsers"
+ role := "serverless.containers.invoker"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccServerlessContainerIamBinding_basic(containerName, memory, serverlessContainerTestImage1, role, userID),
+ Check: resource.ComposeTestCheckFunc(
+ testYandexServerlessContainerExists(serverlessContainerResource, &container),
+ testAccCheckServerlessContainerIam(serverlessContainerResource, role, []string{"system:" + userID}),
+ ),
+ },
+ {
+ ResourceName: "yandex_serverless_container_iam_binding.foo",
+ ImportStateIdFunc: importServerlessContainerIDFunc(&container, role),
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+//revive:disable:var-naming
+func testAccServerlessContainerIamBinding_basic(cName string, memory int, url, role, userID string) string {
+ return fmt.Sprintf(`
+resource "yandex_serverless_container" "test-container" {
+ name = "%s"
+ memory = %d
+ image {
+ url = "%s"
+ }
+}
+
+resource "yandex_serverless_container_iam_binding" "foo" {
+ container_id = yandex_serverless_container.test-container.id
+ role = "%s"
+ members = ["system:%s"]
+}
+`, cName, memory, url, role, userID)
+}
diff --git a/yandex/resource_yandex_storage_bucket.go b/yandex/resource_yandex_storage_bucket.go
index a4f7a8e4e..8765b8e8e 100644
--- a/yandex/resource_yandex_storage_bucket.go
+++ b/yandex/resource_yandex_storage_bucket.go
@@ -837,7 +837,7 @@ func resourceYandexStorageBucketRead(d *schema.ResourceData, meta interface{}) e
err = resourceYandexStorageBucketReadExtended(d, meta)
if err != nil {
- return err
+ log.Printf("[WARN] Got an error reading Storage Bucket's extended properties: %s", err)
}
return nil
diff --git a/yandex/resource_yandex_ydb_database_dedicated.go b/yandex/resource_yandex_ydb_database_dedicated.go
index 9ed47fb60..05f67d3c1 100644
--- a/yandex/resource_yandex_ydb_database_dedicated.go
+++ b/yandex/resource_yandex_ydb_database_dedicated.go
@@ -414,6 +414,12 @@ func performYandexYDBDatabaseRead(d *schema.ResourceData, config *Config) (*ydb.
}
func flattenYandexYDBDatabaseDedicated(d *schema.ResourceData, database *ydb.Database) error {
+ if database == nil {
+ // NOTE(shmel1k@): database existed before but was removed outside of terraform.
+ d.SetId("")
+ return nil
+ }
+
switch database.DatabaseType.(type) {
case *ydb.Database_RegionalDatabase,
*ydb.Database_ZonalDatabase,
diff --git a/yandex/resource_yandex_ydb_database_iam_binding.go b/yandex/resource_yandex_ydb_database_iam_binding.go
new file mode 100644
index 000000000..89b645aba
--- /dev/null
+++ b/yandex/resource_yandex_ydb_database_iam_binding.go
@@ -0,0 +1,7 @@
+package yandex
+
+import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
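+// resourceYandexYDBDatabaseIAMBinding manages the members of a single IAM role
+// on a YDB database.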
+func resourceYandexYDBDatabaseIAMBinding() *schema.Resource {
+ return resourceIamBindingWithImport(IamYDBDatabaseSchema, newYDBDatabaseIamUpdater, ydbDatabaseIDParseFunc)
+}
diff --git a/yandex/resource_yandex_ydb_database_iam_binding_test.go b/yandex/resource_yandex_ydb_database_iam_binding_test.go
new file mode 100644
index 000000000..8616136df
--- /dev/null
+++ b/yandex/resource_yandex_ydb_database_iam_binding_test.go
@@ -0,0 +1,161 @@
+package yandex
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+
+ "github.com/yandex-cloud/go-genproto/yandex/cloud/access"
+ "github.com/yandex-cloud/go-genproto/yandex/cloud/ydb/v1"
+)
+
+const ydbDatabaseResource = "yandex_ydb_database_serverless.test-database"
+
+func importYDBDatabaseIDFunc(database *ydb.Database, role string) func(*terraform.State) (string, error) {
+ return func(s *terraform.State) (string, error) {
+ return database.Id + " " + role, nil
+ }
+}
+
+func TestAccYDBDatabaseIamBinding_basic(t *testing.T) {
+ var database ydb.Database
+ databaseName := acctest.RandomWithPrefix("tf-ydb-database")
+
+ role := "ydb.viewer"
+ userID := "system:allAuthenticatedUsers"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccYDBDatabaseIamBindingBasic(databaseName, role, userID),
+ Check: resource.ComposeTestCheckFunc(
+ testYandexYDBDatabaseServerlessExists(ydbDatabaseResource, &database),
+ testAccCheckYDBDatabaseIam(ydbDatabaseResource, role, []string{userID}),
+ ),
+ },
+ {
+ ResourceName: "yandex_ydb_database_iam_binding.viewer",
+ ImportStateIdFunc: importYDBDatabaseIDFunc(&database, role),
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ },
+ })
+}
+
+func TestAccYDBDatabaseIamBinding_remove(t *testing.T) {
+ var database ydb.Database
+ databaseName := acctest.RandomWithPrefix("tf-ydb-database")
+
+ role := "ydb.viewer"
+ userID := "system:allAuthenticatedUsers"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ Providers: testAccProviders,
+ Steps: []resource.TestStep{
+ // Prepare data source
+ {
+ Config: testAccYDBDatabase(databaseName),
+ Check: resource.ComposeTestCheckFunc(
+ testYandexYDBDatabaseServerlessExists(ydbDatabaseResource, &database),
+ testAccCheckYDBDatabaseEmptyIam(ydbDatabaseResource),
+ ),
+ },
+ // Apply IAM bindings
+ {
+ Config: testAccYDBDatabaseIamBindingBasic(databaseName, role, userID),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckYDBDatabaseIam(ydbDatabaseResource, role, []string{userID}),
+ ),
+ },
+ // Remove the bindings
+ {
+ Config: testAccYDBDatabase(databaseName),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckYDBDatabaseEmptyIam(ydbDatabaseResource),
+ ),
+ },
+ },
+ })
+}
+
+func testAccYDBDatabaseIamBindingBasic(databaseName, role, userID string) string {
+ return fmt.Sprintf(`
+resource "yandex_ydb_database_serverless" "test-database" {
+ name = "%s"
+}
+
+resource "yandex_ydb_database_iam_binding" "viewer" {
+ database_id = yandex_ydb_database_serverless.test-database.id
+ role = "%s"
+ members = ["%s"]
+}
+`, databaseName, role, userID)
+}
+
+func testAccYDBDatabase(databaseName string) string {
+ return fmt.Sprintf(`
+resource "yandex_ydb_database_serverless" "test-database" {
+ name = "%s"
+}
+`, databaseName)
+}
+
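+// testAccCheckYDBDatabaseEmptyIam checks that the database has no access bindings at all.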
+func testAccCheckYDBDatabaseEmptyIam(resourceName string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ bindings, err := getYDBDatabaseResourceAccessBindings(s, resourceName)
+ if err != nil {
+ return err
+ }
+
+ if len(bindings) == 0 {
+ return nil
+ }
+
+		return fmt.Errorf("Bindings found but expected none for %s", resourceName)
+ }
+}
+
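+// testAccCheckYDBDatabaseIam checks that the given role is bound to exactly the expected set of members.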
+func testAccCheckYDBDatabaseIam(resourceName, role string, members []string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ bindings, err := getYDBDatabaseResourceAccessBindings(s, resourceName)
+ if err != nil {
+ return err
+ }
+
+ var roleMembers []string
+ for _, binding := range bindings {
+ if binding.RoleId == role {
+ member := binding.Subject.Type + ":" + binding.Subject.Id
+ roleMembers = append(roleMembers, member)
+ }
+ }
+ sort.Strings(members)
+ sort.Strings(roleMembers)
+
+ if reflect.DeepEqual(members, roleMembers) {
+ return nil
+ }
+
+		return fmt.Errorf("Binding found but expected members %v, got %v", members, roleMembers)
+ }
+}
+
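+// getYDBDatabaseResourceAccessBindings looks the resource up in Terraform state and returns its current access bindings.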
+func getYDBDatabaseResourceAccessBindings(s *terraform.State, resourceName string) ([]*access.AccessBinding, error) {
+ config := testAccProvider.Meta().(*Config)
+
+ rs, ok := s.RootModule().Resources[resourceName]
+ if !ok {
+ return nil, fmt.Errorf("can't find %s in state", resourceName)
+ }
+
+ return getYDBDatabaseAccessBindings(config, rs.Primary.ID)
+}
diff --git a/yandex/resource_yandex_ydb_database_serverless.go b/yandex/resource_yandex_ydb_database_serverless.go
index 257bc4095..f53a6ff02 100644
--- a/yandex/resource_yandex_ydb_database_serverless.go
+++ b/yandex/resource_yandex_ydb_database_serverless.go
@@ -159,6 +159,12 @@ func resourceYandexYDBDatabaseServerlessRead(d *schema.ResourceData, meta interf
}
func flattenYandexYDBDatabaseServerless(d *schema.ResourceData, database *ydb.Database) error {
+ if database == nil {
+ // NOTE(shmel1k@): database existed before but was removed outside of terraform.
+ d.SetId("")
+ return nil
+ }
+
switch database.DatabaseType.(type) {
case *ydb.Database_ServerlessDatabase: // we actually expect it
case *ydb.Database_DedicatedDatabase:
diff --git a/yandex/structures.go b/yandex/structures.go
index c7e316699..81bb4be4a 100644
--- a/yandex/structures.go
+++ b/yandex/structures.go
@@ -212,6 +212,21 @@ func flattenComputeInstanceDnsRecords(specs []*compute.DnsRecord) []map[string]i
return res
}
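+// flattenK8SNodeGroupDNSRecords converts node group DNS record specs into the list-of-maps form used by the schema.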
+func flattenK8SNodeGroupDNSRecords(specs []*k8s.DnsRecordSpec) []map[string]interface{} {
+ res := make([]map[string]interface{}, len(specs))
+
+ for i, spec := range specs {
+ res[i] = map[string]interface{}{
+ "fqdn": spec.Fqdn,
+ "dns_zone_id": spec.DnsZoneId,
+ "ttl": int(spec.Ttl),
+ "ptr": spec.Ptr,
+ }
+ }
+
+ return res
+}
+
func expandInstanceResourcesSpec(d *schema.ResourceData) (*compute.ResourcesSpec, error) {
rs := &compute.ResourcesSpec{}
@@ -606,6 +621,27 @@ func expandComputeInstanceDnsRecords(data []interface{}) []*compute.DnsRecordSpe
return recs
}
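+// expandK8SNodeGroupDNSRecords builds DNS record specs from schema data; fqdn is always read,
+// the remaining keys are applied only when present.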
+func expandK8SNodeGroupDNSRecords(data []interface{}) []*k8s.DnsRecordSpec {
+ recs := make([]*k8s.DnsRecordSpec, len(data))
+
+ for i, raw := range data {
+ d := raw.(map[string]interface{})
+ r := &k8s.DnsRecordSpec{Fqdn: d["fqdn"].(string)}
+ if s, ok := d["dns_zone_id"]; ok {
+ r.DnsZoneId = s.(string)
+ }
+ if s, ok := d["ttl"]; ok {
+ r.Ttl = int64(s.(int))
+ }
+ if s, ok := d["ptr"]; ok {
+ r.Ptr = s.(bool)
+ }
+ recs[i] = r
+ }
+
+ return recs
+}
+
func parseDiskMode(mode string) (compute.AttachedDiskSpec_Mode, error) {
val, ok := compute.AttachedDiskSpec_Mode_value[mode]
if !ok {
diff --git a/yandex/structures_test.go b/yandex/structures_test.go
index 8a169a8fa..d4b9d0fb3 100644
--- a/yandex/structures_test.go
+++ b/yandex/structures_test.go
@@ -747,3 +747,21 @@ func TestFlattenLocalDiskLocalDisks(t *testing.T) {
})
}
}
+
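+// TestConvertFQDN is a table-driven check of parseHostnameFromFQDN: each FQDN key must parse to the expected hostname value.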
+func TestConvertFQDN(t *testing.T) {
+ testdata := map[string]string{
+ "123.auto.internal": "",
+ "breathtaking.ru-central1.internal": "breathtaking",
+ "hello.world": "hello.world",
+ "breathtaking": "breathtaking.",
+ }
+
+ for fqdn, hostname := range testdata {
+ t.Run("fqdn "+fqdn, func(t *testing.T) {
+ h, _ := parseHostnameFromFQDN(fqdn)
+ if h != hostname {
+				t.Errorf("got hostname %q, want %q", h, hostname)
+ }
+ })
+ }
+}
diff --git a/yandex/utils_test.go b/yandex/utils_test.go
index 94dc9f1db..480695aa1 100644
--- a/yandex/utils_test.go
+++ b/yandex/utils_test.go
@@ -147,6 +147,38 @@ func testAccCheckFunctionIam(resourceName, role string, members []string) resour
}
}
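+// testAccCheckServerlessContainerIam checks that the given role on the serverless container is bound to exactly the expected set of members.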
+func testAccCheckServerlessContainerIam(resourceName, role string, members []string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ config := testAccProvider.Meta().(*Config)
+
+ rs, ok := s.RootModule().Resources[resourceName]
+ if !ok {
+ return fmt.Errorf("can't find %s in state", resourceName)
+ }
+
+ bindings, err := getServerlessContainerAccessBindings(config, rs.Primary.ID)
+ if err != nil {
+ return err
+ }
+
+ var roleMembers []string
+ for _, binding := range bindings {
+ if binding.RoleId == role {
+ member := binding.Subject.Type + ":" + binding.Subject.Id
+ roleMembers = append(roleMembers, member)
+ }
+ }
+ sort.Strings(members)
+ sort.Strings(roleMembers)
+
+ if reflect.DeepEqual(members, roleMembers) {
+ return nil
+ }
+
+		return fmt.Errorf("Binding found but expected members %v, got %v", members, roleMembers)
+ }
+}
+
func testAccCheckServiceAccountIam(resourceName, role string, members []string) resource.TestCheckFunc {
return func(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)