diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5c49de47f..931c0d7dc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,10 +4,14 @@ FEATURES:
 * **New Resource:** `yandex_datatransfer_transfer`
 * managed-kubernetes: add `container_runtime` attribute to `yandex_kubernetes_node_group` resource and data source
 * managed-elasticsearch: add `maintenance_window` section in `yandex_mdb_elasticsearch_cluster` resource and data source
+* dns: support for resolving by name in `yandex_dns_zone` data source
 
 BUG FIXES:
 * mdb: fix error when simultaneously deleting `yandex_mdb_kafka_topic` resource and modifying `yandex_mdb_kafka_cluster` resource
 
+ENHANCEMENTS:
+* mdb: add `maintenance_window` section in `yandex_mdb_kafka_cluster` resource and data source
+
 ## 0.69.0 (December 27, 2021)
 FEATURES:
 * **New Data Source:** `yandex_cdn_origin_group`
diff --git a/website/docs/d/datasource_dns_zone.html.markdown b/website/docs/d/datasource_dns_zone.html.markdown
index 80c8122c3..06ed18db5 100644
--- a/website/docs/d/datasource_dns_zone.html.markdown
+++ b/website/docs/d/datasource_dns_zone.html.markdown
@@ -24,7 +24,12 @@ output "zone" {
 
 ## Argument Reference
 
-* `dns_zone_id` - (Required) The ID of the DNS Zone.
+* `dns_zone_id` - (Optional) The ID of the DNS Zone.
+* `name` - (Optional) Name of the DNS Zone.
+
+~> **NOTE:** One of `dns_zone_id` or `name` must be specified.
+
+* `folder_id` - (Optional) Folder that the resource belongs to. If the value is omitted, the default provider folder is used.
 
 ## Attributes Reference
 
diff --git a/website/docs/d/datasource_mdb_kafka_cluster.html.markdown b/website/docs/d/datasource_mdb_kafka_cluster.html.markdown
index 1e21640ff..2781505d2 100644
--- a/website/docs/d/datasource_mdb_kafka_cluster.html.markdown
+++ b/website/docs/d/datasource_mdb_kafka_cluster.html.markdown
@@ -53,6 +53,7 @@ exported:
 * `host` - A host of the Kafka cluster. The structure is documented below.
 * `security_group_ids` - A list of security groups IDs of the Kafka cluster.
 * `host_group_ids` - A list of IDs of the host groups hosting VMs of the cluster.
+* `maintenance_window` - Maintenance window settings of the Kafka cluster. The structure is documented below.
 
 The `config` block supports:
 
@@ -131,3 +132,9 @@ The `host` block supports:
 * `health` - Health of the host.
 * `subnet_id` - The ID of the subnet, to which the host belongs.
 * `assign_public_ip` - The flag that defines whether a public IP address is assigned to the node.
+
+The `maintenance_window` block supports:
+
+* `type` - Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`.
+* `day` - Day of the week (in `DDD` format). Value is one of: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
+* `hour` - Hour of the day in UTC (in `HH` format). Value is between 1 and 24.
diff --git a/website/docs/d/datasource_mdb_mysql_cluster.html.markdown b/website/docs/d/datasource_mdb_mysql_cluster.html.markdown
index 7732b7e27..c3e66ca71 100644
--- a/website/docs/d/datasource_mdb_mysql_cluster.html.markdown
+++ b/website/docs/d/datasource_mdb_mysql_cluster.html.markdown
@@ -116,6 +116,6 @@ The `access` block supports:
 
 The `maintenance_window` block supports:
 
-* `type` - Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. A day and hour of window need to be specified with weekly window.
+* `type` - Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`.
 * `day` - Day of the week (in `DDD` format). Value is one of: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-* `hour` - Hour of the day in UTC (in `HH` format). Values is between 0 and 23.
\ No newline at end of file
+* `hour` - Hour of the day in UTC (in `HH` format). Value is between 1 and 24.
\ No newline at end of file
diff --git a/website/docs/d/datasource_mdb_postgresql_cluster.html.markdown b/website/docs/d/datasource_mdb_postgresql_cluster.html.markdown
index b725f953a..c665f52f3 100644
--- a/website/docs/d/datasource_mdb_postgresql_cluster.html.markdown
+++ b/website/docs/d/datasource_mdb_postgresql_cluster.html.markdown
@@ -134,8 +134,8 @@ The `host` block supports:
 
 The `maintenance_window` block supports:
 
-* `type` - Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. A day and hour of window need to be specified with weekly window.
+* `type` - Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`.
 * `day` - Day of the week (in `DDD` format). Value is one of: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"
-* `hour` - Hour of the day in UTC (in `HH` format). Values is between 0 and 23.
+* `hour` - Hour of the day in UTC (in `HH` format). Value is between 1 and 24.
 
 
diff --git a/website/docs/d/datasource_vpc_security_group.html.markdown b/website/docs/d/datasource_vpc_security_group.html.markdown
index 1bd98cc9d..04ca4ddf9 100644
--- a/website/docs/d/datasource_vpc_security_group.html.markdown
+++ b/website/docs/d/datasource_vpc_security_group.html.markdown
@@ -60,6 +60,6 @@ The `ingress` and `egress` block supports:
 * `to_port` - Maximum port number.
 * `port` - Port number (if applied to a single port).
 * `security_group_id` - Target security group ID for this rule.
-* `predefined_target` - Special-purpose targets such as "self_security_group". [See docs](https://cloud.yandex.ru/docs/vpc/concepts/security-groups) for possible options.
+* `predefined_target` - Special-purpose targets. `self_security_group` refers to this particular security group. `loadbalancer_healthchecks` represents [load balancer health check nodes](https://cloud.yandex.com/docs/network-load-balancer/concepts/health-check).
 * `v4_cidr_blocks` - The blocks of IPv4 addresses for this rule.
 * `v6_cidr_blocks` - The blocks of IPv6 addresses for this rule.
diff --git a/website/docs/d/datasource_vpc_security_group_rule.html.markdown b/website/docs/d/datasource_vpc_security_group_rule.html.markdown
index c8d1af849..4ff5d491c 100644
--- a/website/docs/d/datasource_vpc_security_group_rule.html.markdown
+++ b/website/docs/d/datasource_vpc_security_group_rule.html.markdown
@@ -38,6 +38,6 @@ The following attribute is exported:
 * `to_port` - Maximum port number.
 * `port` - Port number (if applied to a single port).
 * `security_group_id` - Target security group ID for this rule.
-* `predefined_target` - Special-purpose targets such as "self_security_group". [See docs](https://cloud.yandex.com/docs/vpc/concepts/security-groups) for possible options.
+* `predefined_target` - Special-purpose targets. `self_security_group` refers to this particular security group. `loadbalancer_healthchecks` represents [load balancer health check nodes](https://cloud.yandex.com/docs/network-load-balancer/concepts/health-check).
 * `v4_cidr_blocks` - The blocks of IPv4 addresses for this rule.
 * `v6_cidr_blocks` - The blocks of IPv6 addresses for this rule.
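The `yandex_dns_zone` changes above allow lookup by name instead of ID. A minimal sketch of the intended usage (the zone name and folder ID below are hypothetical placeholders, not values taken from this change):

```hcl
# Hypothetical lookup by name; "example-zone" and the folder ID are placeholders.
data "yandex_dns_zone" "by_name" {
  name      = "example-zone"
  folder_id = "b1gexamplefolderid00"
}

output "zone_fqdn" {
  value = data.yandex_dns_zone.by_name.zone
}
```

Internally, when `name` is set, the read function resolves it to a zone ID via `sdkresolvers.DNSZoneResolver` before calling the Get API (see `data_source_yandex_dns_zone.go` below), which is why exactly one of `dns_zone_id` or `name` has to be provided.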
diff --git a/website/docs/r/mdb_kafka_cluster.html.markdown b/website/docs/r/mdb_kafka_cluster.html.markdown
index cf366fd68..00e4fad13 100644
--- a/website/docs/r/mdb_kafka_cluster.html.markdown
+++ b/website/docs/r/mdb_kafka_cluster.html.markdown
@@ -205,6 +205,8 @@ The following arguments are supported:
 * `deletion_protection` - (Optional) Inhibits deletion of the cluster. Can be either `true` or `false`.
 
+* `maintenance_window` - (Optional) Maintenance policy of the Kafka cluster. The structure is documented below.
+
 ~> **Note:** Historically, `topic` blocks of the `yandex_mdb_kafka_cluster` resource were used to manage topics of the Kafka
 cluster. However, this approach has a number of disadvantages. In particular, when adding and removing topics from the
 tf recipe, terraform generates a diff that misleads the user about the planned changes. Also, this approach turned out to be
@@ -213,6 +215,16 @@ type `yandex_mdb_kafka_topic` was implemented and is now recommended.
 
 - - -
 
+The `maintenance_window` block supports:
+
+* `type` - (Required) Type of maintenance window. Can be either `ANYTIME` or `WEEKLY`. The day and hour of the window must be specified for a weekly window.
+
+* `day` - (Optional) Day of the week (in `DDD` format). Allowed values: "MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN".
+
+* `hour` - (Optional) Hour of the day in UTC (in `HH` format). Allowed values are between 1 and 24.
+
+- - -
+
 The `config` block supports:
 
 * `version` - (Required) Version of the Kafka server software.
diff --git a/website/docs/r/vpc_default_security_group.html.markdown b/website/docs/r/vpc_default_security_group.html.markdown
index 2016583b3..78c8dab22 100644
--- a/website/docs/r/vpc_default_security_group.html.markdown
+++ b/website/docs/r/vpc_default_security_group.html.markdown
@@ -95,7 +95,7 @@ The `ingress` and `egress` block supports:
 * `to_port` (Optional) - Maximum port number.
 * `port` (Optional) - Port number (if applied to a single port).
 * `security_group_id` (Optional) - Target security group ID for this rule.
-* `predefined_target` (Optional) - Special-purpose targets such as "self_security_group". [See docs](https://cloud.yandex.com/docs/vpc/concepts/security-groups) for possible options.
+* `predefined_target` (Optional) - Special-purpose targets. `self_security_group` refers to this particular security group. `loadbalancer_healthchecks` represents [load balancer health check nodes](https://cloud.yandex.com/docs/network-load-balancer/concepts/health-check).
 * `v4_cidr_blocks` (Optional) - The blocks of IPv4 addresses for this rule.
 * `v6_cidr_blocks` (Optional) - The blocks of IPv6 addresses for this rule. `v6_cidr_blocks` argument is currently not
 supported. It will be available in the future.
diff --git a/website/docs/r/vpc_security_group.html.markdown b/website/docs/r/vpc_security_group.html.markdown
index 4c0bd2ea3..79628ec96 100644
--- a/website/docs/r/vpc_security_group.html.markdown
+++ b/website/docs/r/vpc_security_group.html.markdown
@@ -85,7 +85,7 @@ The `ingress` and `egress` block supports:
 * `to_port` (Optional) - Maximum port number.
 * `port` (Optional) - Port number (if applied to a single port).
 * `security_group_id` (Optional) - Target security group ID for this rule.
-* `predefined_target` (Optional) - Special-purpose targets such as "self_security_group" or "loadbalancer_healthchecks". [See docs](https://cloud.yandex.com/docs/vpc/concepts/security-groups) for possible options.
+* `predefined_target` (Optional) - Special-purpose targets. `self_security_group` refers to this particular security group. `loadbalancer_healthchecks` represents [load balancer health check nodes](https://cloud.yandex.com/docs/network-load-balancer/concepts/health-check).
 * `v4_cidr_blocks` (Optional) - The blocks of IPv4 addresses for this rule.
 * `v6_cidr_blocks` (Optional) - The blocks of IPv6 addresses for this rule. `v6_cidr_blocks` argument is currently not
 supported. It will be available in the future.
diff --git a/yandex/data_source_yandex_dns_zone.go b/yandex/data_source_yandex_dns_zone.go
index d1f8e8d71..45abcae1e 100644
--- a/yandex/data_source_yandex_dns_zone.go
+++ b/yandex/data_source_yandex_dns_zone.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/yandex-cloud/go-genproto/yandex/cloud/dns/v1"
+	"github.com/yandex-cloud/go-sdk/sdkresolvers"
 )
 
 func dataSourceYandexDnsZone() *schema.Resource {
@@ -22,20 +23,23 @@ func dataSourceYandexDnsZone() *schema.Resource {
 		Schema: map[string]*schema.Schema{
 			"dns_zone_id": {
 				Type:     schema.TypeString,
-				Required: true,
+				Optional: true,
+				Computed: true,
 			},
-			"zone": {
+			"name": {
 				Type:     schema.TypeString,
+				Optional: true,
 				Computed: true,
 			},
 			"folder_id": {
 				Type:     schema.TypeString,
+				Optional: true,
 				Computed: true,
 			},
-			"name": {
+			"zone": {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
@@ -78,9 +82,19 @@ func dataSourceYandexDnsZoneRead(d *schema.ResourceData, meta interface{}) error
 	config := meta.(*Config)
 	sdk := getSDK(config)
 
+	err := checkOneOf(d, "dns_zone_id", "name")
+	if err != nil {
+		return err
+	}
+
 	id := d.Get("dns_zone_id").(string)
-	if id == "" {
-		return fmt.Errorf("dns_zone_id should be provided")
+	_, zoneNameOk := d.GetOk("name")
+
+	if zoneNameOk {
+		id, err = resolveObjectID(config.Context(), config, d, sdkresolvers.DNSZoneResolver)
+		if err != nil {
+			return fmt.Errorf("failed to resolve data source dns zone by name: %v", err)
+		}
 	}
 
 	dnsZone, err := sdk.DNS().DnsZone().Get(config.Context(), &dns.GetDnsZoneRequest{
diff --git a/yandex/data_source_yandex_mdb_kafka_cluster.go b/yandex/data_source_yandex_mdb_kafka_cluster.go
index 17d4bb8e8..d1d476253 100644
--- a/yandex/data_source_yandex_mdb_kafka_cluster.go
+++ b/yandex/data_source_yandex_mdb_kafka_cluster.go
@@ -104,6 +104,26 @@ func dataSourceYandexMDBKafkaCluster() *schema.Resource {
 				Computed: true,
 				Optional: true,
 			},
+			"maintenance_window": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"type": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"day": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"hour": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+					},
+				},
+			},
 		},
 	}
 }
@@ -190,6 +210,14 @@ func dataSourceYandexMDBKafkaClusterRead(d *schema.ResourceData, meta interface{
 	d.Set("deletion_protection", cluster.DeletionProtection)
 
+	maintenanceWindow, err := flattenKafkaMaintenanceWindow(cluster.MaintenanceWindow)
+	if err != nil {
+		return err
+	}
+	if err := d.Set("maintenance_window", maintenanceWindow); err != nil {
+		return err
+	}
+
 	d.SetId(cluster.Id)
 
 	return nil
 }
diff --git a/yandex/mdb_kafka_structures.go b/yandex/mdb_kafka_structures.go
index 3d45b118b..efe3189a8 100644
--- a/yandex/mdb_kafka_structures.go
+++ b/yandex/mdb_kafka_structures.go
@@ -978,3 +978,76 @@ func expandKafkaTopic(spec map[string]interface{}, version string) (*kafka.Topic
 	}
 	return topic, nil
 }
+
+func kafkaMaintenanceWindowSchemaValidateFunc(v interface{}, k string) (s []string, es []error) {
+	dayString := v.(string)
+	day, ok := kafka.WeeklyMaintenanceWindow_WeekDay_value[dayString]
+	if !ok || day == 0 {
+		es = append(es, fmt.Errorf(`expected %s to be one of ("MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"), current value is %v`, k, v))
+		return
+	}
+
+	return
+}
+
+func flattenKafkaMaintenanceWindow(mw *kafka.MaintenanceWindow) ([]interface{}, error) {
+	maintenanceWindow := map[string]interface{}{}
+	if mw != nil {
+		switch p := mw.GetPolicy().(type) {
+		case *kafka.MaintenanceWindow_Anytime:
+			maintenanceWindow["type"] = "ANYTIME"
+		case *kafka.MaintenanceWindow_WeeklyMaintenanceWindow:
+			maintenanceWindow["type"] = "WEEKLY"
+			maintenanceWindow["hour"] = p.WeeklyMaintenanceWindow.Hour
+			maintenanceWindow["day"] = kafka.WeeklyMaintenanceWindow_WeekDay_name[int32(p.WeeklyMaintenanceWindow.GetDay())]
+		default:
+			return nil, fmt.Errorf("unsupported Kafka maintenance policy type")
+		}
+	}
+
+	return []interface{}{maintenanceWindow}, nil
+}
+
+func expandKafkaMaintenanceWindow(d *schema.ResourceData) (*kafka.MaintenanceWindow, error) {
+	if _, ok := d.GetOk("maintenance_window"); !ok {
+		return nil, nil
+	}
+
+	out := &kafka.MaintenanceWindow{}
+	typeMW, _ := d.GetOk("maintenance_window.0.type")
+	if typeMW == "ANYTIME" {
+		if hour, ok := d.GetOk("maintenance_window.0.hour"); ok && hour != "" {
+			return nil, fmt.Errorf("hour should not be set when using ANYTIME")
+		}
+		if day, ok := d.GetOk("maintenance_window.0.day"); ok && day != "" {
+			return nil, fmt.Errorf("day should not be set when using ANYTIME")
+		}
+		out.Policy = &kafka.MaintenanceWindow_Anytime{
+			Anytime: &kafka.AnytimeMaintenanceWindow{},
+		}
+	} else if typeMW == "WEEKLY" {
+		hourInterface, ok := d.GetOk("maintenance_window.0.hour")
+		if !ok {
+			return nil, fmt.Errorf("hour should be set when using WEEKLY maintenance")
+		}
+		hour := hourInterface.(int)
+
+		dayString := d.Get("maintenance_window.0.day").(string)
+
+		day, ok := kafka.WeeklyMaintenanceWindow_WeekDay_value[dayString]
+		if !ok || day == 0 {
+			return nil, fmt.Errorf(`day value should be one of ("MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN")`)
+		}
+
+		out.Policy = &kafka.MaintenanceWindow_WeeklyMaintenanceWindow{
+			WeeklyMaintenanceWindow: &kafka.WeeklyMaintenanceWindow{
+				Hour: int64(hour),
+				Day:  kafka.WeeklyMaintenanceWindow_WeekDay(day),
+			},
+		}
+	} else {
+		return nil, fmt.Errorf("maintenance_window.0.type should be ANYTIME or WEEKLY")
+	}
+
+	return out, nil
+}
diff --git a/yandex/resource_yandex_mdb_kafka_cluster.go b/yandex/resource_yandex_mdb_kafka_cluster.go
index 9e5afe18b..da8e7c5a2 100644
--- a/yandex/resource_yandex_mdb_kafka_cluster.go
+++ b/yandex/resource_yandex_mdb_kafka_cluster.go
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	"github.com/yandex-cloud/go-genproto/yandex/cloud/mdb/kafka/v1"
 	"github.com/yandex-cloud/go-genproto/yandex/cloud/operation"
 	"google.golang.org/genproto/protobuf/field_mask"
@@ -135,6 +136,31 @@ func resourceYandexMDBKafkaCluster() *schema.Resource {
 				Optional: true,
 				Computed: true,
 			},
+			"maintenance_window": {
+				Type:     schema.TypeList,
+				MaxItems: 1,
+				Optional: true,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"type": {
+							Type:         schema.TypeString,
+							ValidateFunc: validation.StringInSlice([]string{"ANYTIME", "WEEKLY"}, false),
+							Required:     true,
+						},
+						"day": {
+							Type:         schema.TypeString,
+							ValidateFunc: kafkaMaintenanceWindowSchemaValidateFunc,
+							Optional:     true,
+						},
+						"hour": {
+							Type:         schema.TypeInt,
+							ValidateFunc: validation.IntBetween(1, 24),
+							Optional:     true,
+						},
+					},
+				},
+			},
 		},
 	}
 }
@@ -601,6 +627,11 @@ func prepareKafkaCreateRequest(d *schema.ResourceData, meta *Config) (*kafka.Cre
 		return nil, fmt.Errorf("Error while expanding network id on Kafka Cluster create: %s", err)
 	}
 
+	maintenanceWindow, err := expandKafkaMaintenanceWindow(d)
+	if err != nil {
+		return nil, fmt.Errorf("error while expanding maintenance window settings on Kafka Cluster create: %s", err)
+	}
+
 	req := kafka.CreateClusterRequest{
 		FolderId:           folderID,
 		Name:               d.Get("name").(string),
@@ -615,6 +646,7 @@ func prepareKafkaCreateRequest(d *schema.ResourceData, meta *Config) (*kafka.Cre
 		SecurityGroupIds:   securityGroupIds,
 		HostGroupIds:       hostGroupIds,
 		DeletionProtection: d.Get("deletion_protection").(bool),
+		MaintenanceWindow:  maintenanceWindow,
 	}
 	return &req, nil
 }
@@ -703,6 +735,14 @@ func resourceYandexMDBKafkaClusterRead(d *schema.ResourceData, meta interface{})
 	d.Set("deletion_protection", cluster.DeletionProtection)
 
+	maintenanceWindow, err := flattenKafkaMaintenanceWindow(cluster.MaintenanceWindow)
+	if err != nil {
+		return err
+	}
+	if err := d.Set("maintenance_window", maintenanceWindow); err != nil {
+		return err
+	}
+
 	return d.Set("labels", cluster.Labels)
 }
@@ -833,6 +873,7 @@ var mdbKafkaUpdateFieldsMap = map[string]string{
 	"labels":              "labels",
 	"security_group_ids":  "security_group_ids",
 	"deletion_protection": "deletion_protection",
+	"maintenance_window":  "maintenance_window",
 	"config.0.zones":      "config_spec.zone_id",
 	"config.0.version":    "config_spec.version",
 	"config.0.brokers_count": "config_spec.brokers_count",
@@ -871,6 +912,11 @@ func kafkaClusterUpdateRequest(d *schema.ResourceData) (*kafka.UpdateClusterRequ
 		return nil, fmt.Errorf("error expanding configSpec while updating Kafka cluster: %s", err)
 	}
 
+	maintenanceWindow, err := expandKafkaMaintenanceWindow(d)
+	if err != nil {
+		return nil, fmt.Errorf("error expanding maintenance window settings while updating Kafka cluster: %s", err)
+	}
+
 	req := &kafka.UpdateClusterRequest{
 		ClusterId:          d.Id(),
 		Name:               d.Get("name").(string),
@@ -879,6 +925,7 @@ func kafkaClusterUpdateRequest(d *schema.ResourceData) (*kafka.UpdateClusterRequ
 		ConfigSpec:         configSpec,
 		SecurityGroupIds:   expandSecurityGroupIds(d.Get("security_group_ids")),
 		DeletionProtection: d.Get("deletion_protection").(bool),
+		MaintenanceWindow:  maintenanceWindow,
 	}
 	return req, nil
 }
diff --git a/yandex/resource_yandex_mdb_kafka_cluster_test.go b/yandex/resource_yandex_mdb_kafka_cluster_test.go
index 006b48e9d..4f6abc2c8 100644
--- a/yandex/resource_yandex_mdb_kafka_cluster_test.go
+++ b/yandex/resource_yandex_mdb_kafka_cluster_test.go
@@ -173,6 +173,13 @@ func TestExpandKafkaClusterConfig(t *testing.T) {
 		"subnet_ids":         []interface{}{"rc1a-subnet", "rc1b-subnet", "rc1c-subnet"},
 		"security_group_ids": []interface{}{"security-group-x", "security-group-y"},
 		"host_group_ids":     []interface{}{"hg1", "hg2", "hg3"},
+		"maintenance_window": []interface{}{
+			map[string]interface{}{
+				"type": "WEEKLY",
+				"day":  "WED",
+				"hour": 2,
+			},
+		},
 		"topic": []interface{}{
 			map[string]interface{}{
 				"name": "raw_events",
@@ -340,6 +347,14 @@ func TestExpandKafkaClusterConfig(t *testing.T) {
 		},
 		SecurityGroupIds: []string{"security-group-x", "security-group-y"},
 		HostGroupIds:     []string{"hg2", "hg1", "hg3"},
+		MaintenanceWindow: &kafka.MaintenanceWindow{
+			Policy: &kafka.MaintenanceWindow_WeeklyMaintenanceWindow{
+				WeeklyMaintenanceWindow: &kafka.WeeklyMaintenanceWindow{
+					Day:  kafka.WeeklyMaintenanceWindow_WED,
+					Hour: 2,
+				},
+			},
+		},
 	}
 
 	assert.Equal(t, expected, req)
@@ -615,6 +630,11 @@ func TestKafkaClusterUpdateRequest(t *testing.T) {
 		"subnet_ids":         []interface{}{"rc1a-subnet", "rc1b-subnet", "rc1c-subnet"},
 		"security_group_ids": []interface{}{"security-group-x", "security-group-y"},
 		"host_group_ids":     []interface{}{"hg1", "hg2", "hg3"},
+		"maintenance_window": []interface{}{
+			map[string]interface{}{
+				"type": "ANYTIME",
+			},
+		},
 	}
 
 	resourceData := schema.TestResourceDataRaw(t, resourceYandexMDBKafkaCluster().Schema, raw)
@@ -664,6 +684,11 @@ func TestKafkaClusterUpdateRequest(t *testing.T) {
 			},
 		},
 		SecurityGroupIds: []string{"security-group-x", "security-group-y"},
+		MaintenanceWindow: &kafka.MaintenanceWindow{
+			Policy: &kafka.MaintenanceWindow_Anytime{
+				Anytime: &kafka.AnytimeMaintenanceWindow{},
+			},
+		},
 		UpdateMask: &field_mask.FieldMask{Paths: []string{
 			"config_spec.brokers_count",
 			"config_spec.kafka.kafka_config_2_8.auto_create_topics_enable",
@@ -691,6 +716,7 @@ func TestKafkaClusterUpdateRequest(t *testing.T) {
 			"config_spec.zookeeper.resources.resource_preset_id",
 			"description",
 			"labels",
+			"maintenance_window",
 			"name",
 			"security_group_ids",
 		}},
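For reference, a minimal sketch of the new `maintenance_window` block on the `yandex_mdb_kafka_cluster` resource (the cluster name, network ID, and `config` values are hypothetical placeholders; only the `maintenance_window` block is introduced by this change):

```hcl
# Hypothetical cluster fragment; surrounding values are placeholders.
resource "yandex_mdb_kafka_cluster" "example" {
  name       = "kafka-example"
  network_id = "enpexamplenetworkid0"

  config {
    version = "2.8"
    zones   = ["ru-central1-a"]
    kafka {
      resources {
        resource_preset_id = "s2.micro"
        disk_type_id       = "network-ssd"
        disk_size          = 32
      }
    }
  }

  maintenance_window {
    type = "WEEKLY"
    day  = "WED" # required for WEEKLY; one of MON..SUN
    hour = 2     # UTC hour; validated to be between 1 and 24
  }
}
```

With `type = "ANYTIME"`, `day` and `hour` must be omitted: `expandKafkaMaintenanceWindow` rejects a day or hour combined with the anytime policy.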