diff --git a/.docs.version b/.docs.version index 36b834d8d1..62d27c1d7b 100644 --- a/.docs.version +++ b/.docs.version @@ -1 +1 @@ -d889c0ffa60e67860a97ecb57b38054bf83dcb8f +cddd40c58fe29009edc87b434dff37ed2144bea0 diff --git a/aws-cloudformation-schema/aws-appconfig-application.json b/aws-cloudformation-schema/aws-appconfig-application.json index 4da3c9b339..c896c1897c 100644 --- a/aws-cloudformation-schema/aws-appconfig-application.json +++ b/aws-cloudformation-schema/aws-appconfig-application.json @@ -12,8 +12,7 @@ "type" : "string", "description" : "The key-value string map. The valid character set is [a-zA-Z1-9 +-=._:/-]. The tag key can be up to 128 characters and must not start with aws:.", "minLength" : 1, - "maxLength" : 128, - "pattern" : "^(?!aws:.)[a-zA-Z1-9+=._:/-]*$" + "maxLength" : 128 }, "Value" : { "type" : "string", @@ -74,6 +73,7 @@ "tagOnCreate" : true, "tagUpdatable" : true, "cloudFormationSystemTags" : true, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "appconfig:TagResource", "appconfig:UntagResource", "appconfig:ListTagsForResource" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-appconfig-configurationprofile.json b/aws-cloudformation-schema/aws-appconfig-configurationprofile.json index bc3c3d7e1a..97d34d76c0 100644 --- a/aws-cloudformation-schema/aws-appconfig-configurationprofile.json +++ b/aws-cloudformation-schema/aws-appconfig-configurationprofile.json @@ -1,6 +1,7 @@ { "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-appconfig", "tagging" : { + "permissions" : [ "appconfig:TagResource", "appconfig:UntagResource", "appconfig:ListTagsForResource" ], "taggable" : true, "tagOnCreate" : true, "tagUpdatable" : true, @@ -69,7 +70,6 @@ }, "Key" : { "minLength" : 1, - "pattern" : "^(?!aws:.)[a-zA-Z0-9 +=._:/-]*$", "description" : "The key-value string map. The tag key can be up to 128 characters and must not start with aws:.", "type" : "string", "maxLength" : 128 diff --git a/aws-cloudformation-schema/aws-appconfig-environment.json b/aws-cloudformation-schema/aws-appconfig-environment.json index 51dc4eeae0..c5eb7317f3 100644 --- a/aws-cloudformation-schema/aws-appconfig-environment.json +++ b/aws-cloudformation-schema/aws-appconfig-environment.json @@ -1,6 +1,7 @@ { "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-appconfig.git", "tagging" : { + "permissions" : [ "appconfig:TagResource", "appconfig:UntagResource", "appconfig:ListTagsForResource" ], "taggable" : true, "tagOnCreate" : true, "tagUpdatable" : true, @@ -81,7 +82,6 @@ }, "Key" : { "minLength" : 1, - "pattern" : "^(?!aws:.)[a-zA-Z1-9+=._:/-]*$", "description" : "The key-value string map. The valid character set is [a-zA-Z1-9+-=._:/]. 
The tag key can be up to 128 characters and must not start with aws:.", "type" : "string", "maxLength" : 128 diff --git a/aws-cloudformation-schema/aws-appconfig-extension.json b/aws-cloudformation-schema/aws-appconfig-extension.json index 0642c8e45d..bb9fa10d9e 100644 --- a/aws-cloudformation-schema/aws-appconfig-extension.json +++ b/aws-cloudformation-schema/aws-appconfig-extension.json @@ -140,13 +140,6 @@ "readOnlyProperties" : [ "/properties/Id", "/properties/Arn", "/properties/VersionNumber" ], "writeOnlyProperties" : [ "/properties/LatestVersionNumber", "/properties/Tags", "/properties/Tags/*/Key", "/properties/Tags/*/Value" ], "primaryIdentifier" : [ "/properties/Id" ], - "tagging" : { - "taggable" : true, - "tagOnCreate" : true, - "tagUpdatable" : true, - "cloudFormationSystemTags" : true, - "tagProperty" : "/properties/Tags" - }, "handlers" : { "create" : { "permissions" : [ "appconfig:CreateExtension", "appconfig:TagResource", "iam:PassRole" ] @@ -163,5 +156,13 @@ "list" : { "permissions" : [ "appconfig:ListExtensions" ] } + }, + "tagging" : { + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : true, + "cloudFormationSystemTags" : true, + "tagProperty" : "/properties/Tags", + "permissions" : [ "appconfig:TagResource", "appconfig:UntagResource", "appconfig:ListTagsForResource" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-appconfig-extensionassociation.json b/aws-cloudformation-schema/aws-appconfig-extensionassociation.json index d3e087e907..ac4d97a04b 100644 --- a/aws-cloudformation-schema/aws-appconfig-extensionassociation.json +++ b/aws-cloudformation-schema/aws-appconfig-extensionassociation.json @@ -70,13 +70,6 @@ "createOnlyProperties" : [ "/properties/ExtensionIdentifier", "/properties/ResourceIdentifier", "/properties/ExtensionVersionNumber", "/properties/Tags", "/properties/Tags/*/Key", "/properties/Tags/*/Value" ], "writeOnlyProperties" : [ "/properties/ExtensionIdentifier", "/properties/ResourceIdentifier", "/properties/Tags", "/properties/Tags/*/Key", "/properties/Tags/*/Value" ], "primaryIdentifier" : [ "/properties/Id" ], - "tagging" : { - "taggable" : true, - "tagOnCreate" : true, - "tagUpdatable" : true, - "cloudFormationSystemTags" : true, - "tagProperty" : "/properties/Tags" - }, "handlers" : { "create" : { "permissions" : [ "appconfig:CreateExtensionAssociation", "appconfig:TagResource" ] @@ -93,5 +86,13 @@ "list" : { "permissions" : [ "appconfig:ListExtensionAssociations" ] } + }, + "tagging" : { + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : true, + "cloudFormationSystemTags" : true, + "tagProperty" : "/properties/Tags", + "permissions" : [ "appconfig:TagResource", "appconfig:UntagResource", "appconfig:ListTagsForResource" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-appsync-datasource.json b/aws-cloudformation-schema/aws-appsync-datasource.json index fb34a62aba..428ebf6efb 100644 --- a/aws-cloudformation-schema/aws-appsync-datasource.json +++ b/aws-cloudformation-schema/aws-appsync-datasource.json @@ -1,52 +1,68 @@ { "typeName" : "AWS::AppSync::DataSource", "description" : "Resource Type definition for AWS::AppSync::DataSource", + "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-appsync", "additionalProperties" : false, + "tagging" : { + "taggable" : false + }, "properties" : { - "OpenSearchServiceConfig" : { - "$ref" : "#/definitions/OpenSearchServiceConfig" - }, - "Description" : { - "type" : "string" - }, - "ServiceRoleArn" 
: { - "type" : "string" - }, - "MetricsConfig" : { + "ApiId" : { + "description" : "Unique AWS AppSync GraphQL API identifier where this data source will be created.", "type" : "string" }, - "Name" : { + "Description" : { + "description" : "The description of the data source.", "type" : "string" }, - "DataSourceArn" : { - "type" : "string" + "DynamoDBConfig" : { + "description" : "AWS Region and TableName for an Amazon DynamoDB table in your account.", + "$ref" : "#/definitions/DynamoDBConfig" }, - "Type" : { - "type" : "string" + "ElasticsearchConfig" : { + "description" : "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service. This property is deprecated. For new data sources, use OpenSearchServiceConfig to specify an OpenSearch Service data source.", + "$ref" : "#/definitions/ElasticsearchConfig" }, "EventBridgeConfig" : { + "description" : "ARN for the EventBridge bus.", "$ref" : "#/definitions/EventBridgeConfig" }, "HttpConfig" : { + "description" : "Endpoints for an HTTP data source.", "$ref" : "#/definitions/HttpConfig" }, - "RelationalDatabaseConfig" : { - "$ref" : "#/definitions/RelationalDatabaseConfig" - }, "LambdaConfig" : { + "description" : "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account.", "$ref" : "#/definitions/LambdaConfig" }, - "Id" : { + "Name" : { + "description" : "Friendly name for you to identify your AppSync data source after creation.", "type" : "string" }, - "ApiId" : { + "OpenSearchServiceConfig" : { + "description" : "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.", + "$ref" : "#/definitions/OpenSearchServiceConfig" + }, + "RelationalDatabaseConfig" : { + "description" : "Relational Database configuration of the relational database data source.", + "$ref" : "#/definitions/RelationalDatabaseConfig" + }, + "ServiceRoleArn" : { + "description" : "The AWS Identity and Access Management service role ARN for the data source. 
The system assumes this role when accessing the data source.", "type" : "string" }, - "DynamoDBConfig" : { - "$ref" : "#/definitions/DynamoDBConfig" + "Type" : { + "description" : "The type of the data source.", + "type" : "string" }, - "ElasticsearchConfig" : { - "$ref" : "#/definitions/ElasticsearchConfig" + "DataSourceArn" : { + "description" : "The Amazon Resource Name (ARN) of the API key, such as arn:aws:appsync:us-east-1:123456789012:apis/graphqlapiid/datasources/datasourcename.", + "type" : "string" + }, + "MetricsConfig" : { + "description" : "", + "type" : "string", + "enum" : [ "DISABLED", "ENABLED" ] } }, "definitions" : { @@ -55,18 +71,23 @@ "additionalProperties" : false, "properties" : { "DatabaseName" : { + "description" : "Logical database name.", "type" : "string" }, "AwsRegion" : { + "description" : "AWS Region for RDS HTTP endpoint.", "type" : "string" }, "DbClusterIdentifier" : { + "description" : "Amazon RDS cluster Amazon Resource Name (ARN).", "type" : "string" }, "AwsSecretStoreArn" : { + "description" : "The ARN for database credentials stored in AWS Secrets Manager.", "type" : "string" }, "Schema" : { + "description" : "Logical schema name.", "type" : "string" } }, @@ -77,9 +98,11 @@ "additionalProperties" : false, "properties" : { "AwsRegion" : { + "description" : "The AWS Region.", "type" : "string" }, "Endpoint" : { + "description" : "The endpoint.", "type" : "string" } }, @@ -90,9 +113,11 @@ "additionalProperties" : false, "properties" : { "SigningRegion" : { + "description" : "The signing Region for AWS Identity and Access Management authorization.", "type" : "string" }, "SigningServiceName" : { + "description" : "The signing service name for AWS Identity and Access Management authorization.", "type" : "string" } } @@ -102,6 +127,7 @@ "additionalProperties" : false, "properties" : { "EventBusArn" : { + "description" : "ARN for the EventBridge bus.", "type" : "string" } }, @@ -112,9 +138,11 @@ "additionalProperties" : false, "properties" : { "AuthorizationType" : { + "description" : "The authorization type that the HTTP endpoint requires.", "type" : "string" }, "AwsIamConfig" : { + "description" : "The AWS Identity and Access Management settings.", "$ref" : "#/definitions/AwsIamConfig" } }, @@ -125,12 +153,15 @@ "additionalProperties" : false, "properties" : { "BaseTableTTL" : { + "description" : "The number of minutes that an Item is stored in the data source.", "type" : "string" }, "DeltaSyncTableTTL" : { + "description" : "The number of minutes that a Delta Sync log entry is stored in the Delta Sync table.", "type" : "string" }, "DeltaSyncTableName" : { + "description" : "The Delta Sync table name.", "type" : "string" } }, @@ -141,9 +172,11 @@ "additionalProperties" : false, "properties" : { "RdsHttpEndpointConfig" : { + "description" : "Information about the Amazon RDS resource.", "$ref" : "#/definitions/RdsHttpEndpointConfig" }, "RelationalDatabaseSourceType" : { + "description" : "The type of relational data source.", "type" : "string" } }, @@ -154,9 +187,11 @@ "additionalProperties" : false, "properties" : { "Endpoint" : { + "description" : "The endpoint.", "type" : "string" }, "AuthorizationConfig" : { + "description" : "The authorization configuration.", "$ref" : "#/definitions/AuthorizationConfig" } }, @@ -167,6 +202,7 @@ "additionalProperties" : false, "properties" : { "LambdaFunctionArn" : { + "description" : "The ARN for the Lambda function.", "type" : "string" } }, @@ -177,9 +213,11 @@ "additionalProperties" : false, "properties" : { "AwsRegion" 
: { + "description" : "The AWS Region.", "type" : "string" }, "Endpoint" : { + "description" : "The endpoint.", "type" : "string" } }, @@ -190,18 +228,23 @@ "additionalProperties" : false, "properties" : { "TableName" : { + "description" : "The table name.", "type" : "string" }, "DeltaSyncConfig" : { + "description" : "The DeltaSyncConfig for a versioned datasource.", "$ref" : "#/definitions/DeltaSyncConfig" }, "UseCallerCredentials" : { + "description" : "Set to TRUE to use AWS Identity and Access Management with this data source.", "type" : "boolean" }, "AwsRegion" : { + "description" : "The AWS Region.", "type" : "string" }, "Versioned" : { + "description" : "Set to TRUE to use Conflict Detection and Resolution with this data source.", "type" : "boolean" } }, @@ -210,6 +253,32 @@ }, "required" : [ "Type", "ApiId", "Name" ], "createOnlyProperties" : [ "/properties/ApiId", "/properties/Name" ], - "primaryIdentifier" : [ "/properties/Id" ], - "readOnlyProperties" : [ "/properties/Id", "/properties/DataSourceArn" ] + "primaryIdentifier" : [ "/properties/DataSourceArn" ], + "readOnlyProperties" : [ "/properties/DataSourceArn" ], + "deprecatedProperties" : [ "/properties/ElasticsearchConfig" ], + "handlers" : { + "create" : { + "permissions" : [ "appsync:CreateDataSource", "appsync:GetDataSource", "iam:PassRole" ] + }, + "read" : { + "permissions" : [ "appsync:GetDataSource" ] + }, + "update" : { + "permissions" : [ "appsync:UpdateDataSource", "iam:PassRole" ] + }, + "delete" : { + "permissions" : [ "appsync:DeleteDataSource", "appsync:GetDataSource" ] + }, + "list" : { + "handlerSchema" : { + "properties" : { + "ApiId" : { + "$ref" : "resource-schema.json#/properties/ApiId" + } + }, + "required" : [ "ApiId" ] + }, + "permissions" : [ "appsync:ListDataSources" ] + } + } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-codepipeline-pipeline.json b/aws-cloudformation-schema/aws-codepipeline-pipeline.json index a82e660293..a2f5224d1f 100644 --- a/aws-cloudformation-schema/aws-codepipeline-pipeline.json +++ b/aws-cloudformation-schema/aws-codepipeline-pipeline.json @@ -60,7 +60,7 @@ "Category" : { "description" : "A category defines what kind of action can be taken in the stage, and constrains the provider type for the action. 
Valid categories are limited to one of the values below.", "type" : "string", - "enum" : [ "Source", "Build", "Test", "Deploy", "Invoke", "Approval" ] + "enum" : [ "Source", "Build", "Test", "Deploy", "Invoke", "Approval", "Compute" ] }, "Version" : { "description" : "A string that describes the action version.", @@ -183,6 +183,22 @@ "$ref" : "#/definitions/OutputArtifact" } }, + "Commands" : { + "description" : "The shell commands to run with your compute action in CodePipeline.", + "type" : "array", + "uniqueItems" : false, + "items" : { + "type" : "string" + } + }, + "OutputVariables" : { + "description" : "The list of variables that are to be exported from the compute action.", + "type" : "array", + "uniqueItems" : true, + "items" : { + "type" : "string" + } + }, "Region" : { "description" : "The action declaration's AWS Region, such as us-east-1.", "type" : "string" @@ -413,6 +429,14 @@ "Name" : { "description" : "The name of the output of an artifact, such as \"My App\".", "type" : "string" + }, + "Files" : { + "description" : "The files that you want to associate with the output artifact that will be exported from the compute action.", + "type" : "array", + "uniqueItems" : true, + "items" : { + "type" : "string" + } } }, "required" : [ "Name" ] diff --git a/aws-cloudformation-schema/aws-ecs-service.json b/aws-cloudformation-schema/aws-ecs-service.json index 3e53b1adc4..8f05db4960 100644 --- a/aws-cloudformation-schema/aws-ecs-service.json +++ b/aws-cloudformation-schema/aws-ecs-service.json @@ -6,6 +6,9 @@ "tagProperty" : "/properties/Tags", "cloudFormationSystemTags" : true }, + "propertyTransform" : { + "/properties/Role" : "Role $OR $join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:iam::[0-9]{12}[:]role/{1}\", Role])" + }, "handlers" : { "read" : { "permissions" : [ "ecs:DescribeServices" ] diff --git a/aws-cloudformation-schema/aws-gamelift-containergroupdefinition.json b/aws-cloudformation-schema/aws-gamelift-containergroupdefinition.json index 3628072734..2028bae60d 100644 --- a/aws-cloudformation-schema/aws-gamelift-containergroupdefinition.json +++ b/aws-cloudformation-schema/aws-gamelift-containergroupdefinition.json @@ -9,7 +9,7 @@ }, "$schema" : "https://schema.cloudformation.us-east-1.amazonaws.com/provider.definition.schema.v1.json", "typeName" : "AWS::GameLift::ContainerGroupDefinition", - "readOnlyProperties" : [ "/properties/ContainerGroupDefinitionArn", "/properties/CreationTime", "/properties/ContainerDefinitions/*/ResolvedImageDigest" ], + "readOnlyProperties" : [ "/properties/ContainerGroupDefinitionArn", "/properties/CreationTime", "/properties/ContainerDefinitions/*/ResolvedImageDigest", "/properties/Status", "/properties/StatusReason" ], "description" : "The AWS::GameLift::ContainerGroupDefinition resource creates an Amazon GameLift container group definition.", "createOnlyProperties" : [ "/properties/Name", "/properties/SchedulingStrategy", "/properties/TotalMemoryLimit", "/properties/TotalCpuLimit", "/properties/ContainerDefinitions", "/properties/OperatingSystem" ], "primaryIdentifier" : [ "/properties/Name" ], @@ -59,6 +59,33 @@ }, "required" : [ "FromPort", "Protocol", "ToPort" ] }, + "ContainerMountPoint" : { + "description" : "Defines the mount point configuration within a container.", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "InstancePath" : { + "minLength" : 1, + "pattern" : "^\\/[\\s\\S]*$", + "description" : "The path on the host that will be mounted in the container.", + "type" : "string", + "maxLength" : 
1024 + }, + "ContainerPath" : { + "minLength" : 1, + "pattern" : "^(\\/+[^\\/]+\\/*)+$", + "description" : "The path inside the container where the mount is accessible.", + "type" : "string", + "maxLength" : 1024 + }, + "AccessLevel" : { + "description" : "The access permissions for the mounted path.", + "type" : "string", + "enum" : [ "READ_ONLY", "READ_AND_WRITE" ] + } + }, + "required" : [ "InstancePath" ] + }, "MemoryLimits" : { "description" : "Specifies how much memory is available to the container.", "additionalProperties" : false, @@ -314,11 +341,20 @@ } }, "properties" : { + "Status" : { + "description" : "A string indicating ContainerGroupDefinition status.", + "type" : "string", + "enum" : [ "READY", "COPYING", "FAILED" ] + }, "OperatingSystem" : { "description" : "The operating system of the container group", "type" : "string", "enum" : [ "AMAZON_LINUX_2023" ] }, + "StatusReason" : { + "description" : "A string indicating the reason for ContainerGroupDefinition status.", + "type" : "string" + }, "Name" : { "minLength" : 1, "pattern" : "^[a-zA-Z0-9-]+$", @@ -359,6 +395,11 @@ "$ref" : "#/definitions/ContainerDefinition" } }, + "SourceVersionNumber" : { + "description" : "A specific ContainerGroupDefinition version to be updated", + "type" : "integer", + "minimum" : 0 + }, "Tags" : { "minItems" : 0, "maxItems" : 200, @@ -370,6 +411,15 @@ "$ref" : "#/definitions/Tag" } }, + "SupportContainerDefinitions" : { + "minItems" : 1, + "maxItems" : 10, + "uniqueItems" : true, + "description" : "A collection of support container definitions that define the containers in this group.", + "insertionOrder" : false, + "type" : "array", + "items" : { } + }, "TotalCpuLimit" : { "description" : "The maximum number of CPU units reserved for this container group. The value is expressed as an integer amount of CPU units. 
(1 vCPU is equal to 1024 CPU units.)", "maximum" : 10240, diff --git a/aws-cloudformation-schema/aws-glue-registry.json b/aws-cloudformation-schema/aws-glue-registry.json index 5ee7913c9a..1502595587 100644 --- a/aws-cloudformation-schema/aws-glue-registry.json +++ b/aws-cloudformation-schema/aws-glue-registry.json @@ -1,6 +1,14 @@ { "typeName" : "AWS::Glue::Registry", "description" : "This resource creates a Registry for authoring schemas as part of Glue Schema Registry.", + "tagging" : { + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : true, + "cloudFormationSystemTags" : true, + "tagProperty" : "/properties/Tags", + "permissions" : [ "glue:GetTags", "glue:TagResource", "glue:UntagResource" ] + }, "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-glue.git", "definitions" : { "Tag" : { @@ -58,7 +66,7 @@ "required" : [ "Name" ], "handlers" : { "create" : { - "permissions" : [ "glue:CreateRegistry", "glue:GetRegistry", "glue:GetTags" ] + "permissions" : [ "glue:CreateRegistry", "glue:GetRegistry", "glue:GetTags", "glue:TagResource" ] }, "read" : { "permissions" : [ "glue:GetRegistry", "glue:GetTags" ] diff --git a/aws-cloudformation-schema/aws-glue-schema.json b/aws-cloudformation-schema/aws-glue-schema.json index c82e3acfe2..79de502448 100644 --- a/aws-cloudformation-schema/aws-glue-schema.json +++ b/aws-cloudformation-schema/aws-glue-schema.json @@ -1,6 +1,14 @@ { "typeName" : "AWS::Glue::Schema", "description" : "This resource represents a schema of Glue Schema Registry.", + "tagging" : { + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : true, + "cloudFormationSystemTags" : true, + "tagProperty" : "/properties/Tags", + "permissions" : [ "glue:GetTags", "glue:TagResource", "glue:UntagResource" ] + }, "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-glue.git", "definitions" : { "Registry" : { @@ -114,14 +122,14 @@ } }, "additionalProperties" : false, - "required" : [ "Name", "DataFormat", "SchemaDefinition", "Compatibility" ], + "required" : [ "Name", "DataFormat", "Compatibility" ], "readOnlyProperties" : [ "/properties/Arn", "/properties/InitialSchemaVersionId" ], "createOnlyProperties" : [ "/properties/Registry", "/properties/Name", "/properties/DataFormat", "/properties/SchemaDefinition" ], "writeOnlyProperties" : [ "/properties/SchemaDefinition" ], "primaryIdentifier" : [ "/properties/Arn" ], "handlers" : { "create" : { - "permissions" : [ "glue:CreateSchema" ] + "permissions" : [ "glue:CreateSchema", "glue:TagResource" ] }, "read" : { "permissions" : [ "glue:GetSchemaVersion", "glue:GetSchema", "glue:GetTags" ] diff --git a/aws-cloudformation-schema/aws-iam-oidcprovider.json b/aws-cloudformation-schema/aws-iam-oidcprovider.json index 8837f4eba4..b27d551144 100644 --- a/aws-cloudformation-schema/aws-iam-oidcprovider.json +++ b/aws-cloudformation-schema/aws-iam-oidcprovider.json @@ -89,6 +89,8 @@ "taggable" : true, "tagOnCreate" : true, "tagUpdatable" : true, - "cloudFormationSystemTags" : false + "cloudFormationSystemTags" : false, + "tagProperty" : "/properties/Tags", + "permissions" : [ "iam:TagOpenIDConnectProvider", "iam:UntagOpenIDConnectProvider", "iam:ListOpenIDConnectProviderTags" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-iam-samlprovider.json b/aws-cloudformation-schema/aws-iam-samlprovider.json index 0583e2d2fd..15cce382ec 100644 --- a/aws-cloudformation-schema/aws-iam-samlprovider.json +++ 
b/aws-cloudformation-schema/aws-iam-samlprovider.json @@ -76,6 +76,8 @@ "taggable" : true, "tagOnCreate" : true, "tagUpdatable" : true, - "cloudFormationSystemTags" : false + "cloudFormationSystemTags" : false, + "tagProperty" : "/properties/Tags", + "permissions" : [ "iam:TagSAMLProvider", "iam:ListSAMLProviderTags", "iam:UntagSAMLProvider" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-iam-servercertificate.json b/aws-cloudformation-schema/aws-iam-servercertificate.json index b045c600b9..a2d6fe07fc 100644 --- a/aws-cloudformation-schema/aws-iam-servercertificate.json +++ b/aws-cloudformation-schema/aws-iam-servercertificate.json @@ -95,6 +95,8 @@ "taggable" : true, "tagOnCreate" : true, "tagUpdatable" : true, - "cloudFormationSystemTags" : false + "cloudFormationSystemTags" : false, + "tagProperty" : "/properties/Tags", + "permissions" : [ "iam:TagServerCertificate", "iam:UntagServerCertificate", "iam:ListServerCertificateTags" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-lambda-eventsourcemapping.json b/aws-cloudformation-schema/aws-lambda-eventsourcemapping.json index 6428ff21c3..bbbbc8860c 100644 --- a/aws-cloudformation-schema/aws-lambda-eventsourcemapping.json +++ b/aws-cloudformation-schema/aws-lambda-eventsourcemapping.json @@ -1,313 +1,194 @@ { - "typeName" : "AWS::Lambda::EventSourceMapping", - "description" : "The ``AWS::Lambda::EventSourceMapping`` resource creates a mapping between an event source and an LAMlong function. LAM reads items from the event source and triggers the function.\n For details about each event source type, see the following topics. In particular, each of the topics describes the required and optional parameters for the specific event source. \n + [Configuring a Dynamo DB stream as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-dynamodb-eventsourcemapping) \n + [Configuring a Kinesis stream as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-eventsourcemapping) \n + [Configuring an SQS queue as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-eventsource) \n + [Configuring an MQ broker as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-mq.html#services-mq-eventsourcemapping) \n + [Configuring MSK as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) \n + [Configuring Self-Managed Apache Kafka as an event source](https://docs.aws.amazon.com/lambda/latest/dg/kafka-smaa.html) \n + [Configuring Amazon DocumentDB as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html)", - "additionalProperties" : false, - "properties" : { - "Id" : { - "description" : "", - "type" : "string", - "pattern" : "[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}", - "minLength" : 36, - "maxLength" : 36 - }, - "BatchSize" : { - "description" : "The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).\n + *Amazon Kinesis* – Default 100. Max 10,000.\n + *Amazon DynamoDB Streams* – Default 100. Max 10,000.\n + *Amazon Simple Queue Service* – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.\n + *Amazon Managed Streaming for Apache Kafka* – Default 100. 
Max 10,000.\n + *Self-managed Apache Kafka* – Default 100. Max 10,000.\n + *Amazon MQ (ActiveMQ and RabbitMQ)* – Default 100. Max 10,000.\n + *DocumentDB* – Default 100. Max 10,000.", - "type" : "integer", - "minimum" : 1, - "maximum" : 10000 - }, - "BisectBatchOnFunctionError" : { - "description" : "(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.", - "type" : "boolean" - }, - "DestinationConfig" : { - "description" : "(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.", - "$ref" : "#/definitions/DestinationConfig" - }, - "Enabled" : { - "description" : "When true, the event source mapping is active. When false, Lambda pauses polling and invocation.\n Default: True", - "type" : "boolean" - }, - "EventSourceArn" : { - "description" : "The Amazon Resource Name (ARN) of the event source.\n + *Amazon Kinesis* – The ARN of the data stream or a stream consumer.\n + *Amazon DynamoDB Streams* – The ARN of the stream.\n + *Amazon Simple Queue Service* – The ARN of the queue.\n + *Amazon Managed Streaming for Apache Kafka* – The ARN of the cluster or the ARN of the VPC connection (for [cross-account event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc)).\n + *Amazon MQ* – The ARN of the broker.\n + *Amazon DocumentDB* – The ARN of the DocumentDB change stream.", - "type" : "string", - "pattern" : "arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?(-iso([a-z])?)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)", - "minLength" : 12, - "maxLength" : 1024 - }, - "EventSourceMappingArn" : { - "description" : "", - "type" : "string", - "pattern" : "arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}", - "minLength" : 85, - "maxLength" : 120 - }, - "FilterCriteria" : { - "description" : "An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see [Lambda event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html).", - "$ref" : "#/definitions/FilterCriteria" - }, - "KmsKeyArn" : { - "description" : "The ARN of the KMSlong (KMS) customer managed key that Lambda uses to encrypt your function's [filter criteria](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-basics).", - "type" : "string", - "pattern" : "(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()", - "minLength" : 12, - "maxLength" : 2048 - }, - "FunctionName" : { - "description" : "The name or ARN of the Lambda function.\n **Name formats**\n + *Function name* – ``MyFunction``.\n + *Function ARN* – ``arn:aws:lambda:us-west-2:123456789012:function:MyFunction``.\n + *Version or Alias ARN* – ``arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD``.\n + *Partial ARN* – ``123456789012:function:MyFunction``.\n \n The length constraint applies only to the full ARN. 
If you specify only the function name, it's limited to 64 characters in length.", - "type" : "string", - "pattern" : "(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?(-iso([a-z])?)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?", - "minLength" : 1, - "maxLength" : 140 - }, - "MaximumBatchingWindowInSeconds" : { - "description" : "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n *Default (, , event sources)*: 0\n *Default (, Kafka, , event sources)*: 500 ms\n *Related setting:* For SQS event sources, when you set ``BatchSize`` to a value greater than 10, you must set ``MaximumBatchingWindowInSeconds`` to at least 1.", - "type" : "integer", - "minimum" : 0, - "maximum" : 300 - }, - "MaximumRecordAgeInSeconds" : { - "description" : "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed", - "type" : "integer", - "minimum" : -1, - "maximum" : 604800 - }, - "MaximumRetryAttempts" : { - "description" : "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.", - "type" : "integer", - "minimum" : -1, - "maximum" : 10000 + "tagging" : { + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : true, + "tagProperty" : "/properties/Tags", + "cloudFormationSystemTags" : true + }, + "propertyTransform" : { + "/properties/StartingPositionTimestamp" : "StartingPositionTimestamp * 1000" + }, + "handlers" : { + "read" : { + "permissions" : [ "lambda:GetEventSourceMapping", "lambda:ListTags", "kms:Decrypt" ] }, - "ParallelizationFactor" : { - "description" : "(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.", - "type" : "integer", - "minimum" : 1, - "maximum" : 10 + "create" : { + "permissions" : [ "lambda:CreateEventSourceMapping", "lambda:GetEventSourceMapping", "lambda:TagResource", "kms:DescribeKey", "kms:GenerateDataKey", "kms:Decrypt" ] }, - "StartingPosition" : { - "description" : "The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB.\n + *LATEST* - Read only new records.\n + *TRIM_HORIZON* - Process all available records.\n + *AT_TIMESTAMP* - Specify a time from which to start reading records.", - "type" : "string", - "pattern" : "(LATEST|TRIM_HORIZON|AT_TIMESTAMP)+", - "minLength" : 6, - "maxLength" : 12 + "update" : { + "permissions" : [ "lambda:UpdateEventSourceMapping", "lambda:GetEventSourceMapping", "lambda:ListTags", "lambda:TagResource", "lambda:UntagResource", "kms:DescribeKey", "kms:GenerateDataKey", "kms:Decrypt" ] }, - "StartingPositionTimestamp" : { - "description" : "With ``StartingPosition`` set to ``AT_TIMESTAMP``, the time from which to start reading, in Unix time seconds. 
``StartingPositionTimestamp`` cannot be in the future.", - "type" : "number" + "list" : { + "permissions" : [ "lambda:ListEventSourceMappings" ] }, - "Tags" : { - "description" : "", - "type" : "array", - "uniqueItems" : true, - "insertionOrder" : false, - "items" : { - "$ref" : "#/definitions/Tag" + "delete" : { + "permissions" : [ "lambda:DeleteEventSourceMapping", "lambda:GetEventSourceMapping", "kms:Decrypt" ] + } + }, + "typeName" : "AWS::Lambda::EventSourceMapping", + "readOnlyProperties" : [ "/properties/Id", "/properties/EventSourceMappingArn" ], + "description" : "The ``AWS::Lambda::EventSourceMapping`` resource creates a mapping between an event source and an LAMlong function. LAM reads items from the event source and triggers the function.\n For details about each event source type, see the following topics. In particular, each of the topics describes the required and optional parameters for the specific event source. \n + [Configuring a Dynamo DB stream as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-ddb.html#services-dynamodb-eventsourcemapping) \n + [Configuring a Kinesis stream as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-kinesis.html#services-kinesis-eventsourcemapping) \n + [Configuring an SQS queue as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-eventsource) \n + [Configuring an MQ broker as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-mq.html#services-mq-eventsourcemapping) \n + [Configuring MSK as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) \n + [Configuring Self-Managed Apache Kafka as an event source](https://docs.aws.amazon.com/lambda/latest/dg/kafka-smaa.html) \n + [Configuring Amazon DocumentDB as an event source](https://docs.aws.amazon.com/lambda/latest/dg/with-documentdb.html)", + "createOnlyProperties" : [ "/properties/EventSourceArn", "/properties/StartingPosition", "/properties/StartingPositionTimestamp", "/properties/SelfManagedEventSource", "/properties/AmazonManagedKafkaEventSourceConfig", "/properties/SelfManagedKafkaEventSourceConfig" ], + "additionalProperties" : false, + "primaryIdentifier" : [ "/properties/Id" ], + "definitions" : { + "ScalingConfig" : { + "description" : "(Amazon SQS only) The scaling configuration for the event source. 
To remove the configuration, pass an empty value.", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "MaximumConcurrency" : { + "description" : "Limits the number of concurrent instances that the SQS event source can invoke.", + "$ref" : "#/definitions/MaximumConcurrency" + } } }, - "Topics" : { - "description" : "The name of the Kafka topic.", - "type" : "array", - "uniqueItems" : true, - "items" : { - "type" : "string", - "pattern" : "^[^.]([a-zA-Z0-9\\-_.]+)", - "minLength" : 1, - "maxLength" : 249 - }, - "minItems" : 1, - "maxItems" : 1 - }, - "Queues" : { - "description" : "(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.", - "type" : "array", - "uniqueItems" : true, - "items" : { - "type" : "string", - "pattern" : "[\\s\\S]*", - "minLength" : 1, - "maxLength" : 1000 - }, - "minItems" : 1, - "maxItems" : 1 - }, - "SourceAccessConfigurations" : { - "description" : "An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.", - "type" : "array", - "uniqueItems" : true, - "items" : { - "$ref" : "#/definitions/SourceAccessConfiguration" - }, - "minItems" : 1, - "maxItems" : 22 - }, - "TumblingWindowInSeconds" : { - "description" : "(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.", - "type" : "integer", - "minimum" : 0, - "maximum" : 900 - }, - "FunctionResponseTypes" : { - "description" : "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n Valid Values: ``ReportBatchItemFailures``", - "type" : "array", - "uniqueItems" : true, - "items" : { - "type" : "string", - "enum" : [ "ReportBatchItemFailures" ] - }, - "minLength" : 0, - "maxLength" : 1 - }, "SelfManagedEventSource" : { "description" : "The self-managed Apache Kafka cluster for your event source.", - "$ref" : "#/definitions/SelfManagedEventSource" - }, - "AmazonManagedKafkaEventSourceConfig" : { - "description" : "Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.", - "$ref" : "#/definitions/AmazonManagedKafkaEventSourceConfig" - }, - "SelfManagedKafkaEventSourceConfig" : { - "description" : "Specific configuration settings for a self-managed Apache Kafka event source.", - "$ref" : "#/definitions/SelfManagedKafkaEventSourceConfig" + "additionalProperties" : false, + "type" : "object", + "properties" : { + "Endpoints" : { + "description" : "The list of bootstrap servers for your Kafka brokers in the following format: ``\"KafkaBootstrapServers\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"]``.", + "$ref" : "#/definitions/Endpoints" + } + } }, - "ScalingConfig" : { - "description" : "(Amazon SQS only) The scaling configuration for the event source. 
For more information, see [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency).", - "$ref" : "#/definitions/ScalingConfig" + "MaximumConcurrency" : { + "description" : "The maximum number of concurrent functions that an event source can invoke.", + "maximum" : 1000, + "type" : "integer", + "minimum" : 2 }, - "DocumentDBEventSourceConfig" : { - "description" : "Specific configuration settings for a DocumentDB event source.", - "$ref" : "#/definitions/DocumentDBEventSourceConfig" - } - }, - "definitions" : { - "DestinationConfig" : { - "type" : "object", + "SourceAccessConfiguration" : { + "description" : "An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.", "additionalProperties" : false, - "description" : "A configuration object that specifies the destination of an event after Lambda processes it.", + "type" : "object", "properties" : { - "OnFailure" : { - "description" : "The destination configuration for failed invocations.", - "$ref" : "#/definitions/OnFailure" + "Type" : { + "description" : "The type of authentication protocol, VPC components, or virtual host for your event source. For example: ``\"Type\":\"SASL_SCRAM_512_AUTH\"``.\n + ``BASIC_AUTH`` – (Amazon MQ) The ASMlong secret that stores your broker credentials.\n + ``BASIC_AUTH`` – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers.\n + ``VPC_SUBNET`` – (Self-managed Apache Kafka) The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster.\n + ``VPC_SECURITY_GROUP`` – (Self-managed Apache Kafka) The VPC security group used to manage access to your self-managed Apache Kafka brokers.\n + ``SASL_SCRAM_256_AUTH`` – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers.\n + ``SASL_SCRAM_512_AUTH`` – (Amazon MSK, Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers.\n + ``VIRTUAL_HOST`` –- (RabbitMQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source. This property cannot be specified in an UpdateEventSourceMapping API call.\n + ``CLIENT_CERTIFICATE_TLS_AUTH`` – (Amazon MSK, self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your MSK/Apache Kafka brokers.\n + ``SERVER_ROOT_CA_CERTIFICATE`` – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your Apache Kafka brokers.", + "type" : "string", + "enum" : [ "BASIC_AUTH", "VPC_SUBNET", "VPC_SECURITY_GROUP", "SASL_SCRAM_512_AUTH", "SASL_SCRAM_256_AUTH", "VIRTUAL_HOST", "CLIENT_CERTIFICATE_TLS_AUTH", "SERVER_ROOT_CA_CERTIFICATE" ] + }, + "URI" : { + "minLength" : 1, + "pattern" : "[a-zA-Z0-9-\\/*:_+=.@-]*", + "description" : "The value for your chosen configuration in ``Type``. 
For example: ``\"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\"``.", + "type" : "string", + "maxLength" : 200 } } }, "FilterCriteria" : { - "type" : "object", "description" : "An object that contains the filters for an event source.", "additionalProperties" : false, + "type" : "object", "properties" : { "Filters" : { + "minItems" : 1, + "maxItems" : 20, + "uniqueItems" : true, "description" : "A list of filters.", "type" : "array", - "uniqueItems" : true, "items" : { "$ref" : "#/definitions/Filter" - }, - "minItems" : 1, - "maxItems" : 20 + } } } }, - "Filter" : { - "type" : "object", - "description" : "A structure within a ``FilterCriteria`` object that defines an event filtering pattern.", + "SelfManagedKafkaEventSourceConfig" : { + "description" : "Specific configuration settings for a self-managed Apache Kafka event source.", "additionalProperties" : false, - "properties" : { - "Pattern" : { - "type" : "string", - "description" : "A filter pattern. For more information on the syntax of a filter pattern, see [Filter rule syntax](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-syntax).", - "pattern" : ".*", - "minLength" : 0, - "maxLength" : 4096 - } - } - }, - "OnFailure" : { "type" : "object", - "description" : "A destination for events that failed processing.", - "additionalProperties" : false, "properties" : { - "Destination" : { - "description" : "The Amazon Resource Name (ARN) of the destination resource.\n To retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations), you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n To retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations), you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n To retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination), you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", - "type" : "string", - "pattern" : "arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?(-iso([a-z])?)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)", - "minLength" : 12, - "maxLength" : 1024 + "ConsumerGroupId" : { + "description" : "The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. 
For more information, see [Customizable consumer group ID](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-consumer-group-id).", + "$ref" : "#/definitions/ConsumerGroupId" } } }, - "SourceAccessConfiguration" : { - "type" : "object", + "DocumentDBEventSourceConfig" : { + "description" : "Specific configuration settings for a DocumentDB event source.", "additionalProperties" : false, - "description" : "An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.", + "type" : "object", "properties" : { - "Type" : { - "description" : "The type of authentication protocol, VPC components, or virtual host for your event source. For example: ``\"Type\":\"SASL_SCRAM_512_AUTH\"``.\n + ``BASIC_AUTH`` – (Amazon MQ) The ASMlong secret that stores your broker credentials.\n + ``BASIC_AUTH`` – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers.\n + ``VPC_SUBNET`` – (Self-managed Apache Kafka) The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster.\n + ``VPC_SECURITY_GROUP`` – (Self-managed Apache Kafka) The VPC security group used to manage access to your self-managed Apache Kafka brokers.\n + ``SASL_SCRAM_256_AUTH`` – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers.\n + ``SASL_SCRAM_512_AUTH`` – (Amazon MSK, Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers.\n + ``VIRTUAL_HOST`` –- (RabbitMQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source. This property cannot be specified in an UpdateEventSourceMapping API call.\n + ``CLIENT_CERTIFICATE_TLS_AUTH`` – (Amazon MSK, self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your MSK/Apache Kafka brokers.\n + ``SERVER_ROOT_CA_CERTIFICATE`` – (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your Apache Kafka brokers.", - "enum" : [ "BASIC_AUTH", "VPC_SUBNET", "VPC_SECURITY_GROUP", "SASL_SCRAM_512_AUTH", "SASL_SCRAM_256_AUTH", "VIRTUAL_HOST", "CLIENT_CERTIFICATE_TLS_AUTH", "SERVER_ROOT_CA_CERTIFICATE" ], - "type" : "string" + "FullDocument" : { + "description" : "Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes.", + "type" : "string", + "enum" : [ "UpdateLookup", "Default" ] }, - "URI" : { - "description" : "The value for your chosen configuration in ``Type``. For example: ``\"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\"``.", + "CollectionName" : { + "minLength" : 1, + "description" : "The name of the collection to consume within the database. 
If you do not specify a collection, Lambda consumes all collections.", "type" : "string", - "pattern" : "[a-zA-Z0-9-\\/*:_+=.@-]*", + "maxLength" : 57 + }, + "DatabaseName" : { "minLength" : 1, - "maxLength" : 200 - } - } - }, - "SelfManagedEventSource" : { - "type" : "object", - "additionalProperties" : false, - "description" : "The self-managed Apache Kafka cluster for your event source.", - "properties" : { - "Endpoints" : { - "description" : "The list of bootstrap servers for your Kafka brokers in the following format: ``\"KafkaBootstrapServers\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"]``.", - "$ref" : "#/definitions/Endpoints" + "description" : "The name of the database to consume within the DocumentDB cluster.", + "type" : "string", + "maxLength" : 63 } } }, "Endpoints" : { - "type" : "object", - "additionalProperties" : false, "description" : "The list of bootstrap servers for your Kafka brokers in the following format: ``\"KafkaBootstrapServers\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"]``.", + "additionalProperties" : false, + "type" : "object", "properties" : { "KafkaBootstrapServers" : { - "type" : "array", - "description" : "The list of bootstrap servers for your Kafka brokers in the following format: ``\"KafkaBootstrapServers\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"]``.", + "minItems" : 1, + "maxItems" : 10, "uniqueItems" : true, + "description" : "The list of bootstrap servers for your Kafka brokers in the following format: ``\"KafkaBootstrapServers\": [\"abc.xyz.com:xxxx\",\"abc2.xyz.com:xxxx\"]``.", + "type" : "array", "items" : { - "type" : "string", - "description" : "The URL of a Kafka server.", - "pattern" : "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}", "minLength" : 1, + "pattern" : "^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9]):[0-9]{1,5}", + "description" : "The URL of a Kafka server.", + "type" : "string", "maxLength" : 300 - }, - "minItems" : 1, - "maxItems" : 10 + } + } + } + }, + "DestinationConfig" : { + "description" : "A configuration object that specifies the destination of an event after Lambda processes it.", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "OnFailure" : { + "description" : "The destination configuration for failed invocations.", + "$ref" : "#/definitions/OnFailure" } } }, "ConsumerGroupId" : { + "minLength" : 1, + "pattern" : "[a-zA-Z0-9-\\/*:_+=.@-]*", "description" : "The identifier for the Kafka Consumer Group to join.", "type" : "string", - "pattern" : "[a-zA-Z0-9-\\/*:_+=.@-]*", - "minLength" : 1, "maxLength" : 200 }, - "AmazonManagedKafkaEventSourceConfig" : { - "description" : "Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.", - "type" : "object", + "Filter" : { + "description" : "A structure within a ``FilterCriteria`` object that defines an event filtering pattern.", "additionalProperties" : false, + "type" : "object", "properties" : { - "ConsumerGroupId" : { - "description" : "The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. 
For more information, see [Customizable consumer group ID](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-consumer-group-id).", - "$ref" : "#/definitions/ConsumerGroupId" + "Pattern" : { + "minLength" : 0, + "pattern" : ".*", + "description" : "A filter pattern. For more information on the syntax of a filter pattern, see [Filter rule syntax](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-syntax).", + "type" : "string", + "maxLength" : 4096 } } }, - "SelfManagedKafkaEventSourceConfig" : { - "description" : "Specific configuration settings for a self-managed Apache Kafka event source.", - "type" : "object", + "AmazonManagedKafkaEventSourceConfig" : { + "description" : "Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.", "additionalProperties" : false, + "type" : "object", "properties" : { "ConsumerGroupId" : { "description" : "The identifier for the Kafka consumer group to join. The consumer group ID must be unique among all your Kafka event sources. After creating a Kafka event source mapping with the consumer group ID specified, you cannot update this value. For more information, see [Customizable consumer group ID](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-consumer-group-id).", @@ -315,97 +196,216 @@ } } }, - "MaximumConcurrency" : { - "description" : "The maximum number of concurrent functions that an event source can invoke.", - "type" : "integer", - "minimum" : 2, - "maximum" : 1000 - }, - "ScalingConfig" : { - "description" : "(Amazon SQS only) The scaling configuration for the event source. To remove the configuration, pass an empty value.", - "type" : "object", - "additionalProperties" : false, - "properties" : { - "MaximumConcurrency" : { - "description" : "Limits the number of concurrent instances that the SQS event source can invoke.", - "$ref" : "#/definitions/MaximumConcurrency" - } - } - }, "Tag" : { - "type" : "object", + "description" : "A [tag](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the event source mapping.", "additionalProperties" : false, + "type" : "object", "properties" : { - "Key" : { - "type" : "string", - "description" : "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", - "minLength" : 1, - "maxLength" : 128 - }, "Value" : { - "type" : "string", - "description" : "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. 
You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.", "minLength" : 0, + "description" : "The value for this tag.", + "type" : "string", "maxLength" : 256 + }, + "Key" : { + "minLength" : 1, + "description" : "The key for this tag.", + "type" : "string", + "maxLength" : 128 } }, - "required" : [ "Key" ], - "description" : "" + "required" : [ "Key" ] }, - "DocumentDBEventSourceConfig" : { - "description" : "Specific configuration settings for a DocumentDB event source.", - "type" : "object", + "OnFailure" : { + "description" : "A destination for events that failed processing.", "additionalProperties" : false, + "type" : "object", "properties" : { - "DatabaseName" : { - "description" : "The name of the database to consume within the DocumentDB cluster.", - "type" : "string", - "minLength" : 1, - "maxLength" : 63 - }, - "CollectionName" : { - "description" : "The name of the collection to consume within the database. If you do not specify a collection, Lambda consumes all collections.", - "type" : "string", - "minLength" : 1, - "maxLength" : 57 - }, - "FullDocument" : { - "description" : "Determines what DocumentDB sends to your event stream during document update operations. If set to UpdateLookup, DocumentDB sends a delta describing the changes, along with a copy of the entire document. Otherwise, DocumentDB sends only a partial document that contains the changes.", + "Destination" : { + "minLength" : 12, + "pattern" : "arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?(-iso([a-z])?)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)", + "description" : "The Amazon Resource Name (ARN) of the destination resource.\n To retain records of [asynchronous invocations](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-async-destinations), you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.\n To retain records of failed invocations from [Kinesis and DynamoDB event sources](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventsourcemapping.html#event-source-mapping-destinations), you can configure an Amazon SNS topic or Amazon SQS queue as the destination.\n To retain records of failed invocations from [self-managed Kafka](https://docs.aws.amazon.com/lambda/latest/dg/with-kafka.html#services-smaa-onfailure-destination) or [Amazon MSK](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#services-msk-onfailure-destination), you can configure an Amazon SNS topic, Amazon SQS queue, or Amazon S3 bucket as the destination.", "type" : "string", - "enum" : [ "UpdateLookup", "Default" ] + "maxLength" : 1024 } } } }, "required" : [ "FunctionName" ], - "createOnlyProperties" : [ "/properties/EventSourceArn", "/properties/StartingPosition", "/properties/StartingPositionTimestamp", "/properties/SelfManagedEventSource", "/properties/AmazonManagedKafkaEventSourceConfig", "/properties/SelfManagedKafkaEventSourceConfig" ], - "readOnlyProperties" : [ "/properties/Id", "/properties/EventSourceMappingArn" ], - "primaryIdentifier" : [ "/properties/Id" ], - "propertyTransform" : { - "/properties/StartingPositionTimestamp" : "StartingPositionTimestamp * 1000" - }, - "handlers" : { - "create" : { - "permissions" : [ "lambda:CreateEventSourceMapping", "lambda:GetEventSourceMapping", "lambda:TagResource", "kms:DescribeKey", "kms:GenerateDataKey", "kms:Decrypt" ] + "properties" : { + "StartingPosition" : { + "minLength" : 6, + "pattern" : 
"(LATEST|TRIM_HORIZON|AT_TIMESTAMP)+", + "description" : "The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB.\n + *LATEST* - Read only new records.\n + *TRIM_HORIZON* - Process all available records.\n + *AT_TIMESTAMP* - Specify a time from which to start reading records.", + "type" : "string", + "maxLength" : 12 }, - "delete" : { - "permissions" : [ "lambda:DeleteEventSourceMapping", "lambda:GetEventSourceMapping", "kms:Decrypt" ] + "SelfManagedEventSource" : { + "description" : "The self-managed Apache Kafka cluster for your event source.", + "$ref" : "#/definitions/SelfManagedEventSource" }, - "list" : { - "permissions" : [ "lambda:ListEventSourceMappings" ] + "ParallelizationFactor" : { + "description" : "(Kinesis and DynamoDB Streams only) The number of batches to process concurrently from each shard. The default value is 1.", + "maximum" : 10, + "type" : "integer", + "minimum" : 1 }, - "read" : { - "permissions" : [ "lambda:GetEventSourceMapping", "lambda:ListTags", "kms:Decrypt" ] + "FilterCriteria" : { + "description" : "An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see [Lambda event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html).", + "$ref" : "#/definitions/FilterCriteria" }, - "update" : { - "permissions" : [ "lambda:UpdateEventSourceMapping", "lambda:GetEventSourceMapping", "lambda:ListTags", "lambda:TagResource", "lambda:UntagResource", "kms:DescribeKey", "kms:GenerateDataKey", "kms:Decrypt" ] + "FunctionName" : { + "minLength" : 1, + "pattern" : "(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}(-gov)?(-iso([a-z])?)?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?", + "description" : "The name or ARN of the Lambda function.\n **Name formats**\n + *Function name* – ``MyFunction``.\n + *Function ARN* – ``arn:aws:lambda:us-west-2:123456789012:function:MyFunction``.\n + *Version or Alias ARN* – ``arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD``.\n + *Partial ARN* – ``123456789012:function:MyFunction``.\n \n The length constraint applies only to the full ARN. 
If you specify only the function name, it's limited to 64 characters in length.", + "type" : "string", + "maxLength" : 140 + }, + "DestinationConfig" : { + "description" : "(Kinesis, DynamoDB Streams, Amazon MSK, and self-managed Apache Kafka event sources only) A configuration object that specifies the destination of an event after Lambda processes it.", + "$ref" : "#/definitions/DestinationConfig" + }, + "KmsKeyArn" : { + "minLength" : 12, + "pattern" : "(arn:(aws[a-zA-Z-]*)?:[a-z0-9-.]+:.*)|()", + "description" : "The ARN of the KMSlong (KMS) customer managed key that Lambda uses to encrypt your function's [filter criteria](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-basics).", + "type" : "string", + "maxLength" : 2048 + }, + "AmazonManagedKafkaEventSourceConfig" : { + "description" : "Specific configuration settings for an Amazon Managed Streaming for Apache Kafka (Amazon MSK) event source.", + "$ref" : "#/definitions/AmazonManagedKafkaEventSourceConfig" + }, + "SourceAccessConfigurations" : { + "minItems" : 1, + "maxItems" : 22, + "uniqueItems" : true, + "description" : "An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.", + "type" : "array", + "items" : { + "$ref" : "#/definitions/SourceAccessConfiguration" + } + }, + "Tags" : { + "uniqueItems" : true, + "description" : "A list of tags to add to the event source mapping.\n You must have the ``lambda:TagResource``, ``lambda:UntagResource``, and ``lambda:ListTags`` permissions for your [principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the CFN stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", + "insertionOrder" : false, + "type" : "array", + "items" : { + "$ref" : "#/definitions/Tag" + } + }, + "MaximumBatchingWindowInSeconds" : { + "description" : "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n *Default (, , event sources)*: 0\n *Default (, Kafka, , event sources)*: 500 ms\n *Related setting:* For SQS event sources, when you set ``BatchSize`` to a value greater than 10, you must set ``MaximumBatchingWindowInSeconds`` to at least 1.", + "maximum" : 300, + "type" : "integer", + "minimum" : 0 + }, + "BatchSize" : { + "description" : "The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).\n + *Amazon Kinesis* – Default 100. Max 10,000.\n + *Amazon DynamoDB Streams* – Default 100. Max 10,000.\n + *Amazon Simple Queue Service* – Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.\n + *Amazon Managed Streaming for Apache Kafka* – Default 100. Max 10,000.\n + *Self-managed Apache Kafka* – Default 100. Max 10,000.\n + *Amazon MQ (ActiveMQ and RabbitMQ)* – Default 100. Max 10,000.\n + *DocumentDB* – Default 100. Max 10,000.", + "maximum" : 10000, + "type" : "integer", + "minimum" : 1 + }, + "MaximumRetryAttempts" : { + "description" : "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. 
When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.", + "maximum" : 10000, + "type" : "integer", + "minimum" : -1 + }, + "Topics" : { + "minItems" : 1, + "maxItems" : 1, + "uniqueItems" : true, + "description" : "The name of the Kafka topic.", + "type" : "array", + "items" : { + "minLength" : 1, + "pattern" : "^[^.]([a-zA-Z0-9\\-_.]+)", + "type" : "string", + "maxLength" : 249 + } + }, + "ScalingConfig" : { + "description" : "(Amazon SQS only) The scaling configuration for the event source. For more information, see [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency).", + "$ref" : "#/definitions/ScalingConfig" + }, + "Enabled" : { + "description" : "When true, the event source mapping is active. When false, Lambda pauses polling and invocation.\n Default: True", + "type" : "boolean" + }, + "EventSourceArn" : { + "minLength" : 12, + "pattern" : "arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?(-iso([a-z])?)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)", + "description" : "The Amazon Resource Name (ARN) of the event source.\n + *Amazon Kinesis* – The ARN of the data stream or a stream consumer.\n + *Amazon DynamoDB Streams* – The ARN of the stream.\n + *Amazon Simple Queue Service* – The ARN of the queue.\n + *Amazon Managed Streaming for Apache Kafka* – The ARN of the cluster or the ARN of the VPC connection (for [cross-account event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc)).\n + *Amazon MQ* – The ARN of the broker.\n + *Amazon DocumentDB* – The ARN of the DocumentDB change stream.", + "type" : "string", + "maxLength" : 1024 + }, + "SelfManagedKafkaEventSourceConfig" : { + "description" : "Specific configuration settings for a self-managed Apache Kafka event source.", + "$ref" : "#/definitions/SelfManagedKafkaEventSourceConfig" + }, + "DocumentDBEventSourceConfig" : { + "description" : "Specific configuration settings for a DocumentDB event source.", + "$ref" : "#/definitions/DocumentDBEventSourceConfig" + }, + "TumblingWindowInSeconds" : { + "description" : "(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window.", + "maximum" : 900, + "type" : "integer", + "minimum" : 0 + }, + "BisectBatchOnFunctionError" : { + "description" : "(Kinesis and DynamoDB Streams only) If the function returns an error, split the batch in two and retry. The default value is false.", + "type" : "boolean" + }, + "EventSourceMappingArn" : { + "minLength" : 85, + "pattern" : "arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:\\d{12}:event-source-mapping:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}", + "description" : "", + "type" : "string", + "maxLength" : 120 + }, + "MaximumRecordAgeInSeconds" : { + "description" : "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n The minimum valid value for maximum record age is 60s. 
Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed", + "maximum" : 604800, + "type" : "integer", + "minimum" : -1 + }, + "StartingPositionTimestamp" : { + "description" : "With ``StartingPosition`` set to ``AT_TIMESTAMP``, the time from which to start reading, in Unix time seconds. ``StartingPositionTimestamp`` cannot be in the future.", + "type" : "number" + }, + "Queues" : { + "minItems" : 1, + "maxItems" : 1, + "uniqueItems" : true, + "description" : "(Amazon MQ) The name of the Amazon MQ broker destination queue to consume.", + "type" : "array", + "items" : { + "minLength" : 1, + "pattern" : "[\\s\\S]*", + "type" : "string", + "maxLength" : 1000 + } + }, + "Id" : { + "minLength" : 36, + "pattern" : "[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}", + "description" : "", + "type" : "string", + "maxLength" : 36 + }, + "FunctionResponseTypes" : { + "uniqueItems" : true, + "minLength" : 0, + "description" : "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n Valid Values: ``ReportBatchItemFailures``", + "type" : "array", + "items" : { + "type" : "string", + "enum" : [ "ReportBatchItemFailures" ] + }, + "maxLength" : 1 } - }, - "tagging" : { - "taggable" : true, - "tagOnCreate" : true, - "tagUpdatable" : true, - "cloudFormationSystemTags" : true, - "tagProperty" : "/properties/Tags" } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-lambda-version.json b/aws-cloudformation-schema/aws-lambda-version.json index 28bc811f3d..fcf6e4056e 100644 --- a/aws-cloudformation-schema/aws-lambda-version.json +++ b/aws-cloudformation-schema/aws-lambda-version.json @@ -27,22 +27,7 @@ "description" : "The name of the Lambda function.", "minLength" : 1, "maxLength" : 140, - "pattern" : "^(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?$", - "anyOf" : [ { - "relationshipRef" : { - "typeName" : "AWS::Lambda::Function", - "propertyPath" : "/properties/FunctionName" - } - }, { - "relationshipRef" : { - "typeName" : "AWS::Lambda::Function", - "propertyPath" : "/properties/Arn" - } - } ] - }, - "Policy" : { - "description" : "The resource policy of your function", - "type" : "object" + "pattern" : "^(arn:(aws[a-zA-Z-]*)?:lambda:)?([a-z]{2}((-gov)|(-iso([a-z]?)))?-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?$" }, "ProvisionedConcurrencyConfig" : { "description" : "Specifies a provisioned concurrency configuration for a function's version. 
Updates are not supported for this property.", diff --git a/aws-cloudformation-schema/aws-mediapackage-originendpoint.json b/aws-cloudformation-schema/aws-mediapackage-originendpoint.json index 2bc7e3d9ca..83378e13f7 100644 --- a/aws-cloudformation-schema/aws-mediapackage-originendpoint.json +++ b/aws-cloudformation-schema/aws-mediapackage-originendpoint.json @@ -506,9 +506,10 @@ "tagging" : { "taggable" : true, "tagOnCreate" : true, - "tagUpdatable" : false, + "tagUpdatable" : true, "cloudFormationSystemTags" : false, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "mediapackage:TagResource", "mediapackage:UntagResource" ] }, "additionalProperties" : false, "required" : [ "Id", "ChannelId" ], @@ -524,7 +525,7 @@ "permissions" : [ "mediapackage:DescribeOriginEndpoint" ] }, "update" : { - "permissions" : [ "mediapackage:UpdateOriginEndpoint", "iam:PassRole" ] + "permissions" : [ "mediapackage:UpdateOriginEndpoint", "mediapackage:TagResource", "mediapackage:ListTagsForResource", "mediapackage:UntagResource", "mediapackage:DescribeOriginEndpoint", "iam:PassRole" ] }, "delete" : { "permissions" : [ "mediapackage:DeleteOriginEndpoint" ] diff --git a/aws-cloudformation-schema/aws-msk-cluster.json b/aws-cloudformation-schema/aws-msk-cluster.json index 23e0c94bdf..ecdfed49ba 100644 --- a/aws-cloudformation-schema/aws-msk-cluster.json +++ b/aws-cloudformation-schema/aws-msk-cluster.json @@ -466,7 +466,8 @@ "tagOnCreate" : true, "tagUpdatable" : true, "cloudFormationSystemTags" : true, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "kafka:TagResource", "kafka:UntagResource", "kafka:ListTagsForResource" ] }, "handlers" : { "create" : { diff --git a/aws-cloudformation-schema/aws-organizations-account.json b/aws-cloudformation-schema/aws-organizations-account.json index 7d9a7681ae..15583047f0 100644 --- a/aws-cloudformation-schema/aws-organizations-account.json +++ b/aws-cloudformation-schema/aws-organizations-account.json @@ -99,7 +99,8 @@ "tagOnCreate" : true, "tagUpdatable" : true, "cloudFormationSystemTags" : false, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "organizations:TagResource", "organizations:UntagResource", "organizations:ListTagsForResource" ] }, "additionalProperties" : false, "required" : [ "AccountName", "Email" ], diff --git a/aws-cloudformation-schema/aws-organizations-organizationalunit.json b/aws-cloudformation-schema/aws-organizations-organizationalunit.json index fb50ee0169..dcf805a79f 100644 --- a/aws-cloudformation-schema/aws-organizations-organizationalunit.json +++ b/aws-cloudformation-schema/aws-organizations-organizationalunit.json @@ -89,7 +89,8 @@ "tagOnCreate" : true, "tagUpdatable" : true, "cloudFormationSystemTags" : false, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "organizations:TagResource", "organizations:UntagResource", "organizations:ListTagsForResource" ] }, "required" : [ "Name", "ParentId" ], "createOnlyProperties" : [ "/properties/ParentId" ], diff --git a/aws-cloudformation-schema/aws-organizations-policy.json b/aws-cloudformation-schema/aws-organizations-policy.json index 247aa66274..1d179d37cc 100644 --- a/aws-cloudformation-schema/aws-organizations-policy.json +++ b/aws-cloudformation-schema/aws-organizations-policy.json @@ -11,9 +11,9 @@ "maxLength" : 128 }, "Type" : { - "description" : "The type of policy to create. 
You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY", + "description" : "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY, CHATBOT_POLICY", "type" : "string", - "enum" : [ "SERVICE_CONTROL_POLICY", "AISERVICES_OPT_OUT_POLICY", "BACKUP_POLICY", "TAG_POLICY" ] + "enum" : [ "SERVICE_CONTROL_POLICY", "AISERVICES_OPT_OUT_POLICY", "BACKUP_POLICY", "TAG_POLICY", "CHATBOT_POLICY" ] }, "Content" : { "description" : "The Policy text content. For AWS CloudFormation templates formatted in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation always converts a YAML policy to JSON format before submitting it.", @@ -92,7 +92,8 @@ "tagOnCreate" : true, "tagUpdatable" : true, "cloudFormationSystemTags" : false, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "organizations:TagResource", "organizations:UntagResource", "organizations:ListTagsForResource" ] }, "required" : [ "Name", "Type", "Content" ], "primaryIdentifier" : [ "/properties/Id" ], diff --git a/aws-cloudformation-schema/aws-organizations-resourcepolicy.json b/aws-cloudformation-schema/aws-organizations-resourcepolicy.json index 80f9cf4f10..124f2f947b 100644 --- a/aws-cloudformation-schema/aws-organizations-resourcepolicy.json +++ b/aws-cloudformation-schema/aws-organizations-resourcepolicy.json @@ -75,7 +75,8 @@ "tagOnCreate" : true, "tagUpdatable" : true, "cloudFormationSystemTags" : false, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "organizations:TagResource", "organizations:UntagResource", "organizations:ListTagsForResource" ] }, "required" : [ "Content" ], "readOnlyProperties" : [ "/properties/Id", "/properties/Arn" ], diff --git a/aws-cloudformation-schema/aws-qbusiness-webexperience.json b/aws-cloudformation-schema/aws-qbusiness-webexperience.json index a65bb8758d..006f63ba73 100644 --- a/aws-cloudformation-schema/aws-qbusiness-webexperience.json +++ b/aws-cloudformation-schema/aws-qbusiness-webexperience.json @@ -81,6 +81,12 @@ "WebExperienceStatus" : { "type" : "string", "enum" : [ "CREATING", "ACTIVE", "DELETING", "FAILED", "PENDING_AUTH_CONFIG" ] + }, + "Origin" : { + "type" : "string", + "maxLength" : 64, + "minLength" : 1, + "pattern" : "^(http:\\/\\/|https:\\/\\/)[a-zA-Z0-9-_.]+(?::[0-9]{1,5})?$" } }, "properties" : { @@ -156,6 +162,15 @@ "type" : "string", "maxLength" : 300, "minLength" : 0 + }, + "Origins" : { + "type" : "array", + "insertionOrder" : false, + "items" : { + "$ref" : "#/definitions/Origin" + }, + "maxItems" : 10, + "minItems" : 0 } }, "required" : [ "ApplicationId" ], diff --git a/aws-cloudformation-schema/aws-quicksight-analysis.json b/aws-cloudformation-schema/aws-quicksight-analysis.json index a524adcbd2..7d36abd520 100644 --- a/aws-cloudformation-schema/aws-quicksight-analysis.json +++ b/aws-cloudformation-schema/aws-quicksight-analysis.json @@ -5,7 +5,7 @@ "permissions" : [ "quicksight:DescribeAnalysis", "quicksight:DescribeAnalysisPermissions", "quicksight:ListTagsForResource" ] }, "create" : { - "permissions" : [ "quicksight:DescribeAnalysis", "quicksight:DescribeAnalysisPermissions", "quicksight:CreateAnalysis", "quicksight:DescribeTemplate", "quicksight:DescribeTheme", "quicksight:PassDataSet", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource" ] + "permissions" : [ 
"quicksight:DescribeAnalysis", "quicksight:DescribeAnalysisPermissions", "quicksight:CreateAnalysis", "quicksight:DescribeTemplate", "quicksight:DescribeTheme", "quicksight:PassDataSet", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource" ] }, "update" : { "permissions" : [ "quicksight:DescribeAnalysis", "quicksight:DescribeAnalysisPermissions", "quicksight:UpdateAnalysis", "quicksight:UpdateAnalysisPermissions", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource", "quicksight:DescribeTemplate", "quicksight:DescribeTheme", "quicksight:PassDataSet", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource" ] @@ -28,7 +28,7 @@ "typeName" : "AWS::QuickSight::Analysis", "readOnlyProperties" : [ "/properties/Arn", "/properties/CreatedTime", "/properties/DataSetArns", "/properties/LastUpdatedTime" ], "description" : "Definition of the AWS::QuickSight::Analysis Resource Type.", - "writeOnlyProperties" : [ "/properties/Definition", "/properties/Parameters", "/properties/SourceEntity", "/properties/Status", "/properties/ValidationStrategy" ], + "writeOnlyProperties" : [ "/properties/Definition", "/properties/Parameters", "/properties/SourceEntity", "/properties/Status", "/properties/ValidationStrategy", "/properties/FolderArns" ], "createOnlyProperties" : [ "/properties/AnalysisId", "/properties/AwsAccountId" ], "additionalProperties" : false, "primaryIdentifier" : [ "/properties/AnalysisId", "/properties/AwsAccountId" ], @@ -10384,6 +10384,14 @@ "ValidationStrategy" : { "$ref" : "#/definitions/ValidationStrategy" }, + "FolderArns" : { + "minItems" : 0, + "maxItems" : 10, + "type" : "array", + "items" : { + "type" : "string" + } + }, "Name" : { "minLength" : 1, "description" : "
The descriptive name of the analysis.
", diff --git a/aws-cloudformation-schema/aws-quicksight-dashboard.json b/aws-cloudformation-schema/aws-quicksight-dashboard.json index 621a17b4b2..9ca460df64 100644 --- a/aws-cloudformation-schema/aws-quicksight-dashboard.json +++ b/aws-cloudformation-schema/aws-quicksight-dashboard.json @@ -5,7 +5,7 @@ "permissions" : [ "quicksight:DescribeDashboard", "quicksight:DescribeDashboardPermissions", "quicksight:ListTagsForResource" ] }, "create" : { - "permissions" : [ "quicksight:DescribeDashboard", "quicksight:DescribeDashboardPermissions", "quicksight:CreateDashboard", "quicksight:DescribeTemplate", "quicksight:DescribeTheme", "quicksight:PassDataSet", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource" ] + "permissions" : [ "quicksight:DescribeDashboard", "quicksight:DescribeDashboardPermissions", "quicksight:CreateDashboard", "quicksight:DescribeTemplate", "quicksight:DescribeTheme", "quicksight:PassDataSet", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource" ] }, "update" : { "permissions" : [ "quicksight:DescribeDashboard", "quicksight:DescribeDashboardPermissions", "quicksight:UpdateDashboard", "quicksight:UpdateDashboardLinks", "quicksight:UpdateDashboardPermissions", "quicksight:UpdateDashboardPublishedVersion", "quicksight:DescribeTemplate", "quicksight:DescribeTheme", "quicksight:PassDataSet", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource" ] @@ -28,7 +28,7 @@ "typeName" : "AWS::QuickSight::Dashboard", "readOnlyProperties" : [ "/properties/Arn", "/properties/CreatedTime", "/properties/LastPublishedTime", "/properties/LastUpdatedTime", "/properties/Version" ], "description" : "Definition of the AWS::QuickSight::Dashboard Resource Type.", - "writeOnlyProperties" : [ "/properties/DashboardPublishOptions", "/properties/Definition", "/properties/LinkSharingConfiguration", "/properties/Parameters", "/properties/SourceEntity", "/properties/ThemeArn", "/properties/VersionDescription", "/properties/ValidationStrategy" ], + "writeOnlyProperties" : [ "/properties/DashboardPublishOptions", "/properties/Definition", "/properties/LinkSharingConfiguration", "/properties/Parameters", "/properties/SourceEntity", "/properties/ThemeArn", "/properties/VersionDescription", "/properties/ValidationStrategy", "/properties/FolderArns" ], "createOnlyProperties" : [ "/properties/AwsAccountId", "/properties/DashboardId" ], "additionalProperties" : false, "primaryIdentifier" : [ "/properties/AwsAccountId", "/properties/DashboardId" ], @@ -10604,6 +10604,14 @@ "ValidationStrategy" : { "$ref" : "#/definitions/ValidationStrategy" }, + "FolderArns" : { + "minItems" : 0, + "maxItems" : 10, + "type" : "array", + "items" : { + "type" : "string" + } + }, "DashboardId" : { "minLength" : 1, "pattern" : "^[\\w\\-]+$", diff --git a/aws-cloudformation-schema/aws-quicksight-dataset.json b/aws-cloudformation-schema/aws-quicksight-dataset.json index 1b68bdf981..c5d8c48e53 100644 --- a/aws-cloudformation-schema/aws-quicksight-dataset.json +++ b/aws-cloudformation-schema/aws-quicksight-dataset.json @@ -11,7 +11,7 @@ "permissions" : [ "quicksight:DescribeDataSet", "quicksight:DescribeDataSetPermissions", "quicksight:ListTagsForResource", "quicksight:DescribeDataSetRefreshProperties" ] }, "create" : { - 
"permissions" : [ "quicksight:DescribeDataSet", "quicksight:DescribeDataSetPermissions", "quicksight:DescribeIngestion", "quicksight:ListIngestions", "quicksight:CreateDataSet", "quicksight:PassDataSource", "quicksight:PassDataSet", "quicksight:TagResource", "quicksight:ListTagsForResource", "quicksight:DescribeDataSetRefreshProperties", "quicksight:PutDataSetRefreshProperties" ] + "permissions" : [ "quicksight:DescribeDataSet", "quicksight:DescribeDataSetPermissions", "quicksight:DescribeIngestion", "quicksight:ListIngestions", "quicksight:CreateDataSet", "quicksight:PassDataSource", "quicksight:PassDataSet", "quicksight:TagResource", "quicksight:ListTagsForResource", "quicksight:DescribeDataSetRefreshProperties", "quicksight:PutDataSetRefreshProperties", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource" ] }, "update" : { "permissions" : [ "quicksight:DescribeDataSet", "quicksight:DescribeDataSetPermissions", "quicksight:PassDataSource", "quicksight:UpdateDataSet", "quicksight:UpdateDataSetPermissions", "quicksight:PassDataSet", "quicksight:DescribeIngestion", "quicksight:ListIngestions", "quicksight:CancelIngestion", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource", "quicksight:PutDataSetRefreshProperties", "quicksight:DescribeDataSetRefreshProperties", "quicksight:DeleteDataSetRefreshProperties" ] @@ -463,7 +463,7 @@ "maxLength" : 256 } }, - "required" : [ "DataSourceArn", "InputColumns", "Name" ] + "required" : [ "DataSourceArn", "Name" ] }, "DatasetParameterValueType" : { "type" : "string", @@ -1200,7 +1200,7 @@ "maxLength" : 128 } }, - "required" : [ "Columns", "DataSourceArn", "Name", "SqlQuery" ] + "required" : [ "DataSourceArn", "Name", "SqlQuery" ] }, "ProjectOperation" : { "description" : "A transform operation that projects columns. Operations that come after a projection\n can only refer to projected columns.
", diff --git a/aws-cloudformation-schema/aws-quicksight-datasource.json b/aws-cloudformation-schema/aws-quicksight-datasource.json index 9bb8e76494..9716fefd4f 100644 --- a/aws-cloudformation-schema/aws-quicksight-datasource.json +++ b/aws-cloudformation-schema/aws-quicksight-datasource.json @@ -1,903 +1,911 @@ { - "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-quicksight", - "handlers" : { - "read" : { - "permissions" : [ "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:ListTagsForResource" ] - }, - "create" : { - "permissions" : [ "quicksight:CreateDataSource", "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:TagResource", "quicksight:ListTagsForResource" ] - }, - "update" : { - "permissions" : [ "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:UpdateDataSource", "quicksight:UpdateDataSourcePermissions", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource" ] - }, - "list" : { - "permissions" : [ "quicksight:DescribeDataSource", "quicksight:ListDataSources" ] - }, - "delete" : { - "permissions" : [ "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:DeleteDataSource", "quicksight:ListTagsForResource" ] - } - }, "typeName" : "AWS::QuickSight::DataSource", - "readOnlyProperties" : [ "/properties/Arn", "/properties/CreatedTime", "/properties/LastUpdatedTime", "/properties/Status" ], "description" : "Definition of the AWS::QuickSight::DataSource Resource Type.", - "writeOnlyProperties" : [ "/properties/Credentials" ], - "createOnlyProperties" : [ "/properties/AwsAccountId", "/properties/DataSourceId", "/properties/Type" ], - "additionalProperties" : false, - "primaryIdentifier" : [ "/properties/AwsAccountId", "/properties/DataSourceId" ], "definitions" : { - "AuroraPostgreSqlParameters" : { - "description" : "Parameters for Amazon Aurora PostgreSQL-Compatible Edition.
", - "additionalProperties" : false, + "AmazonElasticsearchParameters" : { "type" : "object", + "description" : "The parameters for OpenSearch.
", "properties" : { - "Port" : { - "default" : 0, - "maximum" : 65535, - "description" : "The port that Amazon Aurora PostgreSQL is listening on.
", - "type" : "number", - "minimum" : 1 - }, - "Database" : { - "minLength" : 1, - "description" : "The Amazon Aurora PostgreSQL database to connect to.
", + "Domain" : { "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 64, "minLength" : 1, - "description" : "The Amazon Aurora PostgreSQL-Compatible host to connect to.
", - "type" : "string", - "maxLength" : 256 + "description" : "The OpenSearch domain.
" } }, - "required" : [ "Database", "Host", "Port" ] - }, - "DataSourceCredentials" : { - "description" : "Data source credentials. This is a variant type structure. For this structure to be\n valid, only one of the attributes can be non-null.
", - "additionalProperties" : false, - "type" : "object", - "properties" : { - "SecretArn" : { - "minLength" : 1, - "pattern" : "^arn:[-a-z0-9]*:secretsmanager:[-a-z0-9]*:[0-9]{12}:secret:.+$", - "description" : "The Amazon Resource Name (ARN) of the secret associated with the data source in Amazon Secrets Manager.
", - "type" : "string", - "maxLength" : 2048 - }, - "CopySourceArn" : { - "pattern" : "^arn:[-a-z0-9]*:quicksight:[-a-z0-9]*:[0-9]{12}:datasource/.+$", - "description" : "The Amazon Resource Name (ARN) of a data source that has the credential pair that you\n want to use. When CopySourceArn
is not null, the credential pair from the\n data source in the ARN is used as the credentials for the\n DataSourceCredentials
structure.
Amazon S3 manifest file location.
", - "additionalProperties" : false, + "AmazonOpenSearchParameters" : { "type" : "object", + "description" : "The parameters for OpenSearch.
", "properties" : { - "Bucket" : { - "minLength" : 1, - "description" : "Amazon S3 bucket.
", + "Domain" : { "type" : "string", - "maxLength" : 1024 - }, - "Key" : { + "maxLength" : 64, "minLength" : 1, - "description" : "Amazon S3 key that identifies an object.
", - "type" : "string", - "maxLength" : 1024 + "description" : "The OpenSearch domain.
" } }, - "required" : [ "Bucket", "Key" ] + "required" : [ "Domain" ], + "additionalProperties" : false }, - "StarburstParameters" : { - "description" : "The parameters that are required to connect to a Starburst data source.
", - "additionalProperties" : false, + "AthenaParameters" : { "type" : "object", + "description" : "Parameters for Amazon Athena.
", "properties" : { - "Port" : { - "default" : 0, - "maximum" : 65535, - "description" : "The port for the Starburst data source.
", - "type" : "number", - "minimum" : 1 - }, - "ProductType" : { - "$ref" : "#/definitions/StarburstProductType" - }, - "Host" : { - "minLength" : 1, - "description" : "The host name of the Starburst data source.
", + "WorkGroup" : { "type" : "string", - "maxLength" : 256 + "maxLength" : 128, + "minLength" : 1, + "description" : "The workgroup that Amazon Athena uses.
" }, - "Catalog" : { - "minLength" : 0, - "description" : "The catalog name for the Starburst data source.
", + "RoleArn" : { "type" : "string", - "maxLength" : 128 + "maxLength" : 2048, + "minLength" : 20, + "description" : "Use the RoleArn
structure to override an account-wide role for a specific Athena data source. For example, say an account administrator has turned off all Athena access with an account-wide role. The administrator can then use RoleArn
to bypass the account-wide role and allow Athena access for the single Athena data source that is specified in the structure, even if the account-wide role forbidding Athena access is still active.
The parameters for Amazon Redshift. The ClusterId
field can be blank if\n Host
and Port
are both set. The Host
and Port
fields can be blank if the ClusterId
field is set.
Parameters for Amazon Aurora.
", "properties" : { - "IAMParameters" : { - "$ref" : "#/definitions/RedshiftIAMParameters" - }, - "ClusterId" : { - "minLength" : 1, - "description" : "Cluster ID. This field can be blank if the Host
and Port
are\n provided.
Host.
" }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "Port. This field can be blank if the ClusterId
is provided.
Port.
" }, "Database" : { - "minLength" : 1, - "description" : "Database.
", "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 128, "minLength" : 1, - "description" : "Host. This field can be blank if ClusterId
is provided.
Database.
" } }, - "required" : [ "Database" ] + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false }, - "VpcConnectionProperties" : { - "description" : "VPC connection properties.
", - "additionalProperties" : false, - "type" : "object", - "properties" : { - "VpcConnectionArn" : { - "description" : "The Amazon Resource Name (ARN) for the VPC connection.
", - "type" : "string" - } - }, - "required" : [ "VpcConnectionArn" ] - }, - "SnowflakeParameters" : { - "description" : "The parameters for Snowflake.
", - "additionalProperties" : false, + "AuroraPostgreSqlParameters" : { "type" : "object", + "description" : "Parameters for Amazon Aurora PostgreSQL-Compatible Edition.
", "properties" : { - "Warehouse" : { - "minLength" : 0, - "description" : "Warehouse.
", + "Host" : { "type" : "string", - "maxLength" : 128 + "maxLength" : 256, + "minLength" : 1, + "description" : "The Amazon Aurora PostgreSQL-Compatible host to connect to.
" + }, + "Port" : { + "type" : "number", + "default" : 0, + "maximum" : 65535, + "minimum" : 1, + "description" : "The port that Amazon Aurora PostgreSQL is listening on.
" }, "Database" : { - "minLength" : 1, - "description" : "Database.
", "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 128, "minLength" : 1, - "description" : "Host.
", - "type" : "string", - "maxLength" : 256 + "description" : "The Amazon Aurora PostgreSQL database to connect to.
" } }, - "required" : [ "Database", "Host", "Warehouse" ] + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false }, - "DataSourceType" : { - "type" : "string", - "enum" : [ "ADOBE_ANALYTICS", "AMAZON_ELASTICSEARCH", "AMAZON_OPENSEARCH", "ATHENA", "AURORA", "AURORA_POSTGRESQL", "AWS_IOT_ANALYTICS", "DATABRICKS", "DENODO", "DREMIO", "DYNAMODB", "SAPHANA", "DB2_AS400", "EXASOL", "FILE", "GITHUB", "JIRA", "MARIADB", "MYSQL", "ORACLE", "POSTGRESQL", "PRESTO", "REDSHIFT", "S3", "SALESFORCE", "SERVICENOW", "SNOWFLAKE", "SPARK", "SQLSERVER", "TERADATA", "TIMESTREAM", "TWITTER", "BIGQUERY", "GOOGLE_ANALYTICS", "TRINO", "STARBURST", "MONGO", "MONGO_ATLAS", "DOCUMENTDB", "APPFLOW", "IMPALA", "GLUE" ] - }, - "AmazonElasticsearchParameters" : { - "description" : "The parameters for OpenSearch.
", - "additionalProperties" : false, + "AwsIotAnalyticsParameters" : { "type" : "object", + "description" : "The parameters for IoT Analytics.
", "properties" : { - "Domain" : { - "minLength" : 1, - "description" : "The OpenSearch domain.
", + "DataSetName" : { "type" : "string", - "maxLength" : 64 + "maxLength" : 128, + "minLength" : 1, + "description" : "Dataset name.
" } }, - "required" : [ "Domain" ] + "required" : [ "DataSetName" ], + "additionalProperties" : false }, - "AmazonOpenSearchParameters" : { - "description" : "The parameters for OpenSearch.
", - "additionalProperties" : false, + "CredentialPair" : { "type" : "object", + "description" : "The combination of user name and password that are used as credentials.
", "properties" : { - "Domain" : { + "Username" : { + "type" : "string", + "maxLength" : 64, "minLength" : 1, - "description" : "The OpenSearch domain.
", + "description" : "User name.
" + }, + "Password" : { "type" : "string", - "maxLength" : 64 + "maxLength" : 1024, + "minLength" : 1, + "description" : "Password.
" + }, + "AlternateDataSourceParameters" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/DataSourceParameters" + }, + "maxItems" : 50, + "minItems" : 1, + "description" : "A set of alternate data source parameters that you want to share for these\n credentials. The credentials are applied in tandem with the data source parameters when\n you copy a data source by using a create or update request. The API operation compares\n the DataSourceParameters
structure that's in the request with the\n structures in the AlternateDataSourceParameters
allow list. If the\n structures are an exact match, the request is allowed to use the new data source with\n the existing credentials. If the AlternateDataSourceParameters
list is\n null, the DataSourceParameters
originally used with these\n Credentials
is automatically allowed.
Parameters for Amazon Aurora.
", - "additionalProperties" : false, + "DataSourceCredentials" : { "type" : "object", + "description" : "Data source credentials. This is a variant type structure. For this structure to be\n valid, only one of the attributes can be non-null.
", "properties" : { - "Port" : { - "default" : 0, - "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "CredentialPair" : { + "$ref" : "#/definitions/CredentialPair" }, - "Database" : { - "minLength" : 1, - "description" : "Database.
", + "CopySourceArn" : { "type" : "string", - "maxLength" : 128 + "pattern" : "^arn:[-a-z0-9]*:quicksight:[-a-z0-9]*:[0-9]{12}:datasource/.+$", + "description" : "The Amazon Resource Name (ARN) of a data source that has the credential pair that you\n want to use. When CopySourceArn
is not null, the credential pair from the\n data source in the ARN is used as the credentials for the\n DataSourceCredentials
structure.
Host.
", + "SecretArn" : { "type" : "string", - "maxLength" : 256 + "maxLength" : 2048, + "minLength" : 1, + "pattern" : "^arn:[-a-z0-9]*:secretsmanager:[-a-z0-9]*:[0-9]{12}:secret:.+$", + "description" : "The Amazon Resource Name (ARN) of the secret associated with the data source in Amazon Secrets Manager.
" } }, - "required" : [ "Database", "Host", "Port" ] + "additionalProperties" : false }, - "S3Parameters" : { - "description" : "The parameters for S3.
", - "additionalProperties" : false, + "DataSourceErrorInfo" : { "type" : "object", + "description" : "Error information for the data source creation or update.
", "properties" : { - "ManifestFileLocation" : { - "$ref" : "#/definitions/ManifestFileLocation" + "Type" : { + "$ref" : "#/definitions/DataSourceErrorInfoType" }, - "RoleArn" : { - "minLength" : 20, - "description" : "Use the RoleArn
structure to override an account-wide role for a specific S3 data source. For example, say an account administrator has turned off all S3 access with an account-wide role. The administrator can then use RoleArn
to bypass the account-wide role and allow S3 access for the single S3 data source that is specified in the structure, even if the account-wide role forbidding S3 access is still active.
Error message.
" } }, - "required" : [ "ManifestFileLocation" ] - }, - "IdentityCenterConfiguration" : { - "description" : "The parameters for an IAM Identity Center configuration.
", - "additionalProperties" : false, - "type" : "object", - "properties" : { - "EnableIdentityPropagation" : { - "default" : null, - "description" : "A Boolean option that controls whether Trusted Identity Propagation should be used.
", - "type" : "boolean" - } - } - }, - "SslProperties" : { - "description" : "Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your\n underlying data source.
", - "additionalProperties" : false, - "type" : "object", - "properties" : { - "DisableSsl" : { - "default" : false, - "description" : "A Boolean option to control whether SSL should be disabled.
", - "type" : "boolean" - } - } + "additionalProperties" : false }, "DataSourceErrorInfoType" : { "type" : "string", "enum" : [ "ACCESS_DENIED", "COPY_SOURCE_NOT_FOUND", "TIMEOUT", "ENGINE_VERSION_NOT_SUPPORTED", "UNKNOWN_HOST", "GENERIC_SQL_FAILURE", "CONFLICT", "UNKNOWN" ] }, - "ResourcePermission" : { - "description" : "Permission for the resource.
", - "additionalProperties" : false, + "DataSourceParameters" : { "type" : "object", + "description" : "The parameters that Amazon QuickSight uses to connect to your underlying data source.\n This is a variant type structure. For this structure to be valid, only one of the\n attributes can be non-null.
", "properties" : { - "Actions" : { - "minItems" : 1, - "maxItems" : 20, - "description" : "The IAM action to grant or revoke permissions on.
", - "type" : "array", - "items" : { - "type" : "string" - } + "AmazonElasticsearchParameters" : { + "$ref" : "#/definitions/AmazonElasticsearchParameters" }, - "Resource" : { - "type" : "string" + "AthenaParameters" : { + "$ref" : "#/definitions/AthenaParameters" + }, + "AuroraParameters" : { + "$ref" : "#/definitions/AuroraParameters" + }, + "AuroraPostgreSqlParameters" : { + "$ref" : "#/definitions/AuroraPostgreSqlParameters" + }, + "MariaDbParameters" : { + "$ref" : "#/definitions/MariaDbParameters" + }, + "MySqlParameters" : { + "$ref" : "#/definitions/MySqlParameters" + }, + "OracleParameters" : { + "$ref" : "#/definitions/OracleParameters" + }, + "PostgreSqlParameters" : { + "$ref" : "#/definitions/PostgreSqlParameters" + }, + "PrestoParameters" : { + "$ref" : "#/definitions/PrestoParameters" + }, + "RdsParameters" : { + "$ref" : "#/definitions/RdsParameters" + }, + "RedshiftParameters" : { + "$ref" : "#/definitions/RedshiftParameters" + }, + "S3Parameters" : { + "$ref" : "#/definitions/S3Parameters" + }, + "SnowflakeParameters" : { + "$ref" : "#/definitions/SnowflakeParameters" + }, + "SparkParameters" : { + "$ref" : "#/definitions/SparkParameters" + }, + "SqlServerParameters" : { + "$ref" : "#/definitions/SqlServerParameters" + }, + "TeradataParameters" : { + "$ref" : "#/definitions/TeradataParameters" + }, + "AmazonOpenSearchParameters" : { + "$ref" : "#/definitions/AmazonOpenSearchParameters" + }, + "DatabricksParameters" : { + "$ref" : "#/definitions/DatabricksParameters" + }, + "StarburstParameters" : { + "$ref" : "#/definitions/StarburstParameters" }, - "Principal" : { - "minLength" : 1, - "description" : "The Amazon Resource Name (ARN) of the principal. This can be one of the\n following:
\nThe ARN of an Amazon QuickSight user or group associated with a data source or dataset. (This is common.)
\nThe ARN of an Amazon QuickSight user, group, or namespace associated with an analysis, dashboard, template, or theme. (This is common.)
\nThe ARN of an Amazon Web Services account root: This is an IAM ARN rather than a QuickSight\n ARN. Use this option only to share resources (templates) across Amazon Web Services accounts.\n (This is less common.)
\nError information for the data source creation or update.
", - "additionalProperties" : false, - "type" : "object", - "properties" : { - "Type" : { - "$ref" : "#/definitions/DataSourceErrorInfoType" - }, - "Message" : { - "description" : "Error message.
", - "type" : "string" - } - } + "DataSourceType" : { + "type" : "string", + "enum" : [ "ADOBE_ANALYTICS", "AMAZON_ELASTICSEARCH", "AMAZON_OPENSEARCH", "ATHENA", "AURORA", "AURORA_POSTGRESQL", "AWS_IOT_ANALYTICS", "DATABRICKS", "DENODO", "DREMIO", "DYNAMODB", "SAPHANA", "DB2_AS400", "EXASOL", "FILE", "GITHUB", "JIRA", "MARIADB", "MYSQL", "ORACLE", "POSTGRESQL", "PRESTO", "REDSHIFT", "S3", "S3_TABLES", "SALESFORCE", "SERVICENOW", "SNOWFLAKE", "SPARK", "SQLSERVER", "TERADATA", "TIMESTREAM", "TWITTER", "BIGQUERY", "GOOGLE_ANALYTICS", "TRINO", "STARBURST", "MONGO", "MONGO_ATLAS", "DOCUMENTDB", "APPFLOW", "IMPALA", "GLUE" ] }, - "TeradataParameters" : { - "description" : "The parameters for Teradata.
", - "additionalProperties" : false, + "DatabricksParameters" : { "type" : "object", + "description" : "The parameters that are required to connect to a Databricks data source.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "The host name of the Databricks data source.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "The port for the Databricks data source.
" }, - "Database" : { - "minLength" : 1, - "description" : "Database.
", + "SqlEndpointPath" : { "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 4096, "minLength" : 1, - "description" : "Host.
", - "type" : "string", - "maxLength" : 256 + "description" : "The HTTP path of the Databricks data source.
" } }, - "required" : [ "Database", "Host", "Port" ] + "required" : [ "Host", "Port", "SqlEndpointPath" ], + "additionalProperties" : false }, - "RdsParameters" : { - "description" : "The parameters for Amazon RDS.
", - "additionalProperties" : false, + "IdentityCenterConfiguration" : { "type" : "object", + "description" : "The parameters for an IAM Identity Center configuration.
", "properties" : { - "InstanceId" : { - "minLength" : 1, - "description" : "Instance ID.
", - "type" : "string", - "maxLength" : 64 - }, - "Database" : { - "minLength" : 1, - "description" : "Database.
", - "type" : "string", - "maxLength" : 128 + "EnableIdentityPropagation" : { + "type" : "boolean", + "default" : null, + "description" : "A Boolean option that controls whether Trusted Identity Propagation should be used.
" } }, - "required" : [ "Database", "InstanceId" ] + "additionalProperties" : false }, - "AthenaParameters" : { - "description" : "Parameters for Amazon Athena.
", - "additionalProperties" : false, + "ManifestFileLocation" : { "type" : "object", + "description" : "Amazon S3 manifest file location.
", "properties" : { - "WorkGroup" : { - "minLength" : 1, - "description" : "The workgroup that Amazon Athena uses.
", + "Bucket" : { "type" : "string", - "maxLength" : 128 + "maxLength" : 1024, + "minLength" : 1, + "description" : "Amazon S3 bucket.
" }, - "RoleArn" : { - "minLength" : 20, - "description" : "Use the RoleArn
structure to override an account-wide role for a specific Athena data source. For example, say an account administrator has turned off all Athena access with an account-wide role. The administrator can then use RoleArn
to bypass the account-wide role and allow Athena access for the single Athena data source that is specified in the structure, even if the account-wide role forbidding Athena access is still active.
Amazon S3 key that identifies an object.
" } - } + }, + "required" : [ "Bucket", "Key" ], + "additionalProperties" : false }, - "SparkParameters" : { - "description" : "The parameters for Spark.
", - "additionalProperties" : false, + "MariaDbParameters" : { "type" : "object", + "description" : "The parameters for MariaDB.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "Host.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "Port.
" }, - "Host" : { - "minLength" : 1, - "description" : "Host.
", + "Database" : { "type" : "string", - "maxLength" : 256 + "maxLength" : 128, + "minLength" : 1, + "description" : "Database.
" } }, - "required" : [ "Host", "Port" ] + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false }, - "MariaDbParameters" : { - "description" : "The parameters for MariaDB.
", - "additionalProperties" : false, + "MySqlParameters" : { "type" : "object", + "description" : "The parameters for MySQL.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "Host.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "Port.
" }, "Database" : { - "minLength" : 1, - "description" : "Database.
", "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 128, "minLength" : 1, - "description" : "Host.
", - "type" : "string", - "maxLength" : 256 + "description" : "Database.
" } }, - "required" : [ "Database", "Host", "Port" ] + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false }, "OracleParameters" : { - "description" : "The parameters for Oracle.
", - "additionalProperties" : false, "type" : "object", + "description" : "The parameters for Oracle.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "An Oracle host.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "The port.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "The port.
" }, "Database" : { - "minLength" : 1, - "description" : "The database.
", "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 128, "minLength" : 1, - "description" : "An Oracle host.
", - "type" : "string", - "maxLength" : 256 + "description" : "The database.
" } }, - "required" : [ "Database", "Host", "Port" ] + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false }, - "PrestoParameters" : { - "description" : "The parameters for Presto.
", - "additionalProperties" : false, + "PostgreSqlParameters" : { "type" : "object", + "description" : "The parameters for PostgreSQL.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "Host.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "Port.
" }, - "Host" : { + "Database" : { + "type" : "string", + "maxLength" : 128, "minLength" : 1, - "description" : "Host.
", + "description" : "Database.
" + } + }, + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false + }, + "PrestoParameters" : { + "type" : "object", + "description" : "The parameters for Presto.
", + "properties" : { + "Host" : { "type" : "string", - "maxLength" : 256 + "maxLength" : 256, + "minLength" : 1, + "description" : "Host.
" + }, + "Port" : { + "type" : "number", + "default" : 0, + "maximum" : 65535, + "minimum" : 1, + "description" : "Port.
" }, "Catalog" : { - "minLength" : 0, - "description" : "Catalog.
", "type" : "string", - "maxLength" : 128 + "maxLength" : 128, + "minLength" : 0, + "description" : "Catalog.
" } }, - "required" : [ "Catalog", "Host", "Port" ] + "required" : [ "Catalog", "Host", "Port" ], + "additionalProperties" : false }, - "AwsIotAnalyticsParameters" : { - "description" : "The parameters for IoT Analytics.
", - "additionalProperties" : false, + "RdsParameters" : { "type" : "object", + "description" : "The parameters for Amazon RDS.
", "properties" : { - "DataSetName" : { + "InstanceId" : { + "type" : "string", + "maxLength" : 64, "minLength" : 1, - "description" : "Dataset name.
", + "description" : "Instance ID.
" + }, + "Database" : { "type" : "string", - "maxLength" : 128 + "maxLength" : 128, + "minLength" : 1, + "description" : "Database.
" } }, - "required" : [ "DataSetName" ] - }, - "StarburstProductType" : { - "type" : "string", - "enum" : [ "GALAXY", "ENTERPRISE" ] + "required" : [ "Database", "InstanceId" ], + "additionalProperties" : false }, - "DataSourceParameters" : { - "description" : "The parameters that Amazon QuickSight uses to connect to your underlying data source.\n This is a variant type structure. For this structure to be valid, only one of the\n attributes can be non-null.
", - "additionalProperties" : false, + "RedshiftIAMParameters" : { "type" : "object", + "description" : "A structure that grants Amazon QuickSight access to your cluster and make a call to the redshift:GetClusterCredentials
API. For more information on the redshift:GetClusterCredentials
API, see \n GetClusterCredentials
\n .
Use the RoleArn
structure to allow Amazon QuickSight to call redshift:GetClusterCredentials
on your cluster. The calling principal must have iam:PassRole
access to pass the role to Amazon QuickSight. The role's trust policy must allow the Amazon QuickSight service principal to assume the role.
The user whose permissions and group memberships will be used by Amazon QuickSight to access the cluster. If this user already exists in your database, Amazon QuickSight is granted the same permissions that the user has. If the user doesn't exist, set the value of AutoCreateDatabaseUser
to True
to create a new user with PUBLIC permissions.
A list of groups whose permissions will be granted to Amazon QuickSight to access the cluster. These permissions are combined with the permissions granted to Amazon QuickSight by the DatabaseUser
. If you choose to include this parameter, the RoleArn
must grant access to redshift:JoinGroup
.
Automatically creates a database user. If your database doesn't have a DatabaseUser
, set this parameter to True
. If there is no DatabaseUser
, Amazon QuickSight can't connect to your cluster. The RoleArn
that you use for this operation must grant access to redshift:CreateClusterUser
to successfully create the user.
The parameters for Amazon Redshift. The ClusterId
field can be blank if\n Host
and Port
are both set. The Host
and Port
fields can be blank if the ClusterId
field is set.
Host. This field can be blank if ClusterId
is provided.
Port. This field can be blank if the ClusterId
is provided.
Database.
" }, - "AmazonOpenSearchParameters" : { - "$ref" : "#/definitions/AmazonOpenSearchParameters" + "ClusterId" : { + "type" : "string", + "maxLength" : 64, + "minLength" : 1, + "description" : "Cluster ID. This field can be blank if the Host
and Port
are\n provided.
Permission for the resource.
", + "properties" : { + "Principal" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "The Amazon Resource Name (ARN) of the principal. This can be one of the\n following:
\nThe ARN of an Amazon QuickSight user or group associated with a data source or dataset. (This is common.)
\nThe ARN of an Amazon QuickSight user, group, or namespace associated with an analysis, dashboard, template, or theme. (This is common.)
\nThe ARN of an Amazon Web Services account root: This is an IAM ARN rather than a QuickSight\n ARN. Use this option only to share resources (templates) across Amazon Web Services accounts.\n (This is less common.)
\nThe IAM action to grant or revoke permissions on.
" + } + }, + "required" : [ "Actions", "Principal" ], + "additionalProperties" : false + }, + "ResourceStatus" : { + "type" : "string", + "enum" : [ "CREATION_IN_PROGRESS", "CREATION_SUCCESSFUL", "CREATION_FAILED", "UPDATE_IN_PROGRESS", "UPDATE_SUCCESSFUL", "UPDATE_FAILED", "DELETED" ] + }, + "S3Parameters" : { + "type" : "object", + "description" : "The parameters for S3.
", + "properties" : { + "ManifestFileLocation" : { + "$ref" : "#/definitions/ManifestFileLocation" }, - "DatabricksParameters" : { - "$ref" : "#/definitions/DatabricksParameters" + "RoleArn" : { + "type" : "string", + "maxLength" : 2048, + "minLength" : 20, + "description" : "Use the RoleArn
structure to override an account-wide role for a specific S3 data source. For example, say an account administrator has turned off all S3 access with an account-wide role. The administrator can then use RoleArn
to bypass the account-wide role and allow S3 access for the single S3 data source that is specified in the structure, even if the account-wide role forbidding S3 access is still active.
The parameters for MySQL.
", - "additionalProperties" : false, + "SnowflakeParameters" : { "type" : "object", + "description" : "The parameters for Snowflake.
", "properties" : { - "Port" : { - "default" : 0, - "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "Host.
" }, "Database" : { - "minLength" : 1, - "description" : "Database.
", "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 128, "minLength" : 1, - "description" : "Host.
", + "description" : "Database.
" + }, + "Warehouse" : { "type" : "string", - "maxLength" : 256 + "maxLength" : 128, + "minLength" : 0, + "description" : "Warehouse.
" } }, - "required" : [ "Database", "Host", "Port" ] + "required" : [ "Database", "Host", "Warehouse" ], + "additionalProperties" : false }, - "RedshiftIAMParameters" : { - "description" : "A structure that grants Amazon QuickSight access to your cluster and make a call to the redshift:GetClusterCredentials
API. For more information on the redshift:GetClusterCredentials
API, see \n GetClusterCredentials
\n .
The parameters for Spark.
", "properties" : { - "AutoCreateDatabaseUser" : { - "default" : false, - "description" : "Automatically creates a database user. If your database doesn't have a DatabaseUser
, set this parameter to True
. If there is no DatabaseUser
, Amazon QuickSight can't connect to your cluster. The RoleArn
that you use for this operation must grant access to redshift:CreateClusterUser
to successfully create the user.
The user whose permissions and group memberships will be used by Amazon QuickSight to access the cluster. If this user already exists in your database, Amazon QuickSight is granted the same permissions that the user has. If the user doesn't exist, set the value of AutoCreateDatabaseUser
to True
to create a new user with PUBLIC permissions.
Use the RoleArn
structure to allow Amazon QuickSight to call redshift:GetClusterCredentials
on your cluster. The calling principal must have iam:PassRole
access to pass the role to Amazon QuickSight. The role's trust policy must allow the Amazon QuickSight service principal to assume the role.
Host.
" }, - "DatabaseGroups" : { - "minItems" : 1, - "maxItems" : 50, - "description" : "A list of groups whose permissions will be granted to Amazon QuickSight to access the cluster. These permissions are combined with the permissions granted to Amazon QuickSight by the DatabaseUser
. If you choose to include this parameter, the RoleArn
must grant access to redshift:JoinGroup
.
Port.
" } }, - "required" : [ "RoleArn" ] + "required" : [ "Host", "Port" ], + "additionalProperties" : false }, "SqlServerParameters" : { - "description" : "The parameters for SQL Server.
", - "additionalProperties" : false, "type" : "object", + "description" : "The parameters for SQL Server.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "Host.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "Port.
" }, "Database" : { - "minLength" : 1, - "description" : "Database.
", "type" : "string", - "maxLength" : 128 - }, - "Host" : { + "maxLength" : 128, "minLength" : 1, - "description" : "Host.
", - "type" : "string", - "maxLength" : 256 + "description" : "Database.
" } }, - "required" : [ "Database", "Host", "Port" ] + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false }, - "CredentialPair" : { - "description" : "The combination of user name and password that are used as credentials.
", - "additionalProperties" : false, + "SslProperties" : { "type" : "object", + "description" : "Secure Socket Layer (SSL) properties that apply when Amazon QuickSight connects to your\n underlying data source.
", "properties" : { - "AlternateDataSourceParameters" : { - "minItems" : 1, - "maxItems" : 50, - "description" : "A set of alternate data source parameters that you want to share for these\n credentials. The credentials are applied in tandem with the data source parameters when\n you copy a data source by using a create or update request. The API operation compares\n the DataSourceParameters
structure that's in the request with the\n structures in the AlternateDataSourceParameters
allow list. If the\n structures are an exact match, the request is allowed to use the new data source with\n the existing credentials. If the AlternateDataSourceParameters
list is\n null, the DataSourceParameters
originally used with these\n Credentials
is automatically allowed.
User name.
", - "type" : "string", - "maxLength" : 64 - }, - "Password" : { - "minLength" : 1, - "description" : "Password.
", - "type" : "string", - "maxLength" : 1024 + "DisableSsl" : { + "type" : "boolean", + "default" : false, + "description" : "A Boolean option to control whether SSL should be disabled.
" } }, - "required" : [ "Password", "Username" ] + "additionalProperties" : false }, - "PostgreSqlParameters" : { - "description" : "The parameters for PostgreSQL.
", - "additionalProperties" : false, + "StarburstParameters" : { "type" : "object", + "description" : "The parameters that are required to connect to a Starburst data source.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "The host name of the Starburst data source.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "Port.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "The port for the Starburst data source.
" }, - "Database" : { - "minLength" : 1, - "description" : "Database.
", + "Catalog" : { "type" : "string", - "maxLength" : 128 + "maxLength" : 128, + "minLength" : 0, + "description" : "The catalog name for the Starburst data source.
" }, - "Host" : { - "minLength" : 1, - "description" : "Host.
", - "type" : "string", - "maxLength" : 256 + "ProductType" : { + "$ref" : "#/definitions/StarburstProductType" } }, - "required" : [ "Database", "Host", "Port" ] + "required" : [ "Catalog", "Host", "Port" ], + "additionalProperties" : false + }, + "StarburstProductType" : { + "type" : "string", + "enum" : [ "GALAXY", "ENTERPRISE" ] }, "Tag" : { - "description" : "The key or keys of the key-value pairs for the resource tag or tags assigned to the\n resource.
", - "additionalProperties" : false, "type" : "object", + "description" : "The key or keys of the key-value pairs for the resource tag or tags assigned to the\n resource.
", "properties" : { - "Value" : { - "minLength" : 1, - "description" : "Tag value.
", - "type" : "string", - "maxLength" : 256 - }, "Key" : { + "type" : "string", + "maxLength" : 128, "minLength" : 1, - "description" : "Tag key.
", + "description" : "Tag key.
" + }, + "Value" : { "type" : "string", - "maxLength" : 128 + "maxLength" : 256, + "minLength" : 1, + "description" : "Tag value.
" } }, - "required" : [ "Key", "Value" ] + "required" : [ "Key", "Value" ], + "additionalProperties" : false }, - "TrinoParameters" : { - "description" : "The parameters that are required to connect to a Trino data source.
", - "additionalProperties" : false, + "TeradataParameters" : { "type" : "object", + "description" : "The parameters for Teradata.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "Host.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "The port for the Trino data source.
", - "type" : "number", - "minimum" : 1 - }, - "Host" : { - "minLength" : 1, - "description" : "The host name of the Trino data source.
", - "type" : "string", - "maxLength" : 256 + "minimum" : 1, + "description" : "Port.
" }, - "Catalog" : { - "minLength" : 0, - "description" : "The catalog name for the Trino data source.
", + "Database" : { "type" : "string", - "maxLength" : 128 + "maxLength" : 128, + "minLength" : 1, + "description" : "Database.
" } }, - "required" : [ "Catalog", "Host", "Port" ] + "required" : [ "Database", "Host", "Port" ], + "additionalProperties" : false }, - "DatabricksParameters" : { - "description" : "The parameters that are required to connect to a Databricks data source.
", - "additionalProperties" : false, + "TrinoParameters" : { "type" : "object", + "description" : "The parameters that are required to connect to a Trino data source.
", "properties" : { + "Host" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1, + "description" : "The host name of the Trino data source.
" + }, "Port" : { + "type" : "number", "default" : 0, "maximum" : 65535, - "description" : "The port for the Databricks data source.
", - "type" : "number", - "minimum" : 1 + "minimum" : 1, + "description" : "The port for the Trino data source.
" }, - "Host" : { - "minLength" : 1, - "description" : "The host name of the Databricks data source.
", + "Catalog" : { "type" : "string", - "maxLength" : 256 - }, - "SqlEndpointPath" : { - "minLength" : 1, - "description" : "The HTTP path of the Databricks data source.
", + "maxLength" : 128, + "minLength" : 0, + "description" : "The catalog name for the Trino data source.
" + } + }, + "required" : [ "Catalog", "Host", "Port" ], + "additionalProperties" : false + }, + "VpcConnectionProperties" : { + "type" : "object", + "description" : "VPC connection properties.
", + "properties" : { + "VpcConnectionArn" : { "type" : "string", - "maxLength" : 4096 + "description" : "The Amazon Resource Name (ARN) for the VPC connection.
" } }, - "required" : [ "Host", "Port", "SqlEndpointPath" ] + "required" : [ "VpcConnectionArn" ], + "additionalProperties" : false } }, - "required" : [ "Name", "Type" ], "properties" : { - "Status" : { - "$ref" : "#/definitions/ResourceStatus" + "AlternateDataSourceParameters" : { + "type" : "array", + "items" : { + "$ref" : "#/definitions/DataSourceParameters" + }, + "maxItems" : 50, + "minItems" : 1, + "description" : "A set of alternate data source parameters that you want to share for the credentials\n stored with this data source. The credentials are applied in tandem with the data source\n parameters when you copy a data source by using a create or update request. The API\n operation compares the DataSourceParameters
structure that's in the request\n with the structures in the AlternateDataSourceParameters
allow list. If the\n structures are an exact match, the request is allowed to use the credentials from this\n existing data source. If the AlternateDataSourceParameters
list is null,\n the Credentials
originally used with this DataSourceParameters
\n are automatically allowed.
The Amazon Resource Name (ARN) of the data source.
" + }, + "AwsAccountId" : { + "type" : "string", + "maxLength" : 12, + "minLength" : 12, + "pattern" : "^[0-9]{12}$" }, "CreatedTime" : { - "format" : "date-time", + "type" : "string", "description" : "The time that this data source was created.
", - "type" : "string" + "format" : "date-time" }, - "ErrorInfo" : { - "$ref" : "#/definitions/DataSourceErrorInfo" + "Credentials" : { + "$ref" : "#/definitions/DataSourceCredentials" }, - "LastUpdatedTime" : { - "format" : "date-time", - "description" : "The last time that this data source was updated.
", + "DataSourceId" : { "type" : "string" }, - "Name" : { - "minLength" : 1, - "type" : "string", - "maxLength" : 128 - }, "DataSourceParameters" : { "$ref" : "#/definitions/DataSourceParameters" }, - "Type" : { - "$ref" : "#/definitions/DataSourceType" - }, - "VpcConnectionProperties" : { - "$ref" : "#/definitions/VpcConnectionProperties" + "ErrorInfo" : { + "$ref" : "#/definitions/DataSourceErrorInfo" }, - "AlternateDataSourceParameters" : { - "minItems" : 1, - "maxItems" : 50, - "description" : "A set of alternate data source parameters that you want to share for the credentials\n stored with this data source. The credentials are applied in tandem with the data source\n parameters when you copy a data source by using a create or update request. The API\n operation compares the DataSourceParameters
structure that's in the request\n with the structures in the AlternateDataSourceParameters
allow list. If the\n structures are an exact match, the request is allowed to use the credentials from this\n existing data source. If the AlternateDataSourceParameters
list is null,\n the Credentials
originally used with this DataSourceParameters
\n are automatically allowed.
The last time that this data source was updated.
", + "format" : "date-time" + }, + "Name" : { "type" : "string", - "maxLength" : 12 + "maxLength" : 128, + "minLength" : 1 }, "Permissions" : { - "minItems" : 1, - "maxItems" : 64, "type" : "array", "items" : { "$ref" : "#/definitions/ResourcePermission" - } - }, - "Arn" : { - "description" : "The Amazon Resource Name (ARN) of the data source.
", - "type" : "string" + }, + "maxItems" : 64, + "minItems" : 1 }, "SslProperties" : { "$ref" : "#/definitions/SslProperties" }, - "Credentials" : { - "$ref" : "#/definitions/DataSourceCredentials" - }, - "DataSourceId" : { - "type" : "string" + "Status" : { + "$ref" : "#/definitions/ResourceStatus" }, "Tags" : { - "minItems" : 1, - "maxItems" : 200, "type" : "array", "items" : { "$ref" : "#/definitions/Tag" - } + }, + "maxItems" : 200, + "minItems" : 1 + }, + "Type" : { + "$ref" : "#/definitions/DataSourceType" + }, + "VpcConnectionProperties" : { + "$ref" : "#/definitions/VpcConnectionProperties" + } + }, + "required" : [ "Name", "Type" ], + "readOnlyProperties" : [ "/properties/Arn", "/properties/CreatedTime", "/properties/LastUpdatedTime", "/properties/Status" ], + "writeOnlyProperties" : [ "/properties/Credentials", "/properties/FolderArns" ], + "createOnlyProperties" : [ "/properties/AwsAccountId", "/properties/DataSourceId", "/properties/Type" ], + "primaryIdentifier" : [ "/properties/AwsAccountId", "/properties/DataSourceId" ], + "handlers" : { + "read" : { + "permissions" : [ "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:ListTagsForResource" ] + }, + "create" : { + "permissions" : [ "quicksight:CreateDataSource", "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:TagResource", "quicksight:ListTagsForResource", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource" ] + }, + "update" : { + "permissions" : [ "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:UpdateDataSource", "quicksight:UpdateDataSourcePermissions", "quicksight:CreateFolderMembership", "quicksight:DeleteFolderMembership", "quicksight:ListFoldersForResource", "quicksight:TagResource", "quicksight:UntagResource", "quicksight:ListTagsForResource" ] + }, + "delete" : { + "permissions" : [ "quicksight:DescribeDataSource", "quicksight:DescribeDataSourcePermissions", "quicksight:DeleteDataSource", "quicksight:ListTagsForResource" ] + }, + "list" : { + "permissions" : [ "quicksight:DescribeDataSource", "quicksight:ListDataSources" ] } - } + }, + "additionalProperties" : false, + "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-quicksight" } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-rds-dbcluster.json b/aws-cloudformation-schema/aws-rds-dbcluster.json index 9ff806926d..c376d33cc5 100644 --- a/aws-cloudformation-schema/aws-rds-dbcluster.json +++ b/aws-cloudformation-schema/aws-rds-dbcluster.json @@ -1,458 +1,458 @@ { + "tagging" : { + "permissions" : [ "rds:AddTagsToResource", "rds:RemoveTagsFromResource" ], + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : true, + "tagProperty" : "/properties/Tags", + "cloudFormationSystemTags" : true + }, "typeName" : "AWS::RDS::DBCluster", + "readOnlyProperties" : [ "/properties/DBClusterArn", "/properties/DBClusterResourceId", "/properties/Endpoint", "/properties/Endpoint/Address", "/properties/Endpoint/Port", "/properties/ReadEndpoint/Address", "/properties/MasterUserSecret/SecretArn", "/properties/StorageThroughput" ], "description" : "The ``AWS::RDS::DBCluster`` resource creates an Amazon Aurora DB cluster or Multi-AZ DB cluster.\n For more information about creating an Aurora DB cluster, see [Creating an Amazon Aurora DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.CreateInstance.html) in the 
*Amazon Aurora User Guide*.\n For more information about creating a Multi-AZ DB cluster, see [Creating a Multi-AZ DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html) in the *Amazon RDS User Guide*.\n You can only create this resource in AWS Regions where Amazon Aurora or Multi-AZ DB clusters are supported.\n *Updating DB clusters* \n When properties labeled \"*Update requires:* [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement)\" are updated, AWS CloudFormation first creates a replacement DB cluster, then changes references from other dependent resources to point to the replacement DB cluster, and finally deletes the old DB cluster.\n We highly recommend that you take a snapshot of the database before updating the stack. If you don't, you lose the data when AWS CloudFormation replaces your DB cluster. To preserve your data, perform the following procedure:\n 1. Deactivate any applications that are using the DB cluster so that there's no activity on the DB instance.\n 1. Create a snapshot of the DB cluster. For more information, see [Creating a DB cluster snapshot](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_CreateSnapshotCluster.html).\n 1. If you want to restore your DB cluster using a DB cluster snapshot, modify the updated template with your DB cluster changes and add the ``SnapshotIdentifier`` property with the ID of the DB cluster snapshot that you want to use.\n After you restore a DB cluster with a ``SnapshotIdentifier`` property, you must specify the same ``SnapshotIdentifier`` property for any future updates to the DB cluster. When you specify this property for an update, the DB cluster is not restored from the DB cluster snapshot again, and the data in the database is not changed. However, if you don't specify the ``SnapshotIdentifier`` property, an empty DB cluster is created, and the original DB cluster is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB cluster is restored from the specified ``SnapshotIdentifier`` property, and the original DB cluster is deleted.\n 1. Update the stack.\n \n Currently, when you are updating the stack for an Aurora Serverless DB cluster, you can't include changes to any other properties when you specify one of the following properties: ``PreferredBackupWindow``, ``PreferredMaintenanceWindow``, and ``Port``. This limitation doesn't apply to provisioned DB clusters.\n For more information about updating other properties of this resource, see ``ModifyDBCluster``. For more information about updating stacks, see [CloudFormation Stacks Updates](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks.html).\n *Deleting DB clusters* \n The default ``DeletionPolicy`` for ``AWS::RDS::DBCluster`` resources is ``Snapshot``. 
For more information about how AWS CloudFormation deletes resources, see [DeletionPolicy Attribute](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html).", + "createOnlyProperties" : [ "/properties/AvailabilityZones", "/properties/DBClusterIdentifier", "/properties/DBSubnetGroupName", "/properties/DBSystemId", "/properties/DatabaseName", "/properties/EngineMode", "/properties/KmsKeyId", "/properties/PubliclyAccessible", "/properties/RestoreToTime", "/properties/RestoreType", "/properties/SnapshotIdentifier", "/properties/SourceDBClusterIdentifier", "/properties/SourceRegion", "/properties/StorageEncrypted", "/properties/UseLatestRestorableTime" ], + "primaryIdentifier" : [ "/properties/DBClusterIdentifier" ], + "conditionalCreateOnlyProperties" : [ "/properties/Engine", "/properties/GlobalClusterIdentifier", "/properties/MasterUsername" ], "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-rds", - "properties" : { - "Endpoint" : { - "$ref" : "#/definitions/Endpoint", - "description" : "" + "propertyTransform" : { + "/properties/KmsKeyId" : "$join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:kms:[a-z]{2}[-]{1}[a-z]{3,10}[-]{0,1}[a-z]{0,10}[-]{1}[1-3]{1}:[0-9]{12}[:]{1}key\\/\", KmsKeyId])", + "/properties/SourceDBClusterIdentifier" : "$lowercase(SourceDBClusterIdentifier)", + "/properties/StorageType" : "$lowercase(StorageType)", + "/properties/MasterUserSecret/KmsKeyId" : "$join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:kms:[a-z]{2}[-]{1}[a-z]{3,10}[-]{0,1}[a-z]{0,10}[-]{1}[1-3]{1}:[0-9]{12}[:]{1}key\\/\", MasterUserSecret.KmsKeyId])", + "/properties/DBClusterIdentifier" : "$lowercase(DBClusterIdentifier)", + "/properties/EnableHttpEndpoint" : "$lowercase($string(EngineMode)) = 'serverless' ? EnableHttpEndpoint : ($lowercase($string(Engine)) in ['aurora-postgresql', 'aurora-mysql'] ? EnableHttpEndpoint : false )", + "/properties/NetworkType" : "$lowercase(NetworkType)", + "/properties/PerformanceInsightsKmsKeyId" : "$join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:kms:[a-z]{2}[-]{1}[a-z]{3,10}[-]{0,1}[a-z]{0,10}[-]{1}[1-3]{1}:[0-9]{12}[:]{1}key\\/\", PerformanceInsightsKmsKeyId])", + "/properties/DBSubnetGroupName" : "$lowercase(DBSubnetGroupName)", + "/properties/SnapshotIdentifier" : "$lowercase(SnapshotIdentifier)", + "/properties/PreferredMaintenanceWindow" : "$lowercase(PreferredMaintenanceWindow)", + "/properties/DBClusterParameterGroupName" : "$lowercase(DBClusterParameterGroupName)", + "/properties/EngineVersion" : "$join([$string(EngineVersion), \".*\"])", + "/properties/Engine" : "$lowercase(Engine)" + }, + "handlers" : { + "read" : { + "permissions" : [ "rds:DescribeDBClusters" ] }, - "ReadEndpoint" : { - "$ref" : "#/definitions/ReadEndpoint", - "description" : "This data type represents the information you need to connect to an Amazon RDS DB instance. This data type is used as a response element in the following actions:\n + ``CreateDBInstance`` \n + ``DescribeDBInstances`` \n + ``DeleteDBInstance`` \n \n For the data structure that represents Amazon Aurora DB cluster endpoints, see ``DBClusterEndpoint``." 
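
The ``propertyTransform`` block above records how declared values are normalized (identifiers lowercased, ``EngineVersion`` turned into a ``<version>.*`` pattern, and so on) before they are compared with what the service reports back, so that cosmetic differences are not flagged as changes. The following Python fragment only mimics two of those rules to make the intent concrete; it is a sketch, not the CloudFormation implementation.

import re

def cluster_identifier_matches(declared: str, reported: str) -> bool:
    # Mimics $lowercase(DBClusterIdentifier): the comparison is case-insensitive.
    return declared.lower() == reported.lower()

def engine_version_matches(declared: str, reported: str) -> bool:
    # Mimics $join([$string(EngineVersion), ".*"]): the declared version becomes a
    # pattern such as "8.0.*" that the reported version is matched against.
    return re.fullmatch(declared + ".*", reported) is not None

# Example: the template declares "MyCluster"; the service reports "mycluster".
assert cluster_identifier_matches("MyCluster", "mycluster")
# Example: the template declares "8.0"; the service reports a full Aurora MySQL version.
assert engine_version_matches("8.0", "8.0.mysql_aurora.3.04.1")
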
+ "create" : { + "permissions" : [ "iam:CreateServiceLinkedRole", "iam:PassRole", "rds:AddRoleToDBCluster", "rds:AddTagsToResource", "rds:CreateDBCluster", "rds:CreateDBInstance", "rds:DescribeDBClusters", "rds:DescribeDBClusterSnapshots", "rds:DescribeDBSnapshots", "rds:DescribeEvents", "rds:EnableHttpEndpoint", "rds:ModifyDBCluster", "rds:RestoreDBClusterFromSnapshot", "rds:RestoreDBClusterToPointInTime", "secretsmanager:CreateSecret", "secretsmanager:TagResource" ], + "timeoutInMinutes" : 2160 }, - "AllocatedStorage" : { - "description" : "The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.\n Valid for Cluster Type: Multi-AZ DB clusters only\n This setting is required to create a Multi-AZ DB cluster.", - "type" : "integer" + "update" : { + "permissions" : [ "ec2:DescribeSecurityGroups", "iam:PassRole", "rds:AddRoleToDBCluster", "rds:AddTagsToResource", "rds:DescribeDBClusters", "rds:DescribeDBSubnetGroups", "rds:DescribeEvents", "rds:DescribeGlobalClusters", "rds:DisableHttpEndpoint", "rds:EnableHttpEndpoint", "rds:ModifyDBCluster", "rds:ModifyDBInstance", "rds:RemoveFromGlobalCluster", "rds:RemoveRoleFromDBCluster", "rds:RemoveTagsFromResource", "secretsmanager:CreateSecret", "secretsmanager:TagResource" ], + "timeoutInMinutes" : 2160 }, - "AssociatedRoles" : { - "description" : "Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "array", - "uniqueItems" : true, - "items" : { - "$ref" : "#/definitions/DBClusterRole" - } + "list" : { + "permissions" : [ "rds:DescribeDBClusters" ] }, - "AvailabilityZones" : { - "description" : "A list of Availability Zones (AZs) where instances in the DB cluster can be created. For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide*. 
\n Valid for: Aurora DB clusters only", - "type" : "array", - "uniqueItems" : true, - "items" : { - "type" : "string" + "delete" : { + "permissions" : [ "rds:AddTagsToResource", "rds:CreateDBClusterSnapshot", "rds:DeleteDBCluster", "rds:DeleteDBInstance", "rds:DescribeDBClusters", "rds:DescribeGlobalClusters", "rds:RemoveFromGlobalCluster" ] + } + }, + "writeOnlyProperties" : [ "/properties/DBInstanceParameterGroupName", "/properties/MasterUserPassword", "/properties/RestoreToTime", "/properties/RestoreType", "/properties/SnapshotIdentifier", "/properties/SourceDBClusterIdentifier", "/properties/SourceRegion", "/properties/UseLatestRestorableTime" ], + "additionalProperties" : false, + "definitions" : { + "MasterUserSecret" : { + "description" : "The ``MasterUserSecret`` return value specifies the secret managed by RDS in AWS Secrets Manager for the master user password.\n For more information, see [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "SecretArn" : { + "description" : "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the ``Fn::GetAtt`` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values).", + "type" : "string" + }, + "KmsKeyId" : { + "description" : "The AWS KMS key identifier that is used to encrypt the secret.", + "type" : "string" + } } }, - "AutoMinorVersionUpgrade" : { - "description" : "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n Valid for Cluster Type: Multi-AZ DB clusters only", - "type" : "boolean" - }, - "BacktrackWindow" : { - "description" : "The target backtrack window, in seconds. To disable backtracking, set this value to ``0``.\n Valid for Cluster Type: Aurora MySQL DB clusters only\n Default: ``0`` \n Constraints:\n + If specified, this value must be set to a number from 0 to 259,200 (72 hours).", - "minimum" : 0, - "type" : "integer" + "Endpoint" : { + "description" : "The ``Endpoint`` return value specifies the connection endpoint for the primary instance of the DB cluster.", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "Address" : { + "description" : "Specifies the connection endpoint for the primary instance of the DB cluster.", + "type" : "string" + }, + "Port" : { + "description" : "Specifies the port that the database engine is listening on.", + "type" : "string" + } + } }, - "BackupRetentionPeriod" : { - "description" : "The number of days for which automated backups are retained.\n Default: 1\n Constraints:\n + Must be a value from 1 to 35\n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "default" : 1, - "minimum" : 1, - "type" : "integer" + "ServerlessV2ScalingConfiguration" : { + "description" : "The ``ServerlessV2ScalingConfiguration`` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster. 
For more information, see [Using Amazon Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) in the *Amazon Aurora User Guide*.\n If you have an Aurora cluster, you must set this attribute before you add a DB instance that uses the ``db.serverless`` DB instance class. For more information, see [Clusters that use Aurora Serverless v2 must have a capacity range specified](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html#aurora-serverless-v2.requirements.capacity-range) in the *Amazon Aurora User Guide*.\n This property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use the ``ScalingConfiguration`` property.\n Valid for: Aurora Serverless v2 DB clusters", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "MinCapacity" : { + "description" : "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. The smallest value that you can use is 0.5.", + "type" : "number" + }, + "MaxCapacity" : { + "description" : "The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 40, 40.5, 41, and so on. The largest value that you can use is 128.\n The maximum capacity must be higher than 0.5 ACUs. For more information, see [Choosing the maximum Aurora Serverless v2 capacity setting for a cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.setting-capacity.html#aurora-serverless-v2.max_capacity_considerations) in the *Amazon Aurora User Guide*.\n Aurora automatically sets certain parameters for Aurora Serverless V2 DB instances to values that depend on the maximum ACU value in the capacity range. When you update the maximum capacity value, the ``ParameterApplyStatus`` value for the DB instance changes to ``pending-reboot``. You can update the parameter values by rebooting the DB instance after changing the capacity range.", + "type" : "number" + } + } }, - "CopyTagsToSnapshot" : { - "description" : "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "boolean" + "ScalingConfiguration" : { + "description" : "The ``ScalingConfiguration`` property type specifies the scaling configuration of an Aurora Serverless v1 DB cluster. \n For more information, see [Using Amazon Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) in the *Amazon Aurora User Guide*.\n This property is only supported for Aurora Serverless v1. 
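
To make the ``ServerlessV2ScalingConfiguration`` capacity rules above concrete, here is a minimal Python sketch; the 0.5 and 16 ACU figures are arbitrary example values, and the checks simply restate the half-step and 0.5-128 range wording from the ``MinCapacity``/``MaxCapacity`` descriptions.

# Example ServerlessV2ScalingConfiguration value; 0.5 and 16 ACUs are arbitrary choices.
serverless_v2_scaling = {"MinCapacity": 0.5, "MaxCapacity": 16}

def check_capacity_range(cfg: dict) -> None:
    lo, hi = cfg["MinCapacity"], cfg["MaxCapacity"]
    # Restating the descriptions above: capacity moves in half-step increments,
    # the smallest usable value is 0.5 ACU, the largest is 128 ACU, and the
    # maximum must be higher than 0.5.
    for value in (lo, hi):
        assert value * 2 == int(value * 2), "capacity must be a half-step increment"
    assert 0.5 <= lo <= hi <= 128
    assert hi > 0.5

check_capacity_range(serverless_v2_scaling)
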
For Aurora Serverless v2, Use the ``ServerlessV2ScalingConfiguration`` property.\n Valid for: Aurora Serverless v1 DB clusters only", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "TimeoutAction" : { + "description" : "The action to take when the timeout is reached, either ``ForceApplyCapacityChange`` or ``RollbackCapacityChange``.\n ``ForceApplyCapacityChange`` sets the capacity to the specified value as soon as possible.\n ``RollbackCapacityChange``, the default, ignores the capacity change if a scaling point isn't found in the timeout period.\n If you specify ``ForceApplyCapacityChange``, connections that prevent Aurora Serverless v1 from finding a scaling point might be dropped.\n For more information, see [Autoscaling for Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling) in the *Amazon Aurora User Guide*.", + "type" : "string" + }, + "SecondsBeforeTimeout" : { + "description" : "The amount of time, in seconds, that Aurora Serverless v1 tries to find a scaling point to perform seamless scaling before enforcing the timeout action. The default is 300.\n Specify a value between 60 and 600 seconds.", + "type" : "integer" + }, + "SecondsUntilAutoPause" : { + "description" : "The time, in seconds, before an Aurora DB cluster in ``serverless`` mode is paused.\n Specify a value between 300 and 86,400 seconds.", + "type" : "integer" + }, + "AutoPause" : { + "description" : "Indicates whether to allow or disallow automatic pause for an Aurora DB cluster in ``serverless`` DB engine mode. A DB cluster can be paused only when it's idle (it has no connections).\n If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it.", + "type" : "boolean" + }, + "MinCapacity" : { + "description" : "The minimum capacity for an Aurora DB cluster in ``serverless`` DB engine mode.\n For Aurora MySQL, valid capacity values are ``1``, ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``128``, and ``256``.\n For Aurora PostgreSQL, valid capacity values are ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``192``, and ``384``.\n The minimum capacity must be less than or equal to the maximum capacity.", + "type" : "integer" + }, + "MaxCapacity" : { + "description" : "The maximum capacity for an Aurora DB cluster in ``serverless`` DB engine mode.\n For Aurora MySQL, valid capacity values are ``1``, ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``128``, and ``256``.\n For Aurora PostgreSQL, valid capacity values are ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``192``, and ``384``.\n The maximum capacity must be greater than or equal to the minimum capacity.", + "type" : "integer" + } + } }, - "DatabaseName" : { - "description" : "The name of your database. If you don't provide a name, then Amazon RDS won't create a database in this DB cluster. For naming constraints, see [Naming Constraints](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon Aurora User Guide*. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string" + "ReadEndpoint" : { + "description" : "The ``ReadEndpoint`` return value specifies the reader endpoint for the DB cluster.\n The reader endpoint for a DB cluster load-balances connections across the Aurora Replicas that are available in a DB cluster. 
As clients request new connections to the reader endpoint, Aurora distributes the connection requests among the Aurora Replicas in the DB cluster. This functionality can help balance your read workload across multiple Aurora Replicas in your DB cluster.\n If a failover occurs, and the Aurora Replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Aurora Replicas in the cluster, you can then reconnect to the reader endpoint.\n For more information about Aurora endpoints, see [Amazon Aurora connection management](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html) in the *Amazon Aurora User Guide*.", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "Address" : { + "description" : "The host address of the reader endpoint.", + "type" : "string" + } + } }, - "DBClusterArn" : { - "type" : "string", - "description" : "" + "DBClusterRole" : { + "description" : "Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "RoleArn" : { + "description" : "The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.", + "type" : "string" + }, + "FeatureName" : { + "description" : "The name of the feature associated with the AWS Identity and Access Management (IAM) role. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other AWS services on your behalf. For the list of supported feature names, see the ``SupportedFeatureNames`` description in [DBEngineVersion](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DBEngineVersion.html) in the *Amazon RDS API Reference*.", + "type" : "string" + } + }, + "required" : [ "RoleArn" ] }, - "DBClusterInstanceClass" : { - "description" : "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example ``db.m6gd.xlarge``. Not all DB instance classes are available in all AWS-Regions, or for all database engines.\n For the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide*.\n This setting is required to create a Multi-AZ DB cluster.\n Valid for Cluster Type: Multi-AZ DB clusters only", - "type" : "string" + "Tag" : { + "description" : "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n For more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide*.", + "additionalProperties" : false, + "type" : "object", + "properties" : { + "Value" : { + "minLength" : 0, + "description" : "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with ``aws:`` or ``rds:``. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", + "type" : "string", + "maxLength" : 256 + }, + "Key" : { + "minLength" : 1, + "description" : "A key is the required name of the tag. 
The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with ``aws:`` or ``rds:``. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", + "type" : "string", + "maxLength" : 128 + } + }, + "required" : [ "Key" ] + } + }, + "properties" : { + "StorageEncrypted" : { + "description" : "Indicates whether the DB cluster is encrypted.\n If you specify the ``KmsKeyId`` property, then you must enable encryption.\n If you specify the ``SourceDBClusterIdentifier`` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified ``KmsKeyId`` property is used.\n If you specify the ``SnapshotIdentifier`` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified ``KmsKeyId`` property is used.\n If you specify the ``SnapshotIdentifier`` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. Specify the ``KmsKeyId`` property for the KMS key to use for encryption. If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to ``false``.\n If you specify both the ``StorageEncrypted`` and ``SnapshotIdentifier`` properties without specifying the ``KmsKeyId`` property, then the restored DB cluster inherits the encryption settings from the DB snapshot that provide.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "boolean" }, - "DBClusterResourceId" : { - "description" : "", + "DBSystemId" : { + "description" : "Reserved for future use.", "type" : "string" }, - "DBInstanceParameterGroupName" : { - "description" : "The name of the DB parameter group to apply to all instances of the DB cluster.\n When you apply a parameter group using the ``DBInstanceParameterGroupName`` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window.\n Valid for Cluster Type: Aurora DB clusters only\n Default: The existing name setting\n Constraints:\n + The DB parameter group must be in the same DB parameter group family as this DB cluster.\n + The ``DBInstanceParameterGroupName`` parameter is valid in combination with the ``AllowMajorVersionUpgrade`` parameter for a major version upgrade only.", + "RestoreToTime" : { + "description" : "The date and time to restore the DB cluster to.\n Valid Values: Value must be a time in Universal Coordinated Time (UTC) format\n Constraints:\n + Must be before the latest restorable time for the DB instance\n + Must be specified if ``UseLatestRestorableTime`` parameter isn't provided\n + Can't be specified if the ``UseLatestRestorableTime`` parameter is enabled\n + Can't be specified if the ``RestoreType`` parameter is ``copy-on-write`` \n \n This property must be used with ``SourceDBClusterIdentifier`` property. 
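
A small sketch of the point-in-time restore combination described for ``RestoreToTime``: the identifiers and timestamp are placeholders, ``full-copy`` is assumed as the non-copy-on-write restore type (the ``RestoreType`` values are not shown in this excerpt), and the assertions restate the stated constraints.

# Placeholder values illustrating the RestoreToTime constraints described above.
restore_properties = {
    "SourceDBClusterIdentifier": "source-aurora-cluster",  # placeholder
    "RestoreToTime": "2015-03-07T23:45:00Z",               # UTC, format from the example above
    "RestoreType": "full-copy",                            # assumed non-copy-on-write value
    "UseLatestRestorableTime": False,
}

# Constraints restated from the RestoreToTime description:
if "RestoreToTime" in restore_properties:
    assert restore_properties.get("UseLatestRestorableTime") is not True, \
        "RestoreToTime can't be combined with UseLatestRestorableTime"
    assert restore_properties.get("RestoreType") != "copy-on-write", \
        "RestoreToTime can't be combined with RestoreType copy-on-write"
    assert "SourceDBClusterIdentifier" in restore_properties, \
        "RestoreToTime must be used with SourceDBClusterIdentifier"
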
The resulting cluster will have the identifier that matches the value of the ``DBclusterIdentifier`` property.\n Example: ``2015-03-07T23:45:00Z`` \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "DBSystemId" : { - "description" : "Reserved for future use.", + "EngineMode" : { + "description" : "The DB engine mode of the DB cluster, either ``provisioned`` or ``serverless``.\n The ``serverless`` engine mode only applies for Aurora Serverless v1 DB clusters. Aurora Serverless v2 DB clusters use the ``provisioned`` engine mode.\n For information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide*:\n + [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) \n + [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) \n \n Valid for Cluster Type: Aurora DB clusters only", "type" : "string" }, - "GlobalClusterIdentifier" : { - "description" : "If you are configuring an Aurora global database cluster and want your Aurora DB cluster to be a secondary member in the global database cluster, specify the global cluster ID of the global database cluster. To define the primary database cluster of the global cluster, use the [AWS::RDS::GlobalCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-globalcluster.html) resource. \n If you aren't configuring a global database cluster, don't specify this property. \n To remove the DB cluster from a global database cluster, specify an empty value for the ``GlobalClusterIdentifier`` property.\n For information about Aurora global databases, see [Working with Amazon Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) in the *Amazon Aurora User Guide*.\n Valid for: Aurora DB clusters only", - "type" : "string", - "pattern" : "^$|^[a-zA-Z]{1}(?:-?[a-zA-Z0-9]){0,62}$", - "minLength" : 0, - "maxLength" : 63 + "Port" : { + "description" : "The port number on which the DB instances in the DB cluster accept connections.\n Default:\n + When ``EngineMode`` is ``provisioned``, ``3306`` (for both Aurora MySQL and Aurora PostgreSQL)\n + When ``EngineMode`` is ``serverless``:\n + ``3306`` when ``Engine`` is ``aurora`` or ``aurora-mysql`` \n + ``5432`` when ``Engine`` is ``aurora-postgresql`` \n \n \n The ``No interruption`` on update behavior only applies to DB clusters. If you are updating a DB instance, see [Port](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-port) for the AWS::RDS::DBInstance resource.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "integer" }, "DBClusterIdentifier" : { + "minLength" : 1, + "pattern" : "^[a-zA-Z]{1}(?:-?[a-zA-Z0-9]){0,62}$", "description" : "The DB cluster identifier. 
This parameter is stored as a lowercase string.\n Constraints:\n + Must contain from 1 to 63 letters, numbers, or hyphens.\n + First character must be a letter.\n + Can't end with a hyphen or contain two consecutive hyphens.\n \n Example: ``my-cluster1`` \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string", - "pattern" : "^[a-zA-Z]{1}(?:-?[a-zA-Z0-9]){0,62}$", - "minLength" : 1, "maxLength" : 63 }, - "DBClusterParameterGroupName" : { - "description" : "The name of the DB cluster parameter group to associate with this DB cluster.\n If you apply a parameter group to an existing DB cluster, then its DB instances might need to reboot. This can result in an outage while the DB instances are rebooting.\n If you apply a change to parameter group associated with a stopped DB cluster, then the update stack waits until the DB cluster is started.\n To list all of the available DB cluster parameter group names, use the following command:\n ``aws rds describe-db-cluster-parameter-groups --query \"DBClusterParameterGroups[].DBClusterParameterGroupName\" --output text`` \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string" + "StorageThroughput" : { + "description" : "", + "type" : "integer" }, - "DBSubnetGroupName" : { - "description" : "A DB subnet group that you want to associate with this DB cluster. \n If you are restoring a DB cluster to a point in time with ``RestoreType`` set to ``copy-on-write``, and don't specify a DB subnet group name, then the DB cluster is restored with a default DB subnet group.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string" + "MonitoringInterval" : { + "description" : "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify ``0``.\n If ``MonitoringRoleArn`` is specified, also set ``MonitoringInterval`` to a value other than ``0``.\n Valid for Cluster Type: Multi-AZ DB clusters only\n Valid Values: ``0 | 1 | 5 | 10 | 15 | 30 | 60`` \n Default: ``0``", + "type" : "integer" }, - "DeletionProtection" : { - "description" : "A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. 
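
The identifier rules above mirror the ``pattern`` declared for ``DBClusterIdentifier`` in this schema, which can be checked directly; the sketch below validates the ``my-cluster1`` example plus two names that break the stated constraints.

import re

# Pattern copied from the DBClusterIdentifier definition above: starts with a letter,
# 1-63 characters, and hyphens may not be doubled or trailing.
DB_CLUSTER_IDENTIFIER_PATTERN = re.compile(r"^[a-zA-Z]{1}(?:-?[a-zA-Z0-9]){0,62}$")

def is_valid_db_cluster_identifier(name: str) -> bool:
    return DB_CLUSTER_IDENTIFIER_PATTERN.fullmatch(name) is not None

assert is_valid_db_cluster_identifier("my-cluster1")        # example from the description
assert not is_valid_db_cluster_identifier("my--cluster1")   # consecutive hyphens
assert not is_valid_db_cluster_identifier("1mycluster")     # must start with a letter
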
By default, deletion protection is disabled.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "boolean" + "Endpoint" : { + "description" : "", + "$ref" : "#/definitions/Endpoint" }, - "Domain" : { - "description" : "Indicates the directory ID of the Active Directory to create the DB cluster.\n For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.\n For more information, see [Kerberos authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) in the *Amazon Aurora User Guide*.\n Valid for: Aurora DB clusters only", + "ReplicationSourceIdentifier" : { + "description" : "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.\n Valid for: Aurora DB clusters only", "type" : "string" }, - "DomainIAMRoleName" : { - "description" : "Specifies the name of the IAM role to use when making API calls to the Directory Service.\n Valid for: Aurora DB clusters only", + "Engine" : { + "description" : "The name of the database engine to be used for this DB cluster.\n Valid Values:\n + ``aurora-mysql`` \n + ``aurora-postgresql`` \n + ``mysql`` \n + ``postgres`` \n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "EnableCloudwatchLogsExports" : { - "description" : "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see [Publishing Database Logs to Amazon CloudWatch Logs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the *Amazon Aurora User Guide*.\n *Aurora MySQL* \n Valid values: ``audit``, ``error``, ``general``, ``slowquery`` \n *Aurora PostgreSQL* \n Valid values: ``postgresql`` \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "array", + "Tags" : { + "maxItems" : 50, "uniqueItems" : true, + "description" : "Tags to assign to the DB cluster.\n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", + "insertionOrder" : false, + "type" : "array", "items" : { - "type" : "string" + "$ref" : "#/definitions/Tag" } }, - "EnableGlobalWriteForwarding" : { - "description" : "Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.\n You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. 
For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then.\n Valid for Cluster Type: Aurora DB clusters only", - "type" : "boolean" + "EngineVersion" : { + "description" : "The version number of the database engine to use.\n To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (8.0-compatible), use the following command:\n ``aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"`` \n You can supply either ``5.7`` or ``8.0`` to use the default engine version for Aurora MySQL version 2 or version 3, respectively.\n To list all of the available engine versions for Aurora PostgreSQL, use the following command:\n ``aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"`` \n To list all of the available engine versions for RDS for MySQL, use the following command:\n ``aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"`` \n To list all of the available engine versions for RDS for PostgreSQL, use the following command:\n ``aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"`` \n *Aurora MySQL* \n For information, see [Database engine updates for Amazon Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) in the *Amazon Aurora User Guide*.\n *Aurora PostgreSQL* \n For information, see [Amazon Aurora PostgreSQL releases and engine versions](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) in the *Amazon Aurora User Guide*.\n *MySQL* \n For information, see [Amazon RDS for MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the *Amazon RDS User Guide*.\n *PostgreSQL* \n For information, see [Amazon RDS for PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the *Amazon RDS User Guide*.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "string" }, - "EnableHttpEndpoint" : { - "description" : "Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.\n When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n RDS Data API is supported with the following DB clusters:\n + Aurora PostgreSQL Serverless v2 and provisioned\n + Aurora PostgreSQL and Aurora MySQL Serverless v1\n \n For more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide*.\n Valid for Cluster Type: Aurora DB clusters only", - "type" : "boolean" + "StorageType" : { + "description" : "The storage type to associate with the DB cluster.\n For information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type). 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings).\n This setting is required to create a Multi-AZ DB cluster.\n When specified for a Multi-AZ DB cluster, a value for the ``Iops`` parameter is required.\n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n Valid Values:\n + Aurora DB clusters - ``aurora | aurora-iopt1`` \n + Multi-AZ DB clusters - ``io1 | io2 | gp3`` \n \n Default:\n + Aurora DB clusters - ``aurora`` \n + Multi-AZ DB clusters - ``io1`` \n \n When you create an Aurora DB cluster with the storage type set to ``aurora-iopt1``, the storage type is returned in the response. The storage type isn't returned when you set it to ``aurora``.", + "type" : "string" }, - "EnableIAMDatabaseAuthentication" : { - "description" : "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n For more information, see [IAM Database Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon Aurora User Guide.* \n Valid for: Aurora DB clusters only", - "type" : "boolean" + "KmsKeyId" : { + "description" : "The Amazon Resource Name (ARN) of the AWS KMS key that is used to encrypt the database instances in the DB cluster, such as ``arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef``. If you enable the ``StorageEncrypted`` property but don't specify this property, the default KMS key is used. If you specify this property, you must set the ``StorageEncrypted`` property to ``true``.\n If you specify the ``SnapshotIdentifier`` property, the ``StorageEncrypted`` property value is inherited from the snapshot, and if the DB cluster is encrypted, the specified ``KmsKeyId`` property is used.\n If you create a read replica of an encrypted DB cluster in another AWS Region, make sure to set ``KmsKeyId`` to a KMS key identifier that is valid in the destination AWS Region. This KMS key is used to encrypt the read replica in that AWS Region.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "string" + }, + "ServerlessV2ScalingConfiguration" : { + "description" : "The scaling configuration of an Aurora Serverless V2 DB cluster. \n This property is only supported for Aurora Serverless v2. For Aurora Serverless v1, Use the ``ScalingConfiguration`` property.\n Valid for: Aurora Serverless v2 DB clusters only", + "$ref" : "#/definitions/ServerlessV2ScalingConfiguration" + }, + "PerformanceInsightsRetentionPeriod" : { + "description" : "The number of days to retain Performance Insights data.\n Valid for Cluster Type: Multi-AZ DB clusters only\n Valid Values:\n + ``7`` \n + *month* * 31, where *month* is a number of months from 1-23. Examples: ``93`` (3 months * 31), ``341`` (11 months * 31), ``589`` (19 months * 31)\n + ``731`` \n \n Default: ``7`` days\n If you specify a retention period that isn't valid, such as ``94``, Amazon RDS issues an error.", + "type" : "integer" + }, + "DatabaseName" : { + "description" : "The name of your database. If you don't provide a name, then Amazon RDS won't create a database in this DB cluster. For naming constraints, see [Naming Constraints](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon Aurora User Guide*. 
\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "string" }, "EnableLocalWriteForwarding" : { "description" : "Specifies whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.\n Valid for: Aurora DB clusters only", "type" : "boolean" }, - "Engine" : { - "description" : "The name of the database engine to be used for this DB cluster.\n Valid Values:\n + ``aurora-mysql`` \n + ``aurora-postgresql`` \n + ``mysql`` \n + ``postgres`` \n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string" - }, - "EngineLifecycleSupport" : { - "description" : "The life cycle type for this DB cluster.\n By default, this value is set to ``open-source-rds-extended-support``, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to ``open-source-rds-extended-support-disabled``. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date.\n You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:\n + Amazon Aurora (PostgreSQL only) - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/extended-support.html) in the *Amazon Aurora User Guide* \n + Amazon RDS - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html) in the *Amazon RDS User Guide* \n \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n Valid Values: ``open-source-rds-extended-support | open-source-rds-extended-support-disabled`` \n Default: ``open-source-rds-extended-support``", + "DBClusterResourceId" : { + "description" : "", "type" : "string" }, - "EngineMode" : { - "description" : "The DB engine mode of the DB cluster, either ``provisioned`` or ``serverless``.\n The ``serverless`` engine mode only applies for Aurora Serverless v1 DB clusters. Aurora Serverless v2 DB clusters use the ``provisioned`` engine mode.\n For information about limitations and requirements for Serverless DB clusters, see the following sections in the *Amazon Aurora User Guide*:\n + [Limitations of Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html#aurora-serverless.limitations) \n + [Requirements for Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html) \n \n Valid for Cluster Type: Aurora DB clusters only", - "type" : "string" + "AutoMinorVersionUpgrade" : { + "description" : "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. 
By default, minor engine upgrades are applied automatically.\n Valid for Cluster Type: Multi-AZ DB clusters only", + "type" : "boolean" }, - "EngineVersion" : { - "description" : "The version number of the database engine to use.\n To list all of the available engine versions for Aurora MySQL version 2 (5.7-compatible) and version 3 (8.0-compatible), use the following command:\n ``aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"`` \n You can supply either ``5.7`` or ``8.0`` to use the default engine version for Aurora MySQL version 2 or version 3, respectively.\n To list all of the available engine versions for Aurora PostgreSQL, use the following command:\n ``aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"`` \n To list all of the available engine versions for RDS for MySQL, use the following command:\n ``aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"`` \n To list all of the available engine versions for RDS for PostgreSQL, use the following command:\n ``aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"`` \n *Aurora MySQL* \n For information, see [Database engine updates for Amazon Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) in the *Amazon Aurora User Guide*.\n *Aurora PostgreSQL* \n For information, see [Amazon Aurora PostgreSQL releases and engine versions](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.20180305.html) in the *Amazon Aurora User Guide*.\n *MySQL* \n For information, see [Amazon RDS for MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.VersionMgmt) in the *Amazon RDS User Guide*.\n *PostgreSQL* \n For information, see [Amazon RDS for PostgreSQL](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html#PostgreSQL.Concepts) in the *Amazon RDS User Guide*.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "DBSubnetGroupName" : { + "description" : "A DB subnet group that you want to associate with this DB cluster. \n If you are restoring a DB cluster to a point in time with ``RestoreType`` set to ``copy-on-write``, and don't specify a DB subnet group name, then the DB cluster is restored with a default DB subnet group.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "ManageMasterUserPassword" : { - "description" : "Specifies whether to manage the master user password with AWS Secrets Manager.\n For more information, see [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.* \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n Constraints:\n + Can't manage the master user password with AWS Secrets Manager if ``MasterUserPassword`` is specified.", + "DeletionProtection" : { + "description" : "A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. 
By default, deletion protection is disabled.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "boolean" }, - "Iops" : { - "description" : "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.\n For information about valid IOPS values, see [Provisioned IOPS storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the *Amazon RDS User Guide*.\n This setting is required to create a Multi-AZ DB cluster.\n Valid for Cluster Type: Multi-AZ DB clusters only\n Constraints:\n + Must be a multiple between .5 and 50 of the storage amount for the DB cluster.", + "AllocatedStorage" : { + "description" : "The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.\n Valid for Cluster Type: Multi-AZ DB clusters only\n This setting is required to create a Multi-AZ DB cluster.", "type" : "integer" }, - "KmsKeyId" : { - "description" : "The Amazon Resource Name (ARN) of the AWS KMS key that is used to encrypt the database instances in the DB cluster, such as ``arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef``. If you enable the ``StorageEncrypted`` property but don't specify this property, the default KMS key is used. If you specify this property, you must set the ``StorageEncrypted`` property to ``true``.\n If you specify the ``SnapshotIdentifier`` property, the ``StorageEncrypted`` property value is inherited from the snapshot, and if the DB cluster is encrypted, the specified ``KmsKeyId`` property is used.\n If you create a read replica of an encrypted DB cluster in another AWS Region, make sure to set ``KmsKeyId`` to a KMS key identifier that is valid in the destination AWS Region. This KMS key is used to encrypt the read replica in that AWS Region.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string" - }, - "MasterUsername" : { - "description" : "The name of the master user for the DB cluster.\n If you specify the ``SourceDBClusterIdentifier``, ``SnapshotIdentifier``, or ``GlobalClusterIdentifier`` property, don't specify this property. The value is inherited from the source DB cluster, the snapshot, or the primary DB cluster for the global database cluster, respectively.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string", - "pattern" : "^[a-zA-Z]{1}[a-zA-Z0-9_]*$", - "minLength" : 1 - }, "MasterUserPassword" : { "description" : "The master password for the DB instance.\n If you specify the ``SourceDBClusterIdentifier``, ``SnapshotIdentifier``, or ``GlobalClusterIdentifier`` property, don't specify this property. 
The value is inherited from the source DB cluster, the snapshot, or the primary DB cluster for the global database cluster, respectively.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, "MasterUserSecret" : { - "$ref" : "#/definitions/MasterUserSecret", - "description" : "The secret managed by RDS in AWS Secrets Manager for the master user password.\n For more information, see [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*" - }, - "MonitoringInterval" : { - "description" : "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB cluster. To turn off collecting Enhanced Monitoring metrics, specify ``0``.\n If ``MonitoringRoleArn`` is specified, also set ``MonitoringInterval`` to a value other than ``0``.\n Valid for Cluster Type: Multi-AZ DB clusters only\n Valid Values: ``0 | 1 | 5 | 10 | 15 | 30 | 60`` \n Default: ``0``", - "type" : "integer" + "description" : "The secret managed by RDS in AWS Secrets Manager for the master user password.\n For more information, see [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*", + "$ref" : "#/definitions/MasterUserSecret" }, - "MonitoringRoleArn" : { - "description" : "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is ``arn:aws:iam:123456789012:role/emaccess``. For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide*.\n If ``MonitoringInterval`` is set to a value other than ``0``, supply a ``MonitoringRoleArn`` value.\n Valid for Cluster Type: Multi-AZ DB clusters only", + "SourceDBClusterIdentifier" : { + "description" : "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n Constraints:\n + Must match the identifier of an existing DBCluster.\n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "NetworkType" : { - "description" : "The network type of the DB cluster.\n Valid values:\n + ``IPV4`` \n + ``DUAL`` \n \n The network type is determined by the ``DBSubnetGroup`` specified for the DB cluster. A ``DBSubnetGroup`` can support only the IPv4 protocol or the IPv4 and IPv6 protocols (``DUAL``).\n For more information, see [Working with a DB instance in a VPC](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the *Amazon Aurora User Guide.* \n Valid for: Aurora DB clusters only", + "MasterUsername" : { + "minLength" : 1, + "pattern" : "^[a-zA-Z]{1}[a-zA-Z0-9_]*$", + "description" : "The name of the master user for the DB cluster.\n If you specify the ``SourceDBClusterIdentifier``, ``SnapshotIdentifier``, or ``GlobalClusterIdentifier`` property, don't specify this property. 
The value is inherited from the source DB cluster, the snapshot, or the primary DB cluster for the global database cluster, respectively.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "PerformanceInsightsEnabled" : { - "description" : "Specifies whether to turn on Performance Insights for the DB cluster.\n For more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide*.\n Valid for Cluster Type: Multi-AZ DB clusters only", - "type" : "boolean" + "ScalingConfiguration" : { + "description" : "The scaling configuration of an Aurora Serverless v1 DB cluster.\n This property is only supported for Aurora Serverless v1. For Aurora Serverless v2, Use the ``ServerlessV2ScalingConfiguration`` property.\n Valid for: Aurora Serverless v1 DB clusters only", + "$ref" : "#/definitions/ScalingConfiguration" + }, + "ReadEndpoint" : { + "description" : "This data type represents the information you need to connect to an Amazon RDS DB instance. This data type is used as a response element in the following actions:\n + ``CreateDBInstance`` \n + ``DescribeDBInstances`` \n + ``DeleteDBInstance`` \n \n For the data structure that represents Amazon Aurora DB cluster endpoints, see ``DBClusterEndpoint``.", + "$ref" : "#/definitions/ReadEndpoint" }, "PerformanceInsightsKmsKeyId" : { "description" : "The AWS KMS key identifier for encryption of Performance Insights data.\n The AWS KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n If you don't specify a value for ``PerformanceInsightsKMSKeyId``, then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS-account. Your AWS-account has a different default KMS key for each AWS-Region.\n Valid for Cluster Type: Multi-AZ DB clusters only", "type" : "string" }, - "PerformanceInsightsRetentionPeriod" : { - "description" : "The number of days to retain Performance Insights data.\n Valid for Cluster Type: Multi-AZ DB clusters only\n Valid Values:\n + ``7`` \n + *month* * 31, where *month* is a number of months from 1-23. Examples: ``93`` (3 months * 31), ``341`` (11 months * 31), ``589`` (19 months * 31)\n + ``731`` \n \n Default: ``7`` days\n If you specify a retention period that isn't valid, such as ``94``, Amazon RDS issues an error.", - "type" : "integer" - }, - "Port" : { - "description" : "The port number on which the DB instances in the DB cluster accept connections.\n Default:\n + When ``EngineMode`` is ``provisioned``, ``3306`` (for both Aurora MySQL and Aurora PostgreSQL)\n + When ``EngineMode`` is ``serverless``:\n + ``3306`` when ``Engine`` is ``aurora`` or ``aurora-mysql`` \n + ``5432`` when ``Engine`` is ``aurora-postgresql`` \n \n \n The ``No interruption`` on update behavior only applies to DB clusters. If you are updating a DB instance, see [Port](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-port) for the AWS::RDS::DBInstance resource.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "integer" - }, - "PreferredBackupWindow" : { - "description" : "The daily time range during which automated backups are created. 
For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the *Amazon Aurora User Guide.* \n Constraints:\n + Must be in the format ``hh24:mi-hh24:mi``.\n + Must be in Universal Coordinated Time (UTC).\n + Must not conflict with the preferred maintenance window.\n + Must be at least 30 minutes.\n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string" - }, - "PreferredMaintenanceWindow" : { - "description" : "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n Format: ``ddd:hh24:mi-ddd:hh24:mi`` \n The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Cluster Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the *Amazon Aurora User Guide.* \n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n Constraints: Minimum 30-minute window.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "string" - }, "PubliclyAccessible" : { "description" : "Specifies whether the DB cluster is publicly accessible.\n When the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n When the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n Valid for Cluster Type: Multi-AZ DB clusters only\n Default: The default behavior varies depending on whether ``DBSubnetGroupName`` is specified.\n If ``DBSubnetGroupName`` isn't specified, and ``PubliclyAccessible`` isn't specified, the following applies:\n + If the default VPC in the target Region doesn’t have an internet gateway attached to it, the DB cluster is private.\n + If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n \n If ``DBSubnetGroupName`` is specified, and ``PubliclyAccessible`` isn't specified, the following applies:\n + If the subnets are part of a VPC that doesn’t have an internet gateway attached to it, the DB cluster is private.\n + If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "type" : "boolean" }, - "ReplicationSourceIdentifier" : { - "description" : "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.\n Valid for: Aurora DB clusters only", + "Domain" : { + "description" : "Indicates the directory ID of the Active Directory to create the DB cluster.\n For Amazon Aurora DB clusters, Amazon RDS can use Kerberos authentication to authenticate users that connect to the DB cluster.\n For more information, see [Kerberos authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) in the *Amazon Aurora User Guide*.\n Valid for: Aurora DB clusters only", "type" : "string" }, - 
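For context, a minimal sketch of how a few of the ``AWS::RDS::DBCluster`` properties defined in this schema might be combined in a template. The logical ID, engine version, database name, and capacity values are illustrative placeholders, not values taken from this diff; only the property names and constraints come from the schema above.

```yaml
# Illustrative sketch only -- the logical ID, engine version, and capacity
# values are placeholders, not values taken from this schema diff.
Resources:
  DemoAuroraCluster:
    Type: AWS::RDS::DBCluster
    Properties:
      Engine: aurora-postgresql
      EngineVersion: "15.4"                # assumed version; list versions with describe-db-engine-versions
      DatabaseName: demodb
      MasterUsername: adminuser            # must match ^[a-zA-Z]{1}[a-zA-Z0-9_]*$
      ManageMasterUserPassword: true       # RDS keeps the password in Secrets Manager (MasterUserSecret)
      StorageEncrypted: true               # must be true if KmsKeyId were also specified
      ServerlessV2ScalingConfiguration:    # Serverless v2 only; Serverless v1 uses ScalingConfiguration
        MinCapacity: 0.5                   # smallest allowed ACU value per the schema
        MaxCapacity: 8
```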
"RestoreToTime" : { - "description" : "The date and time to restore the DB cluster to.\n Valid Values: Value must be a time in Universal Coordinated Time (UTC) format\n Constraints:\n + Must be before the latest restorable time for the DB instance\n + Must be specified if ``UseLatestRestorableTime`` parameter isn't provided\n + Can't be specified if the ``UseLatestRestorableTime`` parameter is enabled\n + Can't be specified if the ``RestoreType`` parameter is ``copy-on-write`` \n \n This property must be used with ``SourceDBClusterIdentifier`` property. The resulting cluster will have the identifier that matches the value of the ``DBclusterIdentifier`` property.\n Example: ``2015-03-07T23:45:00Z`` \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "BacktrackWindow" : { + "description" : "The target backtrack window, in seconds. To disable backtracking, set this value to ``0``.\n Valid for Cluster Type: Aurora MySQL DB clusters only\n Default: ``0`` \n Constraints:\n + If specified, this value must be set to a number from 0 to 259,200 (72 hours).", + "type" : "integer", + "minimum" : 0 + }, + "DBInstanceParameterGroupName" : { + "description" : "The name of the DB parameter group to apply to all instances of the DB cluster.\n When you apply a parameter group using the ``DBInstanceParameterGroupName`` parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window.\n Valid for Cluster Type: Aurora DB clusters only\n Default: The existing name setting\n Constraints:\n + The DB parameter group must be in the same DB parameter group family as this DB cluster.\n + The ``DBInstanceParameterGroupName`` parameter is valid in combination with the ``AllowMajorVersionUpgrade`` parameter for a major version upgrade only.", "type" : "string" }, - "RestoreType" : { - "description" : "The type of restore to be performed. You can specify one of the following values:\n + ``full-copy`` - The new DB cluster is restored as a full copy of the source DB cluster.\n + ``copy-on-write`` - The new DB cluster is restored as a clone of the source DB cluster.\n \n If you don't specify a ``RestoreType`` value, then the new DB cluster is restored as a full copy of the source DB cluster.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "EnableGlobalWriteForwarding" : { + "description" : "Specifies whether to enable this DB cluster to forward write operations to the primary cluster of a global cluster (Aurora global database). By default, write operations are not allowed on Aurora DB clusters that are secondary clusters in an Aurora global database.\n You can set this value only on Aurora DB clusters that are members of an Aurora global database. With this parameter enabled, a secondary cluster can forward writes to the current primary cluster, and the resulting changes are replicated back to this cluster. For the primary DB cluster of an Aurora global database, this value is used immediately if the primary is demoted by a global cluster API operation, but it does nothing until then.\n Valid for Cluster Type: Aurora DB clusters only", + "type" : "boolean" + }, + "MonitoringRoleArn" : { + "description" : "The Amazon Resource Name (ARN) for the IAM role that permits RDS to send Enhanced Monitoring metrics to Amazon CloudWatch Logs. An example is ``arn:aws:iam:123456789012:role/emaccess``. 
For information on creating a monitoring role, see [Setting up and enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide*.\n If ``MonitoringInterval`` is set to a value other than ``0``, supply a ``MonitoringRoleArn`` value.\n Valid for Cluster Type: Multi-AZ DB clusters only", "type" : "string" }, - "ServerlessV2ScalingConfiguration" : { - "description" : "The scaling configuration of an Aurora Serverless V2 DB cluster. \n This property is only supported for Aurora Serverless v2. For Aurora Serverless v1, Use the ``ScalingConfiguration`` property.\n Valid for: Aurora Serverless v2 DB clusters only", - "$ref" : "#/definitions/ServerlessV2ScalingConfiguration" + "AssociatedRoles" : { + "uniqueItems" : true, + "description" : "Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "array", + "items" : { + "$ref" : "#/definitions/DBClusterRole" + } }, - "ScalingConfiguration" : { - "description" : "The scaling configuration of an Aurora Serverless v1 DB cluster.\n This property is only supported for Aurora Serverless v1. For Aurora Serverless v2, Use the ``ServerlessV2ScalingConfiguration`` property.\n Valid for: Aurora Serverless v1 DB clusters only", - "$ref" : "#/definitions/ScalingConfiguration" + "EnableHttpEndpoint" : { + "description" : "Specifies whether to enable the HTTP endpoint for the DB cluster. By default, the HTTP endpoint isn't enabled.\n When enabled, the HTTP endpoint provides a connectionless web service API (RDS Data API) for running SQL queries on the DB cluster. You can also query your database from inside the RDS console with the RDS query editor.\n RDS Data API is supported with the following DB clusters:\n + Aurora PostgreSQL Serverless v2 and provisioned\n + Aurora PostgreSQL and Aurora MySQL Serverless v1\n \n For more information, see [Using RDS Data API](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/data-api.html) in the *Amazon Aurora User Guide*.\n Valid for Cluster Type: Aurora DB clusters only", + "type" : "boolean" }, "SnapshotIdentifier" : { "description" : "The identifier for the DB snapshot or DB cluster snapshot to restore from.\n You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.\n After you restore a DB cluster with a ``SnapshotIdentifier`` property, you must specify the same ``SnapshotIdentifier`` property for any future updates to the DB cluster. When you specify this property for an update, the DB cluster is not restored from the snapshot again, and the data in the database is not changed. However, if you don't specify the ``SnapshotIdentifier`` property, an empty DB cluster is created, and the original DB cluster is deleted. 
If you specify a property that is different from the previous snapshot restore property, a new DB cluster is restored from the specified ``SnapshotIdentifier`` property, and the original DB cluster is deleted.\n If you specify the ``SnapshotIdentifier`` property to restore a DB cluster (as opposed to specifying it for DB cluster updates), then don't specify the following properties:\n + ``GlobalClusterIdentifier`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``ReplicationSourceIdentifier`` \n + ``RestoreType`` \n + ``SourceDBClusterIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an encrypted snapshot)\n + ``UseLatestRestorableTime`` \n \n Constraints:\n + Must match the identifier of an existing Snapshot.\n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "SourceDBClusterIdentifier" : { - "description" : "When restoring a DB cluster to a point in time, the identifier of the source DB cluster from which to restore.\n Constraints:\n + Must match the identifier of an existing DBCluster.\n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "PreferredBackupWindow" : { + "description" : "The daily time range during which automated backups are created. For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the *Amazon Aurora User Guide.* \n Constraints:\n + Must be in the format ``hh24:mi-hh24:mi``.\n + Must be in Universal Coordinated Time (UTC).\n + Must not conflict with the preferred maintenance window.\n + Must be at least 30 minutes.\n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "SourceRegion" : { - "description" : "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, ``us-east-1``. \n Valid for: Aurora DB clusters only", + "NetworkType" : { + "description" : "The network type of the DB cluster.\n Valid values:\n + ``IPV4`` \n + ``DUAL`` \n \n The network type is determined by the ``DBSubnetGroup`` specified for the DB cluster. A ``DBSubnetGroup`` can support only the IPv4 protocol or the IPv4 and IPv6 protocols (``DUAL``).\n For more information, see [Working with a DB instance in a VPC](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the *Amazon Aurora User Guide.* \n Valid for: Aurora DB clusters only", "type" : "string" }, - "StorageEncrypted" : { - "description" : "Indicates whether the DB cluster is encrypted.\n If you specify the ``KmsKeyId`` property, then you must enable encryption.\n If you specify the ``SourceDBClusterIdentifier`` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified ``KmsKeyId`` property is used.\n If you specify the ``SnapshotIdentifier`` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified ``KmsKeyId`` property is used.\n If you specify the ``SnapshotIdentifier`` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. Specify the ``KmsKeyId`` property for the KMS key to use for encryption. 
If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to ``false``.\n If you specify both the ``StorageEncrypted`` and ``SnapshotIdentifier`` properties without specifying the ``KmsKeyId`` property, then the restored DB cluster inherits the encryption settings from the DB snapshot that provide.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "VpcSecurityGroupIds" : { + "uniqueItems" : true, + "description" : "A list of EC2 VPC security groups to associate with this DB cluster.\n If you plan to update the resource, don't specify VPC security groups in a shared VPC.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "array", + "items" : { + "type" : "string" + } + }, + "CopyTagsToSnapshot" : { + "description" : "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "boolean" }, - "StorageThroughput" : { - "description" : "", - "type" : "integer" + "GlobalClusterIdentifier" : { + "minLength" : 0, + "pattern" : "^$|^[a-zA-Z]{1}(?:-?[a-zA-Z0-9]){0,62}$", + "description" : "If you are configuring an Aurora global database cluster and want your Aurora DB cluster to be a secondary member in the global database cluster, specify the global cluster ID of the global database cluster. To define the primary database cluster of the global cluster, use the [AWS::RDS::GlobalCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-globalcluster.html) resource. \n If you aren't configuring a global database cluster, don't specify this property. \n To remove the DB cluster from a global database cluster, specify an empty value for the ``GlobalClusterIdentifier`` property.\n For information about Aurora global databases, see [Working with Amazon Aurora Global Databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) in the *Amazon Aurora User Guide*.\n Valid for: Aurora DB clusters only", + "type" : "string", + "maxLength" : 63 }, - "StorageType" : { - "description" : "The storage type to associate with the DB cluster.\n For information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type). For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings).\n This setting is required to create a Multi-AZ DB cluster.\n When specified for a Multi-AZ DB cluster, a value for the ``Iops`` parameter is required.\n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n Valid Values:\n + Aurora DB clusters - ``aurora | aurora-iopt1`` \n + Multi-AZ DB clusters - ``io1 | io2 | gp3`` \n \n Default:\n + Aurora DB clusters - ``aurora`` \n + Multi-AZ DB clusters - ``io1`` \n \n When you create an Aurora DB cluster with the storage type set to ``aurora-iopt1``, the storage type is returned in the response. The storage type isn't returned when you set it to ``aurora``.", + "RestoreType" : { + "description" : "The type of restore to be performed. 
You can specify one of the following values:\n + ``full-copy`` - The new DB cluster is restored as a full copy of the source DB cluster.\n + ``copy-on-write`` - The new DB cluster is restored as a clone of the source DB cluster.\n \n If you don't specify a ``RestoreType`` value, then the new DB cluster is restored as a full copy of the source DB cluster.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", "type" : "string" }, - "Tags" : { - "type" : "array", - "maxItems" : 50, - "uniqueItems" : true, - "insertionOrder" : false, - "description" : "Tags to assign to the DB cluster.\n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", - "items" : { - "$ref" : "#/definitions/Tag" - } + "DomainIAMRoleName" : { + "description" : "Specifies the name of the IAM role to use when making API calls to the Directory Service.\n Valid for: Aurora DB clusters only", + "type" : "string" }, - "UseLatestRestorableTime" : { - "description" : "A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster is not restored to the latest restorable backup time. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", - "type" : "boolean" + "EngineLifecycleSupport" : { + "description" : "The life cycle type for this DB cluster.\n By default, this value is set to ``open-source-rds-extended-support``, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to ``open-source-rds-extended-support-disabled``. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date.\n You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:\n + Amazon Aurora (PostgreSQL only) - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/extended-support.html) in the *Amazon Aurora User Guide* \n + Amazon RDS - [Using Amazon RDS Extended Support](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html) in the *Amazon RDS User Guide* \n \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n Valid Values: ``open-source-rds-extended-support | open-source-rds-extended-support-disabled`` \n Default: ``open-source-rds-extended-support``", + "type" : "string" }, - "VpcSecurityGroupIds" : { - "description" : "A list of EC2 VPC security groups to associate with this DB cluster.\n If you plan to update the resource, don't specify VPC security groups in a shared VPC.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "DBClusterInstanceClass" : { + "description" : "The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example ``db.m6gd.xlarge``. 
Not all DB instance classes are available in all AWS-Regions, or for all database engines.\n For the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide*.\n This setting is required to create a Multi-AZ DB cluster.\n Valid for Cluster Type: Multi-AZ DB clusters only", + "type" : "string" + }, + "AvailabilityZones" : { "uniqueItems" : true, + "description" : "A list of Availability Zones (AZs) where instances in the DB cluster can be created. For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide*. \n Valid for: Aurora DB clusters only", + "type" : "array", "items" : { "type" : "string" - }, - "type" : "array" - } - }, - "definitions" : { - "Endpoint" : { - "type" : "object", - "additionalProperties" : false, - "properties" : { - "Address" : { - "description" : "Specifies the connection endpoint for the primary instance of the DB cluster.", - "type" : "string" - }, - "Port" : { - "description" : "Specifies the port that the database engine is listening on.", - "type" : "string" - } - }, - "description" : "The ``Endpoint`` return value specifies the connection endpoint for the primary instance of the DB cluster." + } }, - "ReadEndpoint" : { - "type" : "object", - "additionalProperties" : false, - "properties" : { - "Address" : { - "description" : "The host address of the reader endpoint.", - "type" : "string" - } - }, - "description" : "The ``ReadEndpoint`` return value specifies the reader endpoint for the DB cluster.\n The reader endpoint for a DB cluster load-balances connections across the Aurora Replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Aurora distributes the connection requests among the Aurora Replicas in the DB cluster. This functionality can help balance your read workload across multiple Aurora Replicas in your DB cluster.\n If a failover occurs, and the Aurora Replica that you are connected to is promoted to be the primary instance, your connection is dropped. To continue sending your read workload to other Aurora Replicas in the cluster, you can then reconnect to the reader endpoint.\n For more information about Aurora endpoints, see [Amazon Aurora connection management](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html) in the *Amazon Aurora User Guide*." + "DBClusterArn" : { + "description" : "", + "type" : "string" }, - "DBClusterRole" : { - "description" : "Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.", - "type" : "object", - "additionalProperties" : false, - "properties" : { - "FeatureName" : { - "description" : "The name of the feature associated with the AWS Identity and Access Management (IAM) role. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other AWS services on your behalf. 
For the list of supported feature names, see the ``SupportedFeatureNames`` description in [DBEngineVersion](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DBEngineVersion.html) in the *Amazon RDS API Reference*.", - "type" : "string" - }, - "RoleArn" : { - "description" : "The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.", - "type" : "string" - } - }, - "required" : [ "RoleArn" ] + "PreferredMaintenanceWindow" : { + "description" : "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n Format: ``ddd:hh24:mi-ddd:hh24:mi`` \n The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Cluster Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the *Amazon Aurora User Guide.* \n Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n Constraints: Minimum 30-minute window.\n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "string" }, - "ServerlessV2ScalingConfiguration" : { - "description" : "The ``ServerlessV2ScalingConfiguration`` property type specifies the scaling configuration of an Aurora Serverless V2 DB cluster. For more information, see [Using Amazon Aurora Serverless v2](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.html) in the *Amazon Aurora User Guide*.\n If you have an Aurora cluster, you must set this attribute before you add a DB instance that uses the ``db.serverless`` DB instance class. For more information, see [Clusters that use Aurora Serverless v2 must have a capacity range specified](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.requirements.html#aurora-serverless-v2.requirements.capacity-range) in the *Amazon Aurora User Guide*.\n This property is only supported for Aurora Serverless v2. For Aurora Serverless v1, use the ``ScalingConfiguration`` property.\n Valid for: Aurora Serverless v2 DB clusters", - "type" : "object", - "additionalProperties" : false, - "properties" : { - "MinCapacity" : { - "description" : "The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 8, 8.5, 9, and so on. The smallest value that you can use is 0.5.", - "type" : "number" - }, - "MaxCapacity" : { - "description" : "The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. You can specify ACU values in half-step increments, such as 40, 40.5, 41, and so on. The largest value that you can use is 128.\n The maximum capacity must be higher than 0.5 ACUs. For more information, see [Choosing the maximum Aurora Serverless v2 capacity setting for a cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless-v2.setting-capacity.html#aurora-serverless-v2.max_capacity_considerations) in the *Amazon Aurora User Guide*.\n Aurora automatically sets certain parameters for Aurora Serverless V2 DB instances to values that depend on the maximum ACU value in the capacity range. When you update the maximum capacity value, the ``ParameterApplyStatus`` value for the DB instance changes to ``pending-reboot``. 
You can update the parameter values by rebooting the DB instance after changing the capacity range.", - "type" : "number" - } - } + "Iops" : { + "description" : "The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster.\n For information about valid IOPS values, see [Provisioned IOPS storage](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the *Amazon RDS User Guide*.\n This setting is required to create a Multi-AZ DB cluster.\n Valid for Cluster Type: Multi-AZ DB clusters only\n Constraints:\n + Must be a multiple between .5 and 50 of the storage amount for the DB cluster.", + "type" : "integer" }, - "ScalingConfiguration" : { - "description" : "The ``ScalingConfiguration`` property type specifies the scaling configuration of an Aurora Serverless v1 DB cluster. \n For more information, see [Using Amazon Aurora Serverless](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) in the *Amazon Aurora User Guide*.\n This property is only supported for Aurora Serverless v1. For Aurora Serverless v2, Use the ``ServerlessV2ScalingConfiguration`` property.\n Valid for: Aurora Serverless v1 DB clusters only", - "type" : "object", - "additionalProperties" : false, - "properties" : { - "AutoPause" : { - "description" : "Indicates whether to allow or disallow automatic pause for an Aurora DB cluster in ``serverless`` DB engine mode. A DB cluster can be paused only when it's idle (it has no connections).\n If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it.", - "type" : "boolean" - }, - "MaxCapacity" : { - "description" : "The maximum capacity for an Aurora DB cluster in ``serverless`` DB engine mode.\n For Aurora MySQL, valid capacity values are ``1``, ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``128``, and ``256``.\n For Aurora PostgreSQL, valid capacity values are ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``192``, and ``384``.\n The maximum capacity must be greater than or equal to the minimum capacity.", - "type" : "integer" - }, - "MinCapacity" : { - "description" : "The minimum capacity for an Aurora DB cluster in ``serverless`` DB engine mode.\n For Aurora MySQL, valid capacity values are ``1``, ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``128``, and ``256``.\n For Aurora PostgreSQL, valid capacity values are ``2``, ``4``, ``8``, ``16``, ``32``, ``64``, ``192``, and ``384``.\n The minimum capacity must be less than or equal to the maximum capacity.", - "type" : "integer" - }, - "SecondsBeforeTimeout" : { - "description" : "The amount of time, in seconds, that Aurora Serverless v1 tries to find a scaling point to perform seamless scaling before enforcing the timeout action. 
The default is 300.\n Specify a value between 60 and 600 seconds.", - "type" : "integer" - }, - "SecondsUntilAutoPause" : { - "description" : "The time, in seconds, before an Aurora DB cluster in ``serverless`` mode is paused.\n Specify a value between 300 and 86,400 seconds.", - "type" : "integer" - }, - "TimeoutAction" : { - "description" : "The action to take when the timeout is reached, either ``ForceApplyCapacityChange`` or ``RollbackCapacityChange``.\n ``ForceApplyCapacityChange`` sets the capacity to the specified value as soon as possible.\n ``RollbackCapacityChange``, the default, ignores the capacity change if a scaling point isn't found in the timeout period.\n If you specify ``ForceApplyCapacityChange``, connections that prevent Aurora Serverless v1 from finding a scaling point might be dropped.\n For more information, see [Autoscaling for Aurora Serverless v1](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.auto-scaling) in the *Amazon Aurora User Guide*.", - "type" : "string" - } - } + "SourceRegion" : { + "description" : "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, ``us-east-1``. \n Valid for: Aurora DB clusters only", + "type" : "string" }, - "Tag" : { - "description" : "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n For more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide*.", - "type" : "object", - "additionalProperties" : false, - "properties" : { - "Key" : { - "type" : "string", - "description" : "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with ``aws:`` or ``rds:``. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", - "minLength" : 1, - "maxLength" : 128 - }, - "Value" : { - "type" : "string", - "description" : "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with ``aws:`` or ``rds:``. The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", - "minLength" : 0, - "maxLength" : 256 - } - }, - "required" : [ "Key" ] + "UseLatestRestorableTime" : { + "description" : "A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster is not restored to the latest restorable backup time. \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "boolean" }, - "MasterUserSecret" : { - "type" : "object", - "additionalProperties" : false, - "properties" : { - "SecretArn" : { - "type" : "string", - "description" : "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the ``Fn::GetAtt`` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values)." 
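Since the ``MasterUserSecret`` definition above notes that ``SecretArn`` is a return value retrievable with the ``Fn::GetAtt`` intrinsic function, a hedged sketch of surfacing it follows; ``DemoAuroraCluster`` is the same hypothetical logical ID used in the earlier sketch.

```yaml
# Hypothetical logical ID; the attribute path MasterUserSecret.SecretArn comes
# from the readOnlyProperties / MasterUserSecret definition in this schema.
Outputs:
  MasterUserSecretArn:
    Description: Secrets Manager secret that RDS manages for the master user password
    Value: !GetAtt DemoAuroraCluster.MasterUserSecret.SecretArn
```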
- }, - "KmsKeyId" : { - "type" : "string", - "description" : "The AWS KMS key identifier that is used to encrypt the secret." - } - }, - "description" : "The ``MasterUserSecret`` return value specifies the secret managed by RDS in AWS Secrets Manager for the master user password.\n For more information, see [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.*" - } - }, - "additionalProperties" : false, - "propertyTransform" : { - "/properties/DBClusterIdentifier" : "$lowercase(DBClusterIdentifier)", - "/properties/DBClusterParameterGroupName" : "$lowercase(DBClusterParameterGroupName)", - "/properties/DBSubnetGroupName" : "$lowercase(DBSubnetGroupName)", - "/properties/EnableHttpEndpoint" : "$lowercase($string(EngineMode)) = 'serverless' ? EnableHttpEndpoint : ($lowercase($string(Engine)) = 'aurora-postgresql' ? EnableHttpEndpoint : false )", - "/properties/Engine" : "$lowercase(Engine)", - "/properties/EngineVersion" : "$join([$string(EngineVersion), \".*\"])", - "/properties/KmsKeyId" : "$join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:kms:[a-z]{2}[-]{1}[a-z]{3,10}[-]{0,1}[a-z]{0,10}[-]{1}[1-3]{1}:[0-9]{12}[:]{1}key\\/\", KmsKeyId])", - "/properties/MasterUserSecret/KmsKeyId" : "$join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:kms:[a-z]{2}[-]{1}[a-z]{3,10}[-]{0,1}[a-z]{0,10}[-]{1}[1-3]{1}:[0-9]{12}[:]{1}key\\/\", MasterUserSecret.KmsKeyId])", - "/properties/NetworkType" : "$lowercase(NetworkType)", - "/properties/PerformanceInsightsKmsKeyId" : "$join([\"arn:(aws)[-]{0,1}[a-z]{0,2}[-]{0,1}[a-z]{0,3}:kms:[a-z]{2}[-]{1}[a-z]{3,10}[-]{0,1}[a-z]{0,10}[-]{1}[1-3]{1}:[0-9]{12}[:]{1}key\\/\", PerformanceInsightsKmsKeyId])", - "/properties/PreferredMaintenanceWindow" : "$lowercase(PreferredMaintenanceWindow)", - "/properties/SnapshotIdentifier" : "$lowercase(SnapshotIdentifier)", - "/properties/SourceDBClusterIdentifier" : "$lowercase(SourceDBClusterIdentifier)", - "/properties/StorageType" : "$lowercase(StorageType)" - }, - "readOnlyProperties" : [ "/properties/DBClusterArn", "/properties/DBClusterResourceId", "/properties/Endpoint", "/properties/Endpoint/Address", "/properties/Endpoint/Port", "/properties/ReadEndpoint/Address", "/properties/MasterUserSecret/SecretArn", "/properties/StorageThroughput" ], - "createOnlyProperties" : [ "/properties/AvailabilityZones", "/properties/DBClusterIdentifier", "/properties/DBSubnetGroupName", "/properties/DBSystemId", "/properties/DatabaseName", "/properties/EngineMode", "/properties/KmsKeyId", "/properties/PubliclyAccessible", "/properties/RestoreToTime", "/properties/RestoreType", "/properties/SnapshotIdentifier", "/properties/SourceDBClusterIdentifier", "/properties/SourceRegion", "/properties/StorageEncrypted", "/properties/UseLatestRestorableTime" ], - "conditionalCreateOnlyProperties" : [ "/properties/Engine", "/properties/GlobalClusterIdentifier", "/properties/MasterUsername" ], - "primaryIdentifier" : [ "/properties/DBClusterIdentifier" ], - "writeOnlyProperties" : [ "/properties/DBInstanceParameterGroupName", "/properties/MasterUserPassword", "/properties/RestoreToTime", "/properties/RestoreType", "/properties/SnapshotIdentifier", "/properties/SourceDBClusterIdentifier", "/properties/SourceRegion", "/properties/UseLatestRestorableTime" ], - "handlers" : { - "create" : { 
- "permissions" : [ "iam:CreateServiceLinkedRole", "iam:PassRole", "rds:AddRoleToDBCluster", "rds:AddTagsToResource", "rds:CreateDBCluster", "rds:CreateDBInstance", "rds:DescribeDBClusters", "rds:DescribeDBClusterSnapshots", "rds:DescribeDBSnapshots", "rds:DescribeEvents", "rds:EnableHttpEndpoint", "rds:ModifyDBCluster", "rds:RestoreDBClusterFromSnapshot", "rds:RestoreDBClusterToPointInTime", "secretsmanager:CreateSecret", "secretsmanager:TagResource" ], - "timeoutInMinutes" : 2160 + "ManageMasterUserPassword" : { + "description" : "Specifies whether to manage the master user password with AWS Secrets Manager.\n For more information, see [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide* and [Password management with Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/rds-secrets-manager.html) in the *Amazon Aurora User Guide.* \n Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n Constraints:\n + Can't manage the master user password with AWS Secrets Manager if ``MasterUserPassword`` is specified.", + "type" : "boolean" }, - "read" : { - "permissions" : [ "rds:DescribeDBClusters" ] + "EnableIAMDatabaseAuthentication" : { + "description" : "A value that indicates whether to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. By default, mapping is disabled.\n For more information, see [IAM Database Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html) in the *Amazon Aurora User Guide.* \n Valid for: Aurora DB clusters only", + "type" : "boolean" }, - "update" : { - "permissions" : [ "ec2:DescribeSecurityGroups", "iam:PassRole", "rds:AddRoleToDBCluster", "rds:AddTagsToResource", "rds:DescribeDBClusters", "rds:DescribeDBSubnetGroups", "rds:DescribeEvents", "rds:DescribeGlobalClusters", "rds:DisableHttpEndpoint", "rds:EnableHttpEndpoint", "rds:ModifyDBCluster", "rds:ModifyDBInstance", "rds:RemoveFromGlobalCluster", "rds:RemoveRoleFromDBCluster", "rds:RemoveTagsFromResource", "secretsmanager:CreateSecret", "secretsmanager:TagResource" ], - "timeoutInMinutes" : 2160 + "DBClusterParameterGroupName" : { + "description" : "The name of the DB cluster parameter group to associate with this DB cluster.\n If you apply a parameter group to an existing DB cluster, then its DB instances might need to reboot. 
This can result in an outage while the DB instances are rebooting.\n If you apply a change to parameter group associated with a stopped DB cluster, then the update stack waits until the DB cluster is started.\n To list all of the available DB cluster parameter group names, use the following command:\n ``aws rds describe-db-cluster-parameter-groups --query \"DBClusterParameterGroups[].DBClusterParameterGroupName\" --output text`` \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "string" }, - "delete" : { - "permissions" : [ "rds:AddTagsToResource", "rds:CreateDBClusterSnapshot", "rds:DeleteDBCluster", "rds:DeleteDBInstance", "rds:DescribeDBClusters", "rds:DescribeGlobalClusters", "rds:RemoveFromGlobalCluster" ] + "PerformanceInsightsEnabled" : { + "description" : "Specifies whether to turn on Performance Insights for the DB cluster.\n For more information, see [Using Amazon Performance Insights](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) in the *Amazon RDS User Guide*.\n Valid for Cluster Type: Multi-AZ DB clusters only", + "type" : "boolean" }, - "list" : { - "permissions" : [ "rds:DescribeDBClusters" ] + "BackupRetentionPeriod" : { + "default" : 1, + "description" : "The number of days for which automated backups are retained.\n Default: 1\n Constraints:\n + Must be a value from 1 to 35\n \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "integer", + "minimum" : 1 + }, + "EnableCloudwatchLogsExports" : { + "uniqueItems" : true, + "description" : "The list of log types that need to be enabled for exporting to CloudWatch Logs. The values in the list depend on the DB engine being used. For more information, see [Publishing Database Logs to Amazon CloudWatch Logs](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) in the *Amazon Aurora User Guide*.\n *Aurora MySQL* \n Valid values: ``audit``, ``error``, ``general``, ``slowquery`` \n *Aurora PostgreSQL* \n Valid values: ``postgresql`` \n Valid for: Aurora DB clusters and Multi-AZ DB clusters", + "type" : "array", + "items" : { + "type" : "string" + } } - }, - "tagging" : { - "taggable" : true, - "tagOnCreate" : true, - "tagUpdatable" : true, - "cloudFormationSystemTags" : true, - "tagProperty" : "/properties/Tags", - "permissions" : [ "rds:AddTagsToResource", "rds:RemoveTagsFromResource" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-rds-dbinstance.json b/aws-cloudformation-schema/aws-rds-dbinstance.json index acdeb752cb..8ca1cfb66d 100644 --- a/aws-cloudformation-schema/aws-rds-dbinstance.json +++ b/aws-cloudformation-schema/aws-rds-dbinstance.json @@ -213,7 +213,7 @@ }, "DBSnapshotIdentifier" : { "type" : "string", - "description" : "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. 
If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``DeleteAutomatedBackups`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PerformanceInsightsKMSKeyId`` \n + ``PerformanceInsightsRetentionPeriod`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an encrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." + "description" : "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. 
If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an unencrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." }, "DBSubnetGroupName" : { "type" : "string", diff --git a/aws-cloudformation-schema/aws-rds-dbsubnetgroup.json b/aws-cloudformation-schema/aws-rds-dbsubnetgroup.json index 0bedd7ee97..8ae6f98883 100644 --- a/aws-cloudformation-schema/aws-rds-dbsubnetgroup.json +++ b/aws-cloudformation-schema/aws-rds-dbsubnetgroup.json @@ -14,6 +14,7 @@ "SubnetIds" : { "type" : "array", "uniqueItems" : false, + "insertionOrder" : false, "items" : { "type" : "string" }, @@ -59,7 +60,6 @@ }, "createOnlyProperties" : [ "/properties/DBSubnetGroupName" ], "primaryIdentifier" : [ "/properties/DBSubnetGroupName" ], - "writeOnlyProperties" : [ "/properties/SubnetIds" ], "handlers" : { "create" : { "permissions" : [ "iam:CreateServiceLinkedRole", "rds:CreateDBSubnetGroup", "rds:DescribeDBSubnetGroups", "rds:AddTagsToResource", "rds:RemoveTagsFromResource", "rds:ListTagsForResource" ] @@ -76,5 +76,13 @@ "list" : { "permissions" : [ "rds:DescribeDBSubnetGroups" ] } + }, + "tagging" : { + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : true, + "cloudFormationSystemTags" : true, + "tagProperty" : "/properties/Tags", + "permissions" : [ "rds:AddTagsToResource", "rds:RemoveTagsFromResource" ] } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-refactorspaces-route.json b/aws-cloudformation-schema/aws-refactorspaces-route.json index dace8715e5..dfc7f316b0 100644 --- a/aws-cloudformation-schema/aws-refactorspaces-route.json +++ b/aws-cloudformation-schema/aws-refactorspaces-route.json @@ -162,12 +162,5 @@ "permissions" : [ "refactor-spaces:UpdateRoute", "refactor-spaces:GetRoute", "refactor-spaces:TagResource", "iam:CreateServiceLinkedRole", "apigateway:GET", "apigateway:PATCH", "apigateway:POST", "apigateway:PUT", "apigateway:DELETE", "apigateway:UpdateRestApiPolicy", "lambda:GetFunctionConfiguration", "lambda:AddPermission", "elasticloadbalancing:DescribeListeners", "elasticloadbalancing:DescribeTargetGroups", "elasticloadbalancing:CreateListener", "elasticloadbalancing:CreateTargetGroup", "elasticloadbalancing:DeleteListener", "elasticloadbalancing:DeleteTargetGroup", "elasticloadbalancing:DescribeTags", "elasticloadbalancing:AddTags", "elasticloadbalancing:RegisterTargets", "elasticloadbalancing:DescribeTargetHealth", "ec2:DescribeSubnets", "ec2:DescribeSubnets", "tag:GetResources" ] } }, - "tagging" : { - "taggable" : true, - "tagOnCreate" : true, - "tagUpdatable" : true, - "cloudFormationSystemTags" : false, - "tagProperty" : "/properties/Tags", - "permissions" : [ "refactor-spaces:ListTagsForResource", "tag:GetResources", "elasticloadbalancing:AddTags", "refactor-spaces:UntagResource", "refactor-spaces:TagResource", "elasticloadbalancing:DescribeTags" ] - } + "taggable" : true } \ No newline at end of file diff --git 
a/aws-cloudformation-schema/aws-secretsmanager-secrettargetattachment.json b/aws-cloudformation-schema/aws-secretsmanager-secrettargetattachment.json index 865d715f33..3780ccc700 100644 --- a/aws-cloudformation-schema/aws-secretsmanager-secrettargetattachment.json +++ b/aws-cloudformation-schema/aws-secretsmanager-secrettargetattachment.json @@ -1,5 +1,6 @@ { "typeName" : "AWS::SecretsManager::SecretTargetAttachment", + "$schema" : "https://raw.githubusercontent.com/aws-cloudformation/cloudformation-resource-schema/blob/master/src/main/resources/schema/provider.definition.schema.v1.json", "description" : "Resource Type definition for AWS::SecretsManager::SecretTargetAttachment", "additionalProperties" : false, "properties" : { @@ -17,6 +18,26 @@ } }, "required" : [ "TargetType", "TargetId", "SecretId" ], + "tagging" : { + "taggable" : false + }, "primaryIdentifier" : [ "/properties/Id" ], - "readOnlyProperties" : [ "/properties/Id" ] + "readOnlyProperties" : [ "/properties/Id" ], + "handlers" : { + "read" : { + "permissions" : [ "secretsmanager:GetSecretValue" ] + }, + "list" : { + "permissions" : [ "secretsmanager:GetSecretValue", "secretsmanager:ListSecrets" ] + }, + "create" : { + "permissions" : [ "secretsmanager:GetSecretValue", "secretsmanager:PutSecretValue", "rds:DescribeDBInstances", "redshift:DescribeClusters", "rds:DescribeDBClusters", "docdb-elastic:GetCluster", "redshift-serverless:ListWorkgroups", "redshift-serverless:GetNamespace" ] + }, + "delete" : { + "permissions" : [ "secretsmanager:GetSecretValue", "secretsmanager:PutSecretValue" ] + }, + "update" : { + "permissions" : [ "secretsmanager:GetSecretValue", "secretsmanager:PutSecretValue", "rds:DescribeDBInstances", "redshift:DescribeClusters", "rds:DescribeDBClusters", "docdb-elastic:GetCluster", "redshift-serverless:ListWorkgroups", "redshift-serverless:GetNamespace" ] + } + } } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-securitylake-datalake.json b/aws-cloudformation-schema/aws-securitylake-datalake.json index f3958fd624..e60957480a 100644 --- a/aws-cloudformation-schema/aws-securitylake-datalake.json +++ b/aws-cloudformation-schema/aws-securitylake-datalake.json @@ -148,7 +148,8 @@ "tagOnCreate" : true, "tagUpdatable" : true, "cloudFormationSystemTags" : false, - "tagProperty" : "/properties/Tags" + "tagProperty" : "/properties/Tags", + "permissions" : [ "securitylake:TagResource", "securitylake:UntagResource", "securitylake:ListTagsForResource" ] }, "replacementStrategy" : "delete_then_create", "handlers" : { diff --git a/aws-cloudformation-schema/aws-securitylake-subscriber.json b/aws-cloudformation-schema/aws-securitylake-subscriber.json index a1fa794b64..27484602af 100644 --- a/aws-cloudformation-schema/aws-securitylake-subscriber.json +++ b/aws-cloudformation-schema/aws-securitylake-subscriber.json @@ -175,7 +175,7 @@ "replacementStrategy" : "delete_then_create", "handlers" : { "create" : { - "permissions" : [ "securitylake:CreateSubscriber", "securitylake:CreateCustomLogSource", "securitylake:CreateDataLake", "securitylake:TagResource", "securitylake:GetSubscriber", "securitylake:ListTagsForResource", "iam:GetRole", "iam:GetRolePolicy", "iam:PutRolePolicy", "iam:CreateRole", "iam:CreateServiceLinkedRole", "glue:GetDatabase", "glue:GetTable", "lakeformation:RegisterResource", "lakeformation:GrantPermissions", "lakeformation:RevokePermissions", "lakeformation:ListPermissions", "ram:GetResourceShareAssociations", "ram:CreateResourceShare", "ram:UpdateResourceShare", 
"ram:GetResourceShares" ] + "permissions" : [ "securitylake:CreateSubscriber", "securitylake:CreateCustomLogSource", "securitylake:CreateDataLake", "securitylake:TagResource", "securitylake:GetSubscriber", "securitylake:ListSubscribers", "securitylake:ListTagsForResource", "iam:GetRole", "iam:GetRolePolicy", "iam:PutRolePolicy", "iam:CreateRole", "iam:CreateServiceLinkedRole", "glue:GetDatabase", "glue:GetTable", "lakeformation:RegisterResource", "lakeformation:GrantPermissions", "lakeformation:RevokePermissions", "lakeformation:ListPermissions", "ram:GetResourceShareAssociations", "ram:CreateResourceShare", "ram:UpdateResourceShare", "ram:GetResourceShares" ] }, "read" : { "permissions" : [ "securitylake:GetSubscriber", "securitylake:ListTagsForResource" ] diff --git a/aws-cloudformation-schema/aws-sqs-queue.json b/aws-cloudformation-schema/aws-sqs-queue.json index a158d58b09..706c9c17e9 100644 --- a/aws-cloudformation-schema/aws-sqs-queue.json +++ b/aws-cloudformation-schema/aws-sqs-queue.json @@ -1,6 +1,6 @@ { "typeName" : "AWS::SQS::Queue", - "description" : "The ``AWS::SQS::Queue`` resource creates an SQS standard or FIFO queue.\n Keep the following caveats in mind:\n + If you don't specify the ``FifoQueue`` property, SQS creates a standard queue.\n You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see [Moving from a standard queue to a FIFO queue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-moving.html) in the *Developer Guide*. \n + If you don't provide a value for a property, the queue is created with the default value for the property.\n + If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n + To successfully create a new queue, you must provide a queue name that adheres to the [limits related to queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) and is unique within the scope of your queues.\n \n For more information about creating FIFO (first-in-first-out) queues, see [Creating an queue ()](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/screate-queue-cloudformation.html) in the *Developer Guide*.", + "description" : "The ``AWS::SQS::Queue`` resource creates an SQS standard or FIFO queue.\n Keep the following caveats in mind:\n + If you don't specify the ``FifoQueue`` property, SQS creates a standard queue.\n You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see [Moving from a standard queue to a FIFO queue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-moving.html) in the *Developer Guide*. 
\n + If you don't provide a value for a property, the queue is created with the default value for the property.\n + If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n + To successfully create a new queue, you must provide a queue name that adheres to the [limits related to queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) and is unique within the scope of your queues.\n \n For more information about creating FIFO (first-in-first-out) queues, see [Creating an queue ()](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/create-queue-cloudformation.html) in the *Developer Guide*.", "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-sqs.git", "definitions" : { "Tag" : { @@ -43,7 +43,7 @@ }, "FifoQueue" : { "type" : "boolean", - "description" : "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*." + "description" : "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*." }, "FifoThroughputLimit" : { "description" : "For high throughput for FIFO queues, specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are ``perQueue`` and ``perMessageGroupId``.\n To enable high throughput for a FIFO queue, set this attribute to ``perMessageGroupId`` *and* set the ``DeduplicationScope`` attribute to ``messageGroup``. If you set these attributes to anything other than these values, normal throughput is in effect and deduplication occurs as specified. For more information, see [High throughput for FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html) and [Quotas related to messages](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) in the *Developer Guide*.", @@ -55,7 +55,7 @@ }, "KmsMasterKeyId" : { "type" : "string", - "description" : "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (e.g. ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper" + "description" : "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (for example ``alias/aws/sqs``), key ARN, or key ID. 
For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Security best practices for Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *Key Management Service Developer Guide*" }, "SqsManagedSseEnabled" : { "type" : "boolean", @@ -71,7 +71,7 @@ }, "QueueName" : { "type" : "string", - "description" : "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name." + "description" : "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name." }, "ReceiveMessageWaitTimeSeconds" : { "type" : "integer", @@ -79,11 +79,11 @@ }, "RedriveAllowPolicy" : { "type" : [ "object", "string" ], - "description" : "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. 
To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``." + "description" : "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``." }, "RedrivePolicy" : { "type" : [ "object", "string" ], - "description" : "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``" + "description" : "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is received by a consumer of the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. 
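As a concrete sketch of the redrive settings documented in this hunk (queue ARNs, Region, and account ID below are placeholders, not values taken from the schema): a ``RedrivePolicy`` on a source queue and the matching ``RedriveAllowPolicy`` on the queue acting as its dead-letter queue might look like
{ "deadLetterTargetArn" : "arn:aws:sqs:us-east-1:123456789012:MyDeadLetterQueue", "maxReceiveCount" : 5 }
{ "redrivePermission" : "byQueue", "sourceQueueArns" : [ "arn:aws:sqs:us-east-1:123456789012:MySourceQueue" ] }
where ``maxReceiveCount`` and ``byQueue``/``sourceQueueArns`` follow the parameter semantics given in the surrounding descriptions.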
Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``" }, "Tags" : { "type" : "array", diff --git a/aws-cloudformation-schema/aws-sqs-queuepolicy.json b/aws-cloudformation-schema/aws-sqs-queuepolicy.json index e0877737e1..dd8718f2d3 100644 --- a/aws-cloudformation-schema/aws-sqs-queuepolicy.json +++ b/aws-cloudformation-schema/aws-sqs-queuepolicy.json @@ -18,18 +18,7 @@ "uniqueItems" : false, "insertionOrder" : false, "items" : { - "type" : "string", - "anyOf" : [ { - "relationshipRef" : { - "typeName" : "AWS::SQS::Queue", - "propertyPath" : "/properties/QueueUrl" - } - }, { - "relationshipRef" : { - "typeName" : "AWS::SQS::Queue", - "propertyPath" : "/properties/QueueName" - } - } ] + "type" : "string" } } }, diff --git a/aws-cloudformation-schema/aws-ssmquicksetup-configurationmanager.json b/aws-cloudformation-schema/aws-ssmquicksetup-configurationmanager.json index 0fca20efc0..5a21136a30 100644 --- a/aws-cloudformation-schema/aws-ssmquicksetup-configurationmanager.json +++ b/aws-cloudformation-schema/aws-ssmquicksetup-configurationmanager.json @@ -92,11 +92,11 @@ "TagsMap" : { "type" : "object", "patternProperties" : { - "^[A-Za-z0-9+=@_\\/:-]+$" : { + "^[A-Za-z0-9+=@_\\/:.-]+$" : { "type" : "string", "maxLength" : 256, "minLength" : 1, - "pattern" : "^[A-Za-z0-9+=@_\\/:-]+$" + "pattern" : "^[A-Za-z0-9+=@_\\/:.-]+$" } }, "additionalProperties" : false diff --git a/aws-cloudformation-schema/aws-transfer-server.json b/aws-cloudformation-schema/aws-transfer-server.json index a9b8cf703b..a9ddcc907a 100644 --- a/aws-cloudformation-schema/aws-transfer-server.json +++ b/aws-cloudformation-schema/aws-transfer-server.json @@ -1,221 +1,348 @@ { "typeName" : "AWS::Transfer::Server", - "description" : "Resource Type definition for AWS::Transfer::Server", - "additionalProperties" : false, - "properties" : { - "LoggingRole" : { - "type" : "string" - }, - "Protocols" : { - "type" : "array", - "uniqueItems" : false, - "items" : { - "$ref" : "#/definitions/Protocol" - } - }, - "IdentityProviderDetails" : { - "$ref" : "#/definitions/IdentityProviderDetails" - }, - "EndpointDetails" : { - "$ref" : "#/definitions/EndpointDetails" - }, - "StructuredLogDestinations" : { - "type" : "array", - "uniqueItems" : false, - "items" : { - "$ref" : "#/definitions/StructuredLogDestination" - } - }, - "PreAuthenticationLoginBanner" : { - "type" : "string" - }, - "ServerId" : { - "type" : "string" - }, - "PostAuthenticationLoginBanner" : { - "type" : "string" - }, - "EndpointType" : { - "type" : "string" - }, - "SecurityPolicyName" : { - "type" : "string" - }, - "ProtocolDetails" : { - "$ref" : "#/definitions/ProtocolDetails" - }, - "S3StorageOptions" : { - "$ref" : "#/definitions/S3StorageOptions" - }, - "WorkflowDetails" : { - "$ref" : "#/definitions/WorkflowDetails" + "description" : "Definition of AWS::Transfer::Server Resource Type", + "definitions" : { + "As2Transport" : { + "type" : "string", + "enum" : [ "HTTP" ] }, - "Arn" : { - "type" : "string" + "DirectoryListingOptimization" : { + "type" : "string", + "description" : "Indicates whether optimization to directory listing on S3 servers is used. 
Disabled by default for compatibility.", + "enum" : [ "ENABLED", "DISABLED" ] }, "Domain" : { - "type" : "string" - }, - "IdentityProviderType" : { - "type" : "string" - }, - "Tags" : { - "type" : "array", - "uniqueItems" : false, - "items" : { - "$ref" : "#/definitions/Tag" - } + "type" : "string", + "enum" : [ "S3", "EFS" ] }, - "Certificate" : { - "type" : "string" - } - }, - "definitions" : { - "StructuredLogDestination" : { + "EndpointDetails" : { "type" : "object", + "properties" : { + "AddressAllocationIds" : { + "type" : "array", + "insertionOrder" : true, + "items" : { + "type" : "string" + } + }, + "SubnetIds" : { + "type" : "array", + "insertionOrder" : true, + "items" : { + "type" : "string" + } + }, + "VpcEndpointId" : { + "type" : "string", + "maxLength" : 22, + "minLength" : 22, + "pattern" : "^vpce-[0-9a-f]{17}$" + }, + "VpcId" : { + "type" : "string" + }, + "SecurityGroupIds" : { + "type" : "array", + "insertionOrder" : false, + "items" : { + "type" : "string", + "maxLength" : 20, + "minLength" : 11, + "pattern" : "^sg-[0-9a-f]{8,17}$" + } + } + }, "additionalProperties" : false }, + "EndpointType" : { + "type" : "string", + "enum" : [ "PUBLIC", "VPC", "VPC_ENDPOINT" ] + }, "IdentityProviderDetails" : { "type" : "object", - "additionalProperties" : false, "properties" : { - "Function" : { - "type" : "string" - }, - "DirectoryId" : { - "type" : "string" - }, "Url" : { - "type" : "string" + "type" : "string", + "maxLength" : 255, + "minLength" : 0 }, "InvocationRole" : { - "type" : "string" + "type" : "string", + "maxLength" : 2048, + "minLength" : 20, + "pattern" : "^arn:.*role/\\S+$" + }, + "DirectoryId" : { + "type" : "string", + "maxLength" : 12, + "minLength" : 12, + "pattern" : "^d-[0-9a-f]{10}$" + }, + "Function" : { + "type" : "string", + "maxLength" : 170, + "minLength" : 1, + "pattern" : "^arn:[a-z-]+:lambda:.*$" }, "SftpAuthenticationMethods" : { - "type" : "string" + "$ref" : "#/definitions/SftpAuthenticationMethods" } - } + }, + "additionalProperties" : false + }, + "IdentityProviderType" : { + "type" : "string", + "enum" : [ "SERVICE_MANAGED", "API_GATEWAY", "AWS_DIRECTORY_SERVICE", "AWS_LAMBDA" ] + }, + "Protocol" : { + "type" : "string", + "enum" : [ "SFTP", "FTP", "FTPS", "AS2" ] }, "ProtocolDetails" : { "type" : "object", - "additionalProperties" : false, "properties" : { - "As2Transports" : { - "type" : "array", - "uniqueItems" : false, - "items" : { - "$ref" : "#/definitions/As2Transport" - } - }, "PassiveIp" : { - "type" : "string" + "type" : "string", + "maxLength" : 15, + "minLength" : 0 + }, + "TlsSessionResumptionMode" : { + "$ref" : "#/definitions/TlsSessionResumptionMode" }, "SetStatOption" : { - "type" : "string" + "$ref" : "#/definitions/SetStatOption" }, - "TlsSessionResumptionMode" : { - "type" : "string" + "As2Transports" : { + "type" : "array", + "insertionOrder" : false, + "items" : { + "$ref" : "#/definitions/As2Transport" + }, + "maxItems" : 1, + "minItems" : 1 } - } + }, + "additionalProperties" : false }, "S3StorageOptions" : { "type" : "object", - "additionalProperties" : false, "properties" : { "DirectoryListingOptimization" : { - "type" : "string" + "$ref" : "#/definitions/DirectoryListingOptimization" } - } + }, + "additionalProperties" : false }, - "WorkflowDetails" : { + "SetStatOption" : { + "type" : "string", + "enum" : [ "DEFAULT", "ENABLE_NO_OP" ] + }, + "SftpAuthenticationMethods" : { + "type" : "string", + "enum" : [ "PASSWORD", "PUBLIC_KEY", "PUBLIC_KEY_OR_PASSWORD", "PUBLIC_KEY_AND_PASSWORD" ] + }, + "Tag" : { "type" : 
"object", - "additionalProperties" : false, "properties" : { - "OnUpload" : { - "type" : "array", - "uniqueItems" : false, - "items" : { - "$ref" : "#/definitions/WorkflowDetail" - } + "Key" : { + "type" : "string", + "maxLength" : 128, + "minLength" : 0 }, - "OnPartialUpload" : { - "type" : "array", - "uniqueItems" : false, - "items" : { - "$ref" : "#/definitions/WorkflowDetail" - } + "Value" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 0 } - } + }, + "required" : [ "Key", "Value" ], + "additionalProperties" : false + }, + "TlsSessionResumptionMode" : { + "type" : "string", + "enum" : [ "DISABLED", "ENABLED", "ENFORCED" ] }, "WorkflowDetail" : { "type" : "object", - "additionalProperties" : false, "properties" : { "WorkflowId" : { - "type" : "string" + "type" : "string", + "maxLength" : 19, + "minLength" : 19, + "pattern" : "^w-([a-z0-9]{17})$" }, "ExecutionRole" : { - "type" : "string" + "type" : "string", + "maxLength" : 2048, + "minLength" : 20, + "pattern" : "^arn:.*role/\\S+$" } }, - "required" : [ "WorkflowId", "ExecutionRole" ] - }, - "Tag" : { - "type" : "object", - "additionalProperties" : false, - "properties" : { - "Value" : { - "type" : "string" - }, - "Key" : { - "type" : "string" - } - }, - "required" : [ "Value", "Key" ] - }, - "Protocol" : { - "type" : "object", + "required" : [ "ExecutionRole", "WorkflowId" ], "additionalProperties" : false }, - "EndpointDetails" : { + "WorkflowDetails" : { "type" : "object", - "additionalProperties" : false, "properties" : { - "AddressAllocationIds" : { - "type" : "array", - "uniqueItems" : false, - "items" : { - "type" : "string" - } - }, - "VpcId" : { - "type" : "string" - }, - "VpcEndpointId" : { - "type" : "string" - }, - "SecurityGroupIds" : { + "OnUpload" : { "type" : "array", - "uniqueItems" : false, + "insertionOrder" : true, "items" : { - "type" : "string" - } + "$ref" : "#/definitions/WorkflowDetail" + }, + "maxItems" : 1, + "minItems" : 0 }, - "SubnetIds" : { + "OnPartialUpload" : { "type" : "array", - "uniqueItems" : false, + "insertionOrder" : true, "items" : { - "type" : "string" - } + "$ref" : "#/definitions/WorkflowDetail" + }, + "maxItems" : 1, + "minItems" : 0 } - } - }, - "As2Transport" : { - "type" : "object", + }, "additionalProperties" : false } }, - "createOnlyProperties" : [ "/properties/IdentityProviderType", "/properties/Domain" ], - "primaryIdentifier" : [ "/properties/ServerId" ], - "readOnlyProperties" : [ "/properties/ServerId", "/properties/Arn" ] + "properties" : { + "Arn" : { + "type" : "string", + "maxLength" : 1600, + "minLength" : 20, + "pattern" : "^arn:\\S+$" + }, + "As2ServiceManagedEgressIpAddresses" : { + "type" : "array", + "insertionOrder" : false, + "items" : { + "type" : "string", + "pattern" : "^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$" + }, + "description" : "The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs. These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well." 
+ }, + "Certificate" : { + "type" : "string", + "maxLength" : 1600, + "minLength" : 0 + }, + "Domain" : { + "$ref" : "#/definitions/Domain" + }, + "EndpointDetails" : { + "$ref" : "#/definitions/EndpointDetails" + }, + "EndpointType" : { + "$ref" : "#/definitions/EndpointType" + }, + "IdentityProviderDetails" : { + "$ref" : "#/definitions/IdentityProviderDetails" + }, + "IdentityProviderType" : { + "$ref" : "#/definitions/IdentityProviderType" + }, + "LoggingRole" : { + "type" : "string", + "maxLength" : 2048, + "minLength" : 0, + "pattern" : "^(|arn:.*role/\\S+)$" + }, + "PostAuthenticationLoginBanner" : { + "type" : "string", + "maxLength" : 4096, + "minLength" : 0, + "pattern" : "^[\\x09-\\x0D\\x20-\\x7E]*$" + }, + "PreAuthenticationLoginBanner" : { + "type" : "string", + "maxLength" : 4096, + "minLength" : 0, + "pattern" : "^[\\x09-\\x0D\\x20-\\x7E]*$" + }, + "ProtocolDetails" : { + "$ref" : "#/definitions/ProtocolDetails" + }, + "Protocols" : { + "type" : "array", + "insertionOrder" : false, + "items" : { + "$ref" : "#/definitions/Protocol" + }, + "maxItems" : 4, + "minItems" : 1 + }, + "S3StorageOptions" : { + "$ref" : "#/definitions/S3StorageOptions" + }, + "SecurityPolicyName" : { + "type" : "string", + "maxLength" : 100, + "minLength" : 0, + "pattern" : "^TransferSecurityPolicy-.+$" + }, + "ServerId" : { + "type" : "string", + "maxLength" : 19, + "minLength" : 19, + "pattern" : "^s-([0-9a-f]{17})$" + }, + "StructuredLogDestinations" : { + "type" : "array", + "insertionOrder" : false, + "items" : { + "type" : "string", + "maxLength" : 1600, + "minLength" : 20, + "pattern" : "^arn:\\S+$" + }, + "maxItems" : 1, + "minItems" : 0 + }, + "Tags" : { + "type" : "array", + "insertionOrder" : false, + "items" : { + "$ref" : "#/definitions/Tag" + }, + "maxItems" : 50, + "minItems" : 1 + }, + "WorkflowDetails" : { + "$ref" : "#/definitions/WorkflowDetails" + } + }, + "readOnlyProperties" : [ "/properties/Arn", "/properties/As2ServiceManagedEgressIpAddresses", "/properties/ServerId" ], + "writeOnlyProperties" : [ "/properties/IdentityProviderType" ], + "createOnlyProperties" : [ "/properties/Domain", "/properties/IdentityProviderType" ], + "primaryIdentifier" : [ "/properties/Arn" ], + "additionalIdentifiers" : [ [ "/properties/ServerId" ] ], + "handlers" : { + "create" : { + "permissions" : [ "apigateway:GET", "ds:AuthorizeApplication", "ds:DescribeDirectories", "ec2:AssociateAddress", "ec2:CreateVpcEndpoint", "ec2:DescribeAddresses", "ec2:DescribeNetworkInterfaces", "ec2:DescribeVpcEndpoints", "iam:PassRole", "logs:CreateLogDelivery", "logs:DeleteLogDelivery", "logs:DescribeLogGroups", "logs:DescribeResourcePolicies", "logs:GetLogDelivery", "logs:ListLogDeliveries", "logs:PutResourcePolicy", "logs:UpdateLogDelivery", "transfer:CreateServer", "transfer:DescribeServer", "transfer:StartServer", "transfer:StopServer", "transfer:TagResource", "transfer:UpdateServer" ] + }, + "read" : { + "permissions" : [ "ec2:DescribeVpcEndpoints", "transfer:DescribeServer" ] + }, + "update" : { + "permissions" : [ "apigateway:GET", "ec2:AssociateAddress", "ec2:CreateVpcEndpoint", "ec2:DeleteVpcEndpoints", "ec2:DescribeAddresses", "ec2:DescribeNetworkInterfaces", "ec2:DescribeVpcEndpoints", "ec2:DisassociateAddress", "ec2:ModifyVpcEndpoint", "iam:PassRole", "logs:CreateLogDelivery", "logs:DeleteLogDelivery", "logs:DescribeLogGroups", "logs:DescribeResourcePolicies", "logs:GetLogDelivery", "logs:ListLogDeliveries", "logs:PutResourcePolicy", "logs:UpdateLogDelivery", "transfer:DescribeServer", 
"transfer:StartServer", "transfer:StopServer", "transfer:TagResource", "transfer:UnTagResource", "transfer:UpdateServer" ] + }, + "delete" : { + "permissions" : [ "ds:DescribeDirectories", "ds:UnauthorizeApplication", "ec2:DeleteVpcEndpoints", "ec2:DescribeAddresses", "ec2:DescribeNetworkInterfaces", "ec2:DescribeVpcEndpoints", "ec2:DisassociateAddress", "logs:DeleteLogDelivery", "logs:GetLogDelivery", "logs:ListLogDeliveries", "transfer:DeleteServer" ] + }, + "list" : { + "permissions" : [ "transfer:ListServers" ] + } + }, + "tagging" : { + "cloudFormationSystemTags" : true, + "permissions" : [ "transfer:TagResource", "transfer:UnTagResource", "transfer:ListTagsForResource" ], + "tagOnCreate" : true, + "tagProperty" : "/properties/Tags", + "tagUpdatable" : true, + "taggable" : true + }, + "additionalProperties" : false, + "conditionalCreateOnlyProperties" : [ "/properties/EndpointDetails", "/properties/EndpointDetails/AddressAllocationIds" ], + "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-resource-providers-transfer" } \ No newline at end of file diff --git a/aws-cloudformation-schema/aws-wisdom-aiprompt.json b/aws-cloudformation-schema/aws-wisdom-aiprompt.json new file mode 100644 index 0000000000..cf76ccc234 --- /dev/null +++ b/aws-cloudformation-schema/aws-wisdom-aiprompt.json @@ -0,0 +1,145 @@ +{ + "typeName" : "AWS::Wisdom::AIPrompt", + "description" : "Definition of AWS::Wisdom::AIPrompt Resource Type", + "sourceUrl" : "https://github.com/aws-cloudformation/aws-cloudformation-rpdk.git", + "definitions" : { + "AIPromptAPIFormat" : { + "type" : "string", + "enum" : [ "ANTHROPIC_CLAUDE_MESSAGES", "ANTHROPIC_CLAUDE_TEXT_COMPLETIONS" ] + }, + "AIPromptTemplateConfiguration" : { + "type" : "object", + "oneOf" : [ { + "type" : "object", + "title" : "TextFullAIPromptEditTemplateConfiguration", + "properties" : { + "TextFullAIPromptEditTemplateConfiguration" : { + "$ref" : "#/definitions/TextFullAIPromptEditTemplateConfiguration" + } + }, + "required" : [ "TextFullAIPromptEditTemplateConfiguration" ], + "additionalProperties" : false + } ] + }, + "AIPromptTemplateType" : { + "type" : "string", + "enum" : [ "TEXT" ] + }, + "AIPromptType" : { + "type" : "string", + "enum" : [ "ANSWER_GENERATION", "INTENT_LABELING_GENERATION", "QUERY_REFORMULATION" ] + }, + "Tags" : { + "type" : "object", + "patternProperties" : { + "^(?!aws:)[a-zA-Z+-=._:/]+$" : { + "type" : "string", + "maxLength" : 256, + "minLength" : 1 + } + }, + "additionalProperties" : false + }, + "TextFullAIPromptEditTemplateConfiguration" : { + "type" : "object", + "properties" : { + "Text" : { + "type" : "string", + "maxLength" : 200000, + "minLength" : 1 + } + }, + "required" : [ "Text" ], + "additionalProperties" : false + } + }, + "properties" : { + "AIPromptId" : { + "type" : "string", + "pattern" : "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(:[A-Z0-9_$]+){0,1}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}(:[A-Z0-9_$]+){0,1}$" + }, + "AIPromptArn" : { + "type" : "string", + "pattern" : "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" + }, + "ApiFormat" : { + "$ref" : "#/definitions/AIPromptAPIFormat" + }, + "AssistantId" : { + "type" : "string", + "pattern" : 
"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" + }, + "AssistantArn" : { + "type" : "string", + "pattern" : "^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}){0,2}$" + }, + "Description" : { + "type" : "string", + "maxLength" : 255, + "minLength" : 1, + "pattern" : "^[a-zA-Z0-9\\s_.,-]+" + }, + "ModelId" : { + "type" : "string", + "maxLength" : 2048, + "minLength" : 1 + }, + "Name" : { + "type" : "string", + "maxLength" : 255, + "minLength" : 1, + "pattern" : "^[a-zA-Z0-9\\s_.,-]+" + }, + "Tags" : { + "$ref" : "#/definitions/Tags" + }, + "TemplateConfiguration" : { + "$ref" : "#/definitions/AIPromptTemplateConfiguration" + }, + "TemplateType" : { + "$ref" : "#/definitions/AIPromptTemplateType" + }, + "Type" : { + "$ref" : "#/definitions/AIPromptType" + } + }, + "required" : [ "ApiFormat", "ModelId", "TemplateConfiguration", "TemplateType", "Type" ], + "readOnlyProperties" : [ "/properties/AIPromptArn", "/properties/AIPromptId", "/properties/AssistantArn" ], + "createOnlyProperties" : [ "/properties/ApiFormat", "/properties/AssistantId", "/properties/ModelId", "/properties/Name", "/properties/Tags", "/properties/TemplateType", "/properties/Type" ], + "primaryIdentifier" : [ "/properties/AIPromptId", "/properties/AssistantId" ], + "additionalIdentifiers" : [ [ "/properties/AIPromptArn", "/properties/AssistantArn" ] ], + "tagging" : { + "taggable" : true, + "tagOnCreate" : true, + "tagUpdatable" : false, + "cloudFormationSystemTags" : false, + "tagProperty" : "/properties/Tags", + "permissions" : [ "wisdom:TagResource" ] + }, + "handlers" : { + "create" : { + "permissions" : [ "wisdom:CreateAIPrompt", "wisdom:TagResource" ] + }, + "read" : { + "permissions" : [ "wisdom:GetAIPrompt" ] + }, + "update" : { + "permissions" : [ "wisdom:UpdateAIPrompt" ] + }, + "delete" : { + "permissions" : [ "wisdom:DeleteAIPrompt" ] + }, + "list" : { + "permissions" : [ "wisdom:ListAIPrompts" ], + "handlerSchema" : { + "properties" : { + "AssistantId" : { + "$ref" : "resource-schema.json#/properties/AssistantId" + } + }, + "required" : [ "AssistantId" ] + } + } + }, + "additionalProperties" : false +} \ No newline at end of file diff --git a/meta/.botocore.version b/meta/.botocore.version index a7411d4e09..6d0d2f9f2f 100644 --- a/meta/.botocore.version +++ b/meta/.botocore.version @@ -1 +1 @@ -1.35.39 +1.35.42 diff --git a/provider/cmd/pulumi-gen-aws-native/deprecated-types.txt b/provider/cmd/pulumi-gen-aws-native/deprecated-types.txt index bab3e0d8d5..1fa8ceae82 100644 --- a/provider/cmd/pulumi-gen-aws-native/deprecated-types.txt +++ b/provider/cmd/pulumi-gen-aws-native/deprecated-types.txt @@ -5,7 +5,6 @@ AWS::GameCast::Application AWS::GameCast::StreamGroup AWS::SNS::TopicPolicy AWS::SSMGuiConnect::Preferences -AWS::SecretsManager::SecretTargetAttachment AWSQS::EKS::Cluster AWSQS::Kubernetes::Get AWSQS::Kubernetes::Helm diff --git a/provider/cmd/pulumi-gen-aws-native/supported-types.txt b/provider/cmd/pulumi-gen-aws-native/supported-types.txt index 6350353f41..f1c84a7f6c 100644 --- a/provider/cmd/pulumi-gen-aws-native/supported-types.txt +++ b/provider/cmd/pulumi-gen-aws-native/supported-types.txt @@ -70,6 +70,7 @@ AWS::AppStream::ApplicationFleetAssociation 
AWS::AppStream::DirectoryConfig AWS::AppStream::Entitlement AWS::AppStream::ImageBuilder +AWS::AppSync::DataSource AWS::AppSync::DomainName AWS::AppSync::DomainNameApiAssociation AWS::AppSync::FunctionConfiguration @@ -1017,6 +1018,7 @@ AWS::Transfer::Agreement AWS::Transfer::Certificate AWS::Transfer::Connector AWS::Transfer::Profile +AWS::Transfer::Server AWS::Transfer::Workflow AWS::VerifiedPermissions::IdentitySource AWS::VerifiedPermissions::Policy @@ -1039,6 +1041,7 @@ AWS::WAFv2::RegexPatternSet AWS::WAFv2::RuleGroup AWS::WAFv2::WebACL AWS::WAFv2::WebACLAssociation +AWS::Wisdom::AIPrompt AWS::Wisdom::Assistant AWS::Wisdom::AssistantAssociation AWS::Wisdom::KnowledgeBase diff --git a/provider/cmd/pulumi-resource-aws-native/metadata.json b/provider/cmd/pulumi-resource-aws-native/metadata.json index 45f2dc4e01..db0c6096c0 100644 --- a/provider/cmd/pulumi-resource-aws-native/metadata.json +++ b/provider/cmd/pulumi-resource-aws-native/metadata.json @@ -6776,6 +6776,137 @@ "tagsProperty": "tags", "tagsStyle": "keyValueArray" }, + "aws-native:appsync:DataSource": { + "cf": "AWS::AppSync::DataSource", + "inputs": { + "apiId": { + "type": "string", + "description": "Unique AWS AppSync GraphQL API identifier where this data source will be created." + }, + "description": { + "type": "string", + "description": "The description of the data source." + }, + "dynamoDbConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceDynamoDbConfig", + "description": "AWS Region and TableName for an Amazon DynamoDB table in your account." + }, + "elasticsearchConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceElasticsearchConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service. This property is deprecated. For new data sources, use OpenSearchServiceConfig to specify an OpenSearch Service data source." + }, + "eventBridgeConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceEventBridgeConfig", + "description": "ARN for the EventBridge bus." + }, + "httpConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceHttpConfig", + "description": "Endpoints for an HTTP data source." + }, + "lambdaConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceLambdaConfig", + "description": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account." + }, + "metricsConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceMetricsConfig", + "description": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` ." + }, + "name": { + "type": "string", + "description": "Friendly name for you to identify your AppSync data source after creation." + }, + "openSearchServiceConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceOpenSearchServiceConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account." 
+ }, + "relationalDatabaseConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceRelationalDatabaseConfig", + "description": "Relational Database configuration of the relational database data source." + }, + "serviceRoleArn": { + "type": "string", + "description": "The AWS Identity and Access Management service role ARN for the data source. The system assumes this role when accessing the data source." + }, + "type": { + "type": "string", + "description": "The type of the data source." + } + }, + "outputs": { + "apiId": { + "type": "string", + "description": "Unique AWS AppSync GraphQL API identifier where this data source will be created.", + "replaceOnChanges": true + }, + "dataSourceArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the API key, such as arn:aws:appsync:us-east-1:123456789012:apis/graphqlapiid/datasources/datasourcename." + }, + "description": { + "type": "string", + "description": "The description of the data source." + }, + "dynamoDbConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceDynamoDbConfig", + "description": "AWS Region and TableName for an Amazon DynamoDB table in your account." + }, + "elasticsearchConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceElasticsearchConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service. This property is deprecated. For new data sources, use OpenSearchServiceConfig to specify an OpenSearch Service data source." + }, + "eventBridgeConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceEventBridgeConfig", + "description": "ARN for the EventBridge bus." + }, + "httpConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceHttpConfig", + "description": "Endpoints for an HTTP data source." + }, + "lambdaConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceLambdaConfig", + "description": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account." + }, + "metricsConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceMetricsConfig", + "description": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` ." + }, + "name": { + "type": "string", + "description": "Friendly name for you to identify your AppSync data source after creation.", + "replaceOnChanges": true + }, + "openSearchServiceConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceOpenSearchServiceConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account." + }, + "relationalDatabaseConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceRelationalDatabaseConfig", + "description": "Relational Database configuration of the relational database data source." + }, + "serviceRoleArn": { + "type": "string", + "description": "The AWS Identity and Access Management service role ARN for the data source. The system assumes this role when accessing the data source." + }, + "type": { + "type": "string", + "description": "The type of the data source." 
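As a minimal sketch of declaring the newly mapped ``AWS::AppSync::DataSource`` type in template JSON (the API ID, table name, Region, and role ARN are placeholders; ``AMAZON_DYNAMODB`` is assumed from the AppSync data source types, and the ``TableName``/``AwsRegion`` keys inside ``DynamoDBConfig`` are assumed from the upstream CloudFormation type rather than spelled out in this entry):
{
  "Resources" : {
    "OrdersDataSource" : {
      "Type" : "AWS::AppSync::DataSource",
      "Properties" : {
        "ApiId" : "YOUR_GRAPHQL_API_ID",
        "Name" : "OrdersTable",
        "Type" : "AMAZON_DYNAMODB",
        "ServiceRoleArn" : "arn:aws:iam::123456789012:role/AppSyncDynamoDBRole",
        "DynamoDBConfig" : {
          "TableName" : "Orders",
          "AwsRegion" : "us-east-1"
        }
      }
    }
  }
}
Per this metadata entry, ``apiId`` and ``type`` are the only required inputs, the ``dynamoDbConfig``/``DynamoDBConfig`` casing follows the ``irreversibleNames`` mapping just below, and ``name`` is auto-named when omitted (see ``autoNamingSpec``).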
+ } + }, + "autoNamingSpec": { + "sdkName": "name" + }, + "required": [ + "apiId", + "type" + ], + "createOnly": [ + "apiId", + "name" + ], + "irreversibleNames": { + "dynamoDbConfig": "DynamoDBConfig" + } + }, "aws-native:appsync:DomainName": { "cf": "AWS::AppSync::DomainName", "inputs": { @@ -16177,11 +16308,11 @@ }, "computeType": { "$ref": "#/types/aws-native:codebuild:FleetComputeType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" + "description": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" }, "environmentType": { "$ref": "#/types/aws-native:codebuild:FleetEnvironmentType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." + "description": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." }, "fleetServiceRole": { "type": "string", @@ -16189,11 +16320,11 @@ }, "fleetVpcConfig": { "$ref": "#/types/aws-native:codebuild:FleetVpcConfig", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the VPC configuration that AWS CodeBuild accesses." + "description": "Information about the VPC configuration that AWS CodeBuild accesses." }, "imageId": { "type": "string", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe Amazon Machine Image (AMI) of the compute fleet." + "description": "The Amazon Machine Image (AMI) of the compute fleet." }, "name": { "type": "string", @@ -16222,11 +16353,11 @@ }, "computeType": { "$ref": "#/types/aws-native:codebuild:FleetComputeType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" + "description": "Information about the compute resources the compute fleet uses. 
Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" }, "environmentType": { "$ref": "#/types/aws-native:codebuild:FleetEnvironmentType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." + "description": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." }, "fleetServiceRole": { "type": "string", @@ -16234,11 +16365,11 @@ }, "fleetVpcConfig": { "$ref": "#/types/aws-native:codebuild:FleetVpcConfig", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the VPC configuration that AWS CodeBuild accesses." + "description": "Information about the VPC configuration that AWS CodeBuild accesses." }, "imageId": { "type": "string", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe Amazon Machine Image (AMI) of the compute fleet." + "description": "The Amazon Machine Image (AMI) of the compute fleet." }, "name": { "type": "string", @@ -17570,7 +17701,7 @@ "inputs": { "accountRecoverySetting": { "$ref": "#/types/aws-native:cognito:UserPoolAccountRecoverySetting", - "description": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email." + "description": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email." }, "adminCreateUserConfig": { "$ref": "#/types/aws-native:cognito:UserPoolAdminCreateUserConfig", @@ -17581,7 +17712,7 @@ "items": { "type": "string" }, - "description": "Attributes supported as an alias for this user pool. 
Possible values: *phone_number* , *email* , or *preferred_username* .\n\n\u003e This user pool property cannot be updated." + "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* ." }, "autoVerifiedAttributes": { "type": "array", @@ -17610,18 +17741,18 @@ }, "emailVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "emailVerificationSubject": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "enabledMfas": { "type": "array", "items": { "type": "string" }, - "description": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \"OFF\" and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \"OFF\". Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`" + "description": "Set enabled MFA options on a specified user pool. To disable all MFAs after it has been enabled, set `MfaConfiguration` to `OFF` and remove EnabledMfas. MFAs can only be all disabled if `MfaConfiguration` is `OFF` . After you enable `SMS_MFA` , you can only disable it by setting `MfaConfiguration` to `OFF` . Can be one of the following values:\n\n- `SMS_MFA` - Enables MFA with SMS for the user pool. To select this option, you must also provide values for `SmsConfiguration` .\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n- `EMAIL_OTP` - Enables MFA with email for the user pool. To select this option, you must provide values for `EmailConfiguration` and within those, set `EmailSendingAccount` to `DEVELOPER` .\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA` | `EMAIL_OTP`" }, "lambdaConfig": { "$ref": "#/types/aws-native:cognito:UserPoolLambdaConfig", @@ -17640,11 +17771,11 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolSchemaAttribute" }, - "description": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n\u003e During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute." + "description": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes." }, "smsAuthenticationMessage": { "type": "string", - "description": "A string representing the SMS authentication message." + "description": "The contents of the SMS authentication message." 
}, "smsConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolSmsConfiguration", @@ -17652,7 +17783,7 @@ }, "smsVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "userAttributeUpdateSettings": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeUpdateSettings", @@ -17678,11 +17809,11 @@ "items": { "type": "string" }, - "description": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated." + "description": "Specifies whether a user can use an email address or phone number as a username when they sign up." }, "usernameConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolUsernameConfiguration", - "description": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set." + "description": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) ." }, "verificationMessageTemplate": { "$ref": "#/types/aws-native:cognito:UserPoolVerificationMessageTemplate", @@ -17692,7 +17823,7 @@ "outputs": { "accountRecoverySetting": { "$ref": "#/types/aws-native:cognito:UserPoolAccountRecoverySetting", - "description": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email." + "description": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. 
In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email." }, "adminCreateUserConfig": { "$ref": "#/types/aws-native:cognito:UserPoolAdminCreateUserConfig", @@ -17703,7 +17834,7 @@ "items": { "type": "string" }, - "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n\u003e This user pool property cannot be updated." + "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* ." }, "arn": { "type": "string", @@ -17736,18 +17867,18 @@ }, "emailVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "emailVerificationSubject": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "enabledMfas": { "type": "array", "items": { "type": "string" }, - "description": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \"OFF\" and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \"OFF\". Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`" + "description": "Set enabled MFA options on a specified user pool. To disable all MFAs after it has been enabled, set `MfaConfiguration` to `OFF` and remove EnabledMfas. MFAs can only be all disabled if `MfaConfiguration` is `OFF` . After you enable `SMS_MFA` , you can only disable it by setting `MfaConfiguration` to `OFF` . Can be one of the following values:\n\n- `SMS_MFA` - Enables MFA with SMS for the user pool. To select this option, you must also provide values for `SmsConfiguration` .\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n- `EMAIL_OTP` - Enables MFA with email for the user pool. To select this option, you must provide values for `EmailConfiguration` and within those, set `EmailSendingAccount` to `DEVELOPER` .\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA` | `EMAIL_OTP`" }, "lambdaConfig": { "$ref": "#/types/aws-native:cognito:UserPoolLambdaConfig", @@ -17763,7 +17894,7 @@ }, "providerName": { "type": "string", - "description": "The provider name of the Amazon Cognito user pool, specified as a `String` ." + "description": "A friendly name for the IdP." 
}, "providerUrl": { "type": "string", @@ -17774,11 +17905,11 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolSchemaAttribute" }, - "description": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n\u003e During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute." + "description": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes." }, "smsAuthenticationMessage": { "type": "string", - "description": "A string representing the SMS authentication message." + "description": "The contents of the SMS authentication message." }, "smsConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolSmsConfiguration", @@ -17786,7 +17917,7 @@ }, "smsVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "userAttributeUpdateSettings": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeUpdateSettings", @@ -17816,11 +17947,11 @@ "items": { "type": "string" }, - "description": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated." + "description": "Specifies whether a user can use an email address or phone number as a username when they sign up." }, "usernameConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolUsernameConfiguration", - "description": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set." + "description": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) ." }, "verificationMessageTemplate": { "$ref": "#/types/aws-native:cognito:UserPoolVerificationMessageTemplate", @@ -17846,7 +17977,7 @@ "inputs": { "accessTokenValidity": { "type": "integer", - "description": "The access token time limit. After this limit expires, your user can't use their access token. 
To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours." + "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour." }, "allowedOAuthFlows": { "type": "array", @@ -17910,7 +18041,7 @@ }, "idTokenValidity": { "type": "integer", - "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours." + "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour." }, "logoutUrls": { "type": "array", @@ -17921,7 +18052,7 @@ }, "preventUserExistenceErrors": { "type": "string", - "description": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool." + "description": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. 
When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value." }, "readAttributes": { "type": "array", @@ -17932,7 +18063,7 @@ }, "refreshTokenValidity": { "type": "integer", - "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days." + "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days." }, "supportedIdentityProviders": { "type": "array", @@ -17960,7 +18091,7 @@ "outputs": { "accessTokenValidity": { "type": "integer", - "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours." + "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour." }, "allowedOAuthFlows": { "type": "array", @@ -18032,7 +18163,7 @@ }, "idTokenValidity": { "type": "integer", - "description": "The ID token time limit. After this limit expires, your user can't use their ID token. 
To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours." + "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour." }, "logoutUrls": { "type": "array", @@ -18046,7 +18177,7 @@ }, "preventUserExistenceErrors": { "type": "string", - "description": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool." + "description": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value." }, "readAttributes": { "type": "array", @@ -18057,7 +18188,7 @@ }, "refreshTokenValidity": { "type": "integer", - "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days." + "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. 
To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days." }, "supportedIdentityProviders": { "type": "array", @@ -18110,11 +18241,11 @@ }, "domain": { "type": "string", - "description": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names." + "description": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names." }, "userPoolId": { "type": "string", - "description": "The user pool ID for the user pool where you want to associate a user pool domain." + "description": "The ID of the user pool that is associated with the custom domain whose certificate you're updating." } }, "outputs": { @@ -18132,12 +18263,12 @@ }, "domain": { "type": "string", - "description": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", + "description": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", "replaceOnChanges": true }, "userPoolId": { "type": "string", - "description": "The user pool ID for the user pool where you want to associate a user pool domain.", + "description": "The ID of the user pool that is associated with the custom domain whose certificate you're updating.", "replaceOnChanges": true } }, @@ -18300,7 +18431,7 @@ "inputs": { "identifier": { "type": "string", - "description": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example: `https://my-weather-api.example.com` ." + "description": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . 
You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens." }, "name": { "type": "string", @@ -18321,7 +18452,7 @@ "outputs": { "identifier": { "type": "string", - "description": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example: `https://my-weather-api.example.com` .", + "description": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens.", "replaceOnChanges": true }, "name": { @@ -18362,7 +18493,7 @@ }, "clientId": { "type": "string", - "description": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` )." + "description": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings." }, "compromisedCredentialsRiskConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolRiskConfigurationAttachmentCompromisedCredentialsRiskConfigurationType", @@ -18384,7 +18515,7 @@ }, "clientId": { "type": "string", - "description": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` ).", + "description": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings.", "replaceOnChanges": true }, "compromisedCredentialsRiskConfiguration": { @@ -18415,7 +18546,7 @@ "inputs": { "clientId": { "type": "string", - "description": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` )." + "description": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings.." }, "css": { "type": "string", @@ -18429,7 +18560,7 @@ "outputs": { "clientId": { "type": "string", - "description": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` ).", + "description": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings..", "replaceOnChanges": true }, "css": { @@ -18484,7 +18615,7 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeType" }, - "description": "An array of name-value pairs that contain user attributes and attribute values." + "description": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. 
You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) either you should supply (in your call to `AdminCreateUser` ) or the user should supply (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent. Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter." }, "userPoolId": { "type": "string", @@ -18534,7 +18665,7 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeType" }, - "description": "An array of name-value pairs that contain user attributes and attribute values.", + "description": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) either you should supply (in your call to `AdminCreateUser` ) or the user should supply (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent. Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter.", "replaceOnChanges": true }, "userPoolId": { @@ -18592,7 +18723,8 @@ "description": "The user pool ID for the user pool." 
}, "username": { - "type": "string" + "type": "string", + "description": "The user's username." } }, "outputs": { @@ -18608,6 +18740,7 @@ }, "username": { "type": "string", + "description": "The user's username.", "replaceOnChanges": true } }, @@ -36399,7 +36532,8 @@ "type": "array", "items": { "$ref": "#/types/aws-native:ecs:TaskSetCapacityProviderStrategyItem" - } + }, + "description": "The capacity provider strategy that are associated with the task set." }, "cluster": { "type": "string", @@ -36465,6 +36599,7 @@ "items": { "$ref": "#/types/aws-native:ecs:TaskSetCapacityProviderStrategyItem" }, + "description": "The capacity provider strategy that are associated with the task set.", "replaceOnChanges": true }, "cluster": { @@ -43200,6 +43335,17 @@ "$ref": "#/types/aws-native:gamelift:ContainerGroupDefinitionSchedulingStrategy", "description": "Specifies whether the container group includes replica or daemon containers." }, + "sourceVersionNumber": { + "type": "integer", + "description": "A specific ContainerGroupDefinition version to be updated" + }, + "supportContainerDefinitions": { + "type": "array", + "items": { + "$ref": "pulumi.json#/Any" + }, + "description": "A collection of support container definitions that define the containers in this group." + }, "tags": { "type": "array", "items": { @@ -43248,6 +43394,25 @@ "description": "Specifies whether the container group includes replica or daemon containers.", "replaceOnChanges": true }, + "sourceVersionNumber": { + "type": "integer", + "description": "A specific ContainerGroupDefinition version to be updated" + }, + "status": { + "$ref": "#/types/aws-native:gamelift:ContainerGroupDefinitionStatus", + "description": "A string indicating ContainerGroupDefinition status." + }, + "statusReason": { + "type": "string", + "description": "A string indicating the reason for ContainerGroupDefinition status." + }, + "supportContainerDefinitions": { + "type": "array", + "items": { + "$ref": "pulumi.json#/Any" + }, + "description": "A collection of support container definitions that define the containers in this group." + }, "tags": { "type": "array", "items": { @@ -45084,8 +45249,7 @@ }, "required": [ "compatibility", - "dataFormat", - "schemaDefinition" + "dataFormat" ], "createOnly": [ "dataFormat", @@ -56800,7 +56964,7 @@ "items": { "$ref": "#/types/aws-native:index:Tag" }, - "description": "A list of tags to add to the event source mapping.\n\n\u003e You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." + "description": "A list of tags to add to the event source mapping.\n You must have the ``lambda:TagResource``, ``lambda:UntagResource``, and ``lambda:ListTags`` permissions for your [principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the CFN stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." 
}, "topics": { "type": "array", @@ -56931,7 +57095,7 @@ "items": { "$ref": "#/types/aws-native:index:Tag" }, - "description": "A list of tags to add to the event source mapping.\n\n\u003e You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." + "description": "A list of tags to add to the event source mapping.\n You must have the ``lambda:TagResource``, ``lambda:UntagResource``, and ``lambda:ListTags`` permissions for your [principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the CFN stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." }, "topics": { "type": "array", @@ -57566,10 +57730,6 @@ "type": "string", "description": "The name of the Lambda function." }, - "policy": { - "$ref": "pulumi.json#/Any", - "description": "The resource policy of your function\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::Lambda::Version` for more information about the expected schema for this property." - }, "provisionedConcurrencyConfig": { "$ref": "#/types/aws-native:lambda:VersionProvisionedConcurrencyConfiguration", "description": "Specifies a provisioned concurrency configuration for a function's version. Updates are not supported for this property." @@ -57599,10 +57759,6 @@ "description": "The name of the Lambda function.", "replaceOnChanges": true }, - "policy": { - "$ref": "pulumi.json#/Any", - "description": "The resource policy of your function\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::Lambda::Version` for more information about the expected schema for this property." - }, "provisionedConcurrencyConfig": { "$ref": "#/types/aws-native:lambda:VersionProvisionedConcurrencyConfiguration", "description": "Specifies a provisioned concurrency configuration for a function's version. Updates are not supported for this property.", @@ -70743,7 +70899,7 @@ }, "type": { "$ref": "#/types/aws-native:organizations:PolicyType", - "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY" + "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY, CHATBOT_POLICY" } }, "outputs": { @@ -70787,7 +70943,7 @@ }, "type": { "$ref": "#/types/aws-native:organizations:PolicyType", - "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY", + "description": "The type of policy to create. 
You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY, CHATBOT_POLICY", "replaceOnChanges": true } }, @@ -72734,7 +72890,7 @@ }, "configuration": { "$ref": "pulumi.json#/Any", - "description": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." + "description": "Configuration information to connect your data source repository to Amazon Q Business. Use this parameter to provide a JSON schema with configuration information specific to your data source connector.\n\nEach data source has a JSON schema provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas:\n\n- [Amazon S3 JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-api.html)\n- [Web Crawler JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-api.html)\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source of your choice.\n- Then, from your specific data source connector page, select *Using the API* . You will find the JSON schema for your data source, including parameter descriptions, in this section.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." }, "description": { "type": "string", @@ -72780,7 +72936,7 @@ }, "configuration": { "$ref": "pulumi.json#/Any", - "description": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." 
+ "description": "Configuration information to connect your data source repository to Amazon Q Business. Use this parameter to provide a JSON schema with configuration information specific to your data source connector.\n\nEach data source has a JSON schema provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas:\n\n- [Amazon S3 JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-api.html)\n- [Web Crawler JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-api.html)\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source of your choice.\n- Then, from your specific data source connector page, select *Using the API* . You will find the JSON schema for your data source, including parameter descriptions, in this section.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." }, "createdAt": { "type": "string", @@ -73224,6 +73380,12 @@ ], "description": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience." }, + "origins": { + "type": "array", + "items": { + "type": "string" + } + }, "roleArn": { "type": "string", "description": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n\u003e You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value." @@ -73277,6 +73439,12 @@ ], "description": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience." }, + "origins": { + "type": "array", + "items": { + "type": "string" + } + }, "roleArn": { "type": "string", "description": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n\u003e You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value." 
@@ -73456,6 +73624,12 @@ }, "description": "\u003cp\u003eErrors associated with the analysis.\u003c/p\u003e" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "name": { "type": "string", "description": "\u003cp\u003eThe descriptive name of the analysis.\u003c/p\u003e" @@ -73538,6 +73712,12 @@ }, "description": "\u003cp\u003eErrors associated with the analysis.\u003c/p\u003e" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "lastUpdatedTime": { "type": "string", "description": "\u003cp\u003eThe time that the analysis was last updated.\u003c/p\u003e" @@ -73603,6 +73783,7 @@ ], "writeOnly": [ "definition", + "folderArns", "parameters", "sourceEntity", "status", @@ -73629,6 +73810,12 @@ "definition": { "$ref": "#/types/aws-native:quicksight:DashboardVersionDefinition" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "linkEntities": { "type": "array", "items": { @@ -73705,6 +73892,12 @@ "definition": { "$ref": "#/types/aws-native:quicksight:DashboardVersionDefinition" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "lastPublishedTime": { "type": "string", "description": "\u003cp\u003eThe last time that this dashboard was published.\u003c/p\u003e" @@ -73782,6 +73975,7 @@ "writeOnly": [ "dashboardPublishOptions", "definition", + "folderArns", "linkSharingConfiguration", "parameters", "sourceEntity", @@ -74068,6 +74262,12 @@ "$ref": "#/types/aws-native:quicksight:DataSourceErrorInfo", "description": "Error information from the last update or the creation of the data source." }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "name": { "type": "string", "description": "A display name for the data source." @@ -74137,6 +74337,12 @@ "$ref": "#/types/aws-native:quicksight:DataSourceErrorInfo", "description": "Error information from the last update or the creation of the data source." }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "lastUpdatedTime": { "type": "string", "description": "\u003cp\u003eThe last time that this data source was updated.\u003c/p\u003e" @@ -74191,7 +74397,8 @@ "type" ], "writeOnly": [ - "credentials" + "credentials", + "folderArns" ], "tagsProperty": "tags", "tagsStyle": "keyValueArray" @@ -74200,7 +74407,8 @@ "cf": "AWS::QuickSight::Folder", "inputs": { "awsAccountId": { - "type": "string" + "type": "string", + "description": "The ID for the AWS account where you want to create the folder." }, "folderId": { "type": "string", @@ -74216,13 +74424,14 @@ }, "parentFolderArn": { "type": "string", - "description": "A new parent folder arn. This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved." + "description": "The Amazon Resource Name (ARN) for the folder." }, "permissions": { "type": "array", "items": { "$ref": "#/types/aws-native:quicksight:FolderResourcePermission" - } + }, + "description": "A structure that describes the principals and the resource-level permissions of a folder.\n\nTo specify no permissions, omit `Permissions` ." }, "sharingModel": { "$ref": "#/types/aws-native:quicksight:FolderSharingModel", @@ -74243,6 +74452,7 @@ }, "awsAccountId": { "type": "string", + "description": "The ID for the AWS account where you want to create the folder.", "replaceOnChanges": true }, "createdTime": { @@ -74269,14 +74479,15 @@ }, "parentFolderArn": { "type": "string", - "description": "A new parent folder arn. 
This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved.", + "description": "The Amazon Resource Name (ARN) for the folder.", "replaceOnChanges": true }, "permissions": { "type": "array", "items": { "$ref": "#/types/aws-native:quicksight:FolderResourcePermission" - } + }, + "description": "A structure that describes the principals and the resource-level permissions of a folder.\n\nTo specify no permissions, omit `Permissions` ." }, "sharingModel": { "$ref": "#/types/aws-native:quicksight:FolderSharingModel", @@ -75793,7 +76004,7 @@ }, "dbSnapshotIdentifier": { "type": "string", - "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``DeleteAutomatedBackups`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PerformanceInsightsKMSKeyId`` \n + ``PerformanceInsightsRetentionPeriod`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an encrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." + "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. 
If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an unencrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." }, "dbSubnetGroupName": { "type": "string", @@ -76135,7 +76346,7 @@ }, "dbSnapshotIdentifier": { "type": "string", - "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. 
If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``DeleteAutomatedBackups`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PerformanceInsightsKMSKeyId`` \n + ``PerformanceInsightsRetentionPeriod`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an encrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." + "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an unencrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." }, "dbSubnetGroupName": { "type": "string", @@ -76921,9 +77132,6 @@ "createOnly": [ "dbSubnetGroupName" ], - "writeOnly": [ - "subnetIds" - ], "irreversibleNames": { "dbSubnetGroupDescription": "DBSubnetGroupDescription", "dbSubnetGroupName": "DBSubnetGroupName" @@ -84472,7 +84680,7 @@ }, "version": { "type": "integer", - "description": "The version number." 
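The ``DBSnapshotIdentifier`` semantics documented above (create by restoring, then keep the same identifier on every later update) can be sketched with the Pulumi `aws-native` provider as below. This is a minimal illustration, not a complete instance definition: networking, storage, and other required settings are omitted, and the snapshot name is a placeholder.

```typescript
import * as aws_native from "@pulumi/aws-native";

// Minimal sketch: restore a DB instance from an existing manual snapshot.
// MasterUsername/MasterUserPassword are deliberately not set, since they are
// not valid when restoring from a snapshot (see the description above).
const restored = new aws_native.rds.DbInstance("restored", {
    dbInstanceClass: "db.t3.medium",
    dbSnapshotIdentifier: "my-manual-snapshot", // keep this same value on future updates
});
```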
+ "description": "The version of the image." } }, "required": [ @@ -89717,7 +89925,7 @@ }, "fifoQueue": { "type": "boolean", - "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*." + "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*." }, "fifoThroughputLimit": { "type": "string", @@ -89729,7 +89937,7 @@ }, "kmsMasterKeyId": { "type": "string", - "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (e.g. ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper" + "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (for example ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Security best practices for Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *Key Management Service Developer Guide*" }, "maximumMessageSize": { "type": "integer", @@ -89741,7 +89949,7 @@ }, "queueName": { "type": "string", - "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name." 
+ "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name." }, "receiveMessageWaitTimeSeconds": { "type": "integer", @@ -89749,11 +89957,11 @@ }, "redriveAllowPolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." 
}, "redrivePolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is received by a consumer of the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." }, "sqsManagedSseEnabled": { "type": "boolean", @@ -89790,7 +89998,7 @@ }, "fifoQueue": { "type": "boolean", - "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*.", + "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*.", "replaceOnChanges": true }, "fifoThroughputLimit": { @@ -89803,7 +90011,7 @@ }, "kmsMasterKeyId": { "type": "string", - "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (e.g. ``alias/aws/sqs``), key ARN, or key ID. 
For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper" + "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (for example ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Security best practices for Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *Key Management Service Developer Guide*" }, "maximumMessageSize": { "type": "integer", @@ -89815,7 +90023,7 @@ }, "queueName": { "type": "string", - "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", + "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. 
If you must replace the resource, specify a new name.", "replaceOnChanges": true }, "queueUrl": { @@ -89828,11 +90036,11 @@ }, "redriveAllowPolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." }, "redrivePolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. 
Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is received by a consumer of the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." }, "sqsManagedSseEnabled": { "type": "boolean", @@ -93319,6 +93527,188 @@ "tagsProperty": "tags", "tagsStyle": "keyValueArray" }, + "aws-native:transfer:Server": { + "cf": "AWS::Transfer::Server", + "inputs": { + "certificate": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when `Protocols` is set to `FTPS` .\n\nTo request a new public certificate, see [Request a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the *AWS Certificate Manager User Guide* .\n\nTo import an existing certificate into ACM, see [Importing certificates into ACM](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *AWS Certificate Manager User Guide* .\n\nTo request a private certificate to use FTPS through private IP addresses, see [Request a private certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) in the *AWS Certificate Manager User Guide* .\n\nCertificates with the following cryptographic algorithms and key sizes are supported:\n\n- 2048-bit RSA (RSA_2048)\n- 4096-bit RSA (RSA_4096)\n- Elliptic Prime Curve 256 bit (EC_prime256v1)\n- Elliptic Prime Curve 384 bit (EC_secp384r1)\n- Elliptic Prime Curve 521 bit (EC_secp521r1)\n\n\u003e The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer." + }, + "domain": { + "$ref": "#/types/aws-native:transfer:ServerDomain", + "description": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3." + }, + "endpointDetails": { + "$ref": "#/types/aws-native:transfer:ServerEndpointDetails", + "description": "The virtual private cloud (VPC) endpoint settings that are configured for your server. 
When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint." + }, + "endpointType": { + "$ref": "#/types/aws-native:transfer:ServerEndpointType", + "description": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n\u003e After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n\u003e \n\u003e For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n\u003e \n\u003e It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` ." + }, + "identityProviderDetails": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderDetails", + "description": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` ." + }, + "identityProviderType": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderType", + "description": "The mode of authentication for a server. The default value is `SERVICE_MANAGED` , which allows you to store and access user credentials within the AWS Transfer Family service.\n\nUse `AWS_DIRECTORY_SERVICE` to provide access to Active Directory groups in AWS Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in AWS using AD Connector. This option also requires you to provide a Directory ID by using the `IdentityProviderDetails` parameter.\n\nUse the `API_GATEWAY` value to integrate with an identity provider of your choosing. The `API_GATEWAY` setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the `IdentityProviderDetails` parameter.\n\nUse the `AWS_LAMBDA` value to directly use an AWS Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the `Function` parameter for the `IdentityProviderDetails` data type." 
+ }, + "loggingRole": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs." + }, + "postAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n\u003e The SFTP protocol does not support post-authentication display banners." + }, + "preAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:\n\n`This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.`" + }, + "protocolDetails": { + "$ref": "#/types/aws-native:transfer:ServerProtocolDetails", + "description": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "protocols": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerProtocol" + }, + "description": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. 
The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): used for transporting structured business-to-business data\n\n\u003e - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.\n\u003e - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n\u003e - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and domain must be Amazon S3. \n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "s3StorageOptions": { + "$ref": "#/types/aws-native:transfer:ServerS3StorageOptions", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + }, + "securityPolicyName": { + "type": "string", + "description": "Specifies the name of the security policy for the server." + }, + "structuredLogDestinations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:index:Tag" + }, + "description": "Key-value pairs that can be used to group and search for servers." + }, + "workflowDetails": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetails", + "description": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.\n\nIn addition to a workflow to execute when a file is uploaded completely, `WorkflowDetails` can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects." 
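Putting the `AWS::Transfer::Server` inputs above together, a minimal sketch with the Pulumi `aws-native` provider might look like the following. It assumes the enum-typed inputs accept plain string values and uses a placeholder logging-role ARN; FTPS, FTP, or AS2 would additionally require a VPC endpoint (and, for FTPS, an ACM certificate) as described above.

```typescript
import * as aws_native from "@pulumi/aws-native";

// Minimal sketch: a public SFTP endpoint with service-managed users.
const sftpServer = new aws_native.transfer.Server("sftpServer", {
    domain: "S3",
    endpointType: "PUBLIC",
    identityProviderType: "SERVICE_MANAGED",
    protocols: ["SFTP"],
    loggingRole: "arn:aws:iam::111122223333:role/transfer-logging-role", // placeholder
    tags: [{ key: "team", value: "data-exchange" }],
});

// The service-assigned server ID (for example s-01234567890abcdef).
export const serverId = sftpServer.serverId;
```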
+ } + }, + "outputs": { + "arn": { + "type": "string", + "description": "The Amazon Resource Name associated with the server, in the form `arn:aws:transfer:region: *account-id* :server/ *server-id* /` .\n\nAn example of a server ARN is: `arn:aws:transfer:us-east-1:123456789012:server/s-01234567890abcdef` ." + }, + "as2ServiceManagedEgressIpAddresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs. These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well." + }, + "certificate": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when `Protocols` is set to `FTPS` .\n\nTo request a new public certificate, see [Request a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the *AWS Certificate Manager User Guide* .\n\nTo import an existing certificate into ACM, see [Importing certificates into ACM](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *AWS Certificate Manager User Guide* .\n\nTo request a private certificate to use FTPS through private IP addresses, see [Request a private certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) in the *AWS Certificate Manager User Guide* .\n\nCertificates with the following cryptographic algorithms and key sizes are supported:\n\n- 2048-bit RSA (RSA_2048)\n- 4096-bit RSA (RSA_4096)\n- Elliptic Prime Curve 256 bit (EC_prime256v1)\n- Elliptic Prime Curve 384 bit (EC_secp384r1)\n- Elliptic Prime Curve 521 bit (EC_secp521r1)\n\n\u003e The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer." + }, + "domain": { + "$ref": "#/types/aws-native:transfer:ServerDomain", + "description": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.", + "replaceOnChanges": true + }, + "endpointDetails": { + "$ref": "#/types/aws-native:transfer:ServerEndpointDetails", + "description": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint." + }, + "endpointType": { + "$ref": "#/types/aws-native:transfer:ServerEndpointType", + "description": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n\u003e After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. 
If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n\u003e \n\u003e For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n\u003e \n\u003e It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` ." + }, + "identityProviderDetails": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderDetails", + "description": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` ." + }, + "identityProviderType": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderType", + "description": "The mode of authentication for a server. The default value is `SERVICE_MANAGED` , which allows you to store and access user credentials within the AWS Transfer Family service.\n\nUse `AWS_DIRECTORY_SERVICE` to provide access to Active Directory groups in AWS Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in AWS using AD Connector. This option also requires you to provide a Directory ID by using the `IdentityProviderDetails` parameter.\n\nUse the `API_GATEWAY` value to integrate with an identity provider of your choosing. The `API_GATEWAY` setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the `IdentityProviderDetails` parameter.\n\nUse the `AWS_LAMBDA` value to directly use an AWS Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the `Function` parameter for the `IdentityProviderDetails` data type.", + "replaceOnChanges": true + }, + "loggingRole": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs." + }, + "postAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n\u003e The SFTP protocol does not support post-authentication display banners." + }, + "preAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:\n\n`This system is for the use of authorized users only. 
Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.`" + }, + "protocolDetails": { + "$ref": "#/types/aws-native:transfer:ServerProtocolDetails", + "description": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "protocols": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerProtocol" + }, + "description": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): used for transporting structured business-to-business data\n\n\u003e - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.\n\u003e - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n\u003e - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and domain must be Amazon S3. \n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "s3StorageOptions": { + "$ref": "#/types/aws-native:transfer:ServerS3StorageOptions", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." 
+ }, + "securityPolicyName": { + "type": "string", + "description": "Specifies the name of the security policy for the server." + }, + "serverId": { + "type": "string", + "description": "The service-assigned ID of the server that is created.\n\nAn example `ServerId` is `s-01234567890abcdef` ." + }, + "structuredLogDestinations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:index:Tag" + }, + "description": "Key-value pairs that can be used to group and search for servers." + }, + "workflowDetails": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetails", + "description": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.\n\nIn addition to a workflow to execute when a file is uploaded completely, `WorkflowDetails` can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects." + } + }, + "createOnly": [ + "domain", + "identityProviderType" + ], + "writeOnly": [ + "identityProviderType" + ], + "irreversibleNames": { + "s3StorageOptions": "S3StorageOptions" + }, + "tagsProperty": "tags", + "tagsStyle": "keyValueArray" + }, "aws-native:transfer:Workflow": { "cf": "AWS::Transfer::Workflow", "inputs": { @@ -95056,6 +95446,137 @@ "webAclArn": "WebACLArn" } }, + "aws-native:wisdom:AiPrompt": { + "cf": "AWS::Wisdom::AIPrompt", + "inputs": { + "apiFormat": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptApiFormat", + "description": "The API format used for this AI Prompt." + }, + "assistantId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN." + }, + "description": { + "type": "string", + "description": "The description of the AI Prompt." + }, + "modelId": { + "type": "string", + "description": "The identifier of the model used for this AI Prompt. Model Ids supported are: `CLAUDE_3_HAIKU_20240307_V1` ." + }, + "name": { + "type": "string", + "description": "The name of the AI Prompt" + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The tags used to organize, track, or control access for this resource." + }, + "templateConfiguration": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateConfiguration", + "description": "The configuration of the prompt template for this AI Prompt." + }, + "templateType": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateType", + "description": "The type of the prompt template for this AI Prompt." + }, + "type": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptType", + "description": "The type of this AI Prompt." 
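For the `AWS::Wisdom::AIPrompt` inputs listed above, a hedged sketch through the Pulumi `aws-native` provider could look like this. The enum string values and the shape of `templateConfiguration` are assumptions based on the Amazon Q in Connect documentation rather than anything shown in this excerpt; only the model ID comes from the description above.

```typescript
import * as aws_native from "@pulumi/aws-native";

// Hypothetical sketch: a text answer-generation prompt for an existing assistant.
const answerPrompt = new aws_native.wisdom.AiPrompt("answerPrompt", {
    assistantId: "assistant-id-placeholder",
    apiFormat: "ANTHROPIC_CLAUDE_MESSAGES",   // assumed enum value
    modelId: "CLAUDE_3_HAIKU_20240307_V1",    // the model ID named in the description above
    templateType: "TEXT",                     // assumed enum value
    type: "ANSWER_GENERATION",                // assumed enum value
    templateConfiguration: {
        // Assumed shape: a free-form text prompt template.
        textFullAiPromptEditTemplateConfiguration: {
            text: "You are a helpful contact-center assistant...",
        },
    },
    tags: { team: "contact-center" },
});
```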
+ } + }, + "outputs": { + "aiPromptArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AI Prompt." + }, + "aiPromptId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect AI prompt." + }, + "apiFormat": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptApiFormat", + "description": "The API format used for this AI Prompt.", + "replaceOnChanges": true + }, + "assistantArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant." + }, + "assistantId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.", + "replaceOnChanges": true + }, + "description": { + "type": "string", + "description": "The description of the AI Prompt." + }, + "modelId": { + "type": "string", + "description": "The identifier of the model used for this AI Prompt. Model Ids supported are: `CLAUDE_3_HAIKU_20240307_V1` .", + "replaceOnChanges": true + }, + "name": { + "type": "string", + "description": "The name of the AI Prompt", + "replaceOnChanges": true + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The tags used to organize, track, or control access for this resource.", + "replaceOnChanges": true + }, + "templateConfiguration": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateConfiguration", + "description": "The configuration of the prompt template for this AI Prompt." + }, + "templateType": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateType", + "description": "The type of the prompt template for this AI Prompt.", + "replaceOnChanges": true + }, + "type": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptType", + "description": "The type of this AI Prompt.", + "replaceOnChanges": true + } + }, + "autoNamingSpec": { + "sdkName": "name", + "minLength": 1, + "maxLength": 255 + }, + "required": [ + "apiFormat", + "modelId", + "templateConfiguration", + "templateType", + "type" + ], + "createOnly": [ + "apiFormat", + "assistantId", + "modelId", + "name", + "tags", + "templateType", + "type" + ], + "irreversibleNames": { + "aiPromptArn": "AIPromptArn", + "aiPromptId": "AIPromptId" + }, + "tagsProperty": "tags", + "tagsStyle": "stringMap" + }, "aws-native:wisdom:Assistant": { "cf": "AWS::Wisdom::Assistant", "inputs": { @@ -100820,7 +101341,8 @@ "type": "object", "properties": { "maxPageSize": { - "type": "integer" + "type": "integer", + "description": "The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application. For transfers of OData records, the maximum page size is 3,000. For transfers of data that comes from an ODP provider, the maximum page size is 10,000." } } }, @@ -100828,7 +101350,8 @@ "type": "object", "properties": { "maxParallelism": { - "type": "integer" + "type": "integer", + "description": "The maximum number of processes that Amazon AppFlow runs at the same time when it retrieves your data from your SAP application." } } }, @@ -100840,10 +101363,12 @@ "description": "The object path specified in the SAPOData flow source." }, "paginationConfig": { - "$ref": "#/types/aws-native:appflow:FlowSapoDataPaginationConfig" + "$ref": "#/types/aws-native:appflow:FlowSapoDataPaginationConfig", + "description": "Sets the page size for each concurrent process that transfers OData records from your SAP instance." 
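The `maxPageSize` and `maxParallelism` settings documented above live inside the SAPOData source properties of an AppFlow flow. A fragment-only sketch of that nested shape follows; the object path is a placeholder and the surrounding flow definition is omitted.

```typescript
// Fragment sketch: the SAPOData-specific source properties of a flow, not a
// complete appflow.Flow definition.
const sapoDataSourceProperties = {
    objectPath: "placeholder/odata/object/path",
    paginationConfig: { maxPageSize: 3000 },  // OData maximum noted above
    parallelismConfig: { maxParallelism: 5 }, // concurrent extraction processes
};
```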
}, "parallelismConfig": { - "$ref": "#/types/aws-native:appflow:FlowSapoDataParallelismConfig" + "$ref": "#/types/aws-native:appflow:FlowSapoDataParallelismConfig", + "description": "Sets the number of concurrent processes that transfers OData records from your SAP instance." } } }, @@ -102987,6 +103512,176 @@ } } }, + "aws-native:appsync:DataSourceAuthorizationConfig": { + "type": "object", + "properties": { + "authorizationType": { + "type": "string", + "description": "The authorization type that the HTTP endpoint requires." + }, + "awsIamConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceAwsIamConfig", + "description": "The AWS Identity and Access Management settings." + } + } + }, + "aws-native:appsync:DataSourceAwsIamConfig": { + "type": "object", + "properties": { + "signingRegion": { + "type": "string", + "description": "The signing Region for AWS Identity and Access Management authorization." + }, + "signingServiceName": { + "type": "string", + "description": "The signing service name for AWS Identity and Access Management authorization." + } + } + }, + "aws-native:appsync:DataSourceDeltaSyncConfig": { + "type": "object", + "properties": { + "baseTableTtl": { + "type": "string", + "description": "The number of minutes that an Item is stored in the data source." + }, + "deltaSyncTableName": { + "type": "string", + "description": "The Delta Sync table name." + }, + "deltaSyncTableTtl": { + "type": "string", + "description": "The number of minutes that a Delta Sync log entry is stored in the Delta Sync table." + } + }, + "irreversibleNames": { + "baseTableTtl": "BaseTableTTL", + "deltaSyncTableTtl": "DeltaSyncTableTTL" + } + }, + "aws-native:appsync:DataSourceDynamoDbConfig": { + "type": "object", + "properties": { + "awsRegion": { + "type": "string", + "description": "The AWS Region." + }, + "deltaSyncConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceDeltaSyncConfig", + "description": "The DeltaSyncConfig for a versioned datasource." + }, + "tableName": { + "type": "string", + "description": "The table name." + }, + "useCallerCredentials": { + "type": "boolean", + "description": "Set to TRUE to use AWS Identity and Access Management with this data source." + }, + "versioned": { + "type": "boolean", + "description": "Set to TRUE to use Conflict Detection and Resolution with this data source." + } + } + }, + "aws-native:appsync:DataSourceElasticsearchConfig": { + "type": "object", + "properties": { + "awsRegion": { + "type": "string", + "description": "The AWS Region." + }, + "endpoint": { + "type": "string", + "description": "The endpoint." + } + } + }, + "aws-native:appsync:DataSourceEventBridgeConfig": { + "type": "object", + "properties": { + "eventBusArn": { + "type": "string", + "description": "ARN for the EventBridge bus." + } + } + }, + "aws-native:appsync:DataSourceHttpConfig": { + "type": "object", + "properties": { + "authorizationConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceAuthorizationConfig", + "description": "The authorization configuration." + }, + "endpoint": { + "type": "string", + "description": "The endpoint." + } + } + }, + "aws-native:appsync:DataSourceLambdaConfig": { + "type": "object", + "properties": { + "lambdaFunctionArn": { + "type": "string", + "description": "The ARN for the Lambda function." 
+ } + } + }, + "aws-native:appsync:DataSourceMetricsConfig": { + "type": "string" + }, + "aws-native:appsync:DataSourceOpenSearchServiceConfig": { + "type": "object", + "properties": { + "awsRegion": { + "type": "string", + "description": "The AWS Region." + }, + "endpoint": { + "type": "string", + "description": "The endpoint." + } + } + }, + "aws-native:appsync:DataSourceRdsHttpEndpointConfig": { + "type": "object", + "properties": { + "awsRegion": { + "type": "string", + "description": "AWS Region for RDS HTTP endpoint." + }, + "awsSecretStoreArn": { + "type": "string", + "description": "The ARN for database credentials stored in AWS Secrets Manager." + }, + "databaseName": { + "type": "string", + "description": "Logical database name." + }, + "dbClusterIdentifier": { + "type": "string", + "description": "Amazon RDS cluster Amazon Resource Name (ARN)." + }, + "schema": { + "type": "string", + "description": "Logical schema name." + } + } + }, + "aws-native:appsync:DataSourceRelationalDatabaseConfig": { + "type": "object", + "properties": { + "rdsHttpEndpointConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceRdsHttpEndpointConfig", + "description": "Information about the Amazon RDS resource." + }, + "relationalDatabaseSourceType": { + "type": "string", + "description": "The type of relational data source." + } + } + }, "aws-native:appsync:FunctionConfigurationAppSyncRuntime": { "type": "object", "properties": { @@ -113297,6 +113992,13 @@ "$ref": "#/types/aws-native:codepipeline:PipelineActionTypeId", "description": "Specifies the action type and the provider of the action." }, + "commands": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The shell commands to run with your compute action in CodePipeline." + }, "configuration": { "$ref": "pulumi.json#/Any", "description": "The action's configuration. These are key-value pairs that specify input values for an action." @@ -113323,6 +114025,13 @@ }, "description": "The name or ID of the result of the action declaration, such as a test or build artifact. While the field is not a required parameter, most actions have an action configuration that requires a specified quantity of output artifacts. To refer to the action configuration specification by action provider, see the [Action structure reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) in the *AWS CodePipeline User Guide* ." }, + "outputVariables": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of variables that are to be exported from the compute action." + }, "region": { "type": "string", "description": "The action declaration's AWS Region, such as us-east-1." @@ -113606,6 +114315,13 @@ "aws-native:codepipeline:PipelineOutputArtifact": { "type": "object", "properties": { + "files": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The files that you want to associate with the output artifact that will be exported from the compute action." + }, "name": { "type": "string", "description": "The name of the output of an artifact, such as \"My App\"." @@ -113997,7 +114713,7 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolRecoveryOption" }, - "description": "The list of `RecoveryOptionTypes` ." + "description": "The list of options and priorities for user message delivery in forgot-password operations. 
Sets or displays user pool preferences for email or SMS message priority, whether users should fall back to a second delivery method, and whether passwords should only be reset by administrators." } } }, @@ -114022,7 +114738,7 @@ }, "inviteMessageTemplate": { "$ref": "#/types/aws-native:cognito:UserPoolInviteMessageTemplate", - "description": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) ." + "description": "The template for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) ." }, "unusedAccountValidityDays": { "type": "integer", @@ -114043,7 +114759,7 @@ "properties": { "applicationArn": { "type": "string", - "description": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project for integration with the chosen user pool client. Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares." + "description": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project that you want to connect to your user pool app client. Amazon Cognito publishes events to the Amazon Pinpoint project that `ApplicationArn` declares. You can also configure your application to pass an endpoint ID in the `AnalyticsMetadata` parameter of sign-in operations. The endpoint ID is information about the destination for push notifications" }, "applicationId": { "type": "string", @@ -114085,11 +114801,11 @@ "properties": { "lambdaArn": { "type": "string", - "description": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users." + "description": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger." }, "lambdaVersion": { "type": "string", - "description": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information that Amazon Cognito passes to your custom email sender AWS Lambda function. The only supported value is `V1_0` ." + "description": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function." } } }, @@ -114098,11 +114814,11 @@ "properties": { "lambdaArn": { "type": "string", - "description": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send SMS notifications to users." + "description": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger." }, "lambdaVersion": { "type": "string", - "description": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS sender Lambda function. The only supported value is `V1_0` ." + "description": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function." 
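The custom email and custom SMS sender triggers documented above share the same two-field shape, so a single TypeScript sketch covers both. The interface name and the ARNs are placeholders; only the two fields come from the schema.

// Illustrative shape shared by UserPoolCustomEmailSender and UserPoolCustomSmsSender.
interface CustomSenderLambdaTrigger {
  lambdaArn: string;     // ARN of the Lambda function assigned to the trigger
  lambdaVersion: string; // trigger request version; must be "V1_0" for custom sender functions
}

const customEmailSender: CustomSenderLambdaTrigger = {
  lambdaArn: "arn:aws:lambda:us-east-1:111122223333:function:my-custom-email-sender",
  lambdaVersion: "V1_0",
};

const customSmsSender: CustomSenderLambdaTrigger = {
  lambdaArn: "arn:aws:lambda:us-east-1:111122223333:function:my-custom-sms-sender",
  lambdaVersion: "V1_0",
};

Both triggers rely on the KMS key referenced by `kmsKeyId` (see below) to encrypt the codes and temporary passwords that Amazon Cognito passes to them.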
} } }, @@ -114133,7 +114849,7 @@ "properties": { "configurationSet": { "type": "string", - "description": "The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- Event publishing – Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.\n- IP pool management – When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets." + "description": "The set of configuration rules that can be applied to emails sent using Amazon Simple Email Service. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- **Event publishing** - Amazon Simple Email Service can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as Amazon SNS and Amazon CloudWatch.\n- **IP pool management** - When leasing dedicated IP addresses with Amazon Simple Email Service, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets." }, "emailSendingAccount": { "type": "string", @@ -114141,7 +114857,7 @@ }, "from": { "type": "string", - "description": "Identifies either the sender's email address or the sender's name with their email address. For example, `testuser@example.com` or `Test User \u003ctestuser@example.com\u003e` . This address appears before the body of the email." + "description": "Either the sender’s email address or the sender’s name with their email address. For example, `testuser@example.com` or `Test User \u003ctestuser@example.com\u003e` . This address appears before the body of the email." }, "replyToEmailAddress": { "type": "string", @@ -114182,7 +114898,7 @@ }, "customEmailSender": { "$ref": "#/types/aws-native:cognito:UserPoolCustomEmailSender", - "description": "A custom email sender AWS Lambda trigger." + "description": "The configuration of a custom email sender Lambda trigger. This trigger routes all email notifications from a user pool to a Lambda function that delivers the message using custom logic." }, "customMessage": { "type": "string", @@ -114190,7 +114906,7 @@ }, "customSmsSender": { "$ref": "#/types/aws-native:cognito:UserPoolCustomSmsSender", - "description": "A custom SMS sender AWS Lambda trigger." + "description": "The configuration of a custom SMS sender Lambda trigger. This trigger routes all SMS notifications from a user pool to a Lambda function that delivers the message using custom logic." }, "defineAuthChallenge": { "type": "string", @@ -114198,7 +114914,7 @@ }, "kmsKeyId": { "type": "string", - "description": "The Amazon Resource Name of a AWS Key Management Service ( AWS KMS ) key. 
Amazon Cognito uses the key to encrypt codes and temporary passwords sent to `CustomEmailSender` and `CustomSMSSender` ." + "description": "The ARN of an [KMS key](https://docs.aws.amazon.com//kms/latest/developerguide/concepts.html#master_keys) . Amazon Cognito uses the key to encrypt codes and temporary passwords sent to custom sender Lambda triggers." }, "postAuthentication": { "type": "string", @@ -114311,11 +115027,11 @@ "properties": { "name": { "type": "string", - "description": "Specifies the recovery method for a user." + "description": "The recovery method that this object sets a recovery option for." }, "priority": { "type": "integer", - "description": "A positive integer specifying priority of a method with 1 being the highest priority." + "description": "Your priority preference for using the specified attribute in account recovery. The highest priority is `1` ." } } }, @@ -114478,7 +115194,7 @@ }, "developerOnlyAttribute": { "type": "boolean", - "description": "\u003e We recommend that you use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users will not be able to modify this attribute using their access token." + "description": "\u003e You should use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users won't be able to modify this attribute using their access token. For example, `DeveloperOnlyAttribute` can be modified using AdminUpdateUserAttributes but can't be updated using UpdateUserAttributes." }, "mutable": { "type": "boolean", @@ -114507,7 +115223,7 @@ "properties": { "externalId": { "type": "string", - "description": "The external ID is a value. We recommend you use `ExternalId` to add security to your IAM role, which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , the Cognito User Pool uses it when attempting to assume your IAM role. You can also set your roles trust policy to require the `ExternalID` . If you use the Cognito Management Console to create a role for SMS MFA, Cognito creates a role with the required permissions and a trust policy that uses `ExternalId` ." + "description": "The external ID provides additional security for your IAM role. You can use an `ExternalId` with the IAM role that you use with Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , your Amazon Cognito user pool includes it in the request to assume your IAM role. You can configure the role trust policy to require that Amazon Cognito, and any principal, provide the `ExternalID` . 
If you use the Amazon Cognito Management Console to create a role for SMS multi-factor authentication (MFA), Amazon Cognito creates a role with the required permissions and a trust policy that demonstrates use of the `ExternalId` .\n\nFor more information about the `ExternalId` of a role, see [How to use an external ID when granting access to your AWS resources to a third party](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) ." }, "snsCallerArn": { "type": "string", @@ -126213,13 +126929,16 @@ "type": "object", "properties": { "logEnabled": { - "type": "boolean" + "type": "boolean", + "description": "Enable or disable VPN tunnel logging feature. Default value is `False` .\n\nValid values: `True` | `False`" }, "logGroupArn": { - "type": "string" + "type": "string", + "description": "The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to." }, "logOutputFormat": { - "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecificationLogOutputFormat" + "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecificationLogOutputFormat", + "description": "Set log format. Default format is `json` .\n\nValid values: `json` | `text`" } } }, @@ -126230,7 +126949,8 @@ "type": "object", "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionIkeVersionsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionIkeVersionsRequestListValueValue", + "description": "The IKE version." } } }, @@ -126241,7 +126961,8 @@ "type": "object", "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValueValue", + "description": "The value for the encryption algorithm." } } }, @@ -126252,7 +126973,8 @@ "type": "object", "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValueValue", + "description": "The value for the integrity algorithm." } } }, @@ -126263,7 +126985,8 @@ "type": "object", "properties": { "value": { - "type": "integer" + "type": "integer", + "description": "The Diffie-Hellmann group number." } } }, @@ -126271,7 +126994,8 @@ "type": "object", "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValueValue", + "description": "The encryption algorithm." } } }, @@ -126282,7 +127006,8 @@ "type": "object", "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValueValue", + "description": "The integrity algorithm." } } }, @@ -126293,7 +127018,8 @@ "type": "object", "properties": { "value": { - "type": "integer" + "type": "integer", + "description": "The Diffie-Hellmann group number." } } }, @@ -126314,7 +127040,8 @@ "type": "object", "properties": { "cloudwatchLogOptions": { - "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecification" + "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecification", + "description": "Options for sending VPN tunnel logs to CloudWatch." 
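For the VPN tunnel logging options above, here is a minimal TypeScript sketch of the CloudWatch log options shape, with the documented defaults noted in comments; the type name, the union type for the output format, and the log group ARN are illustrative assumptions.

// Illustrative mirror of aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecification.
interface CloudwatchLogOptions {
  logEnabled?: boolean;              // enable or disable VPN tunnel logging; default false
  logGroupArn?: string;              // ARN of the CloudWatch log group to send logs to
  logOutputFormat?: "json" | "text"; // log format; default "json"
}

const tunnelLogOptions: CloudwatchLogOptions = {
  logEnabled: true,
  logGroupArn: "arn:aws:logs:us-east-1:111122223333:log-group:vpn-tunnel-logs:*",
  logOutputFormat: "json",
};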
} } }, @@ -126322,87 +127049,105 @@ "type": "object", "properties": { "dpdTimeoutAction": { - "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationDpdTimeoutAction" + "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationDpdTimeoutAction", + "description": "The action to take after DPD timeout occurs. Specify `restart` to restart the IKE initiation. Specify `clear` to end the IKE session.\n\nValid Values: `clear` | `none` | `restart`\n\nDefault: `clear`" }, "dpdTimeoutSeconds": { - "type": "integer" + "type": "integer", + "description": "The number of seconds after which a DPD timeout occurs.\n\nConstraints: A value greater than or equal to 30.\n\nDefault: `30`" }, "enableTunnelLifecycleControl": { - "type": "boolean" + "type": "boolean", + "description": "Turn on or off tunnel endpoint lifecycle control feature." }, "ikeVersions": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionIkeVersionsRequestListValue" - } + }, + "description": "The IKE versions that are permitted for the VPN tunnel.\n\nValid values: `ikev1` | `ikev2`" }, "logOptions": { - "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelLogOptionsSpecification" + "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelLogOptionsSpecification", + "description": "Options for logging VPN tunnel activity." }, "phase1EncryptionAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValue" - } + }, + "description": "One or more encryption algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `AES128` | `AES256` | `AES128-GCM-16` | `AES256-GCM-16`" }, "phase1IntegrityAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValue" - } + }, + "description": "One or more integrity algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `SHA1` | `SHA2-256` | `SHA2-384` | `SHA2-512`" }, "phase1LifetimeSeconds": { - "type": "integer" + "type": "integer", + "description": "The lifetime for phase 1 of the IKE negotiation, in seconds.\n\nConstraints: A value between 900 and 28,800.\n\nDefault: `28800`" }, "phase1dhGroupNumbers": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1dhGroupNumbersRequestListValue" - } + }, + "description": "One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `2` | `14` | `15` | `16` | `17` | `18` | `19` | `20` | `21` | `22` | `23` | `24`" }, "phase2EncryptionAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValue" - } + }, + "description": "One or more encryption algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `AES128` | `AES256` | `AES128-GCM-16` | `AES256-GCM-16`" }, "phase2IntegrityAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValue" - } + }, + "description": "One or more integrity algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `SHA1` | `SHA2-256` | `SHA2-384` | `SHA2-512`" }, "phase2LifetimeSeconds": { - "type": "integer" + "type": "integer", + "description": "The lifetime for phase 2 of the IKE negotiation, in seconds.\n\nConstraints: A value between 900 and 3,600. 
The value must be less than the value for `Phase1LifetimeSeconds` .\n\nDefault: `3600`" }, "phase2dhGroupNumbers": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2dhGroupNumbersRequestListValue" - } + }, + "description": "One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `2` | `5` | `14` | `15` | `16` | `17` | `18` | `19` | `20` | `21` | `22` | `23` | `24`" }, "preSharedKey": { "type": "string", "description": "The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.\n Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0)." }, "rekeyFuzzPercentage": { - "type": "integer" + "type": "integer", + "description": "The percentage of the rekey window (determined by `RekeyMarginTimeSeconds` ) during which the rekey time is randomly selected.\n\nConstraints: A value between 0 and 100.\n\nDefault: `100`" }, "rekeyMarginTimeSeconds": { - "type": "integer" + "type": "integer", + "description": "The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for `RekeyFuzzPercentage` .\n\nConstraints: A value between 60 and half of `Phase2LifetimeSeconds` .\n\nDefault: `270`" }, "replayWindowSize": { - "type": "integer" + "type": "integer", + "description": "The number of packets in an IKE replay window.\n\nConstraints: A value between 64 and 2048.\n\nDefault: `1024`" }, "startupAction": { - "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationStartupAction" + "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationStartupAction", + "description": "The action to take when the establishing the tunnel for the VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation.\n\nValid Values: `add` | `start`\n\nDefault: `add`" }, "tunnelInsideCidr": { "type": "string", "description": "The range of inside IP addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same virtual private gateway. \n Constraints: A size /30 CIDR block from the ``169.254.0.0/16`` range. The following CIDR blocks are reserved and cannot be used:\n + ``169.254.0.0/30`` \n + ``169.254.1.0/30`` \n + ``169.254.2.0/30`` \n + ``169.254.3.0/30`` \n + ``169.254.4.0/30`` \n + ``169.254.5.0/30`` \n + ``169.254.169.252/30``" }, "tunnelInsideIpv6Cidr": { - "type": "string" + "type": "string", + "description": "The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same transit gateway.\n\nConstraints: A size /126 CIDR block from the local `fd00::/8` range." } }, "irreversibleNames": { @@ -128099,13 +128844,16 @@ "type": "object", "properties": { "base": { - "type": "integer" + "type": "integer", + "description": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used." 
}, "capacityProvider": { - "type": "string" + "type": "string", + "description": "The short name of the capacity provider." }, "weight": { - "type": "integer" + "type": "integer", + "description": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* ." } } }, @@ -133244,6 +133992,9 @@ "aws-native:gamelift:ContainerGroupDefinitionSchedulingStrategy": { "type": "string" }, + "aws-native:gamelift:ContainerGroupDefinitionStatus": { + "type": "string" + }, "aws-native:gamelift:ContainerGroupDefinitionTag": { "type": "object", "properties": { @@ -147682,11 +148433,11 @@ "properties": { "key": { "type": "string", - "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + "description": "The key for this tag." }, "value": { "type": "string", - "description": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + "description": "The value for this tag." } } }, @@ -163173,7 +163924,8 @@ "description": "The configuration that selects all options." }, "sourceColumn": { - "$ref": "#/types/aws-native:quicksight:AnalysisColumnIdentifier" + "$ref": "#/types/aws-native:quicksight:AnalysisColumnIdentifier", + "description": "A column of a data set." }, "sourceField": { "type": "string", @@ -173121,7 +173873,8 @@ "description": "The configuration that selects all options." }, "sourceColumn": { - "$ref": "#/types/aws-native:quicksight:DashboardColumnIdentifier" + "$ref": "#/types/aws-native:quicksight:DashboardColumnIdentifier", + "description": "A column of a data set." }, "sourceField": { "type": "string", @@ -181641,7 +182394,8 @@ "description": "An operation that filters rows based on some condition." }, "overrideDatasetParameterOperation": { - "$ref": "#/types/aws-native:quicksight:DataSetOverrideDatasetParameterOperation" + "$ref": "#/types/aws-native:quicksight:DataSetOverrideDatasetParameterOperation", + "description": "A transform operation that overrides the dataset parameter values that are defined in another dataset." 
}, "projectOperation": { "$ref": "#/types/aws-native:quicksight:DataSetProjectOperation", @@ -184858,7 +185612,8 @@ "description": "The configuration that selects all options." }, "sourceColumn": { - "$ref": "#/types/aws-native:quicksight:TemplateColumnIdentifier" + "$ref": "#/types/aws-native:quicksight:TemplateColumnIdentifier", + "description": "A column of a data set." }, "sourceField": { "type": "string", @@ -192399,7 +193154,8 @@ "description": "Display options related to sheets." }, "typography": { - "$ref": "#/types/aws-native:quicksight:ThemeTypography" + "$ref": "#/types/aws-native:quicksight:ThemeTypography", + "description": "Determines the typography options." }, "uiColorPalette": { "$ref": "#/types/aws-native:quicksight:ThemeUiColorPalette", @@ -207772,7 +208528,7 @@ "additionalProperties": { "type": "string" }, - "description": "The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. The following tables outline the parameters for each configuration type.\n\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. 
The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomerPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **DevOps Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. 
A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. 
You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target. 
You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. 
The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A boolean value that determines whether the selected patch baselines are all AWS provided.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) A boolean value that determines whether instances are rebooted after patches are installed. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputS3BucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want AWS Config to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. 
You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to." + "description": "The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. The following tables outline the parameters for each configuration type.\n\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. 
The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomerPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use. You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **DevOps Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. 
A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. 
You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether inventory is collected from the target instances every 30 minutes. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target. 
You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. 
The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A boolean value that determines whether the selected patch baselines are all AWS provided.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should install available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) Determines whether instances are rebooted after patches are installed. Valid values are `RebootIfNeeded` and `NoReboot` .\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputS3BucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want AWS Config to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. 
You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to." }, "type": { "type": "string", @@ -208851,6 +209607,170 @@ "aws-native:transfer:ProfileType": { "type": "string" }, + "aws-native:transfer:ServerAs2Transport": { + "type": "string" + }, + "aws-native:transfer:ServerDirectoryListingOptimization": { + "type": "string" + }, + "aws-native:transfer:ServerDomain": { + "type": "string" + }, + "aws-native:transfer:ServerEndpointDetails": { + "type": "object", + "properties": { + "addressAllocationIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of address allocation IDs that are required to attach an Elastic IP address to your server's endpoint.\n\nAn address allocation ID corresponds to the allocation ID of an Elastic IP address. This value can be retrieved from the `allocationId` field from the Amazon EC2 [Address](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Address.html) data type. One way to retrieve this value is by calling the EC2 [DescribeAddresses](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAddresses.html) API.\n\nThis parameter is optional. Set this parameter if you want to make your VPC endpoint public-facing. For details, see [Create an internet-facing endpoint for your server](https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#create-internet-facing-endpoint) .\n\n\u003e This property can only be set as follows:\n\u003e \n\u003e - `EndpointType` must be set to `VPC`\n\u003e - The Transfer Family server must be offline.\n\u003e - You cannot set this parameter for Transfer Family servers that use the FTP protocol.\n\u003e - The server must already have `SubnetIds` populated ( `SubnetIds` and `AddressAllocationIds` cannot be updated simultaneously).\n\u003e - `AddressAllocationIds` can't contain duplicates, and must be equal in length to `SubnetIds` . For example, if you have three subnet IDs, you must also specify three address allocation IDs.\n\u003e - Call the `UpdateServer` API to set or change this parameter." + }, + "securityGroupIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of security groups IDs that are available to attach to your server's endpoint.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC` .\n\u003e \n\u003e You can edit the `SecurityGroupIds` property in the [UpdateServer](https://docs.aws.amazon.com/transfer/latest/userguide/API_UpdateServer.html) API only if you are changing the `EndpointType` from `PUBLIC` or `VPC_ENDPOINT` to `VPC` . 
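The Quick Setup configuration definitions documented above are plain string-to-string parameter maps. A minimal sketch of how the DevOps Guru definition might be expressed from Pulumi TypeScript follows; the `awsnative.ssmquicksetup.ConfigurationManager` path, the exact type string, and all OU/Region values are assumptions rather than anything this diff confirms.

```typescript
import * as awsnative from "@pulumi/aws-native";

// Sketch only: resource path, type string, and all IDs are assumptions/placeholders.
// Parameter keys and string-encoded values follow the documentation excerpted above.
const devopsGuruSetup = new awsnative.ssmquicksetup.ConfigurationManager("devopsGuruSetup", {
    configurationDefinitions: [{
        type: "AWSQuickSetupType-DevOpsGuru", // assumed spelling of the Quick Setup type
        parameters: {
            AnalyseAllResources: "true",
            EnableSnsNotifications: "true",
            EnableSsmOpsItems: "true",
            RemediationSchedule: "rate(30 days)",
            TargetOrganizationalUnits: "ou-abcd-1example1,ou-abcd-2example2", // placeholder OUs
            TargetRegions: "us-east-1,us-west-2",
        },
    }],
});
```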
To change security groups associated with your server's VPC endpoint after creation, use the Amazon EC2 [ModifyVpcEndpoint](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyVpcEndpoint.html) API." + }, + "subnetIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of subnet IDs that are required to host your server endpoint in your VPC.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC` ." + }, + "vpcEndpointId": { + "type": "string", + "description": "The ID of the VPC endpoint.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC_ENDPOINT` ." + }, + "vpcId": { + "type": "string", + "description": "The VPC ID of the virtual private cloud in which the server's endpoint will be hosted.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC` ." + } + } + }, + "aws-native:transfer:ServerEndpointType": { + "type": "string" + }, + "aws-native:transfer:ServerIdentityProviderDetails": { + "type": "object", + "properties": { + "directoryId": { + "type": "string", + "description": "The identifier of the AWS Directory Service directory that you want to use as your identity provider." + }, + "function": { + "type": "string", + "description": "The ARN for a Lambda function to use for the Identity provider." + }, + "invocationRole": { + "type": "string", + "description": "This parameter is only applicable if your `IdentityProviderType` is `API_GATEWAY` . Provides the type of `InvocationRole` used to authenticate the user account." + }, + "sftpAuthenticationMethods": { + "$ref": "#/types/aws-native:transfer:ServerSftpAuthenticationMethods", + "description": "For SFTP-enabled servers, and for custom identity providers *only* , you can specify whether to authenticate using a password, SSH key pair, or both.\n\n- `PASSWORD` - users must provide their password to connect.\n- `PUBLIC_KEY` - users must provide their private key to connect.\n- `PUBLIC_KEY_OR_PASSWORD` - users can authenticate with either their password or their key. This is the default value.\n- `PUBLIC_KEY_AND_PASSWORD` - users must provide both their private key and their password to connect. The server checks the key first, and then if the key is valid, the system prompts for a password. If the private key provided does not match the public key that is stored, authentication fails." + }, + "url": { + "type": "string", + "description": "Provides the location of the service endpoint used to authenticate users." + } + } + }, + "aws-native:transfer:ServerIdentityProviderType": { + "type": "string" + }, + "aws-native:transfer:ServerProtocol": { + "type": "string" + }, + "aws-native:transfer:ServerProtocolDetails": { + "type": "object", + "properties": { + "as2Transports": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerAs2Transport" + }, + "description": "List of `As2Transport` objects." + }, + "passiveIp": { + "type": "string", + "description": "Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. For example:\n\n`aws transfer update-server --protocol-details PassiveIp=0.0.0.0`\n\nReplace `0.0.0.0` in the example above with the actual IP address you want to use.\n\n\u003e If you change the `PassiveIp` value, you must stop and then restart your Transfer Family server for the change to take effect. 
For details on using passive mode (PASV) in a NAT environment, see [Configuring your FTPS server behind a firewall or NAT with AWS Transfer Family](https://docs.aws.amazon.com/storage/configuring-your-ftps-server-behind-a-firewall-or-nat-with-aws-transfer-family/) . \n\n*Special values*\n\nThe `AUTO` and `0.0.0.0` are special values for the `PassiveIp` parameter. The value `PassiveIp=AUTO` is assigned by default to FTP and FTPS type servers. In this case, the server automatically responds with one of the endpoint IPs within the PASV response. `PassiveIp=0.0.0.0` has a more unique application for its usage. For example, if you have a High Availability (HA) Network Load Balancer (NLB) environment, where you have 3 subnets, you can only specify a single IP address using the `PassiveIp` parameter. This reduces the effectiveness of having High Availability. In this case, you can specify `PassiveIp=0.0.0.0` . This tells the client to use the same IP address as the Control connection and utilize all AZs for their connections. Note, however, that not all FTP clients support the `PassiveIp=0.0.0.0` response. FileZilla and WinSCP do support it. If you are using other clients, check to see if your client supports the `PassiveIp=0.0.0.0` response." + }, + "setStatOption": { + "$ref": "#/types/aws-native:transfer:ServerSetStatOption", + "description": "Use the `SetStatOption` to ignore the error that is generated when the client attempts to use `SETSTAT` on a file you are uploading to an S3 bucket.\n\nSome SFTP file transfer clients can attempt to change the attributes of remote files, including timestamp and permissions, using commands, such as `SETSTAT` when uploading the file. However, these commands are not compatible with object storage systems, such as Amazon S3. Due to this incompatibility, file uploads from these clients can result in errors even when the file is otherwise successfully uploaded.\n\nSet the value to `ENABLE_NO_OP` to have the Transfer Family server ignore the `SETSTAT` command, and upload files without needing to make any changes to your SFTP client. While the `SetStatOption` `ENABLE_NO_OP` setting ignores the error, it does generate a log entry in Amazon CloudWatch Logs, so you can determine when the client is making a `SETSTAT` call.\n\n\u003e If you want to preserve the original timestamp for your file, and modify other file attributes using `SETSTAT` , you can use Amazon EFS as backend storage with Transfer Family." + }, + "tlsSessionResumptionMode": { + "$ref": "#/types/aws-native:transfer:ServerTlsSessionResumptionMode", + "description": "A property used with Transfer Family servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. `TlsSessionResumptionMode` determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during `CreateServer` and `UpdateServer` calls. If a `TlsSessionResumptionMode` value is not specified during `CreateServer` , it is set to `ENFORCED` by default.\n\n- `DISABLED` : the server does not process TLS session resumption client requests and creates a new TLS session for each request.\n- `ENABLED` : the server processes and accepts clients that are performing TLS session resumption. 
The server doesn't reject client data connections that do not perform the TLS session resumption client processing.\n- `ENFORCED` : the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. Before you set the value to `ENFORCED` , test your clients.\n\n\u003e Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the `ENFORCED` value, you need to test your clients." + } + } + }, + "aws-native:transfer:ServerS3StorageOptions": { + "type": "object", + "properties": { + "directoryListingOptimization": { + "$ref": "#/types/aws-native:transfer:ServerDirectoryListingOptimization", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + } + } + }, + "aws-native:transfer:ServerSetStatOption": { + "type": "string" + }, + "aws-native:transfer:ServerSftpAuthenticationMethods": { + "type": "string" + }, + "aws-native:transfer:ServerTag": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The name assigned to the tag that you create." + }, + "value": { + "type": "string", + "description": "Contains one or more values that you assigned to the key name you create." + } + } + }, + "aws-native:transfer:ServerTlsSessionResumptionMode": { + "type": "string" + }, + "aws-native:transfer:ServerWorkflowDetail": { + "type": "object", + "properties": { + "executionRole": { + "type": "string", + "description": "Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources" + }, + "workflowId": { + "type": "string", + "description": "A unique identifier for the workflow." + } + } + }, + "aws-native:transfer:ServerWorkflowDetails": { + "type": "object", + "properties": { + "onPartialUpload": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetail" + }, + "description": "A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload.\n\nA *partial upload* occurs when a file is open when the session disconnects.\n\n\u003e `OnPartialUpload` can contain a maximum of one `WorkflowDetail` object." + }, + "onUpload": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetail" + }, + "description": "A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.\n\nTo remove an associated workflow from a server, you can provide an empty `OnUpload` object, as in the following example.\n\n`aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'`\n\n\u003e `OnUpload` can contain a maximum of one `WorkflowDetail` object." 
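The `aws-native:transfer:Server*` types above appear only as schema fragments, so here is a minimal, non-authoritative sketch of how they might compose in Pulumi TypeScript. The `awsnative.transfer.Server` resource path, its top-level property names, and every ID/ARN are assumptions; only the nested shapes mirror the schema above.

```typescript
import * as awsnative from "@pulumi/aws-native";

// Sketch only: all IDs/ARNs are placeholders; nested shapes follow the
// ServerEndpointDetails / ServerProtocolDetails / ServerWorkflowDetails types above.
const sftpServer = new awsnative.transfer.Server("sftpServer", {
    endpointType: "VPC",
    endpointDetails: {
        vpcId: "vpc-0123456789abcdef0",             // placeholder
        subnetIds: ["subnet-0123456789abcdef0"],    // placeholder
        securityGroupIds: ["sg-0123456789abcdef0"], // placeholder
    },
    protocols: ["SFTP"],
    protocolDetails: {
        // Ignore SETSTAT from SFTP clients when the backing store is Amazon S3.
        setStatOption: "ENABLE_NO_OP",
    },
    workflowDetails: {
        // At most one WorkflowDetail per trigger, per the description above.
        onUpload: [{
            executionRole: "arn:aws:iam::111111111111:role/transfer-workflow-role", // placeholder
            workflowId: "w-0123456789abcdef0",                                       // placeholder
        }],
    },
});

export const serverArn = sftpServer.arn; // `arn` is the identifier used by getServer below
```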
+ } + } + }, "aws-native:transfer:SftpConfigProperties": { "type": "object", "properties": { @@ -212310,6 +213230,18 @@ } } }, + "aws-native:wisdom:AiPromptAiPromptApiFormat": { + "type": "string" + }, + "aws-native:wisdom:AiPromptAiPromptTemplateConfiguration": { + "type": "object" + }, + "aws-native:wisdom:AiPromptAiPromptTemplateType": { + "type": "string" + }, + "aws-native:wisdom:AiPromptAiPromptType": { + "type": "string" + }, "aws-native:wisdom:AssistantAssociationAssociationData": { "type": "object", "properties": { @@ -213386,6 +214318,12 @@ "name" ] }, + "aws-native:appsync:getDataSource": { + "cf": "AWS::AppSync::DataSource", + "ids": [ + "dataSourceArn" + ] + }, "aws-native:appsync:getDomainName": { "cf": "AWS::AppSync::DomainName", "ids": [ @@ -219005,6 +219943,12 @@ "profileId" ] }, + "aws-native:transfer:getServer": { + "cf": "AWS::Transfer::Server", + "ids": [ + "arn" + ] + }, "aws-native:transfer:getWorkflow": { "cf": "AWS::Transfer::Workflow", "ids": [ @@ -219142,6 +220086,13 @@ "scope" ] }, + "aws-native:wisdom:getAiPrompt": { + "cf": "AWS::Wisdom::AIPrompt", + "ids": [ + "aiPromptId", + "assistantId" + ] + }, "aws-native:wisdom:getAssistant": { "cf": "AWS::Wisdom::Assistant", "ids": [ diff --git a/provider/cmd/pulumi-resource-aws-native/schema.json b/provider/cmd/pulumi-resource-aws-native/schema.json index 7c55876f15..1285772e95 100644 --- a/provider/cmd/pulumi-resource-aws-native/schema.json +++ b/provider/cmd/pulumi-resource-aws-native/schema.json @@ -6444,7 +6444,8 @@ "description": "SAP Source connector page size", "properties": { "maxPageSize": { - "type": "integer" + "type": "integer", + "description": "The maximum number of records that Amazon AppFlow receives in each page of the response from your SAP application. For transfers of OData records, the maximum page size is 3,000. For transfers of data that comes from an ODP provider, the maximum page size is 10,000." } }, "type": "object", @@ -6456,7 +6457,8 @@ "description": "SAP Source connector parallelism factor", "properties": { "maxParallelism": { - "type": "integer" + "type": "integer", + "description": "The maximum number of processes that Amazon AppFlow runs at the same time when it retrieves your data from your SAP application." } }, "type": "object", @@ -6471,10 +6473,12 @@ "description": "The object path specified in the SAPOData flow source." }, "paginationConfig": { - "$ref": "#/types/aws-native:appflow:FlowSapoDataPaginationConfig" + "$ref": "#/types/aws-native:appflow:FlowSapoDataPaginationConfig", + "description": "Sets the page size for each concurrent process that transfers OData records from your SAP instance." }, "parallelismConfig": { - "$ref": "#/types/aws-native:appflow:FlowSapoDataParallelismConfig" + "$ref": "#/types/aws-native:appflow:FlowSapoDataParallelismConfig", + "description": "Sets the number of concurrent processes that transfers OData records from your SAP instance." } }, "type": "object", @@ -9756,6 +9760,220 @@ }, "type": "object" }, + "aws-native:appsync:DataSourceAuthorizationConfig": { + "properties": { + "authorizationType": { + "type": "string", + "description": "The authorization type that the HTTP endpoint requires." + }, + "awsIamConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceAwsIamConfig", + "description": "The AWS Identity and Access Management settings." 
+ } + }, + "type": "object", + "required": [ + "authorizationType" + ] + }, + "aws-native:appsync:DataSourceAwsIamConfig": { + "properties": { + "signingRegion": { + "type": "string", + "description": "The signing Region for AWS Identity and Access Management authorization." + }, + "signingServiceName": { + "type": "string", + "description": "The signing service name for AWS Identity and Access Management authorization." + } + }, + "type": "object" + }, + "aws-native:appsync:DataSourceDeltaSyncConfig": { + "properties": { + "baseTableTtl": { + "type": "string", + "description": "The number of minutes that an Item is stored in the data source." + }, + "deltaSyncTableName": { + "type": "string", + "description": "The Delta Sync table name." + }, + "deltaSyncTableTtl": { + "type": "string", + "description": "The number of minutes that a Delta Sync log entry is stored in the Delta Sync table." + } + }, + "type": "object", + "required": [ + "baseTableTtl", + "deltaSyncTableName", + "deltaSyncTableTtl" + ] + }, + "aws-native:appsync:DataSourceDynamoDbConfig": { + "properties": { + "awsRegion": { + "type": "string", + "description": "The AWS Region." + }, + "deltaSyncConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceDeltaSyncConfig", + "description": "The DeltaSyncConfig for a versioned datasource." + }, + "tableName": { + "type": "string", + "description": "The table name." + }, + "useCallerCredentials": { + "type": "boolean", + "description": "Set to TRUE to use AWS Identity and Access Management with this data source." + }, + "versioned": { + "type": "boolean", + "description": "Set to TRUE to use Conflict Detection and Resolution with this data source." + } + }, + "type": "object", + "required": [ + "awsRegion", + "tableName" + ] + }, + "aws-native:appsync:DataSourceElasticsearchConfig": { + "properties": { + "awsRegion": { + "type": "string", + "description": "The AWS Region." + }, + "endpoint": { + "type": "string", + "description": "The endpoint." + } + }, + "type": "object", + "required": [ + "awsRegion", + "endpoint" + ] + }, + "aws-native:appsync:DataSourceEventBridgeConfig": { + "properties": { + "eventBusArn": { + "type": "string", + "description": "ARN for the EventBridge bus." + } + }, + "type": "object", + "required": [ + "eventBusArn" + ] + }, + "aws-native:appsync:DataSourceHttpConfig": { + "properties": { + "authorizationConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceAuthorizationConfig", + "description": "The authorization configuration." + }, + "endpoint": { + "type": "string", + "description": "The endpoint." + } + }, + "type": "object", + "required": [ + "endpoint" + ] + }, + "aws-native:appsync:DataSourceLambdaConfig": { + "properties": { + "lambdaFunctionArn": { + "type": "string", + "description": "The ARN for the Lambda function." + } + }, + "type": "object", + "required": [ + "lambdaFunctionArn" + ] + }, + "aws-native:appsync:DataSourceMetricsConfig": { + "description": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. 
However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` .", + "type": "string", + "enum": [ + { + "name": "Disabled", + "value": "DISABLED" + }, + { + "name": "Enabled", + "value": "ENABLED" + } + ] + }, + "aws-native:appsync:DataSourceOpenSearchServiceConfig": { + "properties": { + "awsRegion": { + "type": "string", + "description": "The AWS Region." + }, + "endpoint": { + "type": "string", + "description": "The endpoint." + } + }, + "type": "object", + "required": [ + "awsRegion", + "endpoint" + ] + }, + "aws-native:appsync:DataSourceRdsHttpEndpointConfig": { + "properties": { + "awsRegion": { + "type": "string", + "description": "AWS Region for RDS HTTP endpoint." + }, + "awsSecretStoreArn": { + "type": "string", + "description": "The ARN for database credentials stored in AWS Secrets Manager." + }, + "databaseName": { + "type": "string", + "description": "Logical database name." + }, + "dbClusterIdentifier": { + "type": "string", + "description": "Amazon RDS cluster Amazon Resource Name (ARN)." + }, + "schema": { + "type": "string", + "description": "Logical schema name." + } + }, + "type": "object", + "required": [ + "awsRegion", + "awsSecretStoreArn", + "dbClusterIdentifier" + ] + }, + "aws-native:appsync:DataSourceRelationalDatabaseConfig": { + "properties": { + "rdsHttpEndpointConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceRdsHttpEndpointConfig", + "description": "Information about the Amazon RDS resource." + }, + "relationalDatabaseSourceType": { + "type": "string", + "description": "The type of relational data source." + } + }, + "type": "object", + "required": [ + "relationalDatabaseSourceType" + ] + }, "aws-native:appsync:FunctionConfigurationAppSyncRuntime": { "description": "Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified.", "properties": { @@ -24225,7 +24443,7 @@ ] }, "aws-native:codebuild:FleetComputeType": { - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
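To make the new `aws-native:appsync:DataSource*` shapes concrete, a small sketch follows. It assumes the resource is exposed as `awsnative.appsync.DataSource` with the usual CloudFormation property names; the API ID, table names, and role ARN are placeholders.

```typescript
import * as awsnative from "@pulumi/aws-native";

// Sketch only: API ID, table names, and role ARN are placeholders. The nested
// dynamoDbConfig/deltaSyncConfig shapes mirror the type definitions above.
const ordersSource = new awsnative.appsync.DataSource("ordersSource", {
    apiId: "abcdefghijklmnopqrstuvwxyz", // placeholder GraphQL API ID
    name: "OrdersTable",
    type: "AMAZON_DYNAMODB",
    serviceRoleArn: "arn:aws:iam::111111111111:role/appsync-dynamodb-access", // placeholder
    dynamoDbConfig: {
        awsRegion: "us-east-1",
        tableName: "orders",
        versioned: true,
        deltaSyncConfig: {
            baseTableTtl: "43200", // minutes, passed as a string per the schema
            deltaSyncTableName: "orders-delta",
            deltaSyncTableTtl: "30",
        },
    },
    metricsConfig: "ENABLED",
});

export const ordersSourceArn = ordersSource.dataSourceArn; // identifier used by getDataSource
```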
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "description": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "type": "string", "enum": [ { @@ -24251,7 +24469,7 @@ ] }, "aws-native:codebuild:FleetEnvironmentType": { - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "description": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "type": "string", "enum": [ { @@ -24683,6 +24901,13 @@ "$ref": "#/types/aws-native:codepipeline:PipelineActionTypeId", "description": "Specifies the action type and the provider of the action." }, + "commands": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The shell commands to run with your compute action in CodePipeline." + }, "configuration": { "$ref": "pulumi.json#/Any", "description": "The action's configuration. These are key-value pairs that specify input values for an action." @@ -24709,6 +24934,13 @@ }, "description": "The name or ID of the result of the action declaration, such as a test or build artifact. While the field is not a required parameter, most actions have an action configuration that requires a specified quantity of output artifacts. 
To refer to the action configuration specification by action provider, see the [Action structure reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) in the *AWS CodePipeline User Guide* ." }, + "outputVariables": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of variables that are to be exported from the compute action." + }, "region": { "type": "string", "description": "The action declaration's AWS Region, such as us-east-1." @@ -24787,6 +25019,10 @@ { "name": "Approval", "value": "Approval" + }, + { + "name": "Compute", + "value": "Compute" } ] }, @@ -25103,6 +25339,13 @@ "aws-native:codepipeline:PipelineOutputArtifact": { "description": "Represents information about the output of an action.", "properties": { + "files": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The files that you want to associate with the output artifact that will be exported from the compute action." + }, "name": { "type": "string", "description": "The name of the output of an artifact, such as \"My App\"." @@ -25660,7 +25903,7 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolRecoveryOption" }, - "description": "The list of `RecoveryOptionTypes` ." + "description": "The list of options and priorities for user message delivery in forgot-password operations. Sets or displays user pool preferences for email or SMS message priority, whether users should fall back to a second delivery method, and whether passwords should only be reset by administrators." } }, "type": "object" @@ -25685,7 +25928,7 @@ }, "inviteMessageTemplate": { "$ref": "#/types/aws-native:cognito:UserPoolInviteMessageTemplate", - "description": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) ." + "description": "The template for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) ." }, "unusedAccountValidityDays": { "type": "integer", @@ -25706,7 +25949,7 @@ "properties": { "applicationArn": { "type": "string", - "description": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project for integration with the chosen user pool client. Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares." + "description": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project that you want to connect to your user pool app client. Amazon Cognito publishes events to the Amazon Pinpoint project that `ApplicationArn` declares. You can also configure your application to pass an endpoint ID in the `AnalyticsMetadata` parameter of sign-in operations. The endpoint ID is information about the destination for push notifications" }, "applicationId": { "type": "string", @@ -25748,11 +25991,11 @@ "properties": { "lambdaArn": { "type": "string", - "description": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users." + "description": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger." 
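The CodePipeline additions a few hunks above (the `Compute` category plus `commands`, `outputVariables`, and output-artifact `files`) can be exercised with an action declaration like the sketch below. The `Commands` provider name and the `awsnative.types.input.codepipeline.PipelineActionDeclarationArgs` type path are assumptions.

```typescript
import * as awsnative from "@pulumi/aws-native";

// Sketch only: provider name, artifact names, and commands are assumptions. The
// fields exercised here are the new commands/outputVariables/files additions above.
const lintAction: awsnative.types.input.codepipeline.PipelineActionDeclarationArgs = {
    name: "RunLint",
    actionTypeId: {
        category: "Compute",
        owner: "AWS",
        provider: "Commands", // assumed provider for the compute action
        version: "1",
    },
    inputArtifacts: [{ name: "SourceOutput" }],
    commands: [
        "npm ci",
        "npm run lint -- --output-file reports/lint.json",
        "export LINT_STATUS=passed",
    ],
    outputVariables: ["LINT_STATUS"],
    outputArtifacts: [{
        name: "LintReport",
        files: ["reports/**/*"], // exported from the compute action's workspace
    }],
    runOrder: 1,
};
```

The declaration would then be referenced from a stage's `actions` list on an `awsnative.codepipeline.Pipeline` resource.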
}, "lambdaVersion": { "type": "string", - "description": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information that Amazon Cognito passes to your custom email sender AWS Lambda function. The only supported value is `V1_0` ." + "description": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function." } }, "type": "object" }, @@ -25761,11 +26004,11 @@ "properties": { "lambdaArn": { "type": "string", - "description": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send SMS notifications to users." + "description": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger." }, "lambdaVersion": { "type": "string", - "description": "The Lambda version represents the signature of the \"request\" attribute in the \"event\" information Amazon Cognito passes to your custom SMS sender Lambda function. The only supported value is `V1_0` ." + "description": "The user pool trigger version of the request that Amazon Cognito sends to your Lambda function. Higher-numbered versions add fields that support new features.\n\nYou must use a `LambdaVersion` of `V1_0` with a custom sender function." } }, "type": "object" }, @@ -25796,7 +26039,7 @@ "properties": { "configurationSet": { "type": "string", - "description": "The set of configuration rules that can be applied to emails sent using Amazon SES. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- Event publishing – Amazon SES can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as SNS and CloudWatch.\n- IP pool management – When leasing dedicated IP addresses with Amazon SES, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets." + "description": "The set of configuration rules that can be applied to emails sent using Amazon Simple Email Service. A configuration set is applied to an email by including a reference to the configuration set in the headers of the email. Once applied, all of the rules in that configuration set are applied to the email. Configuration sets can be used to apply the following types of rules to emails:\n\n- **Event publishing** - Amazon Simple Email Service can track the number of send, delivery, open, click, bounce, and complaint events for each email sent. Use event publishing to send information about these events to other AWS services such as Amazon SNS and Amazon CloudWatch.\n- **IP pool management** - When leasing dedicated IP addresses with Amazon Simple Email Service, you can create groups of IP addresses, called dedicated IP pools. You can then associate the dedicated IP pools with configuration sets." }, "emailSendingAccount": { "type": "string", @@ -25804,7 +26047,7 @@ }, "from": { "type": "string", - "description": "Identifies either the sender's email address or the sender's name with their email address. 
For example, `testuser@example.com` or `Test User \u003ctestuser@example.com\u003e` . This address appears before the body of the email." + "description": "Either the sender’s email address or the sender’s name with their email address. For example, `testuser@example.com` or `Test User \u003ctestuser@example.com\u003e` . This address appears before the body of the email." }, "replyToEmailAddress": { "type": "string", @@ -25842,7 +26085,7 @@ }, "customEmailSender": { "$ref": "#/types/aws-native:cognito:UserPoolCustomEmailSender", - "description": "A custom email sender AWS Lambda trigger." + "description": "The configuration of a custom email sender Lambda trigger. This trigger routes all email notifications from a user pool to a Lambda function that delivers the message using custom logic." }, "customMessage": { "type": "string", @@ -25850,7 +26093,7 @@ }, "customSmsSender": { "$ref": "#/types/aws-native:cognito:UserPoolCustomSmsSender", - "description": "A custom SMS sender AWS Lambda trigger." + "description": "The configuration of a custom SMS sender Lambda trigger. This trigger routes all SMS notifications from a user pool to a Lambda function that delivers the message using custom logic." }, "defineAuthChallenge": { "type": "string", @@ -25858,7 +26101,7 @@ }, "kmsKeyId": { "type": "string", - "description": "The Amazon Resource Name of a AWS Key Management Service ( AWS KMS ) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to `CustomEmailSender` and `CustomSMSSender` ." + "description": "The ARN of an [KMS key](https://docs.aws.amazon.com//kms/latest/developerguide/concepts.html#master_keys) . Amazon Cognito uses the key to encrypt codes and temporary passwords sent to custom sender Lambda triggers." }, "postAuthentication": { "type": "string", @@ -25967,11 +26210,11 @@ "properties": { "name": { "type": "string", - "description": "Specifies the recovery method for a user." + "description": "The recovery method that this object sets a recovery option for." }, "priority": { "type": "integer", - "description": "A positive integer specifying priority of a method with 1 being the highest priority." + "description": "Your priority preference for using the specified attribute in account recovery. The highest priority is `1` ." } }, "type": "object" @@ -26153,7 +26396,7 @@ }, "developerOnlyAttribute": { "type": "boolean", - "description": "\u003e We recommend that you use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users will not be able to modify this attribute using their access token." + "description": "\u003e You should use [WriteAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UserPoolClientType.html#CognitoUserPools-Type-UserPoolClientType-WriteAttributes) in the user pool client to control how attributes can be mutated for new use cases instead of using `DeveloperOnlyAttribute` . \n\nSpecifies whether the attribute type is developer only. This attribute can only be modified by an administrator. Users won't be able to modify this attribute using their access token. 
For example, `DeveloperOnlyAttribute` can be modified using AdminUpdateUserAttributes but can't be updated using UpdateUserAttributes." }, "mutable": { "type": "boolean", @@ -26182,7 +26425,7 @@ "properties": { "externalId": { "type": "string", - "description": "The external ID is a value. We recommend you use `ExternalId` to add security to your IAM role, which is used to call Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , the Cognito User Pool uses it when attempting to assume your IAM role. You can also set your roles trust policy to require the `ExternalID` . If you use the Cognito Management Console to create a role for SMS MFA, Cognito creates a role with the required permissions and a trust policy that uses `ExternalId` ." + "description": "The external ID provides additional security for your IAM role. You can use an `ExternalId` with the IAM role that you use with Amazon SNS to send SMS messages for your user pool. If you provide an `ExternalId` , your Amazon Cognito user pool includes it in the request to assume your IAM role. You can configure the role trust policy to require that Amazon Cognito, and any principal, provide the `ExternalID` . If you use the Amazon Cognito Management Console to create a role for SMS multi-factor authentication (MFA), Amazon Cognito creates a role with the required permissions and a trust policy that demonstrates use of the `ExternalId` .\n\nFor more information about the `ExternalId` of a role, see [How to use an external ID when granting access to your AWS resources to a third party](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) ." }, "snsCallerArn": { "type": "string", @@ -43790,18 +44033,22 @@ "aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecification": { "properties": { "logEnabled": { - "type": "boolean" + "type": "boolean", + "description": "Enable or disable VPN tunnel logging feature. Default value is `False` .\n\nValid values: `True` | `False`" }, "logGroupArn": { - "type": "string" + "type": "string", + "description": "The Amazon Resource Name (ARN) of the CloudWatch log group to send logs to." }, "logOutputFormat": { - "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecificationLogOutputFormat" + "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecificationLogOutputFormat", + "description": "Set log format. Default format is `json` .\n\nValid values: `json` | `text`" } }, "type": "object" }, "aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecificationLogOutputFormat": { + "description": "Set log format. Default format is `json` .\n\nValid values: `json` | `text`", "type": "string", "enum": [ { @@ -43817,12 +44064,14 @@ "aws-native:ec2:VpnConnectionIkeVersionsRequestListValue": { "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionIkeVersionsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionIkeVersionsRequestListValueValue", + "description": "The IKE version." 
} }, "type": "object" }, "aws-native:ec2:VpnConnectionIkeVersionsRequestListValueValue": { + "description": "The IKE version.", "type": "string", "enum": [ { @@ -43838,12 +44087,14 @@ "aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValue": { "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValueValue", + "description": "The value for the encryption algorithm." } }, "type": "object" }, "aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValueValue": { + "description": "The value for the encryption algorithm.", "type": "string", "enum": [ { @@ -43867,12 +44118,14 @@ "aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValue": { "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValueValue", + "description": "The value for the integrity algorithm." } }, "type": "object" }, "aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValueValue": { + "description": "The value for the integrity algorithm.", "type": "string", "enum": [ { @@ -43896,7 +44149,8 @@ "aws-native:ec2:VpnConnectionPhase1dhGroupNumbersRequestListValue": { "properties": { "value": { - "type": "integer" + "type": "integer", + "description": "The Diffie-Hellmann group number." } }, "type": "object" @@ -43904,12 +44158,14 @@ "aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValue": { "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValueValue", + "description": "The encryption algorithm." } }, "type": "object" }, "aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValueValue": { + "description": "The encryption algorithm.", "type": "string", "enum": [ { @@ -43933,12 +44189,14 @@ "aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValue": { "properties": { "value": { - "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValueValue" + "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValueValue", + "description": "The integrity algorithm." } }, "type": "object" }, "aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValueValue": { + "description": "The integrity algorithm.", "type": "string", "enum": [ { @@ -43962,7 +44220,8 @@ "aws-native:ec2:VpnConnectionPhase2dhGroupNumbersRequestListValue": { "properties": { "value": { - "type": "integer" + "type": "integer", + "description": "The Diffie-Hellmann group number." } }, "type": "object" @@ -43988,7 +44247,8 @@ "aws-native:ec2:VpnConnectionVpnTunnelLogOptionsSpecification": { "properties": { "cloudwatchLogOptions": { - "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecification" + "$ref": "#/types/aws-native:ec2:VpnConnectionCloudwatchLogOptionsSpecification", + "description": "Options for sending VPN tunnel logs to CloudWatch." 
} }, "type": "object" @@ -43997,92 +44257,111 @@ "description": "The tunnel options for a single VPN tunnel.", "properties": { "dpdTimeoutAction": { - "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationDpdTimeoutAction" + "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationDpdTimeoutAction", + "description": "The action to take after DPD timeout occurs. Specify `restart` to restart the IKE initiation. Specify `clear` to end the IKE session.\n\nValid Values: `clear` | `none` | `restart`\n\nDefault: `clear`" }, "dpdTimeoutSeconds": { - "type": "integer" + "type": "integer", + "description": "The number of seconds after which a DPD timeout occurs.\n\nConstraints: A value greater than or equal to 30.\n\nDefault: `30`" }, "enableTunnelLifecycleControl": { - "type": "boolean" + "type": "boolean", + "description": "Turn on or off tunnel endpoint lifecycle control feature." }, "ikeVersions": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionIkeVersionsRequestListValue" - } + }, + "description": "The IKE versions that are permitted for the VPN tunnel.\n\nValid values: `ikev1` | `ikev2`" }, "logOptions": { - "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelLogOptionsSpecification" + "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelLogOptionsSpecification", + "description": "Options for logging VPN tunnel activity." }, "phase1EncryptionAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1EncryptionAlgorithmsRequestListValue" - } + }, + "description": "One or more encryption algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `AES128` | `AES256` | `AES128-GCM-16` | `AES256-GCM-16`" }, "phase1IntegrityAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1IntegrityAlgorithmsRequestListValue" - } + }, + "description": "One or more integrity algorithms that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `SHA1` | `SHA2-256` | `SHA2-384` | `SHA2-512`" }, "phase1LifetimeSeconds": { - "type": "integer" + "type": "integer", + "description": "The lifetime for phase 1 of the IKE negotiation, in seconds.\n\nConstraints: A value between 900 and 28,800.\n\nDefault: `28800`" }, "phase1dhGroupNumbers": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase1dhGroupNumbersRequestListValue" - } + }, + "description": "One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 1 IKE negotiations.\n\nValid values: `2` | `14` | `15` | `16` | `17` | `18` | `19` | `20` | `21` | `22` | `23` | `24`" }, "phase2EncryptionAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2EncryptionAlgorithmsRequestListValue" - } + }, + "description": "One or more encryption algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `AES128` | `AES256` | `AES128-GCM-16` | `AES256-GCM-16`" }, "phase2IntegrityAlgorithms": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2IntegrityAlgorithmsRequestListValue" - } + }, + "description": "One or more integrity algorithms that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `SHA1` | `SHA2-256` | `SHA2-384` | `SHA2-512`" }, "phase2LifetimeSeconds": { - "type": "integer" + "type": "integer", + "description": "The lifetime for phase 2 of the IKE negotiation, in 
seconds.\n\nConstraints: A value between 900 and 3,600. The value must be less than the value for `Phase1LifetimeSeconds` .\n\nDefault: `3600`" }, "phase2dhGroupNumbers": { "type": "array", "items": { "$ref": "#/types/aws-native:ec2:VpnConnectionPhase2dhGroupNumbersRequestListValue" - } + }, + "description": "One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel for phase 2 IKE negotiations.\n\nValid values: `2` | `5` | `14` | `15` | `16` | `17` | `18` | `19` | `20` | `21` | `22` | `23` | `24`" }, "preSharedKey": { "type": "string", "description": "The pre-shared key (PSK) to establish initial authentication between the virtual private gateway and customer gateway.\n Constraints: Allowed characters are alphanumeric characters, periods (.), and underscores (_). Must be between 8 and 64 characters in length and cannot start with zero (0)." }, "rekeyFuzzPercentage": { - "type": "integer" + "type": "integer", + "description": "The percentage of the rekey window (determined by `RekeyMarginTimeSeconds` ) during which the rekey time is randomly selected.\n\nConstraints: A value between 0 and 100.\n\nDefault: `100`" }, "rekeyMarginTimeSeconds": { - "type": "integer" + "type": "integer", + "description": "The margin time, in seconds, before the phase 2 lifetime expires, during which the AWS side of the VPN connection performs an IKE rekey. The exact time of the rekey is randomly selected based on the value for `RekeyFuzzPercentage` .\n\nConstraints: A value between 60 and half of `Phase2LifetimeSeconds` .\n\nDefault: `270`" }, "replayWindowSize": { - "type": "integer" + "type": "integer", + "description": "The number of packets in an IKE replay window.\n\nConstraints: A value between 64 and 2048.\n\nDefault: `1024`" }, "startupAction": { - "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationStartupAction" + "$ref": "#/types/aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationStartupAction", + "description": "The action to take when the establishing the tunnel for the VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation.\n\nValid Values: `add` | `start`\n\nDefault: `add`" }, "tunnelInsideCidr": { "type": "string", "description": "The range of inside IP addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same virtual private gateway. \n Constraints: A size /30 CIDR block from the ``169.254.0.0/16`` range. The following CIDR blocks are reserved and cannot be used:\n + ``169.254.0.0/30`` \n + ``169.254.1.0/30`` \n + ``169.254.2.0/30`` \n + ``169.254.3.0/30`` \n + ``169.254.4.0/30`` \n + ``169.254.5.0/30`` \n + ``169.254.169.252/30``" }, "tunnelInsideIpv6Cidr": { - "type": "string" + "type": "string", + "description": "The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks must be unique across all VPN connections that use the same transit gateway.\n\nConstraints: A size /126 CIDR block from the local `fd00::/8` range." } }, "type": "object" }, "aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationDpdTimeoutAction": { + "description": "The action to take after DPD timeout occurs. Specify `restart` to restart the IKE initiation. 
Specify `clear` to end the IKE session.\n\nValid Values: `clear` | `none` | `restart`\n\nDefault: `clear`", "type": "string", "enum": [ { @@ -44100,6 +44379,7 @@ ] }, "aws-native:ec2:VpnConnectionVpnTunnelOptionsSpecificationStartupAction": { + "description": "The action to take when the establishing the tunnel for the VPN connection. By default, your customer gateway device must initiate the IKE negotiation and bring up the tunnel. Specify `start` for AWS to initiate the IKE negotiation.\n\nValid Values: `add` | `start`\n\nDefault: `add`", "type": "string", "enum": [ { @@ -46245,13 +46525,16 @@ "aws-native:ecs:TaskSetCapacityProviderStrategyItem": { "properties": { "base": { - "type": "integer" + "type": "integer", + "description": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used." }, "capacityProvider": { - "type": "string" + "type": "string", + "description": "The short name of the capacity provider." }, "weight": { - "type": "integer" + "type": "integer", + "description": "The *weight* value designates the relative percentage of the total number of tasks launched that should use the specified capacity provider. The `weight` value is taken into consideration after the `base` value, if defined, is satisfied.\n\nIf no `weight` value is specified, the default value of `0` is used. When multiple capacity providers are specified within a capacity provider strategy, at least one of the capacity providers must have a weight value greater than zero and any capacity providers with a weight of `0` can't be used to place tasks. If you specify multiple capacity providers in a strategy that all have a weight of `0` , any `RunTask` or `CreateService` actions using the capacity provider strategy will fail.\n\nAn example scenario for using weights is defining a strategy that contains two capacity providers and both have a weight of `1` , then when the `base` is satisfied, the tasks will be split evenly across the two capacity providers. Using that same logic, if you specify a weight of `1` for *capacityProviderA* and a weight of `4` for *capacityProviderB* , then for every one task that's run using *capacityProviderA* , four tasks would use *capacityProviderB* ." } }, "type": "object" @@ -53537,6 +53820,24 @@ } ] }, + "aws-native:gamelift:ContainerGroupDefinitionStatus": { + "description": "A string indicating ContainerGroupDefinition status.", + "type": "string", + "enum": [ + { + "name": "Ready", + "value": "READY" + }, + { + "name": "Copying", + "value": "COPYING" + }, + { + "name": "Failed", + "value": "FAILED" + } + ] + }, "aws-native:gamelift:ContainerGroupDefinitionTag": { "description": "A key-value pair to associate with a resource.", "properties": { @@ -73689,14 +73990,15 @@ ] }, "aws-native:lambda:EventSourceMappingTag": { + "description": "A [tag](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the event source mapping.", "properties": { "key": { "type": "string", - "description": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + "description": "The key for this tag." }, "value": { "type": "string", - "description": "The value for the tag. 
You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -." + "description": "The value for this tag." } }, "type": "object", @@ -88369,7 +88671,7 @@ ] }, "aws-native:organizations:PolicyType": { - "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY", + "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY, CHATBOT_POLICY", "type": "string", "enum": [ { @@ -88387,6 +88689,10 @@ { "name": "TagPolicy", "value": "TAG_POLICY" + }, + { + "name": "ChatbotPolicy", + "value": "CHATBOT_POLICY" } ] }, @@ -96710,7 +97016,8 @@ "description": "The configuration that selects all options." }, "sourceColumn": { - "$ref": "#/types/aws-native:quicksight:AnalysisColumnIdentifier" + "$ref": "#/types/aws-native:quicksight:AnalysisColumnIdentifier", + "description": "A column of a data set." }, "sourceField": { "type": "string", @@ -109080,7 +109387,8 @@ "description": "The configuration that selects all options." }, "sourceColumn": { - "$ref": "#/types/aws-native:quicksight:DashboardColumnIdentifier" + "$ref": "#/types/aws-native:quicksight:DashboardColumnIdentifier", + "description": "A column of a data set." }, "sourceField": { "type": "string", @@ -118823,7 +119131,6 @@ }, "type": "object", "required": [ - "columns", "dataSourceArn", "name", "sqlQuery" @@ -119538,7 +119845,6 @@ "type": "object", "required": [ "dataSourceArn", - "inputColumns", "name" ] }, @@ -119880,7 +120186,8 @@ "description": "An operation that filters rows based on some condition." }, "overrideDatasetParameterOperation": { - "$ref": "#/types/aws-native:quicksight:DataSetOverrideDatasetParameterOperation" + "$ref": "#/types/aws-native:quicksight:DataSetOverrideDatasetParameterOperation", + "description": "A transform operation that overrides the dataset parameter values that are defined in another dataset." }, "projectOperation": { "$ref": "#/types/aws-native:quicksight:DataSetProjectOperation", @@ -120823,6 +121130,10 @@ "name": "S3", "value": "S3" }, + { + "name": "S3Tables", + "value": "S3_TABLES" + }, { "name": "Salesforce", "value": "SALESFORCE" @@ -124135,7 +124446,8 @@ "description": "The configuration that selects all options." }, "sourceColumn": { - "$ref": "#/types/aws-native:quicksight:TemplateColumnIdentifier" + "$ref": "#/types/aws-native:quicksight:TemplateColumnIdentifier", + "description": "A column of a data set." }, "sourceField": { "type": "string", @@ -133494,7 +133806,8 @@ "description": "Display options related to sheets." }, "typography": { - "$ref": "#/types/aws-native:quicksight:ThemeTypography" + "$ref": "#/types/aws-native:quicksight:ThemeTypography", + "description": "Determines the typography options." }, "uiColorPalette": { "$ref": "#/types/aws-native:quicksight:ThemeUiColorPalette", @@ -157143,7 +157456,7 @@ "additionalProperties": { "type": "string" }, - "description": "The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. 
The following tables outline the parameters for each configuration type.\n\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomerPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use. 
You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **DevOps Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. 
The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. 
The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . 
Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A boolean value that determines whether the selected patch baselines are all AWS provided.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) A boolean value that determines whether instances are rebooted after patches are installed. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. 
The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputS3BucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want AWS Config to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to." + "description": "The parameters for the configuration definition type. Parameters for configuration definitions vary based the configuration type. 
The following tables outline the parameters for each configuration type.\n\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomerPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use. 
You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **DevOps Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. 
The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. 
The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . 
Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A boolean value that determines whether the selected patch baselines are all AWS provided.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) Determines whether instances are rebooted after patches are installed. Valid values are `RebootIfNeeded` and `NoReboot` .\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. 
The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputS3BucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want AWS Config to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to." }, "type": { "type": "string", @@ -158981,6 +159294,297 @@ } ] }, + "aws-native:transfer:ServerAs2Transport": { + "type": "string", + "enum": [ + { + "name": "Http", + "value": "HTTP" + } + ] + }, + "aws-native:transfer:ServerDirectoryListingOptimization": { + "description": "Indicates whether optimization to directory listing on S3 servers is used. 
Disabled by default for compatibility.", + "type": "string", + "enum": [ + { + "name": "Enabled", + "value": "ENABLED" + }, + { + "name": "Disabled", + "value": "DISABLED" + } + ] + }, + "aws-native:transfer:ServerDomain": { + "type": "string", + "enum": [ + { + "name": "S3", + "value": "S3" + }, + { + "name": "Efs", + "value": "EFS" + } + ] + }, + "aws-native:transfer:ServerEndpointDetails": { + "properties": { + "addressAllocationIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of address allocation IDs that are required to attach an Elastic IP address to your server's endpoint.\n\nAn address allocation ID corresponds to the allocation ID of an Elastic IP address. This value can be retrieved from the `allocationId` field from the Amazon EC2 [Address](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Address.html) data type. One way to retrieve this value is by calling the EC2 [DescribeAddresses](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAddresses.html) API.\n\nThis parameter is optional. Set this parameter if you want to make your VPC endpoint public-facing. For details, see [Create an internet-facing endpoint for your server](https://docs.aws.amazon.com/transfer/latest/userguide/create-server-in-vpc.html#create-internet-facing-endpoint) .\n\n\u003e This property can only be set as follows:\n\u003e \n\u003e - `EndpointType` must be set to `VPC`\n\u003e - The Transfer Family server must be offline.\n\u003e - You cannot set this parameter for Transfer Family servers that use the FTP protocol.\n\u003e - The server must already have `SubnetIds` populated ( `SubnetIds` and `AddressAllocationIds` cannot be updated simultaneously).\n\u003e - `AddressAllocationIds` can't contain duplicates, and must be equal in length to `SubnetIds` . For example, if you have three subnet IDs, you must also specify three address allocation IDs.\n\u003e - Call the `UpdateServer` API to set or change this parameter." + }, + "securityGroupIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of security groups IDs that are available to attach to your server's endpoint.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC` .\n\u003e \n\u003e You can edit the `SecurityGroupIds` property in the [UpdateServer](https://docs.aws.amazon.com/transfer/latest/userguide/API_UpdateServer.html) API only if you are changing the `EndpointType` from `PUBLIC` or `VPC_ENDPOINT` to `VPC` . To change security groups associated with your server's VPC endpoint after creation, use the Amazon EC2 [ModifyVpcEndpoint](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyVpcEndpoint.html) API." + }, + "subnetIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "A list of subnet IDs that are required to host your server endpoint in your VPC.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC` ." + }, + "vpcEndpointId": { + "type": "string", + "description": "The ID of the VPC endpoint.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC_ENDPOINT` ." + }, + "vpcId": { + "type": "string", + "description": "The VPC ID of the virtual private cloud in which the server's endpoint will be hosted.\n\n\u003e This property can only be set when `EndpointType` is set to `VPC` ." 
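A rough sketch of how the `ServerEndpointDetails` shape above might be supplied through the Pulumi TypeScript SDK. The nested names (`vpcId`, `subnetIds`, `securityGroupIds`) come from the type definition; the resource-level names (`protocols`, `identityProviderType`, `endpointType`, `endpointDetails`) and the config keys are assumed for illustration. Per the constraints above, `endpointDetails` is only meaningful when `endpointType` is `VPC`.

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as awsnative from "@pulumi/aws-native";

const config = new pulumi.Config();
// Hypothetical VPC networking IDs supplied via config, purely for illustration.
const vpcId = config.require("vpcId");
const subnetIds = config.requireObject<string[]>("subnetIds");
const securityGroupIds = config.requireObject<string[]>("securityGroupIds");

const server = new awsnative.transfer.Server("sftpServer", {
    protocols: ["SFTP"],
    identityProviderType: "SERVICE_MANAGED",
    // SubnetIds and SecurityGroupIds in EndpointDetails only apply when
    // EndpointType is set to VPC.
    endpointType: "VPC",
    endpointDetails: {
        vpcId,
        subnetIds,
        securityGroupIds,
    },
});
```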
+ } + }, + "type": "object" + }, + "aws-native:transfer:ServerEndpointType": { + "type": "string", + "enum": [ + { + "name": "Public", + "value": "PUBLIC" + }, + { + "name": "Vpc", + "value": "VPC" + }, + { + "name": "VpcEndpoint", + "value": "VPC_ENDPOINT" + } + ] + }, + "aws-native:transfer:ServerIdentityProviderDetails": { + "properties": { + "directoryId": { + "type": "string", + "description": "The identifier of the AWS Directory Service directory that you want to use as your identity provider." + }, + "function": { + "type": "string", + "description": "The ARN for a Lambda function to use for the Identity provider." + }, + "invocationRole": { + "type": "string", + "description": "This parameter is only applicable if your `IdentityProviderType` is `API_GATEWAY` . Provides the type of `InvocationRole` used to authenticate the user account." + }, + "sftpAuthenticationMethods": { + "$ref": "#/types/aws-native:transfer:ServerSftpAuthenticationMethods", + "description": "For SFTP-enabled servers, and for custom identity providers *only* , you can specify whether to authenticate using a password, SSH key pair, or both.\n\n- `PASSWORD` - users must provide their password to connect.\n- `PUBLIC_KEY` - users must provide their private key to connect.\n- `PUBLIC_KEY_OR_PASSWORD` - users can authenticate with either their password or their key. This is the default value.\n- `PUBLIC_KEY_AND_PASSWORD` - users must provide both their private key and their password to connect. The server checks the key first, and then if the key is valid, the system prompts for a password. If the private key provided does not match the public key that is stored, authentication fails." + }, + "url": { + "type": "string", + "description": "Provides the location of the service endpoint used to authenticate users." + } + }, + "type": "object" + }, + "aws-native:transfer:ServerIdentityProviderType": { + "type": "string", + "enum": [ + { + "name": "ServiceManaged", + "value": "SERVICE_MANAGED" + }, + { + "name": "ApiGateway", + "value": "API_GATEWAY" + }, + { + "name": "AwsDirectoryService", + "value": "AWS_DIRECTORY_SERVICE" + }, + { + "name": "AwsLambda", + "value": "AWS_LAMBDA" + } + ] + }, + "aws-native:transfer:ServerProtocol": { + "type": "string", + "enum": [ + { + "name": "Sftp", + "value": "SFTP" + }, + { + "name": "Ftp", + "value": "FTP" + }, + { + "name": "Ftps", + "value": "FTPS" + }, + { + "name": "As2", + "value": "AS2" + } + ] + }, + "aws-native:transfer:ServerProtocolDetails": { + "properties": { + "as2Transports": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerAs2Transport" + }, + "description": "List of `As2Transport` objects." + }, + "passiveIp": { + "type": "string", + "description": "Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. For example:\n\n`aws transfer update-server --protocol-details PassiveIp=0.0.0.0`\n\nReplace `0.0.0.0` in the example above with the actual IP address you want to use.\n\n\u003e If you change the `PassiveIp` value, you must stop and then restart your Transfer Family server for the change to take effect. For details on using passive mode (PASV) in a NAT environment, see [Configuring your FTPS server behind a firewall or NAT with AWS Transfer Family](https://docs.aws.amazon.com/storage/configuring-your-ftps-server-behind-a-firewall-or-nat-with-aws-transfer-family/) . 
\n\n*Special values*\n\nThe `AUTO` and `0.0.0.0` are special values for the `PassiveIp` parameter. The value `PassiveIp=AUTO` is assigned by default to FTP and FTPS type servers. In this case, the server automatically responds with one of the endpoint IPs within the PASV response. `PassiveIp=0.0.0.0` has a more unique application for its usage. For example, if you have a High Availability (HA) Network Load Balancer (NLB) environment, where you have 3 subnets, you can only specify a single IP address using the `PassiveIp` parameter. This reduces the effectiveness of having High Availability. In this case, you can specify `PassiveIp=0.0.0.0` . This tells the client to use the same IP address as the Control connection and utilize all AZs for their connections. Note, however, that not all FTP clients support the `PassiveIp=0.0.0.0` response. FileZilla and WinSCP do support it. If you are using other clients, check to see if your client supports the `PassiveIp=0.0.0.0` response." + }, + "setStatOption": { + "$ref": "#/types/aws-native:transfer:ServerSetStatOption", + "description": "Use the `SetStatOption` to ignore the error that is generated when the client attempts to use `SETSTAT` on a file you are uploading to an S3 bucket.\n\nSome SFTP file transfer clients can attempt to change the attributes of remote files, including timestamp and permissions, using commands, such as `SETSTAT` when uploading the file. However, these commands are not compatible with object storage systems, such as Amazon S3. Due to this incompatibility, file uploads from these clients can result in errors even when the file is otherwise successfully uploaded.\n\nSet the value to `ENABLE_NO_OP` to have the Transfer Family server ignore the `SETSTAT` command, and upload files without needing to make any changes to your SFTP client. While the `SetStatOption` `ENABLE_NO_OP` setting ignores the error, it does generate a log entry in Amazon CloudWatch Logs, so you can determine when the client is making a `SETSTAT` call.\n\n\u003e If you want to preserve the original timestamp for your file, and modify other file attributes using `SETSTAT` , you can use Amazon EFS as backend storage with Transfer Family." + }, + "tlsSessionResumptionMode": { + "$ref": "#/types/aws-native:transfer:ServerTlsSessionResumptionMode", + "description": "A property used with Transfer Family servers that use the FTPS protocol. TLS Session Resumption provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. `TlsSessionResumptionMode` determines whether or not the server resumes recent, negotiated sessions through a unique session ID. This property is available during `CreateServer` and `UpdateServer` calls. If a `TlsSessionResumptionMode` value is not specified during `CreateServer` , it is set to `ENFORCED` by default.\n\n- `DISABLED` : the server does not process TLS session resumption client requests and creates a new TLS session for each request.\n- `ENABLED` : the server processes and accepts clients that are performing TLS session resumption. The server doesn't reject client data connections that do not perform the TLS session resumption client processing.\n- `ENFORCED` : the server processes and accepts clients that are performing TLS session resumption. The server rejects client data connections that do not perform the TLS session resumption client processing. 
Before you set the value to `ENFORCED` , test your clients.\n\n\u003e Not all FTPS clients perform TLS session resumption. So, if you choose to enforce TLS session resumption, you prevent any connections from FTPS clients that don't perform the protocol negotiation. To determine whether or not you can use the `ENFORCED` value, you need to test your clients." + } + }, + "type": "object" + }, + "aws-native:transfer:ServerS3StorageOptions": { + "properties": { + "directoryListingOptimization": { + "$ref": "#/types/aws-native:transfer:ServerDirectoryListingOptimization", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + } + }, + "type": "object" + }, + "aws-native:transfer:ServerSetStatOption": { + "type": "string", + "enum": [ + { + "name": "Default", + "value": "DEFAULT" + }, + { + "name": "EnableNoOp", + "value": "ENABLE_NO_OP" + } + ] + }, + "aws-native:transfer:ServerSftpAuthenticationMethods": { + "type": "string", + "enum": [ + { + "name": "Password", + "value": "PASSWORD" + }, + { + "name": "PublicKey", + "value": "PUBLIC_KEY" + }, + { + "name": "PublicKeyOrPassword", + "value": "PUBLIC_KEY_OR_PASSWORD" + }, + { + "name": "PublicKeyAndPassword", + "value": "PUBLIC_KEY_AND_PASSWORD" + } + ] + }, + "aws-native:transfer:ServerTag": { + "properties": { + "key": { + "type": "string", + "description": "The name assigned to the tag that you create." + }, + "value": { + "type": "string", + "description": "Contains one or more values that you assigned to the key name you create." + } + }, + "type": "object", + "required": [ + "key", + "value" + ] + }, + "aws-native:transfer:ServerTlsSessionResumptionMode": { + "type": "string", + "enum": [ + { + "name": "Disabled", + "value": "DISABLED" + }, + { + "name": "Enabled", + "value": "ENABLED" + }, + { + "name": "Enforced", + "value": "ENFORCED" + } + ] + }, + "aws-native:transfer:ServerWorkflowDetail": { + "properties": { + "executionRole": { + "type": "string", + "description": "Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources" + }, + "workflowId": { + "type": "string", + "description": "A unique identifier for the workflow." + } + }, + "type": "object", + "required": [ + "executionRole", + "workflowId" + ] + }, + "aws-native:transfer:ServerWorkflowDetails": { + "properties": { + "onPartialUpload": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetail" + }, + "description": "A trigger that starts a workflow if a file is only partially uploaded. You can attach a workflow to a server that executes whenever there is a partial upload.\n\nA *partial upload* occurs when a file is open when the session disconnects.\n\n\u003e `OnPartialUpload` can contain a maximum of one `WorkflowDetail` object." 
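The Transfer Family types defined above (identity provider details, protocol details, S3 storage options, and workflow details) are all consumed by the `aws-native:transfer:Server` resource. The following is a minimal TypeScript sketch, in the same style as the Pulumi examples embedded elsewhere in this schema, of how those inputs fit together. It assumes the camelCase input names that pulumi-aws-native derives from these type definitions; the Lambda ARN, IAM role ARN, workflow ID, and the `ENABLED` directory-listing value are placeholder assumptions, not values taken from this document.

```typescript
import * as aws_native from "@pulumi/aws-native";

// Minimal sketch only: every ARN, workflow ID, and name below is a placeholder.
const server = new aws_native.transfer.Server("sftpServer", {
    endpointType: "PUBLIC",              // aws-native:transfer:ServerEndpointType
    protocols: ["SFTP"],                 // aws-native:transfer:ServerProtocol
    identityProviderType: "AWS_LAMBDA",  // aws-native:transfer:ServerIdentityProviderType
    identityProviderDetails: {
        // Custom identity provider backed by a Lambda function (placeholder ARN).
        function: "arn:aws:lambda:us-east-1:111111111111:function:transfer-auth",
        sftpAuthenticationMethods: "PUBLIC_KEY_OR_PASSWORD",
    },
    protocolDetails: {
        // Ignore SETSTAT from SFTP clients when uploading to S3-backed storage.
        setStatOption: "ENABLE_NO_OP",
    },
    s3StorageOptions: {
        directoryListingOptimization: "ENABLED",
    },
    workflowDetails: {
        // At most one WorkflowDetail object per trigger, per the descriptions above.
        onUpload: [{
            executionRole: "arn:aws:iam::111111111111:role/transfer-workflow-role",
            workflowId: "w-0123456789abcdef0",
        }],
    },
    tags: [{ key: "team", value: "data-transfer" }],
});

export const serverArn = server.arn;
```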
+ }, + "onUpload": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetail" + }, + "description": "A trigger that starts a workflow: the workflow begins to execute after a file is uploaded.\n\nTo remove an associated workflow from a server, you can provide an empty `OnUpload` object, as in the following example.\n\n`aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'`\n\n\u003e `OnUpload` can contain a maximum of one `WorkflowDetail` object." + } + }, + "type": "object" + }, "aws-native:transfer:SftpConfigProperties": { "description": "Configuration for an SFTP connector.", "properties": { @@ -164101,6 +164705,48 @@ "textTransformations" ] }, + "aws-native:wisdom:AiPromptAiPromptApiFormat": { + "type": "string", + "enum": [ + { + "name": "AnthropicClaudeMessages", + "value": "ANTHROPIC_CLAUDE_MESSAGES" + }, + { + "name": "AnthropicClaudeTextCompletions", + "value": "ANTHROPIC_CLAUDE_TEXT_COMPLETIONS" + } + ] + }, + "aws-native:wisdom:AiPromptAiPromptTemplateConfiguration": { + "type": "object" + }, + "aws-native:wisdom:AiPromptAiPromptTemplateType": { + "type": "string", + "enum": [ + { + "name": "Text", + "value": "TEXT" + } + ] + }, + "aws-native:wisdom:AiPromptAiPromptType": { + "type": "string", + "enum": [ + { + "name": "AnswerGeneration", + "value": "ANSWER_GENERATION" + }, + { + "name": "IntentLabelingGeneration", + "value": "INTENT_LABELING_GENERATION" + }, + { + "name": "QueryReformulation", + "value": "QUERY_REFORMULATION" + } + ] + }, "aws-native:wisdom:AssistantAssociationAssociationData": { "properties": { "knowledgeBaseId": { @@ -171799,6 +172445,134 @@ "instanceType" ] }, + "aws-native:appsync:DataSource": { + "description": "Resource Type definition for AWS::AppSync::DataSource\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var graphQlApiId = config.Require(\"graphQlApiId\");\n var dataSourceName = config.Require(\"dataSourceName\");\n var dataSourceDescription = config.Require(\"dataSourceDescription\");\n var serviceRoleArn = config.Require(\"serviceRoleArn\");\n var lambdaFunctionArn = config.Require(\"lambdaFunctionArn\");\n var dataSource = new AwsNative.AppSync.DataSource(\"dataSource\", new()\n {\n ApiId = graphQlApiId,\n Name = dataSourceName,\n Description = dataSourceDescription,\n Type = \"AWS_LAMBDA\",\n ServiceRoleArn = serviceRoleArn,\n LambdaConfig = new AwsNative.AppSync.Inputs.DataSourceLambdaConfigArgs\n {\n LambdaFunctionArn = lambdaFunctionArn,\n },\n });\n\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/appsync\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\tgraphQlApiId := cfg.Require(\"graphQlApiId\")\n\t\tdataSourceName := cfg.Require(\"dataSourceName\")\n\t\tdataSourceDescription := cfg.Require(\"dataSourceDescription\")\n\t\tserviceRoleArn := cfg.Require(\"serviceRoleArn\")\n\t\tlambdaFunctionArn := cfg.Require(\"lambdaFunctionArn\")\n\t\t_, err := appsync.NewDataSource(ctx, \"dataSource\", \u0026appsync.DataSourceArgs{\n\t\t\tApiId: pulumi.String(graphQlApiId),\n\t\t\tName: pulumi.String(dataSourceName),\n\t\t\tDescription: 
pulumi.String(dataSourceDescription),\n\t\t\tType: pulumi.String(\"AWS_LAMBDA\"),\n\t\t\tServiceRoleArn: pulumi.String(serviceRoleArn),\n\t\t\tLambdaConfig: \u0026appsync.DataSourceLambdaConfigArgs{\n\t\t\t\tLambdaFunctionArn: pulumi.String(lambdaFunctionArn),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst config = new pulumi.Config();\nconst graphQlApiId = config.require(\"graphQlApiId\");\nconst dataSourceName = config.require(\"dataSourceName\");\nconst dataSourceDescription = config.require(\"dataSourceDescription\");\nconst serviceRoleArn = config.require(\"serviceRoleArn\");\nconst lambdaFunctionArn = config.require(\"lambdaFunctionArn\");\nconst dataSource = new aws_native.appsync.DataSource(\"dataSource\", {\n apiId: graphQlApiId,\n name: dataSourceName,\n description: dataSourceDescription,\n type: \"AWS_LAMBDA\",\n serviceRoleArn: serviceRoleArn,\n lambdaConfig: {\n lambdaFunctionArn: lambdaFunctionArn,\n },\n});\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nconfig = pulumi.Config()\ngraph_ql_api_id = config.require(\"graphQlApiId\")\ndata_source_name = config.require(\"dataSourceName\")\ndata_source_description = config.require(\"dataSourceDescription\")\nservice_role_arn = config.require(\"serviceRoleArn\")\nlambda_function_arn = config.require(\"lambdaFunctionArn\")\ndata_source = aws_native.appsync.DataSource(\"dataSource\",\n api_id=graph_ql_api_id,\n name=data_source_name,\n description=data_source_description,\n type=\"AWS_LAMBDA\",\n service_role_arn=service_role_arn,\n lambda_config={\n \"lambda_function_arn\": lambda_function_arn,\n })\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var graphQlApiId = config.Require(\"graphQlApiId\");\n var dataSourceName = config.Require(\"dataSourceName\");\n var dataSourceDescription = config.Require(\"dataSourceDescription\");\n var serviceRoleArn = config.Require(\"serviceRoleArn\");\n var lambdaFunctionArn = config.Require(\"lambdaFunctionArn\");\n var dataSource = new AwsNative.AppSync.DataSource(\"dataSource\", new()\n {\n ApiId = graphQlApiId,\n Name = dataSourceName,\n Description = dataSourceDescription,\n Type = \"AWS_LAMBDA\",\n ServiceRoleArn = serviceRoleArn,\n LambdaConfig = new AwsNative.AppSync.Inputs.DataSourceLambdaConfigArgs\n {\n LambdaFunctionArn = lambdaFunctionArn,\n },\n });\n\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/appsync\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\tgraphQlApiId := cfg.Require(\"graphQlApiId\")\n\t\tdataSourceName := cfg.Require(\"dataSourceName\")\n\t\tdataSourceDescription := cfg.Require(\"dataSourceDescription\")\n\t\tserviceRoleArn := cfg.Require(\"serviceRoleArn\")\n\t\tlambdaFunctionArn := cfg.Require(\"lambdaFunctionArn\")\n\t\t_, err := appsync.NewDataSource(ctx, \"dataSource\", \u0026appsync.DataSourceArgs{\n\t\t\tApiId: pulumi.String(graphQlApiId),\n\t\t\tName: pulumi.String(dataSourceName),\n\t\t\tDescription: 
pulumi.String(dataSourceDescription),\n\t\t\tType: pulumi.String(\"AWS_LAMBDA\"),\n\t\t\tServiceRoleArn: pulumi.String(serviceRoleArn),\n\t\t\tLambdaConfig: \u0026appsync.DataSourceLambdaConfigArgs{\n\t\t\t\tLambdaFunctionArn: pulumi.String(lambdaFunctionArn),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst config = new pulumi.Config();\nconst graphQlApiId = config.require(\"graphQlApiId\");\nconst dataSourceName = config.require(\"dataSourceName\");\nconst dataSourceDescription = config.require(\"dataSourceDescription\");\nconst serviceRoleArn = config.require(\"serviceRoleArn\");\nconst lambdaFunctionArn = config.require(\"lambdaFunctionArn\");\nconst dataSource = new aws_native.appsync.DataSource(\"dataSource\", {\n apiId: graphQlApiId,\n name: dataSourceName,\n description: dataSourceDescription,\n type: \"AWS_LAMBDA\",\n serviceRoleArn: serviceRoleArn,\n lambdaConfig: {\n lambdaFunctionArn: lambdaFunctionArn,\n },\n});\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nconfig = pulumi.Config()\ngraph_ql_api_id = config.require(\"graphQlApiId\")\ndata_source_name = config.require(\"dataSourceName\")\ndata_source_description = config.require(\"dataSourceDescription\")\nservice_role_arn = config.require(\"serviceRoleArn\")\nlambda_function_arn = config.require(\"lambdaFunctionArn\")\ndata_source = aws_native.appsync.DataSource(\"dataSource\",\n api_id=graph_ql_api_id,\n name=data_source_name,\n description=data_source_description,\n type=\"AWS_LAMBDA\",\n service_role_arn=service_role_arn,\n lambda_config={\n \"lambda_function_arn\": lambda_function_arn,\n })\n\n```\n\n{{% /example %}}\n{{% /examples %}}\n", + "properties": { + "apiId": { + "type": "string", + "description": "Unique AWS AppSync GraphQL API identifier where this data source will be created.", + "replaceOnChanges": true + }, + "dataSourceArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the API key, such as arn:aws:appsync:us-east-1:123456789012:apis/graphqlapiid/datasources/datasourcename." + }, + "description": { + "type": "string", + "description": "The description of the data source." + }, + "dynamoDbConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceDynamoDbConfig", + "description": "AWS Region and TableName for an Amazon DynamoDB table in your account." + }, + "elasticsearchConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceElasticsearchConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service. This property is deprecated. For new data sources, use OpenSearchServiceConfig to specify an OpenSearch Service data source." + }, + "eventBridgeConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceEventBridgeConfig", + "description": "ARN for the EventBridge bus." + }, + "httpConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceHttpConfig", + "description": "Endpoints for an HTTP data source." + }, + "lambdaConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceLambdaConfig", + "description": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account." 
+ }, + "metricsConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceMetricsConfig", + "description": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` ." + }, + "name": { + "type": "string", + "description": "Friendly name for you to identify your AppSync data source after creation.", + "replaceOnChanges": true + }, + "openSearchServiceConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceOpenSearchServiceConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account." + }, + "relationalDatabaseConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceRelationalDatabaseConfig", + "description": "Relational Database configuration of the relational database data source." + }, + "serviceRoleArn": { + "type": "string", + "description": "The AWS Identity and Access Management service role ARN for the data source. The system assumes this role when accessing the data source." + }, + "type": { + "type": "string", + "description": "The type of the data source." + } + }, + "type": "object", + "required": [ + "apiId", + "dataSourceArn", + "name", + "type" + ], + "inputProperties": { + "apiId": { + "type": "string", + "description": "Unique AWS AppSync GraphQL API identifier where this data source will be created." + }, + "description": { + "type": "string", + "description": "The description of the data source." + }, + "dynamoDbConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceDynamoDbConfig", + "description": "AWS Region and TableName for an Amazon DynamoDB table in your account." + }, + "elasticsearchConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceElasticsearchConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service. This property is deprecated. For new data sources, use OpenSearchServiceConfig to specify an OpenSearch Service data source." + }, + "eventBridgeConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceEventBridgeConfig", + "description": "ARN for the EventBridge bus." + }, + "httpConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceHttpConfig", + "description": "Endpoints for an HTTP data source." + }, + "lambdaConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceLambdaConfig", + "description": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account." + }, + "metricsConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceMetricsConfig", + "description": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` ." + }, + "name": { + "type": "string", + "description": "Friendly name for you to identify your AppSync data source after creation." 
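The examples embedded in the `aws-native:appsync:DataSource` description above cover only an `AWS_LAMBDA` data source. As a hedged companion sketch, the TypeScript below wires up an `AMAZON_DYNAMODB` data source using the `dynamoDbConfig` and `metricsConfig` inputs defined here; the GraphQL API ID, service role ARN, region, and table name are assumed placeholder values read from config, not real resources.

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as aws_native from "@pulumi/aws-native";

// Sketch only: graphQlApiId, serviceRoleArn, and the table details are stand-in values.
const config = new pulumi.Config();
const graphQlApiId = config.require("graphQlApiId");
const serviceRoleArn = config.require("serviceRoleArn");

const tableDataSource = new aws_native.appsync.DataSource("tableDataSource", {
    apiId: graphQlApiId,
    name: "ordersTable",
    type: "AMAZON_DYNAMODB",
    serviceRoleArn: serviceRoleArn,
    dynamoDbConfig: {
        awsRegion: "us-east-1",
        tableName: "orders",
    },
    metricsConfig: "ENABLED", // honored only when the API uses PER_DATA_SOURCE_METRICS
});
```

As with the Lambda example in the description, `serviceRoleArn` is the IAM role AppSync assumes when it accesses the underlying table.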
+ }, + "openSearchServiceConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceOpenSearchServiceConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account." + }, + "relationalDatabaseConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceRelationalDatabaseConfig", + "description": "Relational Database configuration of the relational database data source." + }, + "serviceRoleArn": { + "type": "string", + "description": "The AWS Identity and Access Management service role ARN for the data source. The system assumes this role when accessing the data source." + }, + "type": { + "type": "string", + "description": "The type of the data source." + } + }, + "requiredInputs": [ + "apiId", + "type" + ] + }, "aws-native:appsync:DomainName": { "description": "Resource Type definition for AWS::AppSync::DomainName", "properties": { @@ -181041,11 +181815,11 @@ }, "computeType": { "$ref": "#/types/aws-native:codebuild:FleetComputeType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" + "description": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" }, "environmentType": { "$ref": "#/types/aws-native:codebuild:FleetEnvironmentType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." + "description": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." }, "fleetServiceRole": { "type": "string", @@ -181053,11 +181827,11 @@ }, "fleetVpcConfig": { "$ref": "#/types/aws-native:codebuild:FleetVpcConfig", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the VPC configuration that AWS CodeBuild accesses." + "description": "Information about the VPC configuration that AWS CodeBuild accesses." }, "imageId": { "type": "string", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe Amazon Machine Image (AMI) of the compute fleet." + "description": "The Amazon Machine Image (AMI) of the compute fleet." }, "name": { "type": "string", @@ -181086,11 +181860,11 @@ }, "computeType": { "$ref": "#/types/aws-native:codebuild:FleetComputeType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" + "description": "Information about the compute resources the compute fleet uses. 
Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" }, "environmentType": { "$ref": "#/types/aws-native:codebuild:FleetEnvironmentType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." + "description": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." }, "fleetServiceRole": { "type": "string", @@ -181098,11 +181872,11 @@ }, "fleetVpcConfig": { "$ref": "#/types/aws-native:codebuild:FleetVpcConfig", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the VPC configuration that AWS CodeBuild accesses." + "description": "Information about the VPC configuration that AWS CodeBuild accesses." }, "imageId": { "type": "string", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe Amazon Machine Image (AMI) of the compute fleet." + "description": "The Amazon Machine Image (AMI) of the compute fleet." }, "name": { "type": "string", @@ -182368,7 +183142,7 @@ "properties": { "accountRecoverySetting": { "$ref": "#/types/aws-native:cognito:UserPoolAccountRecoverySetting", - "description": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email." + "description": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email." }, "adminCreateUserConfig": { "$ref": "#/types/aws-native:cognito:UserPoolAdminCreateUserConfig", @@ -182379,7 +183153,7 @@ "items": { "type": "string" }, - "description": "Attributes supported as an alias for this user pool. 
Possible values: *phone_number* , *email* , or *preferred_username* .\n\n\u003e This user pool property cannot be updated." + "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* ." }, "arn": { "type": "string", @@ -182412,18 +183186,18 @@ }, "emailVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "emailVerificationSubject": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "enabledMfas": { "type": "array", "items": { "type": "string" }, - "description": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \"OFF\" and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \"OFF\". Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`" + "description": "Set enabled MFA options on a specified user pool. To disable all MFAs after it has been enabled, set `MfaConfiguration` to `OFF` and remove EnabledMfas. MFAs can only be all disabled if `MfaConfiguration` is `OFF` . After you enable `SMS_MFA` , you can only disable it by setting `MfaConfiguration` to `OFF` . Can be one of the following values:\n\n- `SMS_MFA` - Enables MFA with SMS for the user pool. To select this option, you must also provide values for `SmsConfiguration` .\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n- `EMAIL_OTP` - Enables MFA with email for the user pool. To select this option, you must provide values for `EmailConfiguration` and within those, set `EmailSendingAccount` to `DEVELOPER` .\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA` | `EMAIL_OTP`" }, "lambdaConfig": { "$ref": "#/types/aws-native:cognito:UserPoolLambdaConfig", @@ -182439,7 +183213,7 @@ }, "providerName": { "type": "string", - "description": "The provider name of the Amazon Cognito user pool, specified as a `String` ." + "description": "A friendly name for the IdP." }, "providerUrl": { "type": "string", @@ -182450,11 +183224,11 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolSchemaAttribute" }, - "description": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n\u003e During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute." + "description": "An array of schema attributes for the new user pool. 
These attributes can be standard or custom attributes." }, "smsAuthenticationMessage": { "type": "string", - "description": "A string representing the SMS authentication message." + "description": "The contents of the SMS authentication message." }, "smsConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolSmsConfiguration", @@ -182462,7 +183236,7 @@ }, "smsVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "userAttributeUpdateSettings": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeUpdateSettings", @@ -182492,11 +183266,11 @@ "items": { "type": "string" }, - "description": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated." + "description": "Specifies whether a user can use an email address or phone number as a username when they sign up." }, "usernameConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolUsernameConfiguration", - "description": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set." + "description": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) ." }, "verificationMessageTemplate": { "$ref": "#/types/aws-native:cognito:UserPoolVerificationMessageTemplate", @@ -182513,7 +183287,7 @@ "inputProperties": { "accountRecoverySetting": { "$ref": "#/types/aws-native:cognito:UserPoolAccountRecoverySetting", - "description": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email." + "description": "The available verified method a user can use to recover their password when they call `ForgotPassword` . 
You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email." }, "adminCreateUserConfig": { "$ref": "#/types/aws-native:cognito:UserPoolAdminCreateUserConfig", @@ -182524,7 +183298,7 @@ "items": { "type": "string" }, - "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n\u003e This user pool property cannot be updated." + "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* ." }, "autoVerifiedAttributes": { "type": "array", @@ -182553,18 +183327,18 @@ }, "emailVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "emailVerificationSubject": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "enabledMfas": { "type": "array", "items": { "type": "string" }, - "description": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \"OFF\" and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \"OFF\". Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`" + "description": "Set enabled MFA options on a specified user pool. To disable all MFAs after it has been enabled, set `MfaConfiguration` to `OFF` and remove EnabledMfas. MFAs can only be all disabled if `MfaConfiguration` is `OFF` . After you enable `SMS_MFA` , you can only disable it by setting `MfaConfiguration` to `OFF` . Can be one of the following values:\n\n- `SMS_MFA` - Enables MFA with SMS for the user pool. To select this option, you must also provide values for `SmsConfiguration` .\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n- `EMAIL_OTP` - Enables MFA with email for the user pool. 
To select this option, you must provide values for `EmailConfiguration` and within those, set `EmailSendingAccount` to `DEVELOPER` .\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA` | `EMAIL_OTP`" }, "lambdaConfig": { "$ref": "#/types/aws-native:cognito:UserPoolLambdaConfig", @@ -182583,11 +183357,11 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolSchemaAttribute" }, - "description": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n\u003e During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute." + "description": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes." }, "smsAuthenticationMessage": { "type": "string", - "description": "A string representing the SMS authentication message." + "description": "The contents of the SMS authentication message." }, "smsConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolSmsConfiguration", @@ -182595,7 +183369,7 @@ }, "smsVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "userAttributeUpdateSettings": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeUpdateSettings", @@ -182621,11 +183395,11 @@ "items": { "type": "string" }, - "description": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated." + "description": "Specifies whether a user can use an email address or phone number as a username when they sign up." }, "usernameConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolUsernameConfiguration", - "description": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set." + "description": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) ." }, "verificationMessageTemplate": { "$ref": "#/types/aws-native:cognito:UserPoolVerificationMessageTemplate", @@ -182638,7 +183412,7 @@ "properties": { "accessTokenValidity": { "type": "integer", - "description": "The access token time limit. 
After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours." + "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour." }, "allowedOAuthFlows": { "type": "array", @@ -182710,7 +183484,7 @@ }, "idTokenValidity": { "type": "integer", - "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours." + "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour." }, "logoutUrls": { "type": "array", @@ -182724,7 +183498,7 @@ }, "preventUserExistenceErrors": { "type": "string", - "description": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool." + "description": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. 
Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value." }, "readAttributes": { "type": "array", @@ -182735,7 +183509,7 @@ }, "refreshTokenValidity": { "type": "integer", - "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days." + "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days." }, "supportedIdentityProviders": { "type": "array", @@ -182771,7 +183545,7 @@ "inputProperties": { "accessTokenValidity": { "type": "integer", - "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours." + "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour." 
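The Cognito inputs described above (account recovery settings, enabled MFAs, username configuration, and the app client token-validity fields) combine roughly as in the following TypeScript sketch. It is illustrative only: the pool name, MFA choices, and token lifetimes are assumptions, not recommended settings, and the property names follow the camelCase forms defined in this schema.

```typescript
import * as aws_native from "@pulumi/aws-native";

// Sketch only: names and validity values are illustrative, not prescriptive.
const userPool = new aws_native.cognito.UserPool("examplePool", {
    userPoolName: "example-pool",
    accountRecoverySetting: {
        // Prefer verified email for ForgotPassword recovery.
        recoveryMechanisms: [{ name: "verified_email", priority: 1 }],
    },
    mfaConfiguration: "OPTIONAL",
    enabledMfas: ["SOFTWARE_TOKEN_MFA"],
    usernameConfiguration: { caseSensitive: false }, // immutable once set
    usernameAttributes: ["email"],
});

const appClient = new aws_native.cognito.UserPoolClient("webClient", {
    userPoolId: userPool.id,
    preventUserExistenceErrors: "ENABLED",
    // Lifetimes are interpreted through tokenValidityUnits.
    accessTokenValidity: 1,
    idTokenValidity: 1,
    refreshTokenValidity: 30,
    tokenValidityUnits: {
        accessToken: "hours",
        idToken: "hours",
        refreshToken: "days",
    },
});

export const appClientId = appClient.clientId;
```

If `tokenValidityUnits` is omitted, the defaults noted in the descriptions apply: hours for access and ID tokens, days for refresh tokens.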
}, "allowedOAuthFlows": { "type": "array", @@ -182835,7 +183609,7 @@ }, "idTokenValidity": { "type": "integer", - "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours." + "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour." }, "logoutUrls": { "type": "array", @@ -182846,7 +183620,7 @@ }, "preventUserExistenceErrors": { "type": "string", - "description": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool." + "description": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value." }, "readAttributes": { "type": "array", @@ -182857,7 +183631,7 @@ }, "refreshTokenValidity": { "type": "integer", - "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. 
If you do, Amazon Cognito overrides the value with the default value of 30 days." + "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days." }, "supportedIdentityProviders": { "type": "array", @@ -182903,12 +183677,12 @@ }, "domain": { "type": "string", - "description": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", + "description": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names.", "replaceOnChanges": true }, "userPoolId": { "type": "string", - "description": "The user pool ID for the user pool where you want to associate a user pool domain.", + "description": "The ID of the user pool that is associated with the custom domain whose certificate you're updating.", "replaceOnChanges": true } }, @@ -182926,11 +183700,11 @@ }, "domain": { "type": "string", - "description": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names." + "description": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names." }, "userPoolId": { "type": "string", - "description": "The user pool ID for the user pool where you want to associate a user pool domain." + "description": "The ID of the user pool that is associated with the custom domain whose certificate you're updating." } }, "requiredInputs": [ @@ -183078,7 +183852,7 @@ "properties": { "identifier": { "type": "string", - "description": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. 
For example: `https://my-weather-api.example.com` .", + "description": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens.", "replaceOnChanges": true }, "name": { @@ -183107,7 +183881,7 @@ "inputProperties": { "identifier": { "type": "string", - "description": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example: `https://my-weather-api.example.com` ." + "description": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens." }, "name": { "type": "string", @@ -183139,7 +183913,7 @@ }, "clientId": { "type": "string", - "description": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` ).", + "description": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings.", "replaceOnChanges": true }, "compromisedCredentialsRiskConfiguration": { @@ -183168,7 +183942,7 @@ }, "clientId": { "type": "string", - "description": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` )." + "description": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings." }, "compromisedCredentialsRiskConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolRiskConfigurationAttachmentCompromisedCredentialsRiskConfigurationType", @@ -183193,7 +183967,7 @@ "properties": { "clientId": { "type": "string", - "description": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` ).", + "description": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings..", "replaceOnChanges": true }, "css": { @@ -183214,7 +183988,7 @@ "inputProperties": { "clientId": { "type": "string", - "description": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` )." + "description": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings.." 
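The resource-server identifier description above (scopes appear in access tokens as `$resource-server-identifier/$scope`) can be exercised through `aws-native:cognito:UserPoolResourceServer`. A short hypothetical sketch: the user pool ID is a placeholder, and the `scopes` field names (`scopeName`, `scopeDescription`) are assumed to mirror the CloudFormation `ResourceServerScopeType`.

```typescript
import * as aws_native from "@pulumi/aws-native";

// With this identifier, a client granted the "read" scope carries
// "solar-system-data/read" in its access token.
const resourceServer = new aws_native.cognito.UserPoolResourceServer("dataApi", {
    userPoolId: "us-east-1_EXAMPLE",   // placeholder pool ID
    identifier: "solar-system-data",   // short identifiers keep access tokens small
    name: "Solar system data API",
    scopes: [
        { scopeName: "read", scopeDescription: "Read-only access to planetary data" },
    ],
});
```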
}, "css": { "type": "string", @@ -183264,7 +184038,7 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeType" }, - "description": "An array of name-value pairs that contain user attributes and attribute values.", + "description": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) either you should supply (in your call to `AdminCreateUser` ) or the user should supply (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent. Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter.", "replaceOnChanges": true }, "userPoolId": { @@ -183318,7 +184092,7 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeType" }, - "description": "An array of name-value pairs that contain user attributes and attribute values." + "description": "An array of name-value pairs that contain user attributes and attribute values to be set for the user to be created. You can create a user without specifying any attributes other than `Username` . However, any attributes that you specify as required (when creating a user pool or in the *Attributes* tab of the console) either you should supply (in your call to `AdminCreateUser` ) or the user should supply (when they sign up in response to your welcome message).\n\nFor custom attributes, you must prepend the `custom:` prefix to the attribute name.\n\nTo send a message inviting the user to sign up, you must specify the user's email address or phone number. You can do this in your call to AdminCreateUser or in the *Users* tab of the Amazon Cognito console for managing your user pools.\n\nIn your call to `AdminCreateUser` , you can set the `email_verified` attribute to `True` , and you can set the `phone_number_verified` attribute to `True` . You can also do this by calling [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) .\n\n- *email* : The email address of the user to whom the message that contains the code and username will be sent. 
Required if the `email_verified` attribute is set to `True` , or if `\"EMAIL\"` is specified in the `DesiredDeliveryMediums` parameter.\n- *phone_number* : The phone number of the user to whom the message that contains the code and username will be sent. Required if the `phone_number_verified` attribute is set to `True` , or if `\"SMS\"` is specified in the `DesiredDeliveryMediums` parameter." }, "userPoolId": { "type": "string", @@ -183355,6 +184129,7 @@ }, "username": { "type": "string", + "description": "The user's username.", "replaceOnChanges": true } }, @@ -183374,7 +184149,8 @@ "description": "The user pool ID for the user pool." }, "username": { - "type": "string" + "type": "string", + "description": "The user's username." } }, "requiredInputs": [ @@ -200779,6 +201555,7 @@ "items": { "$ref": "#/types/aws-native:ecs:TaskSetCapacityProviderStrategyItem" }, + "description": "The capacity provider strategy that are associated with the task set.", "replaceOnChanges": true }, "cluster": { @@ -200856,7 +201633,8 @@ "type": "array", "items": { "$ref": "#/types/aws-native:ecs:TaskSetCapacityProviderStrategyItem" - } + }, + "description": "The capacity provider strategy that are associated with the task set." }, "cluster": { "type": "string", @@ -207352,6 +208130,25 @@ "description": "Specifies whether the container group includes replica or daemon containers.", "replaceOnChanges": true }, + "sourceVersionNumber": { + "type": "integer", + "description": "A specific ContainerGroupDefinition version to be updated" + }, + "status": { + "$ref": "#/types/aws-native:gamelift:ContainerGroupDefinitionStatus", + "description": "A string indicating ContainerGroupDefinition status." + }, + "statusReason": { + "type": "string", + "description": "A string indicating the reason for ContainerGroupDefinition status." + }, + "supportContainerDefinitions": { + "type": "array", + "items": { + "$ref": "pulumi.json#/Any" + }, + "description": "A collection of support container definitions that define the containers in this group." + }, "tags": { "type": "array", "items": { @@ -207377,6 +208174,8 @@ "creationTime", "name", "operatingSystem", + "status", + "statusReason", "totalCpuLimit", "totalMemoryLimit" ], @@ -207400,6 +208199,17 @@ "$ref": "#/types/aws-native:gamelift:ContainerGroupDefinitionSchedulingStrategy", "description": "Specifies whether the container group includes replica or daemon containers." }, + "sourceVersionNumber": { + "type": "integer", + "description": "A specific ContainerGroupDefinition version to be updated" + }, + "supportContainerDefinitions": { + "type": "array", + "items": { + "$ref": "pulumi.json#/Any" + }, + "description": "A collection of support container definitions that define the containers in this group." + }, "tags": { "type": "array", "items": { @@ -209121,8 +209931,7 @@ "compatibility", "dataFormat", "initialSchemaVersionId", - "name", - "schemaDefinition" + "name" ], "inputProperties": { "checkpointVersion": { @@ -209163,8 +209972,7 @@ }, "requiredInputs": [ "compatibility", - "dataFormat", - "schemaDefinition" + "dataFormat" ] }, "aws-native:glue:SchemaVersion": { @@ -220437,7 +221245,7 @@ "items": { "$ref": "#/types/aws-native:index:Tag" }, - "description": "A list of tags to add to the event source mapping.\n\n\u003e You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. 
If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." + "description": "A list of tags to add to the event source mapping.\n You must have the ``lambda:TagResource``, ``lambda:UntagResource``, and ``lambda:ListTags`` permissions for your [principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the CFN stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." }, "topics": { "type": "array", @@ -220560,7 +221368,7 @@ "items": { "$ref": "#/types/aws-native:index:Tag" }, - "description": "A list of tags to add to the event source mapping.\n\n\u003e You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." + "description": "A list of tags to add to the event source mapping.\n You must have the ``lambda:TagResource``, ``lambda:UntagResource``, and ``lambda:ListTags`` permissions for your [principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the CFN stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." }, "topics": { "type": "array", @@ -221164,10 +221972,6 @@ "description": "The name of the Lambda function.", "replaceOnChanges": true }, - "policy": { - "$ref": "pulumi.json#/Any", - "description": "The resource policy of your function\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::Lambda::Version` for more information about the expected schema for this property." - }, "provisionedConcurrencyConfig": { "$ref": "#/types/aws-native:lambda:VersionProvisionedConcurrencyConfiguration", "description": "Specifies a provisioned concurrency configuration for a function's version. Updates are not supported for this property.", @@ -221207,10 +222011,6 @@ "type": "string", "description": "The name of the Lambda function." }, - "policy": { - "$ref": "pulumi.json#/Any", - "description": "The resource policy of your function\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::Lambda::Version` for more information about the expected schema for this property." - }, "provisionedConcurrencyConfig": { "$ref": "#/types/aws-native:lambda:VersionProvisionedConcurrencyConfiguration", "description": "Specifies a provisioned concurrency configuration for a function's version. Updates are not supported for this property." @@ -234045,7 +234845,7 @@ }, "type": { "$ref": "#/types/aws-native:organizations:PolicyType", - "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY", + "description": "The type of policy to create. 
You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY, CHATBOT_POLICY", "replaceOnChanges": true } }, @@ -234087,7 +234887,7 @@ }, "type": { "$ref": "#/types/aws-native:organizations:PolicyType", - "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY" + "description": "The type of policy to create. You can specify one of the following values: AISERVICES_OPT_OUT_POLICY, BACKUP_POLICY, SERVICE_CONTROL_POLICY, TAG_POLICY, CHATBOT_POLICY" } }, "requiredInputs": [ @@ -235931,7 +236731,7 @@ }, "configuration": { "$ref": "pulumi.json#/Any", - "description": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." + "description": "Configuration information to connect your data source repository to Amazon Q Business. Use this parameter to provide a JSON schema with configuration information specific to your data source connector.\n\nEach data source has a JSON schema provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas:\n\n- [Amazon S3 JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-api.html)\n- [Web Crawler JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-api.html)\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source of your choice.\n- Then, from your specific data source connector page, select *Using the API* . You will find the JSON schema for your data source, including parameter descriptions, in this section.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." }, "createdAt": { "type": "string", @@ -236014,7 +236814,7 @@ }, "configuration": { "$ref": "pulumi.json#/Any", - "description": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . 
You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." + "description": "Configuration information to connect your data source repository to Amazon Q Business. Use this parameter to provide a JSON schema with configuration information specific to your data source connector.\n\nEach data source has a JSON schema provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas:\n\n- [Amazon S3 JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-api.html)\n- [Web Crawler JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-api.html)\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source of your choice.\n- Then, from your specific data source connector page, select *Using the API* . You will find the JSON schema for your data source, including parameter descriptions, in this section.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." }, "description": { "type": "string", @@ -236453,6 +237253,12 @@ ], "description": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience." }, + "origins": { + "type": "array", + "items": { + "type": "string" + } + }, "roleArn": { "type": "string", "description": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n\u003e You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value." @@ -236523,6 +237329,12 @@ ], "description": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience." }, + "origins": { + "type": "array", + "items": { + "type": "string" + } + }, "roleArn": { "type": "string", "description": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n\u003e You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value." 
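To make the `configuration` contract above concrete: the property is schemaless from the provider's point of view, so you pass a plain object that must match the JSON schema Amazon Q Business publishes for your connector. A hypothetical sketch for an S3 connector follows; every identifier (application and index IDs, role ARN, bucket name) is a placeholder, and the connector-specific fields must be taken from the published S3 JSON schema rather than from this example.

```typescript
import * as aws_native from "@pulumi/aws-native";

const docsSource = new aws_native.qbusiness.DataSource("docsSource", {
    applicationId: "a1b2c3d4-0000-0000-0000-000000000000", // placeholder application ID
    indexId: "b1c2d3e4-0000-0000-0000-000000000000",       // placeholder index ID
    displayName: "docs-bucket",
    roleArn: "arn:aws:iam::111122223333:role/qbusiness-datasource-role", // placeholder role
    // Passed through as-is; the shape below is only illustrative and must
    // follow the S3 connector's published JSON schema.
    configuration: {
        type: "S3",
        syncMode: "FULL_CRAWL",
        connectionConfiguration: {
            repositoryEndpointMetadata: { BucketName: "my-docs-bucket" },
        },
        repositoryConfigurations: { document: { fieldMappings: [] } },
    },
});
```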
@@ -236692,6 +237504,12 @@ }, "description": "\u003cp\u003eErrors associated with the analysis.\u003c/p\u003e" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "lastUpdatedTime": { "type": "string", "description": "\u003cp\u003eThe time that the analysis was last updated.\u003c/p\u003e" @@ -236771,6 +237589,12 @@ }, "description": "\u003cp\u003eErrors associated with the analysis.\u003c/p\u003e" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "name": { "type": "string", "description": "\u003cp\u003eThe descriptive name of the analysis.\u003c/p\u003e" @@ -236850,6 +237674,12 @@ "definition": { "$ref": "#/types/aws-native:quicksight:DashboardVersionDefinition" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "lastPublishedTime": { "type": "string", "description": "\u003cp\u003eThe last time that this dashboard was published.\u003c/p\u003e" @@ -236938,6 +237768,12 @@ "definition": { "$ref": "#/types/aws-native:quicksight:DashboardVersionDefinition" }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "linkEntities": { "type": "array", "items": { @@ -237271,6 +238107,12 @@ "$ref": "#/types/aws-native:quicksight:DataSourceErrorInfo", "description": "Error information from the last update or the creation of the data source." }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "lastUpdatedTime": { "type": "string", "description": "\u003cp\u003eThe last time that this data source was updated.\u003c/p\u003e" @@ -237348,6 +238190,12 @@ "$ref": "#/types/aws-native:quicksight:DataSourceErrorInfo", "description": "Error information from the last update or the creation of the data source." }, + "folderArns": { + "type": "array", + "items": { + "type": "string" + } + }, "name": { "type": "string", "description": "A display name for the data source." @@ -237392,6 +238240,7 @@ }, "awsAccountId": { "type": "string", + "description": "The ID for the AWS account where you want to create the folder.", "replaceOnChanges": true }, "createdTime": { @@ -237418,14 +238267,15 @@ }, "parentFolderArn": { "type": "string", - "description": "A new parent folder arn. This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved.", + "description": "The Amazon Resource Name (ARN) for the folder.", "replaceOnChanges": true }, "permissions": { "type": "array", "items": { "$ref": "#/types/aws-native:quicksight:FolderResourcePermission" - } + }, + "description": "A structure that describes the principals and the resource-level permissions of a folder.\n\nTo specify no permissions, omit `Permissions` ." }, "sharingModel": { "$ref": "#/types/aws-native:quicksight:FolderSharingModel", @@ -237448,7 +238298,8 @@ ], "inputProperties": { "awsAccountId": { - "type": "string" + "type": "string", + "description": "The ID for the AWS account where you want to create the folder." }, "folderId": { "type": "string", @@ -237464,13 +238315,14 @@ }, "parentFolderArn": { "type": "string", - "description": "A new parent folder arn. This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved." + "description": "The Amazon Resource Name (ARN) for the folder." 
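The folder `permissions` note above ("to specify no permissions, omit `Permissions`") is easiest to see in a resource declaration. A minimal sketch: the account ID, folder ID, and principal ARN are placeholders, and the permission entry fields (`principal`, `actions`) are assumed to mirror the CloudFormation `ResourcePermission` type.

```typescript
import * as aws_native from "@pulumi/aws-native";

const reportsFolder = new aws_native.quicksight.Folder("reportsFolder", {
    awsAccountId: "111122223333",   // placeholder account ID
    folderId: "reports-folder",     // placeholder folder ID
    name: "Reports",
    // Drop the `permissions` block entirely to create the folder with no
    // resource-level permissions.
    permissions: [{
        principal: "arn:aws:quicksight:us-east-1:111122223333:user/default/jane.doe",
        actions: ["quicksight:DescribeFolder", "quicksight:CreateFolderMembership"],
    }],
});
```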
}, "permissions": { "type": "array", "items": { "$ref": "#/types/aws-native:quicksight:FolderResourcePermission" - } + }, + "description": "A structure that describes the principals and the resource-level permissions of a folder.\n\nTo specify no permissions, omit `Permissions` ." }, "sharingModel": { "$ref": "#/types/aws-native:quicksight:FolderSharingModel", @@ -238891,7 +239743,7 @@ }, "dbSnapshotIdentifier": { "type": "string", - "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``DeleteAutomatedBackups`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PerformanceInsightsKMSKeyId`` \n + ``PerformanceInsightsRetentionPeriod`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an encrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." + "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. 
If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an unencrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." }, "dbSubnetGroupName": { "type": "string", @@ -239246,7 +240098,7 @@ }, "dbSnapshotIdentifier": { "type": "string", - "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. 
If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``DeleteAutomatedBackups`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PerformanceInsightsKMSKeyId`` \n + ``PerformanceInsightsRetentionPeriod`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an encrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." + "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an unencrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." }, "dbSubnetGroupName": { "type": "string", @@ -247107,7 +247959,7 @@ }, "version": { "type": "integer", - "description": "The version number." + "description": "The version of the image." 
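Returning to the `dbSnapshotIdentifier` behavior described above: restoring from a snapshot means omitting the properties the description lists (for example `masterUsername`, `masterUserPassword`, `dbName`, and `kmsKeyId`), and keeping the same snapshot identifier on later updates. A minimal sketch against `aws-native:rds:DbInstance`; the snapshot identifier and instance class are placeholders.

```typescript
import * as aws_native from "@pulumi/aws-native";

// The snapshot must already exist; credentials, database name, and encryption
// settings come from the snapshot, so they are intentionally not set here.
const restored = new aws_native.rds.DbInstance("restoredInstance", {
    dbSnapshotIdentifier: "my-manual-snapshot", // placeholder snapshot name or ARN
    dbInstanceClass: "db.t3.micro",
});
```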
} }, "type": "object", @@ -252255,7 +253107,7 @@ ] }, "aws-native:sqs:Queue": { - "description": "The ``AWS::SQS::Queue`` resource creates an SQS standard or FIFO queue.\n Keep the following caveats in mind:\n + If you don't specify the ``FifoQueue`` property, SQS creates a standard queue.\n You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see [Moving from a standard queue to a FIFO queue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-moving.html) in the *Developer Guide*. \n + If you don't provide a value for a property, the queue is created with the default value for the property.\n + If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n + To successfully create a new queue, you must provide a queue name that adheres to the [limits related to queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) and is unique within the scope of your queues.\n \n For more information about creating FIFO (first-in-first-out) queues, see [Creating an queue ()](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/screate-queue-cloudformation.html) in the *Developer Guide*.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var alarmEmail = config.Get(\"alarmEmail\") ?? \"jane.doe@example.com\";\n var myQueue = new AwsNative.Sqs.Queue(\"myQueue\", new()\n {\n QueueName = \"SampleQueue\",\n });\n\n var alarmTopic = new AwsNative.Sns.Topic(\"alarmTopic\", new()\n {\n Subscription = new[]\n {\n new AwsNative.Sns.Inputs.TopicSubscriptionArgs\n {\n Endpoint = alarmEmail,\n Protocol = \"email\",\n },\n },\n });\n\n var queueDepthAlarm = new AwsNative.CloudWatch.Alarm(\"queueDepthAlarm\", new()\n {\n AlarmDescription = \"Alarm if queue depth increases to more than 10 messages\",\n Namespace = \"AWS/SQS\",\n MetricName = \"ApproximateNumberOfMessagesVisible\",\n Dimensions = new[]\n {\n new AwsNative.CloudWatch.Inputs.AlarmDimensionArgs\n {\n Name = \"QueueName\",\n Value = myQueue.QueueName,\n },\n },\n Statistic = \"Sum\",\n Period = 300,\n EvaluationPeriods = 1,\n Threshold = 10,\n ComparisonOperator = \"GreaterThanThreshold\",\n AlarmActions = new[]\n {\n alarmTopic.Id,\n },\n InsufficientDataActions = new[]\n {\n alarmTopic.Id,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"queueURL\"] = myQueue.Id,\n [\"queueARN\"] = myQueue.Arn,\n [\"queueName\"] = myQueue.QueueName,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/cloudwatch\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sns\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\talarmEmail := \"jane.doe@example.com\"\n\t\tif param := cfg.Get(\"alarmEmail\"); param != \"\" {\n\t\t\talarmEmail = param\n\t\t}\n\t\tmyQueue, err := sqs.NewQueue(ctx, \"myQueue\", 
\u0026sqs.QueueArgs{\n\t\t\tQueueName: pulumi.String(\"SampleQueue\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\talarmTopic, err := sns.NewTopic(ctx, \"alarmTopic\", \u0026sns.TopicArgs{\n\t\t\tSubscription: sns.TopicSubscriptionArray{\n\t\t\t\t\u0026sns.TopicSubscriptionArgs{\n\t\t\t\t\tEndpoint: pulumi.String(alarmEmail),\n\t\t\t\t\tProtocol: pulumi.String(\"email\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudwatch.NewAlarm(ctx, \"queueDepthAlarm\", \u0026cloudwatch.AlarmArgs{\n\t\t\tAlarmDescription: pulumi.String(\"Alarm if queue depth increases to more than 10 messages\"),\n\t\t\tNamespace: pulumi.String(\"AWS/SQS\"),\n\t\t\tMetricName: pulumi.String(\"ApproximateNumberOfMessagesVisible\"),\n\t\t\tDimensions: cloudwatch.AlarmDimensionArray{\n\t\t\t\t\u0026cloudwatch.AlarmDimensionArgs{\n\t\t\t\t\tName: pulumi.String(\"QueueName\"),\n\t\t\t\t\tValue: myQueue.QueueName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatistic: pulumi.String(\"Sum\"),\n\t\t\tPeriod: pulumi.Int(300),\n\t\t\tEvaluationPeriods: pulumi.Int(1),\n\t\t\tThreshold: pulumi.Float64(10),\n\t\t\tComparisonOperator: pulumi.String(\"GreaterThanThreshold\"),\n\t\t\tAlarmActions: pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t\tInsufficientDataActions: pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"queueURL\", myQueue.ID())\n\t\tctx.Export(\"queueARN\", myQueue.Arn)\n\t\tctx.Export(\"queueName\", myQueue.QueueName)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst config = new pulumi.Config();\nconst alarmEmail = config.get(\"alarmEmail\") || \"jane.doe@example.com\";\nconst myQueue = new aws_native.sqs.Queue(\"myQueue\", {queueName: \"SampleQueue\"});\nconst alarmTopic = new aws_native.sns.Topic(\"alarmTopic\", {subscription: [{\n endpoint: alarmEmail,\n protocol: \"email\",\n}]});\nconst queueDepthAlarm = new aws_native.cloudwatch.Alarm(\"queueDepthAlarm\", {\n alarmDescription: \"Alarm if queue depth increases to more than 10 messages\",\n namespace: \"AWS/SQS\",\n metricName: \"ApproximateNumberOfMessagesVisible\",\n dimensions: [{\n name: \"QueueName\",\n value: myQueue.queueName,\n }],\n statistic: \"Sum\",\n period: 300,\n evaluationPeriods: 1,\n threshold: 10,\n comparisonOperator: \"GreaterThanThreshold\",\n alarmActions: [alarmTopic.id],\n insufficientDataActions: [alarmTopic.id],\n});\nexport const queueURL = myQueue.id;\nexport const queueARN = myQueue.arn;\nexport const queueName = myQueue.queueName;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nconfig = pulumi.Config()\nalarm_email = config.get(\"alarmEmail\")\nif alarm_email is None:\n alarm_email = \"jane.doe@example.com\"\nmy_queue = aws_native.sqs.Queue(\"myQueue\", queue_name=\"SampleQueue\")\nalarm_topic = aws_native.sns.Topic(\"alarmTopic\", subscription=[{\n \"endpoint\": alarm_email,\n \"protocol\": \"email\",\n}])\nqueue_depth_alarm = aws_native.cloudwatch.Alarm(\"queueDepthAlarm\",\n alarm_description=\"Alarm if queue depth increases to more than 10 messages\",\n namespace=\"AWS/SQS\",\n metric_name=\"ApproximateNumberOfMessagesVisible\",\n dimensions=[{\n \"name\": \"QueueName\",\n \"value\": my_queue.queue_name,\n }],\n statistic=\"Sum\",\n period=300,\n evaluation_periods=1,\n threshold=10,\n comparison_operator=\"GreaterThanThreshold\",\n 
alarm_actions=[alarm_topic.id],\n insufficient_data_actions=[alarm_topic.id])\npulumi.export(\"queueURL\", my_queue.id)\npulumi.export(\"queueARN\", my_queue.arn)\npulumi.export(\"queueName\", my_queue.queue_name)\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var alarmEmail = config.Get(\"alarmEmail\") ?? \"jane.doe@example.com\";\n var myQueue = new AwsNative.Sqs.Queue(\"myQueue\", new()\n {\n QueueName = \"SampleQueue\",\n });\n\n var alarmTopic = new AwsNative.Sns.Topic(\"alarmTopic\", new()\n {\n Subscription = new[]\n {\n new AwsNative.Sns.Inputs.TopicSubscriptionArgs\n {\n Endpoint = alarmEmail,\n Protocol = \"email\",\n },\n },\n });\n\n var queueDepthAlarm = new AwsNative.CloudWatch.Alarm(\"queueDepthAlarm\", new()\n {\n AlarmDescription = \"Alarm if queue depth increases to more than 10 messages\",\n Namespace = \"AWS/SQS\",\n MetricName = \"ApproximateNumberOfMessagesVisible\",\n Dimensions = new[]\n {\n new AwsNative.CloudWatch.Inputs.AlarmDimensionArgs\n {\n Name = \"QueueName\",\n Value = myQueue.QueueName,\n },\n },\n Statistic = \"Sum\",\n Period = 300,\n EvaluationPeriods = 1,\n Threshold = 10,\n ComparisonOperator = \"GreaterThanThreshold\",\n AlarmActions = new[]\n {\n alarmTopic.Id,\n },\n InsufficientDataActions = new[]\n {\n alarmTopic.Id,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"queueURL\"] = myQueue.Id,\n [\"queueARN\"] = myQueue.Arn,\n [\"queueName\"] = myQueue.QueueName,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/cloudwatch\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sns\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\talarmEmail := \"jane.doe@example.com\"\n\t\tif param := cfg.Get(\"alarmEmail\"); param != \"\" {\n\t\t\talarmEmail = param\n\t\t}\n\t\tmyQueue, err := sqs.NewQueue(ctx, \"myQueue\", \u0026sqs.QueueArgs{\n\t\t\tQueueName: pulumi.String(\"SampleQueue\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\talarmTopic, err := sns.NewTopic(ctx, \"alarmTopic\", \u0026sns.TopicArgs{\n\t\t\tSubscription: sns.TopicSubscriptionArray{\n\t\t\t\t\u0026sns.TopicSubscriptionArgs{\n\t\t\t\t\tEndpoint: pulumi.String(alarmEmail),\n\t\t\t\t\tProtocol: pulumi.String(\"email\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudwatch.NewAlarm(ctx, \"queueDepthAlarm\", \u0026cloudwatch.AlarmArgs{\n\t\t\tAlarmDescription: pulumi.String(\"Alarm if queue depth increases to more than 10 messages\"),\n\t\t\tNamespace: pulumi.String(\"AWS/SQS\"),\n\t\t\tMetricName: pulumi.String(\"ApproximateNumberOfMessagesVisible\"),\n\t\t\tDimensions: cloudwatch.AlarmDimensionArray{\n\t\t\t\t\u0026cloudwatch.AlarmDimensionArgs{\n\t\t\t\t\tName: pulumi.String(\"QueueName\"),\n\t\t\t\t\tValue: myQueue.QueueName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatistic: pulumi.String(\"Sum\"),\n\t\t\tPeriod: pulumi.Int(300),\n\t\t\tEvaluationPeriods: pulumi.Int(1),\n\t\t\tThreshold: pulumi.Float64(10),\n\t\t\tComparisonOperator: pulumi.String(\"GreaterThanThreshold\"),\n\t\t\tAlarmActions: 
pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t\tInsufficientDataActions: pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"queueURL\", myQueue.ID())\n\t\tctx.Export(\"queueARN\", myQueue.Arn)\n\t\tctx.Export(\"queueName\", myQueue.QueueName)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst config = new pulumi.Config();\nconst alarmEmail = config.get(\"alarmEmail\") || \"jane.doe@example.com\";\nconst myQueue = new aws_native.sqs.Queue(\"myQueue\", {queueName: \"SampleQueue\"});\nconst alarmTopic = new aws_native.sns.Topic(\"alarmTopic\", {subscription: [{\n endpoint: alarmEmail,\n protocol: \"email\",\n}]});\nconst queueDepthAlarm = new aws_native.cloudwatch.Alarm(\"queueDepthAlarm\", {\n alarmDescription: \"Alarm if queue depth increases to more than 10 messages\",\n namespace: \"AWS/SQS\",\n metricName: \"ApproximateNumberOfMessagesVisible\",\n dimensions: [{\n name: \"QueueName\",\n value: myQueue.queueName,\n }],\n statistic: \"Sum\",\n period: 300,\n evaluationPeriods: 1,\n threshold: 10,\n comparisonOperator: \"GreaterThanThreshold\",\n alarmActions: [alarmTopic.id],\n insufficientDataActions: [alarmTopic.id],\n});\nexport const queueURL = myQueue.id;\nexport const queueARN = myQueue.arn;\nexport const queueName = myQueue.queueName;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nconfig = pulumi.Config()\nalarm_email = config.get(\"alarmEmail\")\nif alarm_email is None:\n alarm_email = \"jane.doe@example.com\"\nmy_queue = aws_native.sqs.Queue(\"myQueue\", queue_name=\"SampleQueue\")\nalarm_topic = aws_native.sns.Topic(\"alarmTopic\", subscription=[{\n \"endpoint\": alarm_email,\n \"protocol\": \"email\",\n}])\nqueue_depth_alarm = aws_native.cloudwatch.Alarm(\"queueDepthAlarm\",\n alarm_description=\"Alarm if queue depth increases to more than 10 messages\",\n namespace=\"AWS/SQS\",\n metric_name=\"ApproximateNumberOfMessagesVisible\",\n dimensions=[{\n \"name\": \"QueueName\",\n \"value\": my_queue.queue_name,\n }],\n statistic=\"Sum\",\n period=300,\n evaluation_periods=1,\n threshold=10,\n comparison_operator=\"GreaterThanThreshold\",\n alarm_actions=[alarm_topic.id],\n insufficient_data_actions=[alarm_topic.id])\npulumi.export(\"queueURL\", my_queue.id)\npulumi.export(\"queueARN\", my_queue.arn)\npulumi.export(\"queueName\", my_queue.queue_name)\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myDeadLetterQueue = new AwsNative.Sqs.Queue(\"myDeadLetterQueue\");\n\n var mySourceQueue = new AwsNative.Sqs.Queue(\"mySourceQueue\", new()\n {\n RedrivePolicy = new Dictionary\u003cstring, object?\u003e\n {\n [\"deadLetterTargetArn\"] = myDeadLetterQueue.Arn,\n [\"maxReceiveCount\"] = 5,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"sourceQueueURL\"] = mySourceQueue.Id,\n [\"sourceQueueARN\"] = mySourceQueue.Arn,\n [\"deadLetterQueueURL\"] = myDeadLetterQueue.Id,\n [\"deadLetterQueueARN\"] = myDeadLetterQueue.Arn,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error 
{\n\t\tmyDeadLetterQueue, err := sqs.NewQueue(ctx, \"myDeadLetterQueue\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmySourceQueue, err := sqs.NewQueue(ctx, \"mySourceQueue\", \u0026sqs.QueueArgs{\n\t\t\tRedrivePolicy: pulumi.Any(map[string]interface{}{\n\t\t\t\t\"deadLetterTargetArn\": myDeadLetterQueue.Arn,\n\t\t\t\t\"maxReceiveCount\": 5,\n\t\t\t}),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"sourceQueueURL\", mySourceQueue.ID())\n\t\tctx.Export(\"sourceQueueARN\", mySourceQueue.Arn)\n\t\tctx.Export(\"deadLetterQueueURL\", myDeadLetterQueue.ID())\n\t\tctx.Export(\"deadLetterQueueARN\", myDeadLetterQueue.Arn)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst myDeadLetterQueue = new aws_native.sqs.Queue(\"myDeadLetterQueue\", {});\nconst mySourceQueue = new aws_native.sqs.Queue(\"mySourceQueue\", {redrivePolicy: {\n deadLetterTargetArn: myDeadLetterQueue.arn,\n maxReceiveCount: 5,\n}});\nexport const sourceQueueURL = mySourceQueue.id;\nexport const sourceQueueARN = mySourceQueue.arn;\nexport const deadLetterQueueURL = myDeadLetterQueue.id;\nexport const deadLetterQueueARN = myDeadLetterQueue.arn;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nmy_dead_letter_queue = aws_native.sqs.Queue(\"myDeadLetterQueue\")\nmy_source_queue = aws_native.sqs.Queue(\"mySourceQueue\", redrive_policy={\n \"deadLetterTargetArn\": my_dead_letter_queue.arn,\n \"maxReceiveCount\": 5,\n})\npulumi.export(\"sourceQueueURL\", my_source_queue.id)\npulumi.export(\"sourceQueueARN\", my_source_queue.arn)\npulumi.export(\"deadLetterQueueURL\", my_dead_letter_queue.id)\npulumi.export(\"deadLetterQueueARN\", my_dead_letter_queue.arn)\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myDeadLetterQueue = new AwsNative.Sqs.Queue(\"myDeadLetterQueue\");\n\n var mySourceQueue = new AwsNative.Sqs.Queue(\"mySourceQueue\", new()\n {\n RedrivePolicy = new Dictionary\u003cstring, object?\u003e\n {\n [\"deadLetterTargetArn\"] = myDeadLetterQueue.Arn,\n [\"maxReceiveCount\"] = 5,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"sourceQueueURL\"] = mySourceQueue.Id,\n [\"sourceQueueARN\"] = mySourceQueue.Arn,\n [\"deadLetterQueueURL\"] = myDeadLetterQueue.Id,\n [\"deadLetterQueueARN\"] = myDeadLetterQueue.Arn,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmyDeadLetterQueue, err := sqs.NewQueue(ctx, \"myDeadLetterQueue\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmySourceQueue, err := sqs.NewQueue(ctx, \"mySourceQueue\", \u0026sqs.QueueArgs{\n\t\t\tRedrivePolicy: pulumi.Any(map[string]interface{}{\n\t\t\t\t\"deadLetterTargetArn\": myDeadLetterQueue.Arn,\n\t\t\t\t\"maxReceiveCount\": 5,\n\t\t\t}),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"sourceQueueURL\", mySourceQueue.ID())\n\t\tctx.Export(\"sourceQueueARN\", mySourceQueue.Arn)\n\t\tctx.Export(\"deadLetterQueueURL\", myDeadLetterQueue.ID())\n\t\tctx.Export(\"deadLetterQueueARN\", myDeadLetterQueue.Arn)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from 
\"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst myDeadLetterQueue = new aws_native.sqs.Queue(\"myDeadLetterQueue\", {});\nconst mySourceQueue = new aws_native.sqs.Queue(\"mySourceQueue\", {redrivePolicy: {\n deadLetterTargetArn: myDeadLetterQueue.arn,\n maxReceiveCount: 5,\n}});\nexport const sourceQueueURL = mySourceQueue.id;\nexport const sourceQueueARN = mySourceQueue.arn;\nexport const deadLetterQueueURL = myDeadLetterQueue.id;\nexport const deadLetterQueueARN = myDeadLetterQueue.arn;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nmy_dead_letter_queue = aws_native.sqs.Queue(\"myDeadLetterQueue\")\nmy_source_queue = aws_native.sqs.Queue(\"mySourceQueue\", redrive_policy={\n \"deadLetterTargetArn\": my_dead_letter_queue.arn,\n \"maxReceiveCount\": 5,\n})\npulumi.export(\"sourceQueueURL\", my_source_queue.id)\npulumi.export(\"sourceQueueARN\", my_source_queue.arn)\npulumi.export(\"deadLetterQueueURL\", my_dead_letter_queue.id)\npulumi.export(\"deadLetterQueueARN\", my_dead_letter_queue.arn)\n\n```\n\n{{% /example %}}\n{{% /examples %}}\n", + "description": "The ``AWS::SQS::Queue`` resource creates an SQS standard or FIFO queue.\n Keep the following caveats in mind:\n + If you don't specify the ``FifoQueue`` property, SQS creates a standard queue.\n You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see [Moving from a standard queue to a FIFO queue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-moving.html) in the *Developer Guide*. \n + If you don't provide a value for a property, the queue is created with the default value for the property.\n + If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n + To successfully create a new queue, you must provide a queue name that adheres to the [limits related to queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) and is unique within the scope of your queues.\n \n For more information about creating FIFO (first-in-first-out) queues, see [Creating an queue ()](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/create-queue-cloudformation.html) in the *Developer Guide*.\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var alarmEmail = config.Get(\"alarmEmail\") ?? 
\"jane.doe@example.com\";\n var myQueue = new AwsNative.Sqs.Queue(\"myQueue\", new()\n {\n QueueName = \"SampleQueue\",\n });\n\n var alarmTopic = new AwsNative.Sns.Topic(\"alarmTopic\", new()\n {\n Subscription = new[]\n {\n new AwsNative.Sns.Inputs.TopicSubscriptionArgs\n {\n Endpoint = alarmEmail,\n Protocol = \"email\",\n },\n },\n });\n\n var queueDepthAlarm = new AwsNative.CloudWatch.Alarm(\"queueDepthAlarm\", new()\n {\n AlarmDescription = \"Alarm if queue depth increases to more than 10 messages\",\n Namespace = \"AWS/SQS\",\n MetricName = \"ApproximateNumberOfMessagesVisible\",\n Dimensions = new[]\n {\n new AwsNative.CloudWatch.Inputs.AlarmDimensionArgs\n {\n Name = \"QueueName\",\n Value = myQueue.QueueName,\n },\n },\n Statistic = \"Sum\",\n Period = 300,\n EvaluationPeriods = 1,\n Threshold = 10,\n ComparisonOperator = \"GreaterThanThreshold\",\n AlarmActions = new[]\n {\n alarmTopic.Id,\n },\n InsufficientDataActions = new[]\n {\n alarmTopic.Id,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"queueURL\"] = myQueue.Id,\n [\"queueARN\"] = myQueue.Arn,\n [\"queueName\"] = myQueue.QueueName,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/cloudwatch\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sns\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\talarmEmail := \"jane.doe@example.com\"\n\t\tif param := cfg.Get(\"alarmEmail\"); param != \"\" {\n\t\t\talarmEmail = param\n\t\t}\n\t\tmyQueue, err := sqs.NewQueue(ctx, \"myQueue\", \u0026sqs.QueueArgs{\n\t\t\tQueueName: pulumi.String(\"SampleQueue\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\talarmTopic, err := sns.NewTopic(ctx, \"alarmTopic\", \u0026sns.TopicArgs{\n\t\t\tSubscription: sns.TopicSubscriptionArray{\n\t\t\t\t\u0026sns.TopicSubscriptionArgs{\n\t\t\t\t\tEndpoint: pulumi.String(alarmEmail),\n\t\t\t\t\tProtocol: pulumi.String(\"email\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudwatch.NewAlarm(ctx, \"queueDepthAlarm\", \u0026cloudwatch.AlarmArgs{\n\t\t\tAlarmDescription: pulumi.String(\"Alarm if queue depth increases to more than 10 messages\"),\n\t\t\tNamespace: pulumi.String(\"AWS/SQS\"),\n\t\t\tMetricName: pulumi.String(\"ApproximateNumberOfMessagesVisible\"),\n\t\t\tDimensions: cloudwatch.AlarmDimensionArray{\n\t\t\t\t\u0026cloudwatch.AlarmDimensionArgs{\n\t\t\t\t\tName: pulumi.String(\"QueueName\"),\n\t\t\t\t\tValue: myQueue.QueueName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatistic: pulumi.String(\"Sum\"),\n\t\t\tPeriod: pulumi.Int(300),\n\t\t\tEvaluationPeriods: pulumi.Int(1),\n\t\t\tThreshold: pulumi.Float64(10),\n\t\t\tComparisonOperator: pulumi.String(\"GreaterThanThreshold\"),\n\t\t\tAlarmActions: pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t\tInsufficientDataActions: pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"queueURL\", myQueue.ID())\n\t\tctx.Export(\"queueARN\", myQueue.Arn)\n\t\tctx.Export(\"queueName\", myQueue.QueueName)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst config = new pulumi.Config();\nconst alarmEmail = 
config.get(\"alarmEmail\") || \"jane.doe@example.com\";\nconst myQueue = new aws_native.sqs.Queue(\"myQueue\", {queueName: \"SampleQueue\"});\nconst alarmTopic = new aws_native.sns.Topic(\"alarmTopic\", {subscription: [{\n endpoint: alarmEmail,\n protocol: \"email\",\n}]});\nconst queueDepthAlarm = new aws_native.cloudwatch.Alarm(\"queueDepthAlarm\", {\n alarmDescription: \"Alarm if queue depth increases to more than 10 messages\",\n namespace: \"AWS/SQS\",\n metricName: \"ApproximateNumberOfMessagesVisible\",\n dimensions: [{\n name: \"QueueName\",\n value: myQueue.queueName,\n }],\n statistic: \"Sum\",\n period: 300,\n evaluationPeriods: 1,\n threshold: 10,\n comparisonOperator: \"GreaterThanThreshold\",\n alarmActions: [alarmTopic.id],\n insufficientDataActions: [alarmTopic.id],\n});\nexport const queueURL = myQueue.id;\nexport const queueARN = myQueue.arn;\nexport const queueName = myQueue.queueName;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nconfig = pulumi.Config()\nalarm_email = config.get(\"alarmEmail\")\nif alarm_email is None:\n alarm_email = \"jane.doe@example.com\"\nmy_queue = aws_native.sqs.Queue(\"myQueue\", queue_name=\"SampleQueue\")\nalarm_topic = aws_native.sns.Topic(\"alarmTopic\", subscription=[{\n \"endpoint\": alarm_email,\n \"protocol\": \"email\",\n}])\nqueue_depth_alarm = aws_native.cloudwatch.Alarm(\"queueDepthAlarm\",\n alarm_description=\"Alarm if queue depth increases to more than 10 messages\",\n namespace=\"AWS/SQS\",\n metric_name=\"ApproximateNumberOfMessagesVisible\",\n dimensions=[{\n \"name\": \"QueueName\",\n \"value\": my_queue.queue_name,\n }],\n statistic=\"Sum\",\n period=300,\n evaluation_periods=1,\n threshold=10,\n comparison_operator=\"GreaterThanThreshold\",\n alarm_actions=[alarm_topic.id],\n insufficient_data_actions=[alarm_topic.id])\npulumi.export(\"queueURL\", my_queue.id)\npulumi.export(\"queueARN\", my_queue.arn)\npulumi.export(\"queueName\", my_queue.queue_name)\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var config = new Config();\n var alarmEmail = config.Get(\"alarmEmail\") ?? 
\"jane.doe@example.com\";\n var myQueue = new AwsNative.Sqs.Queue(\"myQueue\", new()\n {\n QueueName = \"SampleQueue\",\n });\n\n var alarmTopic = new AwsNative.Sns.Topic(\"alarmTopic\", new()\n {\n Subscription = new[]\n {\n new AwsNative.Sns.Inputs.TopicSubscriptionArgs\n {\n Endpoint = alarmEmail,\n Protocol = \"email\",\n },\n },\n });\n\n var queueDepthAlarm = new AwsNative.CloudWatch.Alarm(\"queueDepthAlarm\", new()\n {\n AlarmDescription = \"Alarm if queue depth increases to more than 10 messages\",\n Namespace = \"AWS/SQS\",\n MetricName = \"ApproximateNumberOfMessagesVisible\",\n Dimensions = new[]\n {\n new AwsNative.CloudWatch.Inputs.AlarmDimensionArgs\n {\n Name = \"QueueName\",\n Value = myQueue.QueueName,\n },\n },\n Statistic = \"Sum\",\n Period = 300,\n EvaluationPeriods = 1,\n Threshold = 10,\n ComparisonOperator = \"GreaterThanThreshold\",\n AlarmActions = new[]\n {\n alarmTopic.Id,\n },\n InsufficientDataActions = new[]\n {\n alarmTopic.Id,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"queueURL\"] = myQueue.Id,\n [\"queueARN\"] = myQueue.Arn,\n [\"queueName\"] = myQueue.QueueName,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/cloudwatch\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sns\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tcfg := config.New(ctx, \"\")\n\t\talarmEmail := \"jane.doe@example.com\"\n\t\tif param := cfg.Get(\"alarmEmail\"); param != \"\" {\n\t\t\talarmEmail = param\n\t\t}\n\t\tmyQueue, err := sqs.NewQueue(ctx, \"myQueue\", \u0026sqs.QueueArgs{\n\t\t\tQueueName: pulumi.String(\"SampleQueue\"),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\talarmTopic, err := sns.NewTopic(ctx, \"alarmTopic\", \u0026sns.TopicArgs{\n\t\t\tSubscription: sns.TopicSubscriptionArray{\n\t\t\t\t\u0026sns.TopicSubscriptionArgs{\n\t\t\t\t\tEndpoint: pulumi.String(alarmEmail),\n\t\t\t\t\tProtocol: pulumi.String(\"email\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = cloudwatch.NewAlarm(ctx, \"queueDepthAlarm\", \u0026cloudwatch.AlarmArgs{\n\t\t\tAlarmDescription: pulumi.String(\"Alarm if queue depth increases to more than 10 messages\"),\n\t\t\tNamespace: pulumi.String(\"AWS/SQS\"),\n\t\t\tMetricName: pulumi.String(\"ApproximateNumberOfMessagesVisible\"),\n\t\t\tDimensions: cloudwatch.AlarmDimensionArray{\n\t\t\t\t\u0026cloudwatch.AlarmDimensionArgs{\n\t\t\t\t\tName: pulumi.String(\"QueueName\"),\n\t\t\t\t\tValue: myQueue.QueueName,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStatistic: pulumi.String(\"Sum\"),\n\t\t\tPeriod: pulumi.Int(300),\n\t\t\tEvaluationPeriods: pulumi.Int(1),\n\t\t\tThreshold: pulumi.Float64(10),\n\t\t\tComparisonOperator: pulumi.String(\"GreaterThanThreshold\"),\n\t\t\tAlarmActions: pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t\tInsufficientDataActions: pulumi.StringArray{\n\t\t\t\talarmTopic.ID(),\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"queueURL\", myQueue.ID())\n\t\tctx.Export(\"queueARN\", myQueue.Arn)\n\t\tctx.Export(\"queueName\", myQueue.QueueName)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst config = new pulumi.Config();\nconst alarmEmail = 
config.get(\"alarmEmail\") || \"jane.doe@example.com\";\nconst myQueue = new aws_native.sqs.Queue(\"myQueue\", {queueName: \"SampleQueue\"});\nconst alarmTopic = new aws_native.sns.Topic(\"alarmTopic\", {subscription: [{\n endpoint: alarmEmail,\n protocol: \"email\",\n}]});\nconst queueDepthAlarm = new aws_native.cloudwatch.Alarm(\"queueDepthAlarm\", {\n alarmDescription: \"Alarm if queue depth increases to more than 10 messages\",\n namespace: \"AWS/SQS\",\n metricName: \"ApproximateNumberOfMessagesVisible\",\n dimensions: [{\n name: \"QueueName\",\n value: myQueue.queueName,\n }],\n statistic: \"Sum\",\n period: 300,\n evaluationPeriods: 1,\n threshold: 10,\n comparisonOperator: \"GreaterThanThreshold\",\n alarmActions: [alarmTopic.id],\n insufficientDataActions: [alarmTopic.id],\n});\nexport const queueURL = myQueue.id;\nexport const queueARN = myQueue.arn;\nexport const queueName = myQueue.queueName;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nconfig = pulumi.Config()\nalarm_email = config.get(\"alarmEmail\")\nif alarm_email is None:\n alarm_email = \"jane.doe@example.com\"\nmy_queue = aws_native.sqs.Queue(\"myQueue\", queue_name=\"SampleQueue\")\nalarm_topic = aws_native.sns.Topic(\"alarmTopic\", subscription=[{\n \"endpoint\": alarm_email,\n \"protocol\": \"email\",\n}])\nqueue_depth_alarm = aws_native.cloudwatch.Alarm(\"queueDepthAlarm\",\n alarm_description=\"Alarm if queue depth increases to more than 10 messages\",\n namespace=\"AWS/SQS\",\n metric_name=\"ApproximateNumberOfMessagesVisible\",\n dimensions=[{\n \"name\": \"QueueName\",\n \"value\": my_queue.queue_name,\n }],\n statistic=\"Sum\",\n period=300,\n evaluation_periods=1,\n threshold=10,\n comparison_operator=\"GreaterThanThreshold\",\n alarm_actions=[alarm_topic.id],\n insufficient_data_actions=[alarm_topic.id])\npulumi.export(\"queueURL\", my_queue.id)\npulumi.export(\"queueARN\", my_queue.arn)\npulumi.export(\"queueName\", my_queue.queue_name)\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myDeadLetterQueue = new AwsNative.Sqs.Queue(\"myDeadLetterQueue\");\n\n var mySourceQueue = new AwsNative.Sqs.Queue(\"mySourceQueue\", new()\n {\n RedrivePolicy = new Dictionary\u003cstring, object?\u003e\n {\n [\"deadLetterTargetArn\"] = myDeadLetterQueue.Arn,\n [\"maxReceiveCount\"] = 5,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"sourceQueueURL\"] = mySourceQueue.Id,\n [\"sourceQueueARN\"] = mySourceQueue.Arn,\n [\"deadLetterQueueURL\"] = myDeadLetterQueue.Id,\n [\"deadLetterQueueARN\"] = myDeadLetterQueue.Arn,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmyDeadLetterQueue, err := sqs.NewQueue(ctx, \"myDeadLetterQueue\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmySourceQueue, err := sqs.NewQueue(ctx, \"mySourceQueue\", \u0026sqs.QueueArgs{\n\t\t\tRedrivePolicy: pulumi.Any(map[string]interface{}{\n\t\t\t\t\"deadLetterTargetArn\": myDeadLetterQueue.Arn,\n\t\t\t\t\"maxReceiveCount\": 5,\n\t\t\t}),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"sourceQueueURL\", mySourceQueue.ID())\n\t\tctx.Export(\"sourceQueueARN\", 
mySourceQueue.Arn)\n\t\tctx.Export(\"deadLetterQueueURL\", myDeadLetterQueue.ID())\n\t\tctx.Export(\"deadLetterQueueARN\", myDeadLetterQueue.Arn)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst myDeadLetterQueue = new aws_native.sqs.Queue(\"myDeadLetterQueue\", {});\nconst mySourceQueue = new aws_native.sqs.Queue(\"mySourceQueue\", {redrivePolicy: {\n deadLetterTargetArn: myDeadLetterQueue.arn,\n maxReceiveCount: 5,\n}});\nexport const sourceQueueURL = mySourceQueue.id;\nexport const sourceQueueARN = mySourceQueue.arn;\nexport const deadLetterQueueURL = myDeadLetterQueue.id;\nexport const deadLetterQueueARN = myDeadLetterQueue.arn;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nmy_dead_letter_queue = aws_native.sqs.Queue(\"myDeadLetterQueue\")\nmy_source_queue = aws_native.sqs.Queue(\"mySourceQueue\", redrive_policy={\n \"deadLetterTargetArn\": my_dead_letter_queue.arn,\n \"maxReceiveCount\": 5,\n})\npulumi.export(\"sourceQueueURL\", my_source_queue.id)\npulumi.export(\"sourceQueueARN\", my_source_queue.arn)\npulumi.export(\"deadLetterQueueURL\", my_dead_letter_queue.id)\npulumi.export(\"deadLetterQueueARN\", my_dead_letter_queue.arn)\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myDeadLetterQueue = new AwsNative.Sqs.Queue(\"myDeadLetterQueue\");\n\n var mySourceQueue = new AwsNative.Sqs.Queue(\"mySourceQueue\", new()\n {\n RedrivePolicy = new Dictionary\u003cstring, object?\u003e\n {\n [\"deadLetterTargetArn\"] = myDeadLetterQueue.Arn,\n [\"maxReceiveCount\"] = 5,\n },\n });\n\n return new Dictionary\u003cstring, object?\u003e\n {\n [\"sourceQueueURL\"] = mySourceQueue.Id,\n [\"sourceQueueARN\"] = mySourceQueue.Arn,\n [\"deadLetterQueueURL\"] = myDeadLetterQueue.Id,\n [\"deadLetterQueueARN\"] = myDeadLetterQueue.Arn,\n };\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/sqs\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\tmyDeadLetterQueue, err := sqs.NewQueue(ctx, \"myDeadLetterQueue\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmySourceQueue, err := sqs.NewQueue(ctx, \"mySourceQueue\", \u0026sqs.QueueArgs{\n\t\t\tRedrivePolicy: pulumi.Any(map[string]interface{}{\n\t\t\t\t\"deadLetterTargetArn\": myDeadLetterQueue.Arn,\n\t\t\t\t\"maxReceiveCount\": 5,\n\t\t\t}),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tctx.Export(\"sourceQueueURL\", mySourceQueue.ID())\n\t\tctx.Export(\"sourceQueueARN\", mySourceQueue.Arn)\n\t\tctx.Export(\"deadLetterQueueURL\", myDeadLetterQueue.ID())\n\t\tctx.Export(\"deadLetterQueueARN\", myDeadLetterQueue.Arn)\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst myDeadLetterQueue = new aws_native.sqs.Queue(\"myDeadLetterQueue\", {});\nconst mySourceQueue = new aws_native.sqs.Queue(\"mySourceQueue\", {redrivePolicy: {\n deadLetterTargetArn: myDeadLetterQueue.arn,\n maxReceiveCount: 5,\n}});\nexport const sourceQueueURL = mySourceQueue.id;\nexport const sourceQueueARN = mySourceQueue.arn;\nexport const deadLetterQueueURL = myDeadLetterQueue.id;\nexport const deadLetterQueueARN = 
myDeadLetterQueue.arn;\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nmy_dead_letter_queue = aws_native.sqs.Queue(\"myDeadLetterQueue\")\nmy_source_queue = aws_native.sqs.Queue(\"mySourceQueue\", redrive_policy={\n \"deadLetterTargetArn\": my_dead_letter_queue.arn,\n \"maxReceiveCount\": 5,\n})\npulumi.export(\"sourceQueueURL\", my_source_queue.id)\npulumi.export(\"sourceQueueARN\", my_source_queue.arn)\npulumi.export(\"deadLetterQueueURL\", my_dead_letter_queue.id)\npulumi.export(\"deadLetterQueueARN\", my_dead_letter_queue.arn)\n\n```\n\n{{% /example %}}\n{{% /examples %}}\n", "properties": { "arn": { "type": "string", @@ -252275,7 +253127,7 @@ }, "fifoQueue": { "type": "boolean", - "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*.", + "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*.", "replaceOnChanges": true }, "fifoThroughputLimit": { @@ -252288,7 +253140,7 @@ }, "kmsMasterKeyId": { "type": "string", - "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (e.g. ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper" + "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (for example ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Security best practices for Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *Key Management Service Developer Guide*" }, "maximumMessageSize": { "type": "integer", @@ -252300,7 +253152,7 @@ }, "queueName": { "type": "string", - "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. 
For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", + "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "replaceOnChanges": true }, "queueUrl": { @@ -252313,11 +253165,11 @@ }, "redriveAllowPolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. 
Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." }, "redrivePolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is received by a consumer of the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." }, "sqsManagedSseEnabled": { "type": "boolean", @@ -252355,7 +253207,7 @@ }, "fifoQueue": { "type": "boolean", - "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. 
For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*." + "description": "If set to true, creates a FIFO queue. If you don't specify this property, SQS creates a standard queue. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*." }, "fifoThroughputLimit": { "type": "string", @@ -252367,7 +253219,7 @@ }, "kmsMasterKeyId": { "type": "string", - "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (e.g. ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper" + "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (for example ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Security best practices for Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *Key Management Service Developer Guide*" }, "maximumMessageSize": { "type": "integer", @@ -252379,7 +253231,7 @@ }, "queueName": { "type": "string", - "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name." + "description": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the ``.fifo`` suffix. 
For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Developer Guide*.\n If you don't specify a name, CFN generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *User Guide*. \n If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name." }, "receiveMessageWaitTimeSeconds": { "type": "integer", @@ -252387,11 +253239,11 @@ }, "redriveAllowPolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." 
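As a companion to the `redriveAllowPolicy` description above, the following is a minimal TypeScript sketch (the queue names and logical resource names are illustrative assumptions, not taken from the schema) of a dead-letter queue that uses the `byQueue` permission type to accept redrives only from one named source queue:

```typescript
import * as pulumi from "@pulumi/pulumi";
import * as aws_native from "@pulumi/aws-native";

// Source queue whose messages may later be redriven into the dead-letter queue.
const orderQueue = new aws_native.sqs.Queue("orderQueue", {});

// Dead-letter queue that only accepts redrives from the source queue above:
// redrivePermission "byQueue" plus an explicit sourceQueueArns list (up to 10 ARNs).
const orderDeadLetterQueue = new aws_native.sqs.Queue("orderDeadLetterQueue", {
    redriveAllowPolicy: {
        redrivePermission: "byQueue",
        sourceQueueArns: [orderQueue.arn],
    },
});

export const deadLetterQueueArn = orderDeadLetterQueue.arn;
```

Setting `redrivePermission` to `allowAll` (the default) or `denyAll` would make the `sourceQueueArns` list unnecessary.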
}, "redrivePolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is received by a consumer of the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." 
}, "sqsManagedSseEnabled": { "type": "boolean", @@ -255734,6 +256586,182 @@ "profileType" ] }, + "aws-native:transfer:Server": { + "description": "Definition of AWS::Transfer::Server Resource Type\n\n{{% examples %}}\n## Example Usage\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myTransferServer = new AwsNative.Transfer.Server(\"myTransferServer\", new()\n {\n EndpointDetails = new AwsNative.Transfer.Inputs.ServerEndpointDetailsArgs\n {\n AddressAllocationIds = new[]\n {\n \"AddressAllocationId-1\",\n \"AddressAllocationId-2\",\n },\n SubnetIds = new[]\n {\n \"SubnetId-1\",\n \"SubnetId-2\",\n },\n VpcId = \"VpcId\",\n },\n EndpointType = AwsNative.Transfer.ServerEndpointType.Vpc,\n LoggingRole = \"Logging-Role-ARN\",\n Protocols = new[]\n {\n AwsNative.Transfer.ServerProtocol.Sftp,\n },\n SecurityPolicyName = \"Security-Policy-Name\",\n IdentityProviderDetails = new AwsNative.Transfer.Inputs.ServerIdentityProviderDetailsArgs\n {\n InvocationRole = \"Invocation-Role-ARN\",\n Url = \"API_GATEWAY-Invocation-URL\",\n },\n IdentityProviderType = AwsNative.Transfer.ServerIdentityProviderType.ApiGateway,\n Tags = new[]\n {\n new AwsNative.Inputs.TagArgs\n {\n Key = \"KeyName\",\n Value = \"ValueName\",\n },\n },\n });\n\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\tawsnative \"github.com/pulumi/pulumi-aws-native/sdk/go/aws\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/transfer\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := transfer.NewServer(ctx, \"myTransferServer\", \u0026transfer.ServerArgs{\n\t\t\tEndpointDetails: \u0026transfer.ServerEndpointDetailsArgs{\n\t\t\t\tAddressAllocationIds: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"AddressAllocationId-1\"),\n\t\t\t\t\tpulumi.String(\"AddressAllocationId-2\"),\n\t\t\t\t},\n\t\t\t\tSubnetIds: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"SubnetId-1\"),\n\t\t\t\t\tpulumi.String(\"SubnetId-2\"),\n\t\t\t\t},\n\t\t\t\tVpcId: pulumi.String(\"VpcId\"),\n\t\t\t},\n\t\t\tEndpointType: transfer.ServerEndpointTypeVpc,\n\t\t\tLoggingRole: pulumi.String(\"Logging-Role-ARN\"),\n\t\t\tProtocols: transfer.ServerProtocolArray{\n\t\t\t\ttransfer.ServerProtocolSftp,\n\t\t\t},\n\t\t\tSecurityPolicyName: pulumi.String(\"Security-Policy-Name\"),\n\t\t\tIdentityProviderDetails: \u0026transfer.ServerIdentityProviderDetailsArgs{\n\t\t\t\tInvocationRole: pulumi.String(\"Invocation-Role-ARN\"),\n\t\t\t\tUrl: pulumi.String(\"API_GATEWAY-Invocation-URL\"),\n\t\t\t},\n\t\t\tIdentityProviderType: transfer.ServerIdentityProviderTypeApiGateway,\n\t\t\tTags: aws.TagArray{\n\t\t\t\t\u0026aws.TagArgs{\n\t\t\t\t\tKey: pulumi.String(\"KeyName\"),\n\t\t\t\t\tValue: pulumi.String(\"ValueName\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst myTransferServer = new aws_native.transfer.Server(\"myTransferServer\", {\n endpointDetails: {\n addressAllocationIds: [\n \"AddressAllocationId-1\",\n \"AddressAllocationId-2\",\n ],\n subnetIds: [\n \"SubnetId-1\",\n \"SubnetId-2\",\n ],\n vpcId: \"VpcId\",\n },\n endpointType: aws_native.transfer.ServerEndpointType.Vpc,\n loggingRole: \"Logging-Role-ARN\",\n protocols: 
[aws_native.transfer.ServerProtocol.Sftp],\n securityPolicyName: \"Security-Policy-Name\",\n identityProviderDetails: {\n invocationRole: \"Invocation-Role-ARN\",\n url: \"API_GATEWAY-Invocation-URL\",\n },\n identityProviderType: aws_native.transfer.ServerIdentityProviderType.ApiGateway,\n tags: [{\n key: \"KeyName\",\n value: \"ValueName\",\n }],\n});\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nmy_transfer_server = aws_native.transfer.Server(\"myTransferServer\",\n endpoint_details={\n \"address_allocation_ids\": [\n \"AddressAllocationId-1\",\n \"AddressAllocationId-2\",\n ],\n \"subnet_ids\": [\n \"SubnetId-1\",\n \"SubnetId-2\",\n ],\n \"vpc_id\": \"VpcId\",\n },\n endpoint_type=aws_native.transfer.ServerEndpointType.VPC,\n logging_role=\"Logging-Role-ARN\",\n protocols=[aws_native.transfer.ServerProtocol.SFTP],\n security_policy_name=\"Security-Policy-Name\",\n identity_provider_details={\n \"invocation_role\": \"Invocation-Role-ARN\",\n \"url\": \"API_GATEWAY-Invocation-URL\",\n },\n identity_provider_type=aws_native.transfer.ServerIdentityProviderType.API_GATEWAY,\n tags=[{\n \"key\": \"KeyName\",\n \"value\": \"ValueName\",\n }])\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var myTransferServer = new AwsNative.Transfer.Server(\"myTransferServer\", new()\n {\n EndpointDetails = new AwsNative.Transfer.Inputs.ServerEndpointDetailsArgs\n {\n AddressAllocationIds = new[]\n {\n \"AddressAllocationId-1\",\n \"AddressAllocationId-2\",\n },\n SubnetIds = new[]\n {\n \"SubnetId-1\",\n \"SubnetId-2\",\n },\n VpcId = \"VpcId\",\n },\n EndpointType = AwsNative.Transfer.ServerEndpointType.Vpc,\n LoggingRole = \"Logging-Role-ARN\",\n Protocols = new[]\n {\n AwsNative.Transfer.ServerProtocol.Sftp,\n },\n SecurityPolicyName = \"Security-Policy-Name\",\n IdentityProviderDetails = new AwsNative.Transfer.Inputs.ServerIdentityProviderDetailsArgs\n {\n InvocationRole = \"Invocation-Role-ARN\",\n Url = \"API_GATEWAY-Invocation-URL\",\n },\n IdentityProviderType = AwsNative.Transfer.ServerIdentityProviderType.ApiGateway,\n Tags = new[]\n {\n new AwsNative.Inputs.TagArgs\n {\n Key = \"KeyName\",\n Value = \"ValueName\",\n },\n },\n });\n\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\tawsnative \"github.com/pulumi/pulumi-aws-native/sdk/go/aws\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/transfer\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\t_, err := transfer.NewServer(ctx, \"myTransferServer\", \u0026transfer.ServerArgs{\n\t\t\tEndpointDetails: \u0026transfer.ServerEndpointDetailsArgs{\n\t\t\t\tAddressAllocationIds: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"AddressAllocationId-1\"),\n\t\t\t\t\tpulumi.String(\"AddressAllocationId-2\"),\n\t\t\t\t},\n\t\t\t\tSubnetIds: pulumi.StringArray{\n\t\t\t\t\tpulumi.String(\"SubnetId-1\"),\n\t\t\t\t\tpulumi.String(\"SubnetId-2\"),\n\t\t\t\t},\n\t\t\t\tVpcId: pulumi.String(\"VpcId\"),\n\t\t\t},\n\t\t\tEndpointType: transfer.ServerEndpointTypeVpc,\n\t\t\tLoggingRole: pulumi.String(\"Logging-Role-ARN\"),\n\t\t\tProtocols: transfer.ServerProtocolArray{\n\t\t\t\ttransfer.ServerProtocolSftp,\n\t\t\t},\n\t\t\tSecurityPolicyName: pulumi.String(\"Security-Policy-Name\"),\n\t\t\tIdentityProviderDetails: 
\u0026transfer.ServerIdentityProviderDetailsArgs{\n\t\t\t\tInvocationRole: pulumi.String(\"Invocation-Role-ARN\"),\n\t\t\t\tUrl: pulumi.String(\"API_GATEWAY-Invocation-URL\"),\n\t\t\t},\n\t\t\tIdentityProviderType: transfer.ServerIdentityProviderTypeApiGateway,\n\t\t\tTags: aws.TagArray{\n\t\t\t\t\u0026aws.TagArgs{\n\t\t\t\t\tKey: pulumi.String(\"KeyName\"),\n\t\t\t\t\tValue: pulumi.String(\"ValueName\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst myTransferServer = new aws_native.transfer.Server(\"myTransferServer\", {\n endpointDetails: {\n addressAllocationIds: [\n \"AddressAllocationId-1\",\n \"AddressAllocationId-2\",\n ],\n subnetIds: [\n \"SubnetId-1\",\n \"SubnetId-2\",\n ],\n vpcId: \"VpcId\",\n },\n endpointType: aws_native.transfer.ServerEndpointType.Vpc,\n loggingRole: \"Logging-Role-ARN\",\n protocols: [aws_native.transfer.ServerProtocol.Sftp],\n securityPolicyName: \"Security-Policy-Name\",\n identityProviderDetails: {\n invocationRole: \"Invocation-Role-ARN\",\n url: \"API_GATEWAY-Invocation-URL\",\n },\n identityProviderType: aws_native.transfer.ServerIdentityProviderType.ApiGateway,\n tags: [{\n key: \"KeyName\",\n value: \"ValueName\",\n }],\n});\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\nmy_transfer_server = aws_native.transfer.Server(\"myTransferServer\",\n endpoint_details={\n \"address_allocation_ids\": [\n \"AddressAllocationId-1\",\n \"AddressAllocationId-2\",\n ],\n \"subnet_ids\": [\n \"SubnetId-1\",\n \"SubnetId-2\",\n ],\n \"vpc_id\": \"VpcId\",\n },\n endpoint_type=aws_native.transfer.ServerEndpointType.VPC,\n logging_role=\"Logging-Role-ARN\",\n protocols=[aws_native.transfer.ServerProtocol.SFTP],\n security_policy_name=\"Security-Policy-Name\",\n identity_provider_details={\n \"invocation_role\": \"Invocation-Role-ARN\",\n \"url\": \"API_GATEWAY-Invocation-URL\",\n },\n identity_provider_type=aws_native.transfer.ServerIdentityProviderType.API_GATEWAY,\n tags=[{\n \"key\": \"KeyName\",\n \"value\": \"ValueName\",\n }])\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var transferWorkflow = new AwsNative.Transfer.Workflow(\"transferWorkflow\", new()\n {\n Description = \"Transfer Family Workflows Blog\",\n Steps = new[]\n {\n new AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Copy,\n CopyStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepCopyStepDetailsPropertiesArgs\n {\n Name = \"copyToUserKey\",\n DestinationFileLocation = new AwsNative.Transfer.Inputs.WorkflowS3FileLocationArgs\n {\n S3FileLocation = new AwsNative.Transfer.Inputs.WorkflowS3InputFileLocationArgs\n {\n Bucket = \"archived-records\",\n Key = \"${transfer:UserName}/\",\n },\n },\n OverwriteExisting = AwsNative.Transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExisting.True,\n },\n },\n new AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Tag,\n TagStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepTagStepDetailsPropertiesArgs\n {\n Name = \"tagFileForArchive\",\n Tags = new[]\n {\n new AwsNative.Transfer.Inputs.WorkflowS3TagArgs\n {\n Key = \"Archive\",\n Value = \"yes\",\n },\n },\n },\n },\n new 
AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Custom,\n CustomStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepCustomStepDetailsPropertiesArgs\n {\n Name = \"transferExtract\",\n Target = \"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\",\n TimeoutSeconds = 60,\n },\n },\n new AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Delete,\n DeleteStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepDeleteStepDetailsPropertiesArgs\n {\n Name = \"DeleteInputFile\",\n SourceFileLocation = \"${original.file}\",\n },\n },\n },\n Tags = new[]\n {\n new AwsNative.Inputs.TagArgs\n {\n Key = \"Name\",\n Value = \"TransferFamilyWorkflows\",\n },\n },\n });\n\n var sftpServer = new AwsNative.Transfer.Server(\"sftpServer\", new()\n {\n WorkflowDetails = new AwsNative.Transfer.Inputs.ServerWorkflowDetailsArgs\n {\n OnUpload = new[]\n {\n new AwsNative.Transfer.Inputs.ServerWorkflowDetailArgs\n {\n ExecutionRole = \"your-workflow-execution-role-arn\",\n WorkflowId = transferWorkflow.WorkflowId,\n },\n },\n },\n });\n\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\tawsnative \"github.com/pulumi/pulumi-aws-native/sdk/go/aws\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/transfer\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\ttransferWorkflow, err := transfer.NewWorkflow(ctx, \"transferWorkflow\", \u0026transfer.WorkflowArgs{\n\t\t\tDescription: pulumi.String(\"Transfer Family Workflows Blog\"),\n\t\t\tSteps: transfer.WorkflowStepArray{\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeCopy,\n\t\t\t\t\tCopyStepDetails: \u0026transfer.WorkflowStepCopyStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: pulumi.String(\"copyToUserKey\"),\n\t\t\t\t\t\tDestinationFileLocation: \u0026transfer.WorkflowS3FileLocationArgs{\n\t\t\t\t\t\t\tS3FileLocation: \u0026transfer.WorkflowS3InputFileLocationArgs{\n\t\t\t\t\t\t\t\tBucket: pulumi.String(\"archived-records\"),\n\t\t\t\t\t\t\t\tKey: pulumi.String(\"${transfer:UserName}/\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tOverwriteExisting: transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExistingTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeTag,\n\t\t\t\t\tTagStepDetails: \u0026transfer.WorkflowStepTagStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: pulumi.String(\"tagFileForArchive\"),\n\t\t\t\t\t\tTags: transfer.WorkflowS3TagArray{\n\t\t\t\t\t\t\t\u0026transfer.WorkflowS3TagArgs{\n\t\t\t\t\t\t\t\tKey: pulumi.String(\"Archive\"),\n\t\t\t\t\t\t\t\tValue: pulumi.String(\"yes\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeCustom,\n\t\t\t\t\tCustomStepDetails: \u0026transfer.WorkflowStepCustomStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: pulumi.String(\"transferExtract\"),\n\t\t\t\t\t\tTarget: pulumi.String(\"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\"),\n\t\t\t\t\t\tTimeoutSeconds: pulumi.Int(60),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeDelete,\n\t\t\t\t\tDeleteStepDetails: \u0026transfer.WorkflowStepDeleteStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: pulumi.String(\"DeleteInputFile\"),\n\t\t\t\t\t\tSourceFileLocation: 
pulumi.String(\"${original.file}\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tTags: aws.TagArray{\n\t\t\t\t\u0026aws.TagArgs{\n\t\t\t\t\tKey: pulumi.String(\"Name\"),\n\t\t\t\t\tValue: pulumi.String(\"TransferFamilyWorkflows\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = transfer.NewServer(ctx, \"sftpServer\", \u0026transfer.ServerArgs{\n\t\t\tWorkflowDetails: \u0026transfer.ServerWorkflowDetailsArgs{\n\t\t\t\tOnUpload: transfer.ServerWorkflowDetailArray{\n\t\t\t\t\t\u0026transfer.ServerWorkflowDetailArgs{\n\t\t\t\t\t\tExecutionRole: pulumi.String(\"your-workflow-execution-role-arn\"),\n\t\t\t\t\t\tWorkflowId: transferWorkflow.WorkflowId,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst transferWorkflow = new aws_native.transfer.Workflow(\"transferWorkflow\", {\n description: \"Transfer Family Workflows Blog\",\n steps: [\n {\n type: aws_native.transfer.WorkflowStepType.Copy,\n copyStepDetails: {\n name: \"copyToUserKey\",\n destinationFileLocation: {\n s3FileLocation: {\n bucket: \"archived-records\",\n key: \"${transfer:UserName}/\",\n },\n },\n overwriteExisting: aws_native.transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExisting.True,\n },\n },\n {\n type: aws_native.transfer.WorkflowStepType.Tag,\n tagStepDetails: {\n name: \"tagFileForArchive\",\n tags: [{\n key: \"Archive\",\n value: \"yes\",\n }],\n },\n },\n {\n type: aws_native.transfer.WorkflowStepType.Custom,\n customStepDetails: {\n name: \"transferExtract\",\n target: \"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\",\n timeoutSeconds: 60,\n },\n },\n {\n type: aws_native.transfer.WorkflowStepType.Delete,\n deleteStepDetails: {\n name: \"DeleteInputFile\",\n sourceFileLocation: \"${original.file}\",\n },\n },\n ],\n tags: [{\n key: \"Name\",\n value: \"TransferFamilyWorkflows\",\n }],\n});\nconst sftpServer = new aws_native.transfer.Server(\"sftpServer\", {workflowDetails: {\n onUpload: [{\n executionRole: \"your-workflow-execution-role-arn\",\n workflowId: transferWorkflow.workflowId,\n }],\n}});\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\ntransfer_workflow = aws_native.transfer.Workflow(\"transferWorkflow\",\n description=\"Transfer Family Workflows Blog\",\n steps=[\n {\n \"type\": aws_native.transfer.WorkflowStepType.COPY,\n \"copy_step_details\": {\n \"name\": \"copyToUserKey\",\n \"destination_file_location\": {\n \"s3_file_location\": {\n \"bucket\": \"archived-records\",\n \"key\": \"${transfer:UserName}/\",\n },\n },\n \"overwrite_existing\": aws_native.transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExisting.TRUE,\n },\n },\n {\n \"type\": aws_native.transfer.WorkflowStepType.TAG,\n \"tag_step_details\": {\n \"name\": \"tagFileForArchive\",\n \"tags\": [{\n \"key\": \"Archive\",\n \"value\": \"yes\",\n }],\n },\n },\n {\n \"type\": aws_native.transfer.WorkflowStepType.CUSTOM,\n \"custom_step_details\": {\n \"name\": \"transferExtract\",\n \"target\": \"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\",\n \"timeout_seconds\": 60,\n },\n },\n {\n \"type\": aws_native.transfer.WorkflowStepType.DELETE,\n \"delete_step_details\": {\n \"name\": \"DeleteInputFile\",\n \"source_file_location\": \"${original.file}\",\n },\n },\n ],\n tags=[{\n \"key\": 
\"Name\",\n \"value\": \"TransferFamilyWorkflows\",\n }])\nsftp_server = aws_native.transfer.Server(\"sftpServer\", workflow_details={\n \"on_upload\": [{\n \"execution_role\": \"your-workflow-execution-role-arn\",\n \"workflow_id\": transfer_workflow.workflow_id,\n }],\n})\n\n```\n\n{{% /example %}}\n{{% example %}}\n### Example\n```csharp\nusing System.Collections.Generic;\nusing System.Linq;\nusing Pulumi;\nusing AwsNative = Pulumi.AwsNative;\n\nreturn await Deployment.RunAsync(() =\u003e \n{\n var transferWorkflow = new AwsNative.Transfer.Workflow(\"transferWorkflow\", new()\n {\n Description = \"Transfer Family Workflows Blog\",\n Steps = new[]\n {\n new AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Copy,\n CopyStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepCopyStepDetailsPropertiesArgs\n {\n Name = \"copyToUserKey\",\n DestinationFileLocation = new AwsNative.Transfer.Inputs.WorkflowS3FileLocationArgs\n {\n S3FileLocation = new AwsNative.Transfer.Inputs.WorkflowS3InputFileLocationArgs\n {\n Bucket = \"archived-records\",\n Key = \"${transfer:UserName}/\",\n },\n },\n OverwriteExisting = AwsNative.Transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExisting.True,\n },\n },\n new AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Tag,\n TagStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepTagStepDetailsPropertiesArgs\n {\n Name = \"tagFileForArchive\",\n Tags = new[]\n {\n new AwsNative.Transfer.Inputs.WorkflowS3TagArgs\n {\n Key = \"Archive\",\n Value = \"yes\",\n },\n },\n },\n },\n new AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Custom,\n CustomStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepCustomStepDetailsPropertiesArgs\n {\n Name = \"transferExtract\",\n Target = \"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\",\n TimeoutSeconds = 60,\n },\n },\n new AwsNative.Transfer.Inputs.WorkflowStepArgs\n {\n Type = AwsNative.Transfer.WorkflowStepType.Delete,\n DeleteStepDetails = new AwsNative.Transfer.Inputs.WorkflowStepDeleteStepDetailsPropertiesArgs\n {\n Name = \"DeleteInputFile\",\n SourceFileLocation = \"${original.file}\",\n },\n },\n },\n Tags = new[]\n {\n new AwsNative.Inputs.TagArgs\n {\n Key = \"Name\",\n Value = \"TransferFamilyWorkflows\",\n },\n },\n });\n\n var sftpServer = new AwsNative.Transfer.Server(\"sftpServer\", new()\n {\n WorkflowDetails = new AwsNative.Transfer.Inputs.ServerWorkflowDetailsArgs\n {\n OnUpload = new[]\n {\n new AwsNative.Transfer.Inputs.ServerWorkflowDetailArgs\n {\n ExecutionRole = \"your-workflow-execution-role-arn\",\n WorkflowId = transferWorkflow.WorkflowId,\n },\n },\n },\n });\n\n});\n\n\n```\n\n```go\npackage main\n\nimport (\n\tawsnative \"github.com/pulumi/pulumi-aws-native/sdk/go/aws\"\n\t\"github.com/pulumi/pulumi-aws-native/sdk/go/aws/transfer\"\n\t\"github.com/pulumi/pulumi/sdk/v3/go/pulumi\"\n)\n\nfunc main() {\n\tpulumi.Run(func(ctx *pulumi.Context) error {\n\t\ttransferWorkflow, err := transfer.NewWorkflow(ctx, \"transferWorkflow\", \u0026transfer.WorkflowArgs{\n\t\t\tDescription: pulumi.String(\"Transfer Family Workflows Blog\"),\n\t\t\tSteps: transfer.WorkflowStepArray{\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeCopy,\n\t\t\t\t\tCopyStepDetails: \u0026transfer.WorkflowStepCopyStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: 
pulumi.String(\"copyToUserKey\"),\n\t\t\t\t\t\tDestinationFileLocation: \u0026transfer.WorkflowS3FileLocationArgs{\n\t\t\t\t\t\t\tS3FileLocation: \u0026transfer.WorkflowS3InputFileLocationArgs{\n\t\t\t\t\t\t\t\tBucket: pulumi.String(\"archived-records\"),\n\t\t\t\t\t\t\t\tKey: pulumi.String(\"${transfer:UserName}/\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tOverwriteExisting: transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExistingTrue,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeTag,\n\t\t\t\t\tTagStepDetails: \u0026transfer.WorkflowStepTagStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: pulumi.String(\"tagFileForArchive\"),\n\t\t\t\t\t\tTags: transfer.WorkflowS3TagArray{\n\t\t\t\t\t\t\t\u0026transfer.WorkflowS3TagArgs{\n\t\t\t\t\t\t\t\tKey: pulumi.String(\"Archive\"),\n\t\t\t\t\t\t\t\tValue: pulumi.String(\"yes\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeCustom,\n\t\t\t\t\tCustomStepDetails: \u0026transfer.WorkflowStepCustomStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: pulumi.String(\"transferExtract\"),\n\t\t\t\t\t\tTarget: pulumi.String(\"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\"),\n\t\t\t\t\t\tTimeoutSeconds: pulumi.Int(60),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\u0026transfer.WorkflowStepArgs{\n\t\t\t\t\tType: transfer.WorkflowStepTypeDelete,\n\t\t\t\t\tDeleteStepDetails: \u0026transfer.WorkflowStepDeleteStepDetailsPropertiesArgs{\n\t\t\t\t\t\tName: pulumi.String(\"DeleteInputFile\"),\n\t\t\t\t\t\tSourceFileLocation: pulumi.String(\"${original.file}\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tTags: aws.TagArray{\n\t\t\t\t\u0026aws.TagArgs{\n\t\t\t\t\tKey: pulumi.String(\"Name\"),\n\t\t\t\t\tValue: pulumi.String(\"TransferFamilyWorkflows\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = transfer.NewServer(ctx, \"sftpServer\", \u0026transfer.ServerArgs{\n\t\t\tWorkflowDetails: \u0026transfer.ServerWorkflowDetailsArgs{\n\t\t\t\tOnUpload: transfer.ServerWorkflowDetailArray{\n\t\t\t\t\t\u0026transfer.ServerWorkflowDetailArgs{\n\t\t\t\t\t\tExecutionRole: pulumi.String(\"your-workflow-execution-role-arn\"),\n\t\t\t\t\t\tWorkflowId: transferWorkflow.WorkflowId,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n```\n\n```typescript\nimport * as pulumi from \"@pulumi/pulumi\";\nimport * as aws_native from \"@pulumi/aws-native\";\n\nconst transferWorkflow = new aws_native.transfer.Workflow(\"transferWorkflow\", {\n description: \"Transfer Family Workflows Blog\",\n steps: [\n {\n type: aws_native.transfer.WorkflowStepType.Copy,\n copyStepDetails: {\n name: \"copyToUserKey\",\n destinationFileLocation: {\n s3FileLocation: {\n bucket: \"archived-records\",\n key: \"${transfer:UserName}/\",\n },\n },\n overwriteExisting: aws_native.transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExisting.True,\n },\n },\n {\n type: aws_native.transfer.WorkflowStepType.Tag,\n tagStepDetails: {\n name: \"tagFileForArchive\",\n tags: [{\n key: \"Archive\",\n value: \"yes\",\n }],\n },\n },\n {\n type: aws_native.transfer.WorkflowStepType.Custom,\n customStepDetails: {\n name: \"transferExtract\",\n target: \"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\",\n timeoutSeconds: 60,\n },\n },\n {\n type: 
aws_native.transfer.WorkflowStepType.Delete,\n deleteStepDetails: {\n name: \"DeleteInputFile\",\n sourceFileLocation: \"${original.file}\",\n },\n },\n ],\n tags: [{\n key: \"Name\",\n value: \"TransferFamilyWorkflows\",\n }],\n});\nconst sftpServer = new aws_native.transfer.Server(\"sftpServer\", {workflowDetails: {\n onUpload: [{\n executionRole: \"your-workflow-execution-role-arn\",\n workflowId: transferWorkflow.workflowId,\n }],\n}});\n\n```\n\n```python\nimport pulumi\nimport pulumi_aws_native as aws_native\n\ntransfer_workflow = aws_native.transfer.Workflow(\"transferWorkflow\",\n description=\"Transfer Family Workflows Blog\",\n steps=[\n {\n \"type\": aws_native.transfer.WorkflowStepType.COPY,\n \"copy_step_details\": {\n \"name\": \"copyToUserKey\",\n \"destination_file_location\": {\n \"s3_file_location\": {\n \"bucket\": \"archived-records\",\n \"key\": \"${transfer:UserName}/\",\n },\n },\n \"overwrite_existing\": aws_native.transfer.WorkflowStepCopyStepDetailsPropertiesOverwriteExisting.TRUE,\n },\n },\n {\n \"type\": aws_native.transfer.WorkflowStepType.TAG,\n \"tag_step_details\": {\n \"name\": \"tagFileForArchive\",\n \"tags\": [{\n \"key\": \"Archive\",\n \"value\": \"yes\",\n }],\n },\n },\n {\n \"type\": aws_native.transfer.WorkflowStepType.CUSTOM,\n \"custom_step_details\": {\n \"name\": \"transferExtract\",\n \"target\": \"arn:${AWS::Partition}:lambda:${AWS::Region}:${AWS::AccountId}:function:my-function-name\",\n \"timeout_seconds\": 60,\n },\n },\n {\n \"type\": aws_native.transfer.WorkflowStepType.DELETE,\n \"delete_step_details\": {\n \"name\": \"DeleteInputFile\",\n \"source_file_location\": \"${original.file}\",\n },\n },\n ],\n tags=[{\n \"key\": \"Name\",\n \"value\": \"TransferFamilyWorkflows\",\n }])\nsftp_server = aws_native.transfer.Server(\"sftpServer\", workflow_details={\n \"on_upload\": [{\n \"execution_role\": \"your-workflow-execution-role-arn\",\n \"workflow_id\": transfer_workflow.workflow_id,\n }],\n})\n\n```\n\n{{% /example %}}\n{{% /examples %}}\n", + "properties": { + "arn": { + "type": "string", + "description": "The Amazon Resource Name associated with the server, in the form `arn:aws:transfer:region: *account-id* :server/ *server-id* /` .\n\nAn example of a server ARN is: `arn:aws:transfer:us-east-1:123456789012:server/s-01234567890abcdef` ." + }, + "as2ServiceManagedEgressIpAddresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs. These IP addresses are assigned automatically when you create an AS2 server. Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well." + }, + "certificate": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. 
Required when `Protocols` is set to `FTPS` .\n\nTo request a new public certificate, see [Request a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the *AWS Certificate Manager User Guide* .\n\nTo import an existing certificate into ACM, see [Importing certificates into ACM](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *AWS Certificate Manager User Guide* .\n\nTo request a private certificate to use FTPS through private IP addresses, see [Request a private certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) in the *AWS Certificate Manager User Guide* .\n\nCertificates with the following cryptographic algorithms and key sizes are supported:\n\n- 2048-bit RSA (RSA_2048)\n- 4096-bit RSA (RSA_4096)\n- Elliptic Prime Curve 256 bit (EC_prime256v1)\n- Elliptic Prime Curve 384 bit (EC_secp384r1)\n- Elliptic Prime Curve 521 bit (EC_secp521r1)\n\n\u003e The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer." + }, + "domain": { + "$ref": "#/types/aws-native:transfer:ServerDomain", + "description": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3.", + "replaceOnChanges": true + }, + "endpointDetails": { + "$ref": "#/types/aws-native:transfer:ServerEndpointDetails", + "description": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint." + }, + "endpointType": { + "$ref": "#/types/aws-native:transfer:ServerEndpointType", + "description": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n\u003e After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n\u003e \n\u003e For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n\u003e \n\u003e It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` ." 
+ }, + "identityProviderDetails": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderDetails", + "description": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` ." + }, + "identityProviderType": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderType", + "description": "The mode of authentication for a server. The default value is `SERVICE_MANAGED` , which allows you to store and access user credentials within the AWS Transfer Family service.\n\nUse `AWS_DIRECTORY_SERVICE` to provide access to Active Directory groups in AWS Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in AWS using AD Connector. This option also requires you to provide a Directory ID by using the `IdentityProviderDetails` parameter.\n\nUse the `API_GATEWAY` value to integrate with an identity provider of your choosing. The `API_GATEWAY` setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the `IdentityProviderDetails` parameter.\n\nUse the `AWS_LAMBDA` value to directly use an AWS Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the `Function` parameter for the `IdentityProviderDetails` data type.", + "replaceOnChanges": true + }, + "loggingRole": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs." + }, + "postAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n\u003e The SFTP protocol does not support post-authentication display banners." + }, + "preAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:\n\n`This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.`" + }, + "protocolDetails": { + "$ref": "#/types/aws-native:transfer:ServerProtocolDetails", + "description": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . 
If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "protocols": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerProtocol" + }, + "description": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): used for transporting structured business-to-business data\n\n\u003e - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.\n\u003e - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n\u003e - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and domain must be Amazon S3. \n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "s3StorageOptions": { + "$ref": "#/types/aws-native:transfer:ServerS3StorageOptions", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + }, + "securityPolicyName": { + "type": "string", + "description": "Specifies the name of the security policy for the server." + }, + "serverId": { + "type": "string", + "description": "The service-assigned ID of the server that is created.\n\nAn example `ServerId` is `s-01234567890abcdef` ." + }, + "structuredLogDestinations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. 
In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:index:Tag" + }, + "description": "Key-value pairs that can be used to group and search for servers." + }, + "workflowDetails": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetails", + "description": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.\n\nIn addition to a workflow to execute when a file is uploaded completely, `WorkflowDetails` can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects." + } + }, + "type": "object", + "required": [ + "arn", + "as2ServiceManagedEgressIpAddresses", + "serverId" + ], + "inputProperties": { + "certificate": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when `Protocols` is set to `FTPS` .\n\nTo request a new public certificate, see [Request a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the *AWS Certificate Manager User Guide* .\n\nTo import an existing certificate into ACM, see [Importing certificates into ACM](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *AWS Certificate Manager User Guide* .\n\nTo request a private certificate to use FTPS through private IP addresses, see [Request a private certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) in the *AWS Certificate Manager User Guide* .\n\nCertificates with the following cryptographic algorithms and key sizes are supported:\n\n- 2048-bit RSA (RSA_2048)\n- 4096-bit RSA (RSA_4096)\n- Elliptic Prime Curve 256 bit (EC_prime256v1)\n- Elliptic Prime Curve 384 bit (EC_secp384r1)\n- Elliptic Prime Curve 521 bit (EC_secp521r1)\n\n\u003e The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer." + }, + "domain": { + "$ref": "#/types/aws-native:transfer:ServerDomain", + "description": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3." + }, + "endpointDetails": { + "$ref": "#/types/aws-native:transfer:ServerEndpointDetails", + "description": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint." + }, + "endpointType": { + "$ref": "#/types/aws-native:transfer:ServerEndpointType", + "description": "The type of endpoint that you want your server to use. 
You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n\u003e After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n\u003e \n\u003e For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n\u003e \n\u003e It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` ." + }, + "identityProviderDetails": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderDetails", + "description": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` ." + }, + "identityProviderType": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderType", + "description": "The mode of authentication for a server. The default value is `SERVICE_MANAGED` , which allows you to store and access user credentials within the AWS Transfer Family service.\n\nUse `AWS_DIRECTORY_SERVICE` to provide access to Active Directory groups in AWS Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in AWS using AD Connector. This option also requires you to provide a Directory ID by using the `IdentityProviderDetails` parameter.\n\nUse the `API_GATEWAY` value to integrate with an identity provider of your choosing. The `API_GATEWAY` setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the `IdentityProviderDetails` parameter.\n\nUse the `AWS_LAMBDA` value to directly use an AWS Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the `Function` parameter for the `IdentityProviderDetails` data type." + }, + "loggingRole": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs." + }, + "postAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n\u003e The SFTP protocol does not support post-authentication display banners." 
+ }, + "preAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:\n\n`This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.`" + }, + "protocolDetails": { + "$ref": "#/types/aws-native:transfer:ServerProtocolDetails", + "description": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "protocols": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerProtocol" + }, + "description": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): used for transporting structured business-to-business data\n\n\u003e - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.\n\u003e - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n\u003e - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and domain must be Amazon S3. 
\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "s3StorageOptions": { + "$ref": "#/types/aws-native:transfer:ServerS3StorageOptions", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + }, + "securityPolicyName": { + "type": "string", + "description": "Specifies the name of the security policy for the server." + }, + "structuredLogDestinations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:index:Tag" + }, + "description": "Key-value pairs that can be used to group and search for servers." + }, + "workflowDetails": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetails", + "description": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.\n\nIn addition to a workflow to execute when a file is uploaded completely, `WorkflowDetails` can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects." + } + } + }, "aws-native:transfer:Workflow": { "description": "Resource Type definition for AWS::Transfer::Workflow", "properties": { @@ -257423,6 +258451,128 @@ "webAclArn" ] }, + "aws-native:wisdom:AiPrompt": { + "description": "Definition of AWS::Wisdom::AIPrompt Resource Type", + "properties": { + "aiPromptArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AI Prompt." + }, + "aiPromptId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect AI prompt." + }, + "apiFormat": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptApiFormat", + "description": "The API format used for this AI Prompt.", + "replaceOnChanges": true + }, + "assistantArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant." + }, + "assistantId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.", + "replaceOnChanges": true + }, + "description": { + "type": "string", + "description": "The description of the AI Prompt." + }, + "modelId": { + "type": "string", + "description": "The identifier of the model used for this AI Prompt. 
Model Ids supported are: `CLAUDE_3_HAIKU_20240307_V1` .", + "replaceOnChanges": true + }, + "name": { + "type": "string", + "description": "The name of the AI Prompt", + "replaceOnChanges": true + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The tags used to organize, track, or control access for this resource.", + "replaceOnChanges": true + }, + "templateConfiguration": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateConfiguration", + "description": "The configuration of the prompt template for this AI Prompt." + }, + "templateType": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateType", + "description": "The type of the prompt template for this AI Prompt.", + "replaceOnChanges": true + }, + "type": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptType", + "description": "The type of this AI Prompt.", + "replaceOnChanges": true + } + }, + "type": "object", + "required": [ + "aiPromptArn", + "aiPromptId", + "apiFormat", + "assistantArn", + "modelId", + "templateConfiguration", + "templateType", + "type" + ], + "inputProperties": { + "apiFormat": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptApiFormat", + "description": "The API format used for this AI Prompt." + }, + "assistantId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN." + }, + "description": { + "type": "string", + "description": "The description of the AI Prompt." + }, + "modelId": { + "type": "string", + "description": "The identifier of the model used for this AI Prompt. Model Ids supported are: `CLAUDE_3_HAIKU_20240307_V1` ." + }, + "name": { + "type": "string", + "description": "The name of the AI Prompt" + }, + "tags": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "The tags used to organize, track, or control access for this resource." + }, + "templateConfiguration": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateConfiguration", + "description": "The configuration of the prompt template for this AI Prompt." + }, + "templateType": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateType", + "description": "The type of the prompt template for this AI Prompt." + }, + "type": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptType", + "description": "The type of this AI Prompt." + } + }, + "requiredInputs": [ + "apiFormat", + "modelId", + "templateConfiguration", + "templateType", + "type" + ] + }, "aws-native:wisdom:Assistant": { "description": "Definition of AWS::Wisdom::Assistant Resource Type", "properties": { @@ -262180,6 +263330,72 @@ } } }, + "aws-native:appsync:getDataSource": { + "description": "Resource Type definition for AWS::AppSync::DataSource", + "inputs": { + "properties": { + "dataSourceArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the API key, such as arn:aws:appsync:us-east-1:123456789012:apis/graphqlapiid/datasources/datasourcename." + } + }, + "required": [ + "dataSourceArn" + ] + }, + "outputs": { + "properties": { + "dataSourceArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the API key, such as arn:aws:appsync:us-east-1:123456789012:apis/graphqlapiid/datasources/datasourcename." + }, + "description": { + "type": "string", + "description": "The description of the data source." 
+ }, + "dynamoDbConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceDynamoDbConfig", + "description": "AWS Region and TableName for an Amazon DynamoDB table in your account." + }, + "elasticsearchConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceElasticsearchConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.\nAs of September 2021, Amazon Elasticsearch Service is Amazon OpenSearch Service. This property is deprecated. For new data sources, use OpenSearchServiceConfig to specify an OpenSearch Service data source." + }, + "eventBridgeConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceEventBridgeConfig", + "description": "ARN for the EventBridge bus." + }, + "httpConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceHttpConfig", + "description": "Endpoints for an HTTP data source." + }, + "lambdaConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceLambdaConfig", + "description": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account." + }, + "metricsConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceMetricsConfig", + "description": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` ." + }, + "openSearchServiceConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceOpenSearchServiceConfig", + "description": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account." + }, + "relationalDatabaseConfig": { + "$ref": "#/types/aws-native:appsync:DataSourceRelationalDatabaseConfig", + "description": "Relational Database configuration of the relational database data source." + }, + "serviceRoleArn": { + "type": "string", + "description": "The AWS Identity and Access Management service role ARN for the data source. The system assumes this role when accessing the data source." + }, + "type": { + "type": "string", + "description": "The type of the data source." + } + } + } + }, "aws-native:appsync:getDomainName": { "description": "Resource Type definition for AWS::AppSync::DomainName", "inputs": { @@ -267302,11 +268518,11 @@ }, "computeType": { "$ref": "#/types/aws-native:codebuild:FleetComputeType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" + "description": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*" }, "environmentType": { "$ref": "#/types/aws-native:codebuild:FleetEnvironmentType", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." + "description": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (São Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* ." }, "fleetServiceRole": { "type": "string", @@ -267314,11 +268530,11 @@ }, "fleetVpcConfig": { "$ref": "#/types/aws-native:codebuild:FleetVpcConfig", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the VPC configuration that AWS CodeBuild accesses." + "description": "Information about the VPC configuration that AWS CodeBuild accesses." }, "imageId": { "type": "string", - "description": "\u003e Updating this field is not allowed for `MAC_ARM` . \n\nThe Amazon Machine Image (AMI) of the compute fleet." + "description": "The Amazon Machine Image (AMI) of the compute fleet." }, "name": { "type": "string", @@ -267965,7 +269181,7 @@ "properties": { "accountRecoverySetting": { "$ref": "#/types/aws-native:cognito:UserPoolAccountRecoverySetting", - "description": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. 
In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email." + "description": "The available verified method a user can use to recover their password when they call `ForgotPassword` . You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email." }, "adminCreateUserConfig": { "$ref": "#/types/aws-native:cognito:UserPoolAdminCreateUserConfig", @@ -267976,7 +269192,7 @@ "items": { "type": "string" }, - "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n\u003e This user pool property cannot be updated." + "description": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* ." }, "arn": { "type": "string", @@ -268009,11 +269225,11 @@ }, "emailVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "emailVerificationSubject": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." + "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "lambdaConfig": { "$ref": "#/types/aws-native:cognito:UserPoolLambdaConfig", @@ -268029,7 +269245,7 @@ }, "providerName": { "type": "string", - "description": "The provider name of the Amazon Cognito user pool, specified as a `String` ." + "description": "A friendly name for the IdP." }, "providerUrl": { "type": "string", @@ -268040,11 +269256,11 @@ "items": { "$ref": "#/types/aws-native:cognito:UserPoolSchemaAttribute" }, - "description": "The schema attributes for the new user pool. These attributes can be standard or custom attributes.\n\n\u003e During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute." + "description": "An array of schema attributes for the new user pool. These attributes can be standard or custom attributes." }, "smsAuthenticationMessage": { "type": "string", - "description": "A string representing the SMS authentication message." + "description": "The contents of the SMS authentication message." }, "smsConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolSmsConfiguration", @@ -268052,7 +269268,7 @@ }, "smsVerificationMessage": { "type": "string", - "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) ." 
+ "description": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cognito-userpool-verificationmessagetemplate.html) ." }, "userAttributeUpdateSettings": { "$ref": "#/types/aws-native:cognito:UserPoolUserAttributeUpdateSettings", @@ -268082,11 +269298,11 @@ "items": { "type": "string" }, - "description": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated." + "description": "Specifies whether a user can use an email address or phone number as a username when they sign up." }, "usernameConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolUsernameConfiguration", - "description": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set." + "description": "Case sensitivity on the username input for the selected sign-in option. When case sensitivity is set to `False` (case insensitive), users can sign in with any combination of capital and lowercase letters. For example, `username` , `USERNAME` , or `UserName` , or for email, `email@example.com` or `EMaiL@eXamplE.Com` . For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, Amazon Cognito treats any variation in case as the same user, and prevents a case variation from being assigned to the same attribute for a different user.\n\nThis configuration is immutable after you set it. For more information, see [UsernameConfigurationType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html) ." }, "verificationMessageTemplate": { "$ref": "#/types/aws-native:cognito:UserPoolVerificationMessageTemplate", @@ -268117,7 +269333,7 @@ "properties": { "accessTokenValidity": { "type": "integer", - "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with their access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours." + "description": "The access token time limit. After this limit expires, your user can't use their access token. To specify the time unit for `AccessTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `AccessTokenValidity` to `10` and `TokenValidityUnits` to `hours` , your user can authorize access with\ntheir access token for 10 hours.\n\nThe default time unit for `AccessTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your access\ntokens are valid for one hour." }, "allowedOAuthFlows": { "type": "array", @@ -268184,7 +269400,7 @@ }, "idTokenValidity": { "type": "integer", - "description": "The ID token time limit. 
After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours." + "description": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your ID\ntokens are valid for one hour." }, "logoutUrls": { "type": "array", @@ -268198,7 +269414,7 @@ }, "preventUserExistenceErrors": { "type": "string", - "description": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool." + "description": "Errors and responses that you want Amazon Cognito APIs to return during authentication, account confirmation, and password recovery when the user doesn't exist in the user pool. When set to `ENABLED` and the user doesn't exist, authentication returns an error indicating either the username or password was incorrect. Account confirmation and password recovery return a response indicating a code was sent to a simulated destination. When set to `LEGACY` , those APIs return a `UserNotFoundException` exception if the user doesn't exist in the user pool.\n\nValid values include:\n\n- `ENABLED` - This prevents user existence-related errors.\n- `LEGACY` - This represents the early behavior of Amazon Cognito where user existence related errors aren't prevented.\n\nDefaults to `LEGACY` when you don't provide a value." }, "readAttributes": { "type": "array", @@ -268209,7 +269425,7 @@ }, "refreshTokenValidity": { "type": "integer", - "description": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days." + "description": "The refresh token time limit. 
After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session\nand retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days. *Valid range* is displayed below in seconds.\n\nIf you don't specify otherwise in the configuration of your app client, your refresh\ntokens are valid for 30 days." }, "supportedIdentityProviders": { "type": "array", @@ -268340,7 +269556,7 @@ "properties": { "identifier": { "type": "string", - "description": "A unique resource server identifier for the resource server. This could be an HTTPS endpoint where the resource server is located. For example: `https://my-weather-api.example.com` ." + "description": "A unique resource server identifier for the resource server. The identifier can be an API friendly name like `solar-system-data` . You can also set an API URL like `https://solar-system-data-api.example.com` as your identifier.\n\nAmazon Cognito represents scopes in the access token in the format `$resource-server-identifier/$scope` . Longer scope-identifier strings increase the size of your access tokens." }, "userPoolId": { "type": "string", @@ -268374,7 +269590,7 @@ "properties": { "clientId": { "type": "string", - "description": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` )." + "description": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings." }, "userPoolId": { "type": "string", @@ -268409,7 +269625,7 @@ "properties": { "clientId": { "type": "string", - "description": "The client ID for the client app. You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` )." + "description": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings.." }, "userPoolId": { "type": "string", @@ -280630,6 +281846,25 @@ "type": "string", "description": "A time stamp indicating when this data object was created. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\")." }, + "sourceVersionNumber": { + "type": "integer", + "description": "A specific ContainerGroupDefinition version to be updated" + }, + "status": { + "$ref": "#/types/aws-native:gamelift:ContainerGroupDefinitionStatus", + "description": "A string indicating ContainerGroupDefinition status." + }, + "statusReason": { + "type": "string", + "description": "A string indicating the reason for ContainerGroupDefinition status." + }, + "supportContainerDefinitions": { + "type": "array", + "items": { + "$ref": "pulumi.json#/Any" + }, + "description": "A collection of support container definitions that define the containers in this group." 
+ }, "tags": { "type": "array", "items": { @@ -287845,7 +289080,7 @@ "items": { "$ref": "#/types/aws-native:index:Tag" }, - "description": "A list of tags to add to the event source mapping.\n\n\u003e You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." + "description": "A list of tags to add to the event source mapping.\n You must have the ``lambda:TagResource``, ``lambda:UntagResource``, and ``lambda:ListTags`` permissions for your [principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the CFN stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." }, "topics": { "type": "array", @@ -288116,10 +289351,6 @@ "type": "string", "description": "The ARN of the version." }, - "policy": { - "$ref": "pulumi.json#/Any", - "description": "The resource policy of your function\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::Lambda::Version` for more information about the expected schema for this property." - }, "version": { "type": "string", "description": "The version number.", @@ -295828,7 +297059,7 @@ "properties": { "configuration": { "$ref": "pulumi.json#/Any", - "description": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." + "description": "Configuration information to connect your data source repository to Amazon Q Business. Use this parameter to provide a JSON schema with configuration information specific to your data source connector.\n\nEach data source has a JSON schema provided by Amazon Q Business that you must use. 
For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas:\n\n- [Amazon S3 JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-api.html)\n- [Web Crawler JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-api.html)\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source of your choice.\n- Then, from your specific data source connector page, select *Using the API* . You will find the JSON schema for your data source, including parameter descriptions, in this section.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." }, "createdAt": { "type": "string", @@ -296148,6 +297379,12 @@ ], "description": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience." }, + "origins": { + "type": "array", + "items": { + "type": "string" + } + }, "roleArn": { "type": "string", "description": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n\u003e You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value." @@ -296581,7 +297818,8 @@ "inputs": { "properties": { "awsAccountId": { - "type": "string" + "type": "string", + "description": "The ID for the AWS account where you want to create the folder." }, "folderId": { "type": "string", @@ -296615,7 +297853,8 @@ "type": "array", "items": { "$ref": "#/types/aws-native:quicksight:FolderResourcePermission" - } + }, + "description": "A structure that describes the principals and the resource-level permissions of a folder.\n\nTo specify no permissions, omit `Permissions` ." }, "tags": { "type": "array", @@ -297654,6 +298893,13 @@ "type": "string", "description": "The description for the DB subnet group." }, + "subnetIds": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The EC2 Subnet IDs for the DB subnet group." + }, "tags": { "type": "array", "items": { @@ -301454,7 +302700,7 @@ }, "version": { "type": "integer", - "description": "The version number." + "description": "The version of the image." } } } @@ -304237,7 +305483,7 @@ } }, "aws-native:sqs:getQueue": { - "description": "The ``AWS::SQS::Queue`` resource creates an SQS standard or FIFO queue.\n Keep the following caveats in mind:\n + If you don't specify the ``FifoQueue`` property, SQS creates a standard queue.\n You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see [Moving from a standard queue to a FIFO queue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-moving.html) in the *Developer Guide*. 
\n + If you don't provide a value for a property, the queue is created with the default value for the property.\n + If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n + To successfully create a new queue, you must provide a queue name that adheres to the [limits related to queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) and is unique within the scope of your queues.\n \n For more information about creating FIFO (first-in-first-out) queues, see [Creating an queue ()](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/screate-queue-cloudformation.html) in the *Developer Guide*.", + "description": "The ``AWS::SQS::Queue`` resource creates an SQS standard or FIFO queue.\n Keep the following caveats in mind:\n + If you don't specify the ``FifoQueue`` property, SQS creates a standard queue.\n You can't change the queue type after you create it and you can't convert an existing standard queue into a FIFO queue. You must either create a new FIFO queue for your application or delete your existing standard queue and recreate it as a FIFO queue. For more information, see [Moving from a standard queue to a FIFO queue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues-moving.html) in the *Developer Guide*. \n + If you don't provide a value for a property, the queue is created with the default value for the property.\n + If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.\n + To successfully create a new queue, you must provide a queue name that adheres to the [limits related to queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) and is unique within the scope of your queues.\n \n For more information about creating FIFO (first-in-first-out) queues, see [Creating an queue ()](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/create-queue-cloudformation.html) in the *Developer Guide*.", "inputs": { "properties": { "queueUrl": { @@ -304277,7 +305523,7 @@ }, "kmsMasterKeyId": { "type": "string", - "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (e.g. ``alias/aws/sqs``), key ARN, or key ID. For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper" + "description": "The ID of an AWS Key Management Service (KMS) for SQS, or a custom KMS. To use the AWS managed KMS for SQS, specify a (default) alias ARN, alias name (for example ``alias/aws/sqs``), key ARN, or key ID. 
For more information, see the following:\n + [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Developer Guide* \n + [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *API Reference* \n + [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *Key Management Service API Reference* \n + The Key Management Service (KMS) section of the [Security best practices for Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *Key Management Service Developer Guide*" }, "maximumMessageSize": { "type": "integer", @@ -304297,11 +305543,11 @@ }, "redriveAllowPolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n + ``redrivePermission``: The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n + ``allowAll``: (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n + ``denyAll``: No source queues can specify this queue as the dead-letter queue.\n + ``byQueue``: Only queues specified by the ``sourceQueueArns`` parameter can specify this queue as the dead-letter queue.\n \n + ``sourceQueueArns``: The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the ``redrivePermission`` parameter is set to ``byQueue``. You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the ``redrivePermission`` parameter to ``allowAll``.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." 
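Because `RedriveAllowPolicy` (above) and `RedrivePolicy` (below) are typed here as untyped JSON (`pulumi.json#/Any`), a Pulumi program passes them as plain objects. The sketch below is illustrative only: it assumes the usual `@pulumi/aws-native` projection of `AWS::SQS::Queue` (`sqs.Queue`, camelCase inputs, an `arn` output), and the queue names are made up.

```typescript
import * as awsnative from "@pulumi/aws-native";

// Dead-letter queue. redriveAllowPolicy controls which source queues may
// target it; "allowAll" (the default) keeps this sketch free of a circular
// reference between the two queues. Use "byQueue" plus sourceQueueArns to
// restrict redrives to specific, already-known queue ARNs.
const dlq = new awsnative.sqs.Queue("orders-dlq", {
    redriveAllowPolicy: {
        redrivePermission: "allowAll",
    },
});

// Source queue: once a message has been received 5 times without being
// deleted (maxReceiveCount), SQS moves it to the dead-letter queue.
const orders = new awsnative.sqs.Queue("orders", {
    redrivePolicy: {
        deadLetterTargetArn: dlq.arn,
        maxReceiveCount: 5,
    },
});
```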
}, "redrivePolicy": { "$ref": "pulumi.json#/Any", - "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is delivered to the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." + "description": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n + ``deadLetterTargetArn``: The Amazon Resource Name (ARN) of the dead-letter queue to which SQS moves messages after the value of ``maxReceiveCount`` is exceeded.\n + ``maxReceiveCount``: The number of times a message is received by a consumer of the source queue before being moved to the dead-letter queue. When the ``ReceiveCount`` for a message exceeds the ``maxReceiveCount`` for a queue, SQS moves the message to the dead-letter-queue.\n \n The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue.\n *JSON* \n ``{ \"deadLetterTargetArn\" : String, \"maxReceiveCount\" : Integer }`` \n *YAML* \n ``deadLetterTargetArn : String`` \n ``maxReceiveCount : Integer``\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::SQS::Queue` for more information about the expected schema for this property." }, "sqsManagedSseEnabled": { "type": "boolean", @@ -305968,6 +307214,104 @@ } } }, + "aws-native:transfer:getServer": { + "description": "Definition of AWS::Transfer::Server Resource Type", + "inputs": { + "properties": { + "arn": { + "type": "string", + "description": "The Amazon Resource Name associated with the server, in the form `arn:aws:transfer:region: *account-id* :server/ *server-id* /` .\n\nAn example of a server ARN is: `arn:aws:transfer:us-east-1:123456789012:server/s-01234567890abcdef` ." + } + }, + "required": [ + "arn" + ] + }, + "outputs": { + "properties": { + "arn": { + "type": "string", + "description": "The Amazon Resource Name associated with the server, in the form `arn:aws:transfer:region: *account-id* :server/ *server-id* /` .\n\nAn example of a server ARN is: `arn:aws:transfer:us-east-1:123456789012:server/s-01234567890abcdef` ." + }, + "as2ServiceManagedEgressIpAddresses": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The list of egress IP addresses of this server. These IP addresses are only relevant for servers that use the AS2 protocol. They are used for sending asynchronous MDNs. These IP addresses are assigned automatically when you create an AS2 server. 
Additionally, if you update an existing server and add the AS2 protocol, static IP addresses are assigned as well." + }, + "certificate": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. Required when `Protocols` is set to `FTPS` .\n\nTo request a new public certificate, see [Request a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the *AWS Certificate Manager User Guide* .\n\nTo import an existing certificate into ACM, see [Importing certificates into ACM](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *AWS Certificate Manager User Guide* .\n\nTo request a private certificate to use FTPS through private IP addresses, see [Request a private certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) in the *AWS Certificate Manager User Guide* .\n\nCertificates with the following cryptographic algorithms and key sizes are supported:\n\n- 2048-bit RSA (RSA_2048)\n- 4096-bit RSA (RSA_4096)\n- Elliptic Prime Curve 256 bit (EC_prime256v1)\n- Elliptic Prime Curve 384 bit (EC_secp384r1)\n- Elliptic Prime Curve 521 bit (EC_secp521r1)\n\n\u003e The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer." + }, + "endpointDetails": { + "$ref": "#/types/aws-native:transfer:ServerEndpointDetails", + "description": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint." + }, + "endpointType": { + "$ref": "#/types/aws-native:transfer:ServerEndpointType", + "description": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n\u003e After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n\u003e \n\u003e For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n\u003e \n\u003e It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` ." + }, + "identityProviderDetails": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderDetails", + "description": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . 
Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` ." + }, + "loggingRole": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs." + }, + "postAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n\u003e The SFTP protocol does not support post-authentication display banners." + }, + "preAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:\n\n`This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.`" + }, + "protocolDetails": { + "$ref": "#/types/aws-native:transfer:ServerProtocolDetails", + "description": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "protocols": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerProtocol" + }, + "description": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. 
The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): used for transporting structured business-to-business data\n\n\u003e - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.\n\u003e - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n\u003e - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and domain must be Amazon S3. \n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "s3StorageOptions": { + "$ref": "#/types/aws-native:transfer:ServerS3StorageOptions", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + }, + "securityPolicyName": { + "type": "string", + "description": "Specifies the name of the security policy for the server." + }, + "serverId": { + "type": "string", + "description": "The service-assigned ID of the server that is created.\n\nAn example `ServerId` is `s-01234567890abcdef` ." + }, + "structuredLogDestinations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:index:Tag" + }, + "description": "Key-value pairs that can be used to group and search for servers." + }, + "workflowDetails": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetails", + "description": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.\n\nIn addition to a workflow to execute when a file is uploaded completely, `WorkflowDetails` can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects." 
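The `aws-native:transfer:getServer` function defined above takes only the server ARN and returns the server's read-only configuration. A hypothetical usage sketch in Pulumi TypeScript follows; it assumes the provider generates `transfer.getServerOutput` alongside `getServer`, as it does for its other functions, and the server ARN is a placeholder.

```typescript
import * as awsnative from "@pulumi/aws-native";

// Look up an existing Transfer Family server by ARN (ARN is made up).
const server = awsnative.transfer.getServerOutput({
    arn: "arn:aws:transfer:us-east-1:123456789012:server/s-01234567890abcdef",
});

// A few of the read-only properties documented above.
export const serverId = server.serverId;
export const endpointType = server.endpointType;
export const protocols = server.protocols;
```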
+ } + } + } + }, "aws-native:transfer:getWorkflow": { "description": "Resource Type definition for AWS::Transfer::Workflow", "inputs": { @@ -306999,6 +308343,49 @@ } } }, + "aws-native:wisdom:getAiPrompt": { + "description": "Definition of AWS::Wisdom::AIPrompt Resource Type", + "inputs": { + "properties": { + "aiPromptId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect AI prompt." + }, + "assistantId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect assistant. Can be either the ID or the ARN. URLs cannot contain the ARN." + } + }, + "required": [ + "aiPromptId", + "assistantId" + ] + }, + "outputs": { + "properties": { + "aiPromptArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AI Prompt." + }, + "aiPromptId": { + "type": "string", + "description": "The identifier of the Amazon Q in Connect AI prompt." + }, + "assistantArn": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the Amazon Q in Connect assistant." + }, + "description": { + "type": "string", + "description": "The description of the AI Prompt." + }, + "templateConfiguration": { + "$ref": "#/types/aws-native:wisdom:AiPromptAiPromptTemplateConfiguration", + "description": "The configuration of the prompt template for this AI Prompt." + } + } + } + }, "aws-native:wisdom:getAssistant": { "description": "Definition of AWS::Wisdom::Assistant Resource Type", "inputs": { diff --git a/reports/missedAutonaming.json b/reports/missedAutonaming.json index afaa606c1e..823f696603 100644 --- a/reports/missedAutonaming.json +++ b/reports/missedAutonaming.json @@ -2004,11 +2004,11 @@ }, "domain": { "type": "string", - "description": "The domain name for the domain that hosts the sign-up and sign-in pages for your application. For example: `auth.example.com` . If you're using a prefix domain, this field denotes the first part of the domain before `.auth.[region].amazoncognito.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names." + "description": "The domain name for the custom domain that hosts the sign-up and sign-in pages for your application. One example might be `auth.example.com` .\n\nThis string can include only lowercase letters, numbers, and hyphens. Don't use a hyphen for the first or last character. Use periods to separate subdomain names." }, "userPoolId": { "type": "string", - "description": "The user pool ID for the user pool where you want to associate a user pool domain." + "description": "The ID of the user pool that is associated with the custom domain whose certificate you're updating." } } }, @@ -2021,7 +2021,7 @@ }, "clientId": { "type": "string", - "description": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` )." + "description": "The app client where this configuration is applied. When this parameter isn't present, the risk configuration applies to all user pool app clients that don't have client-level settings." }, "compromisedCredentialsRiskConfiguration": { "$ref": "#/types/aws-native:cognito:UserPoolRiskConfigurationAttachmentCompromisedCredentialsRiskConfigurationType", @@ -2042,7 +2042,7 @@ "properties": { "clientId": { "type": "string", - "description": "The client ID for the client app. 
You can specify the UI customization settings for a single client (with a specific clientId) or for all clients (by setting the clientId to `ALL` )." + "description": "The app client ID for your UI customization. When this value isn't present, the customization applies to all user pool app clients that don't have client-level settings.." }, "css": { "type": "string", @@ -2066,7 +2066,8 @@ "description": "The user pool ID for the user pool." }, "username": { - "type": "string" + "type": "string", + "description": "The user's username." } } }, @@ -6193,7 +6194,8 @@ "type": "array", "items": { "$ref": "#/types/aws-native:ecs:TaskSetCapacityProviderStrategyItem" - } + }, + "description": "The capacity provider strategy that are associated with the task set." }, "cluster": { "type": "string", @@ -8191,7 +8193,7 @@ "items": { "$ref": "#/types/aws-native:index:Tag" }, - "description": "A list of tags to add to the event source mapping.\n\n\u003e You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." + "description": "A list of tags to add to the event source mapping.\n You must have the ``lambda:TagResource``, ``lambda:UntagResource``, and ``lambda:ListTags`` permissions for your [principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the CFN stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." }, "topics": { "type": "array", @@ -8339,10 +8341,6 @@ "type": "string", "description": "The name of the Lambda function." }, - "policy": { - "$ref": "pulumi.json#/Any", - "description": "The resource policy of your function\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::Lambda::Version` for more information about the expected schema for this property." - }, "provisionedConcurrencyConfig": { "$ref": "#/types/aws-native:lambda:VersionProvisionedConcurrencyConfiguration", "description": "Specifies a provisioned concurrency configuration for a function's version. Updates are not supported for this property." @@ -9766,7 +9764,7 @@ }, "configuration": { "$ref": "pulumi.json#/Any", - "description": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." 
+ "description": "Configuration information to connect your data source repository to Amazon Q Business. Use this parameter to provide a JSON schema with configuration information specific to your data source connector.\n\nEach data source has a JSON schema provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas:\n\n- [Amazon S3 JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-api.html)\n- [Web Crawler JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-api.html)\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source of your choice.\n- Then, from your specific data source connector page, select *Using the API* . You will find the JSON schema for your data source, including parameter descriptions, in this section.\n\nSearch the [CloudFormation User Guide](https://docs.aws.amazon.com/cloudformation/) for `AWS::QBusiness::DataSource` for more information about the expected schema for this property." }, "description": { "type": "string", @@ -9951,6 +9949,12 @@ ], "description": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience." }, + "origins": { + "type": "array", + "items": { + "type": "string" + } + }, "roleArn": { "type": "string", "description": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n\u003e You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value." @@ -10396,7 +10400,7 @@ }, "dbSnapshotIdentifier": { "type": "string", - "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. 
If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``DeleteAutomatedBackups`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PerformanceInsightsKMSKeyId`` \n + ``PerformanceInsightsRetentionPeriod`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an encrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." + "description": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n By specifying this property, you can create a DB instance from the specified DB snapshot. If the ``DBSnapshotIdentifier`` property is an empty string or the ``AWS::RDS::DBInstance`` declaration has no ``DBSnapshotIdentifier`` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n Some DB instance properties aren't valid when you restore from a snapshot, such as the ``MasterUsername`` and ``MasterUserPassword`` properties. For information about the properties that you can specify, see the ``RestoreDBInstanceFromDBSnapshot`` action in the *Amazon RDS API Reference*.\n After you restore a DB instance with a ``DBSnapshotIdentifier`` property, you must specify the same ``DBSnapshotIdentifier`` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the ``DBSnapshotIdentifier`` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified ``DBSnapshotIdentifier`` property, and the original DB instance is deleted.\n If you specify the ``DBSnapshotIdentifier`` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n + ``CharacterSetName`` \n + ``DBClusterIdentifier`` \n + ``DBName`` \n + ``KmsKeyId`` \n + ``MasterUsername`` \n + ``MasterUserPassword`` \n + ``PromotionTier`` \n + ``SourceDBInstanceIdentifier`` \n + ``SourceRegion`` \n + ``StorageEncrypted`` (for an unencrypted snapshot)\n + ``Timezone`` \n \n *Amazon Aurora* \n Not applicable. Snapshot restore is managed by the DB cluster." }, "dbSubnetGroupName": { "type": "string", @@ -12460,6 +12464,84 @@ } } }, + "aws-native:transfer:Server": { + "cfTypeName": "AWS::Transfer::Server", + "properties": { + "certificate": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. 
Required when `Protocols` is set to `FTPS` .\n\nTo request a new public certificate, see [Request a public certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) in the *AWS Certificate Manager User Guide* .\n\nTo import an existing certificate into ACM, see [Importing certificates into ACM](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *AWS Certificate Manager User Guide* .\n\nTo request a private certificate to use FTPS through private IP addresses, see [Request a private certificate](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) in the *AWS Certificate Manager User Guide* .\n\nCertificates with the following cryptographic algorithms and key sizes are supported:\n\n- 2048-bit RSA (RSA_2048)\n- 4096-bit RSA (RSA_4096)\n- Elliptic Prime Curve 256 bit (EC_prime256v1)\n- Elliptic Prime Curve 384 bit (EC_secp384r1)\n- Elliptic Prime Curve 521 bit (EC_secp521r1)\n\n\u003e The certificate must be a valid SSL/TLS X.509 version 3 certificate with FQDN or IP address specified and information about the issuer." + }, + "domain": { + "$ref": "#/types/aws-native:transfer:ServerDomain", + "description": "Specifies the domain of the storage system that is used for file transfers. There are two domains available: Amazon Simple Storage Service (Amazon S3) and Amazon Elastic File System (Amazon EFS). The default value is S3." + }, + "endpointDetails": { + "$ref": "#/types/aws-native:transfer:ServerEndpointDetails", + "description": "The virtual private cloud (VPC) endpoint settings that are configured for your server. When you host your endpoint within your VPC, you can make your endpoint accessible only to resources within your VPC, or you can attach Elastic IP addresses and make your endpoint accessible to clients over the internet. Your VPC's default security groups are automatically assigned to your endpoint." + }, + "endpointType": { + "$ref": "#/types/aws-native:transfer:ServerEndpointType", + "description": "The type of endpoint that you want your server to use. You can choose to make your server's endpoint publicly accessible (PUBLIC) or host it inside your VPC. With an endpoint that is hosted in a VPC, you can restrict access to your server and resources only within your VPC or choose to make it internet facing by attaching Elastic IP addresses directly to it.\n\n\u003e After May 19, 2021, you won't be able to create a server using `EndpointType=VPC_ENDPOINT` in your AWS account if your account hasn't already done so before May 19, 2021. If you have already created servers with `EndpointType=VPC_ENDPOINT` in your AWS account on or before May 19, 2021, you will not be affected. After this date, use `EndpointType` = `VPC` .\n\u003e \n\u003e For more information, see [Discontinuing the use of VPC_ENDPOINT](https://docs.aws.amazon.com//transfer/latest/userguide/create-server-in-vpc.html#deprecate-vpc-endpoint) .\n\u003e \n\u003e It is recommended that you use `VPC` as the `EndpointType` . With this endpoint type, you have the option to directly associate up to three Elastic IPv4 addresses (BYO IP included) with your server's endpoint and use VPC security groups to restrict traffic by the client's public IP address. This is not possible with `EndpointType` set to `VPC_ENDPOINT` ." 
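The `EndpointType` guidance above pairs with the `Protocols` rule quoted earlier: an SFTP-only server may use a `PUBLIC` endpoint with `SERVICE_MANAGED` identities. A minimal, hypothetical Pulumi TypeScript sketch of such a server follows; it assumes a `transfer.Server` class with camelCase property names mirroring this schema, and the logging-role ARN and tag values are placeholders.

```typescript
import * as awsnative from "@pulumi/aws-native";

// Minimal SFTP-only server: with SFTP as the only protocol, the endpoint
// can be PUBLIC and identities can be SERVICE_MANAGED, per the
// EndpointType/Protocols descriptions in this schema.
const sftpServer = new awsnative.transfer.Server("sftp-server", {
    endpointType: "PUBLIC",
    identityProviderType: "SERVICE_MANAGED",
    protocols: ["SFTP"],
    // Placeholder IAM role ARN for CloudWatch logging (see loggingRole above).
    loggingRole: "arn:aws:iam::123456789012:role/transfer-logging-role",
    tags: [{ key: "env", value: "dev" }],
});

export const transferServerArn = sftpServer.arn;
```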
+ }, + "identityProviderDetails": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderDetails", + "description": "Required when `IdentityProviderType` is set to `AWS_DIRECTORY_SERVICE` , `AWS _LAMBDA` or `API_GATEWAY` . Accepts an array containing all of the information required to use a directory in `AWS_DIRECTORY_SERVICE` or invoke a customer-supplied authentication API, including the API Gateway URL. Cannot be specified when `IdentityProviderType` is set to `SERVICE_MANAGED` ." + }, + "identityProviderType": { + "$ref": "#/types/aws-native:transfer:ServerIdentityProviderType", + "description": "The mode of authentication for a server. The default value is `SERVICE_MANAGED` , which allows you to store and access user credentials within the AWS Transfer Family service.\n\nUse `AWS_DIRECTORY_SERVICE` to provide access to Active Directory groups in AWS Directory Service for Microsoft Active Directory or Microsoft Active Directory in your on-premises environment or in AWS using AD Connector. This option also requires you to provide a Directory ID by using the `IdentityProviderDetails` parameter.\n\nUse the `API_GATEWAY` value to integrate with an identity provider of your choosing. The `API_GATEWAY` setting requires you to provide an Amazon API Gateway endpoint URL to call for authentication by using the `IdentityProviderDetails` parameter.\n\nUse the `AWS_LAMBDA` value to directly use an AWS Lambda function as your identity provider. If you choose this value, you must specify the ARN for the Lambda function in the `Function` parameter for the `IdentityProviderDetails` data type." + }, + "loggingRole": { + "type": "string", + "description": "The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that allows a server to turn on Amazon CloudWatch logging for Amazon S3 or Amazon EFSevents. When set, you can view user activity in your CloudWatch logs." + }, + "postAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed after the user authenticates.\n\n\u003e The SFTP protocol does not support post-authentication display banners." + }, + "preAuthenticationLoginBanner": { + "type": "string", + "description": "Specifies a string to display when users connect to a server. This string is displayed before the user authenticates. For example, the following banner displays details about using the system:\n\n`This system is for the use of authorized users only. Individuals using this computer system without authority, or in excess of their authority, are subject to having all of their activities on this system monitored and recorded by system personnel.`" + }, + "protocolDetails": { + "$ref": "#/types/aws-native:transfer:ServerProtocolDetails", + "description": "The protocol settings that are configured for your server.\n\n- To indicate passive mode (for FTP and FTPS protocols), use the `PassiveIp` parameter. Enter a single dotted-quad IPv4 address, such as the external IP address of a firewall, router, or load balancer.\n- To ignore the error that is generated when the client attempts to use the `SETSTAT` command on a file that you are uploading to an Amazon S3 bucket, use the `SetStatOption` parameter. To have the AWS Transfer Family server ignore the `SETSTAT` command and upload files without needing to make any changes to your SFTP client, set the value to `ENABLE_NO_OP` . 
If you set the `SetStatOption` parameter to `ENABLE_NO_OP` , Transfer Family generates a log entry to Amazon CloudWatch Logs, so that you can determine when the client is making a `SETSTAT` call.\n- To determine whether your AWS Transfer Family server resumes recent, negotiated sessions through a unique session ID, use the `TlsSessionResumptionMode` parameter.\n- `As2Transports` indicates the transport method for the AS2 messages. Currently, only HTTP is supported.\n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "protocols": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:transfer:ServerProtocol" + }, + "description": "Specifies the file transfer protocol or protocols over which your file transfer protocol client can connect to your server's endpoint. The available protocols are:\n\n- `SFTP` (Secure Shell (SSH) File Transfer Protocol): File transfer over SSH\n- `FTPS` (File Transfer Protocol Secure): File transfer with TLS encryption\n- `FTP` (File Transfer Protocol): Unencrypted file transfer\n- `AS2` (Applicability Statement 2): used for transporting structured business-to-business data\n\n\u003e - If you select `FTPS` , you must choose a certificate stored in AWS Certificate Manager (ACM) which is used to identify your server when clients connect to it over FTPS.\n\u003e - If `Protocol` includes either `FTP` or `FTPS` , then the `EndpointType` must be `VPC` and the `IdentityProviderType` must be either `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `FTP` , then `AddressAllocationIds` cannot be associated.\n\u003e - If `Protocol` is set only to `SFTP` , the `EndpointType` can be set to `PUBLIC` and the `IdentityProviderType` can be set any of the supported identity types: `SERVICE_MANAGED` , `AWS_DIRECTORY_SERVICE` , `AWS_LAMBDA` , or `API_GATEWAY` .\n\u003e - If `Protocol` includes `AS2` , then the `EndpointType` must be `VPC` , and domain must be Amazon S3. \n\nThe `Protocols` parameter is an array of strings.\n\n*Allowed values* : One or more of `SFTP` , `FTPS` , `FTP` , `AS2`" + }, + "s3StorageOptions": { + "$ref": "#/types/aws-native:transfer:ServerS3StorageOptions", + "description": "Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default.\n\nBy default, home directory mappings have a `TYPE` of `DIRECTORY` . If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` `Type` to `FILE` if you want a mapping to have a file target." + }, + "securityPolicyName": { + "type": "string", + "description": "Specifies the name of the security policy for the server." + }, + "structuredLogDestinations": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Specifies the log groups to which your server logs are sent.\n\nTo specify a log group, you must provide the ARN for an existing log group. In this case, the format of the log group is as follows:\n\n`arn:aws:logs:region-name:amazon-account-id:log-group:log-group-name:*`\n\nFor example, `arn:aws:logs:us-east-1:111122223333:log-group:mytestgroup:*`\n\nIf you have previously specified a log group for a server, you can clear it, and in effect turn off structured logging, by providing an empty value for this parameter in an `update-server` call. 
For example:\n\n`update-server --server-id s-1234567890abcdef0 --structured-log-destinations`" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/types/aws-native:index:Tag" + }, + "description": "Key-value pairs that can be used to group and search for servers." + }, + "workflowDetails": { + "$ref": "#/types/aws-native:transfer:ServerWorkflowDetails", + "description": "Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.\n\nIn addition to a workflow to execute when a file is uploaded completely, `WorkflowDetails` can also contain a workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when the session disconnects." + } + } + }, "aws-native:transfer:Workflow": { "cfTypeName": "AWS::Transfer::Workflow", "properties": { diff --git a/sdk/dotnet/AppFlow/Inputs/FlowSapoDataPaginationConfigArgs.cs b/sdk/dotnet/AppFlow/Inputs/FlowSapoDataPaginationConfigArgs.cs index 54f24ad805..2d4ea713e1 100644 --- a/sdk/dotnet/AppFlow/Inputs/FlowSapoDataPaginationConfigArgs.cs +++ b/sdk/dotnet/AppFlow/Inputs/FlowSapoDataPaginationConfigArgs.cs @@ -15,6 +15,9 @@ namespace Pulumi.AwsNative.AppFlow.Inputs /// public sealed class FlowSapoDataPaginationConfigArgs : global::Pulumi.ResourceArgs { + ///