diff --git a/awscli/examples/cloudformation/_package_description.rst b/awscli/examples/cloudformation/_package_description.rst index f47ec2212916..21566ac96c5d 100644 --- a/awscli/examples/cloudformation/_package_description.rst +++ b/awscli/examples/cloudformation/_package_description.rst @@ -40,7 +40,7 @@ For example, if your AWS Lambda function source code is in the ``/home/user/code/lambdafunction/`` folder, specify ``CodeUri: /home/user/code/lambdafunction`` for the ``AWS::Serverless::Function`` resource. The command returns a template and replaces -the local path with the S3 location: ``CodeUri: s3://mybucket/lambdafunction.zip``. +the local path with the S3 location: ``CodeUri: s3://amzn-s3-demo-bucket/lambdafunction.zip``. If you specify a file, the command directly uploads it to the S3 bucket. If you specify a folder, the command zips the folder and then uploads the .zip file. diff --git a/awscli/examples/emr/add-steps.rst b/awscli/examples/emr/add-steps.rst index 282c77edbe41..6ec39f356ebe 100644 --- a/awscli/examples/emr/add-steps.rst +++ b/awscli/examples/emr/add-steps.rst @@ -2,7 +2,7 @@ - Command:: - aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=CUSTOM_JAR,Name=CustomJAR,ActionOnFailure=CONTINUE,Jar=s3://mybucket/mytest.jar,Args=arg1,arg2,arg3 Type=CUSTOM_JAR,Name=CustomJAR,ActionOnFailure=CONTINUE,Jar=s3://mybucket/mytest.jar,MainClass=mymainclass,Args=arg1,arg2,arg3 + aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=CUSTOM_JAR,Name=CustomJAR,ActionOnFailure=CONTINUE,Jar=s3://amzn-s3-demo-bucket/mytest.jar,Args=arg1,arg2,arg3 Type=CUSTOM_JAR,Name=CustomJAR,ActionOnFailure=CONTINUE,Jar=s3://amzn-s3-demo-bucket/mytest.jar,MainClass=mymainclass,Args=arg1,arg2,arg3 - Required parameters:: @@ -25,7 +25,7 @@ - Command:: - aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://mybucket/wordcount/output] + aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://amzn-s3-demo-bucket/wordcount/output] - Required parameters:: @@ -40,7 +40,7 @@ [ { "Name": "JSON Streaming Step", - "Args": ["-files","s3://elasticmapreduce/samples/wordcount/wordSplitter.py","-mapper","wordSplitter.py","-reducer","aggregate","-input","s3://elasticmapreduce/samples/wordcount/input","-output","s3://mybucket/wordcount/output"], + "Args": ["-files","s3://elasticmapreduce/samples/wordcount/wordSplitter.py","-mapper","wordSplitter.py","-reducer","aggregate","-input","s3://elasticmapreduce/samples/wordcount/input","-output","s3://amzn-s3-demo-bucket/wordcount/output"], "ActionOnFailure": "CONTINUE", "Type": "STREAMING" } @@ -72,15 +72,15 @@ NOTE: JSON arguments must include options and values as their own items in the l "ActionOnFailure": "CONTINUE", "Args": [ "-files", - "s3://mybucket/mapper.py,s3://mybucket/reducer.py", + "s3://amzn-s3-demo-bucket/mapper.py,s3://amzn-s3-demo-bucket/reducer.py", "-mapper", "mapper.py", "-reducer", "reducer.py", "-input", - "s3://mybucket/input", + "s3://amzn-s3-demo-bucket/input", "-output", - "s3://mybucket/output"] + "s3://amzn-s3-demo-bucket/output"] } ] @@ -109,7 +109,7 @@ NOTE: JSON arguments 
must include options and values as their own items in the l - Command:: - aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,Args=[-f,s3://mybucket/myhivescript.q,-d,INPUT=s3://mybucket/myhiveinput,-d,OUTPUT=s3://mybucket/myhiveoutput,arg1,arg2] Type=HIVE,Name='Hive steps',ActionOnFailure=TERMINATE_CLUSTER,Args=[-f,s3://elasticmapreduce/samples/hive-ads/libs/model-build.q,-d,INPUT=s3://elasticmapreduce/samples/hive-ads/tables,-d,OUTPUT=s3://mybucket/hive-ads/output/2014-04-18/11-07-32,-d,LIBS=s3://elasticmapreduce/samples/hive-ads/libs] + aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,Args=[-f,s3://amzn-s3-demo-bucket/myhivescript.q,-d,INPUT=s3://amzn-s3-demo-bucket/myhiveinput,-d,OUTPUT=s3://amzn-s3-demo-bucket/myhiveoutput,arg1,arg2] Type=HIVE,Name='Hive steps',ActionOnFailure=TERMINATE_CLUSTER,Args=[-f,s3://elasticmapreduce/samples/hive-ads/libs/model-build.q,-d,INPUT=s3://elasticmapreduce/samples/hive-ads/tables,-d,OUTPUT=s3://amzn-s3-demo-bucket/hive-ads/output/2014-04-18/11-07-32,-d,LIBS=s3://elasticmapreduce/samples/hive-ads/libs] - Required parameters:: @@ -134,7 +134,7 @@ NOTE: JSON arguments must include options and values as their own items in the l - Command:: - aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://mybucket/mypigscript.pig,-p,INPUT=s3://mybucket/mypiginput,-p,OUTPUT=s3://mybucket/mypigoutput,arg1,arg2] Type=PIG,Name='Pig program',Args=[-f,s3://elasticmapreduce/samples/pig-apache/do-reports2.pig,-p,INPUT=s3://elasticmapreduce/samples/pig-apache/input,-p,OUTPUT=s3://mybucket/pig-apache/output,arg1,arg2] + aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://amzn-s3-demo-bucket/mypigscript.pig,-p,INPUT=s3://amzn-s3-demo-bucket/mypiginput,-p,OUTPUT=s3://amzn-s3-demo-bucket/mypigoutput,arg1,arg2] Type=PIG,Name='Pig program',Args=[-f,s3://elasticmapreduce/samples/pig-apache/do-reports2.pig,-p,INPUT=s3://elasticmapreduce/samples/pig-apache/input,-p,OUTPUT=s3://amzn-s3-demo-bucket/pig-apache/output,arg1,arg2] - Required parameters:: diff --git a/awscli/examples/emr/create-cluster-examples.rst b/awscli/examples/emr/create-cluster-examples.rst index 0d8f673f90ec..23d48e524fe9 100644 --- a/awscli/examples/emr/create-cluster-examples.rst +++ b/awscli/examples/emr/create-cluster-examples.rst @@ -369,7 +369,7 @@ The following ``create-cluster`` examples add a streaming step to a cluster that The following example specifies the step inline. 
:: aws emr create-cluster \ - --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://mybucket/wordcount/output] \ + --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://amzn-s3-demo-bucket/wordcount/output] \ --release-label emr-5.3.1 \ --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large \ --auto-terminate @@ -397,7 +397,7 @@ Contents of ``multiplefiles.json``:: "-input", "s3://elasticmapreduce/samples/wordcount/input", "-output", - "s3://mybucket/wordcount/output" + "s3://amzn-s3-demo-bucket/wordcount/output" ], "ActionOnFailure": "CONTINUE", "Type": "STREAMING" @@ -409,7 +409,7 @@ Contents of ``multiplefiles.json``:: The following example add Hive steps when creating a cluster. Hive steps require parameters ``Type`` and ``Args``. Hive steps optional parameters are ``Name`` and ``ActionOnFailure``. :: aws emr create-cluster \ - --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,ActionOnFailure=TERMINATE_CLUSTER,Args=[-f,s3://elasticmapreduce/samples/hive-ads/libs/model-build.q,-d,INPUT=s3://elasticmapreduce/samples/hive-ads/tables,-d,OUTPUT=s3://mybucket/hive-ads/output/2014-04-18/11-07-32,-d,LIBS=s3://elasticmapreduce/samples/hive-ads/libs] \ + --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,ActionOnFailure=TERMINATE_CLUSTER,Args=[-f,s3://elasticmapreduce/samples/hive-ads/libs/model-build.q,-d,INPUT=s3://elasticmapreduce/samples/hive-ads/tables,-d,OUTPUT=s3://amzn-s3-demo-bucket/hive-ads/output/2014-04-18/11-07-32,-d,LIBS=s3://elasticmapreduce/samples/hive-ads/libs] \ --applications Name=Hive \ --release-label emr-5.3.1 \ --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large @@ -419,7 +419,7 @@ The following example add Hive steps when creating a cluster. Hive steps require The following example adds Pig steps when creating a cluster. Pig steps required parameters are ``Type`` and ``Args``. Pig steps optional parameters are ``Name`` and ``ActionOnFailure``. :: aws emr create-cluster \ - --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://elasticmapreduce/samples/pig-apache/do-reports2.pig,-p,INPUT=s3://elasticmapreduce/samples/pig-apache/input,-p,OUTPUT=s3://mybucket/pig-apache/output] \ + --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://elasticmapreduce/samples/pig-apache/do-reports2.pig,-p,INPUT=s3://elasticmapreduce/samples/pig-apache/input,-p,OUTPUT=s3://amzn-s3-demo-bucket/pig-apache/output] \ --applications Name=Pig \ --release-label emr-5.3.1 \ --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large @@ -429,7 +429,7 @@ The following example adds Pig steps when creating a cluster. Pig steps required The following ``create-cluster`` example runs two bootstrap actions defined as scripts that are stored in Amazon S3. 
:: aws emr create-cluster \ - --bootstrap-actions Path=s3://mybucket/myscript1,Name=BootstrapAction1,Args=[arg1,arg2] Path=s3://mybucket/myscript2,Name=BootstrapAction2,Args=[arg1,arg2] \ + --bootstrap-actions Path=s3://amzn-s3-demo-bucket/myscript1,Name=BootstrapAction1,Args=[arg1,arg2] Path=s3://amzn-s3-demo-bucket/myscript2,Name=BootstrapAction2,Args=[arg1,arg2] \ --release-label emr-5.3.1 \ --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large \ --auto-terminate diff --git a/awscli/examples/rds/cancel-export-task.rst b/awscli/examples/rds/cancel-export-task.rst index edf6c140a809..29e7820f768b 100644 --- a/awscli/examples/rds/cancel-export-task.rst +++ b/awscli/examples/rds/cancel-export-task.rst @@ -1,23 +1,23 @@ -**To cancel a snapshot export to Amazon S3** - -The following ``cancel-export-task`` example cancels an export task in progress that is exporting a snapshot to Amazon S3. :: - - aws rds cancel-export-task \ - --export-task-identifier my-s3-export-1 - -Output:: - - { - "ExportTaskIdentifier": "my-s3-export-1", - "SourceArn": "arn:aws:rds:us-east-1:123456789012:snapshot:publisher-final-snapshot", - "SnapshotTime": "2019-03-24T20:01:09.815Z", - "S3Bucket": "mybucket", - "S3Prefix": "", - "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/export-snap-S3-role", - "KmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/abcd0000-7bfd-4594-af38-aabbccddeeff", - "Status": "CANCELING", - "PercentProgress": 0, - "TotalExtractedDataInGB": 0 - } - +**To cancel a snapshot export to Amazon S3** + +The following ``cancel-export-task`` example cancels an export task in progress that is exporting a snapshot to Amazon S3. :: + + aws rds cancel-export-task \ + --export-task-identifier my-s3-export-1 + +Output:: + + { + "ExportTaskIdentifier": "my-s3-export-1", + "SourceArn": "arn:aws:rds:us-east-1:123456789012:snapshot:publisher-final-snapshot", + "SnapshotTime": "2019-03-24T20:01:09.815Z", + "S3Bucket": "amzn-s3-demo-bucket", + "S3Prefix": "", + "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/export-snap-S3-role", + "KmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/abcd0000-7bfd-4594-af38-aabbccddeeff", + "Status": "CANCELING", + "PercentProgress": 0, + "TotalExtractedDataInGB": 0 + } + For more information, see `Canceling a snapshot export task `__ in the *Amazon RDS User Guide* or `Canceling a snapshot export task `__ in the *Amazon Aurora User Guide*. \ No newline at end of file diff --git a/awscli/examples/rds/describe-export-tasks.rst b/awscli/examples/rds/describe-export-tasks.rst index a39d1125afce..dd18e0943108 100644 --- a/awscli/examples/rds/describe-export-tasks.rst +++ b/awscli/examples/rds/describe-export-tasks.rst @@ -1,40 +1,40 @@ -**To describe snapshot export tasks** - -The following ``describe-export-tasks`` example returns information about snapshot exports to Amazon S3. 
:: - - aws rds describe-export-tasks - -Output:: - - { - "ExportTasks": [ - { - "ExportTaskIdentifier": "test-snapshot-export", - "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:test-snapshot", - "SnapshotTime": "2020-03-02T18:26:28.163Z", - "TaskStartTime": "2020-03-02T18:57:56.896Z", - "TaskEndTime": "2020-03-02T19:10:31.985Z", - "S3Bucket": "mybucket", - "S3Prefix": "", - "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole", - "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff", - "Status": "COMPLETE", - "PercentProgress": 100, - "TotalExtractedDataInGB": 0 - }, - { - "ExportTaskIdentifier": "my-s3-export", - "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test", - "SnapshotTime": "2020-03-27T20:48:42.023Z", - "S3Bucket": "mybucket", - "S3Prefix": "", - "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole", - "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff", - "Status": "STARTING", - "PercentProgress": 0, - "TotalExtractedDataInGB": 0 - } - ] - } - -For more information, see `Monitoring Snapshot Exports `__ in the *Amazon RDS User Guide*. +**To describe snapshot export tasks** + +The following ``describe-export-tasks`` example returns information about snapshot exports to Amazon S3. :: + + aws rds describe-export-tasks + +Output:: + + { + "ExportTasks": [ + { + "ExportTaskIdentifier": "test-snapshot-export", + "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:test-snapshot", + "SnapshotTime": "2020-03-02T18:26:28.163Z", + "TaskStartTime": "2020-03-02T18:57:56.896Z", + "TaskEndTime": "2020-03-02T19:10:31.985Z", + "S3Bucket": "amzn-s3-demo-bucket", + "S3Prefix": "", + "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole", + "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff", + "Status": "COMPLETE", + "PercentProgress": 100, + "TotalExtractedDataInGB": 0 + }, + { + "ExportTaskIdentifier": "my-s3-export", + "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test", + "SnapshotTime": "2020-03-27T20:48:42.023Z", + "S3Bucket": "amzn-s3-demo-bucket", + "S3Prefix": "", + "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole", + "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff", + "Status": "STARTING", + "PercentProgress": 0, + "TotalExtractedDataInGB": 0 + } + ] + } + +For more information, see `Monitoring Snapshot Exports `__ in the *Amazon RDS User Guide*. diff --git a/awscli/examples/rds/restore-db-cluster-from-s3.rst b/awscli/examples/rds/restore-db-cluster-from-s3.rst index e30d207c106c..3f7fe445d389 100644 --- a/awscli/examples/rds/restore-db-cluster-from-s3.rst +++ b/awscli/examples/rds/restore-db-cluster-from-s3.rst @@ -1,64 +1,64 @@ -**To restore an Amazon Aurora DB cluster from Amazon S3** - -The following ``restore-db-cluster-from-s3`` example restores an Amazon Aurora MySQL version 5.7-compatible DB cluster from a MySQL 5.7 DB backup file in Amazon S3. 
:: - - aws rds restore-db-cluster-from-s3 \ - --db-cluster-identifier cluster-s3-restore \ - --engine aurora-mysql \ - --master-username admin \ - --master-user-password mypassword \ - --s3-bucket-name mybucket \ - --s3-prefix test-backup \ - --s3-ingestion-role-arn arn:aws:iam::123456789012:role/service-role/TestBackup \ - --source-engine mysql \ - --source-engine-version 5.7.28 - -Output:: - - { - "DBCluster": { - "AllocatedStorage": 1, - "AvailabilityZones": [ - "us-west-2c", - "us-west-2a", - "us-west-2b" - ], - "BackupRetentionPeriod": 1, - "DBClusterIdentifier": "cluster-s3-restore", - "DBClusterParameterGroup": "default.aurora-mysql5.7", - "DBSubnetGroup": "default", - "Status": "creating", - "Endpoint": "cluster-s3-restore.cluster-co3xyzabc123.us-west-2.rds.amazonaws.com", - "ReaderEndpoint": "cluster-s3-restore.cluster-ro-co3xyzabc123.us-west-2.rds.amazonaws.com", - "MultiAZ": false, - "Engine": "aurora-mysql", - "EngineVersion": "5.7.12", - "Port": 3306, - "MasterUsername": "admin", - "PreferredBackupWindow": "11:15-11:45", - "PreferredMaintenanceWindow": "thu:12:19-thu:12:49", - "ReadReplicaIdentifiers": [], - "DBClusterMembers": [], - "VpcSecurityGroups": [ - { - "VpcSecurityGroupId": "sg-########", - "Status": "active" - } - ], - "HostedZoneId": "Z1PVIF0EXAMPLE", - "StorageEncrypted": false, - "DbClusterResourceId": "cluster-SU5THYQQHOWCXZZDGXREXAMPLE", - "DBClusterArn": "arn:aws:rds:us-west-2:123456789012:cluster:cluster-s3-restore", - "AssociatedRoles": [], - "IAMDatabaseAuthenticationEnabled": false, - "ClusterCreateTime": "2020-07-27T14:22:08.095Z", - "EngineMode": "provisioned", - "DeletionProtection": false, - "HttpEndpointEnabled": false, - "CopyTagsToSnapshot": false, - "CrossAccountClone": false, - "DomainMemberships": [] - } - } - -For more information, see `Migrating Data from MySQL by Using an Amazon S3 Bucket `__ in the *Amazon Aurora User Guide*. +**To restore an Amazon Aurora DB cluster from Amazon S3** + +The following ``restore-db-cluster-from-s3`` example restores an Amazon Aurora MySQL version 5.7-compatible DB cluster from a MySQL 5.7 DB backup file in Amazon S3. 
:: + + aws rds restore-db-cluster-from-s3 \ + --db-cluster-identifier cluster-s3-restore \ + --engine aurora-mysql \ + --master-username admin \ + --master-user-password mypassword \ + --s3-bucket-name amzn-s3-demo-bucket \ + --s3-prefix test-backup \ + --s3-ingestion-role-arn arn:aws:iam::123456789012:role/service-role/TestBackup \ + --source-engine mysql \ + --source-engine-version 5.7.28 + +Output:: + + { + "DBCluster": { + "AllocatedStorage": 1, + "AvailabilityZones": [ + "us-west-2c", + "us-west-2a", + "us-west-2b" + ], + "BackupRetentionPeriod": 1, + "DBClusterIdentifier": "cluster-s3-restore", + "DBClusterParameterGroup": "default.aurora-mysql5.7", + "DBSubnetGroup": "default", + "Status": "creating", + "Endpoint": "cluster-s3-restore.cluster-co3xyzabc123.us-west-2.rds.amazonaws.com", + "ReaderEndpoint": "cluster-s3-restore.cluster-ro-co3xyzabc123.us-west-2.rds.amazonaws.com", + "MultiAZ": false, + "Engine": "aurora-mysql", + "EngineVersion": "5.7.12", + "Port": 3306, + "MasterUsername": "admin", + "PreferredBackupWindow": "11:15-11:45", + "PreferredMaintenanceWindow": "thu:12:19-thu:12:49", + "ReadReplicaIdentifiers": [], + "DBClusterMembers": [], + "VpcSecurityGroups": [ + { + "VpcSecurityGroupId": "sg-########", + "Status": "active" + } + ], + "HostedZoneId": "Z1PVIF0EXAMPLE", + "StorageEncrypted": false, + "DbClusterResourceId": "cluster-SU5THYQQHOWCXZZDGXREXAMPLE", + "DBClusterArn": "arn:aws:rds:us-west-2:123456789012:cluster:cluster-s3-restore", + "AssociatedRoles": [], + "IAMDatabaseAuthenticationEnabled": false, + "ClusterCreateTime": "2020-07-27T14:22:08.095Z", + "EngineMode": "provisioned", + "DeletionProtection": false, + "HttpEndpointEnabled": false, + "CopyTagsToSnapshot": false, + "CrossAccountClone": false, + "DomainMemberships": [] + } + } + +For more information, see `Migrating Data from MySQL by Using an Amazon S3 Bucket `__ in the *Amazon Aurora User Guide*. diff --git a/awscli/examples/rds/start-export-task.rst b/awscli/examples/rds/start-export-task.rst index ae45c407d689..d99476c90fab 100644 --- a/awscli/examples/rds/start-export-task.rst +++ b/awscli/examples/rds/start-export-task.rst @@ -1,26 +1,26 @@ -**To export a snapshot to Amazon S3** - -The following ``start-export-task`` example exports a DB snapshot named ``db5-snapshot-test`` to the Amazon S3 bucket named ``mybucket``. :: - - aws rds start-export-task \ - --export-task-identifier my-s3-export \ - --source-arn arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test \ - --s3-bucket-name mybucket \ - --iam-role-arn arn:aws:iam::123456789012:role/service-role/ExportRole \ - --kms-key-id arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff - -Output:: - - { - "ExportTaskIdentifier": "my-s3-export", - "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test", - "SnapshotTime": "2020-03-27T20:48:42.023Z", - "S3Bucket": "mybucket", - "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole", - "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff", - "Status": "STARTING", - "PercentProgress": 0, - "TotalExtractedDataInGB": 0 - } - -For more information, see `Exporting a Snapshot to an Amazon S3 Bucket `__ in the *Amazon RDS User Guide*. +**To export a snapshot to Amazon S3** + +The following ``start-export-task`` example exports a DB snapshot named ``db5-snapshot-test`` to the Amazon S3 bucket named ``amzn-s3-demo-bucket``. 
:: + + aws rds start-export-task \ + --export-task-identifier my-s3-export \ + --source-arn arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test \ + --s3-bucket-name amzn-s3-demo-bucket \ + --iam-role-arn arn:aws:iam::123456789012:role/service-role/ExportRole \ + --kms-key-id arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff + +Output:: + + { + "ExportTaskIdentifier": "my-s3-export", + "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test", + "SnapshotTime": "2020-03-27T20:48:42.023Z", + "S3Bucket": "amzn-s3-demo-bucket", + "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole", + "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff", + "Status": "STARTING", + "PercentProgress": 0, + "TotalExtractedDataInGB": 0 + } + +For more information, see `Exporting a Snapshot to an Amazon S3 Bucket `__ in the *Amazon RDS User Guide*. diff --git a/awscli/examples/s3/_concepts.rst b/awscli/examples/s3/_concepts.rst index a2a36ffd7986..3a922bfb5734 100644 --- a/awscli/examples/s3/_concepts.rst +++ b/awscli/examples/s3/_concepts.rst @@ -14,13 +14,13 @@ are two types of path arguments: ``LocalPath`` and ``S3Uri``. written as an absolute path or relative path. ``S3Uri``: represents the location of a S3 object, prefix, or bucket. This -must be written in the form ``s3://mybucket/mykey`` where ``mybucket`` is +must be written in the form ``s3://amzn-s3-demo-bucket/mykey`` where ``amzn-s3-demo-bucket`` is the specified S3 bucket, ``mykey`` is the specified S3 key. The path argument must begin with ``s3://`` in order to denote that the path argument refers to a S3 object. Note that prefixes are separated by forward slashes. For example, if the S3 object ``myobject`` had the prefix ``myprefix``, the S3 key would be ``myprefix/myobject``, and if the object was in the bucket -``mybucket``, the ``S3Uri`` would be ``s3://mybucket/myprefix/myobject``. +``amzn-s3-demo-bucket``, the ``S3Uri`` would be ``s3://amzn-s3-demo-bucket/myprefix/myobject``. ``S3Uri`` also supports S3 access points. To specify an access point, this value must be of the form ``s3:///``. 
For example if diff --git a/awscli/examples/s3/cp.rst b/awscli/examples/s3/cp.rst index c89ddef1b666..71ffa2f5c89d 100644 --- a/awscli/examples/s3/cp.rst +++ b/awscli/examples/s3/cp.rst @@ -3,67 +3,67 @@ The following ``cp`` command copies a single file to a specified bucket and key:: - aws s3 cp test.txt s3://mybucket/test2.txt + aws s3 cp test.txt s3://amzn-s3-demo-bucket/test2.txt Output:: - upload: test.txt to s3://mybucket/test2.txt + upload: test.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 2: Copying a local file to S3 with an expiration date** The following ``cp`` command copies a single file to a specified bucket and key that expires at the specified ISO 8601 timestamp:: - aws s3 cp test.txt s3://mybucket/test2.txt \ + aws s3 cp test.txt s3://amzn-s3-demo-bucket/test2.txt \ --expires 2014-10-01T20:30:00Z Output:: - upload: test.txt to s3://mybucket/test2.txt + upload: test.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 3: Copying a file from S3 to S3** The following ``cp`` command copies a single s3 object to a specified bucket and key:: - aws s3 cp s3://mybucket/test.txt s3://mybucket/test2.txt + aws s3 cp s3://amzn-s3-demo-bucket/test.txt s3://amzn-s3-demo-bucket/test2.txt Output:: - copy: s3://mybucket/test.txt to s3://mybucket/test2.txt + copy: s3://amzn-s3-demo-bucket/test.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 4: Copying an S3 object to a local file** The following ``cp`` command copies a single object to a specified file locally:: - aws s3 cp s3://mybucket/test.txt test2.txt + aws s3 cp s3://amzn-s3-demo-bucket/test.txt test2.txt Output:: - download: s3://mybucket/test.txt to test2.txt + download: s3://amzn-s3-demo-bucket/test.txt to test2.txt **Example 5: Copying an S3 object from one bucket to another** The following ``cp`` command copies a single object to a specified bucket while retaining its original name:: - aws s3 cp s3://mybucket/test.txt s3://amzn-s3-demo-bucket2/ + aws s3 cp s3://amzn-s3-demo-bucket/test.txt s3://amzn-s3-demo-bucket2/ Output:: - copy: s3://mybucket/test.txt to s3://amzn-s3-demo-bucket2/test.txt + copy: s3://amzn-s3-demo-bucket/test.txt to s3://amzn-s3-demo-bucket2/test.txt **Example 6: Recursively copying S3 objects to a local directory** When passed with the parameter ``--recursive``, the following ``cp`` command recursively copies all objects under a -specified prefix and bucket to a specified directory. In this example, the bucket ``mybucket`` has the objects +specified prefix and bucket to a specified directory. In this example, the bucket ``amzn-s3-demo-bucket`` has the objects ``test1.txt`` and ``test2.txt``:: - aws s3 cp s3://mybucket . \ + aws s3 cp s3://amzn-s3-demo-bucket . \ --recursive Output:: - download: s3://mybucket/test1.txt to test1.txt - download: s3://mybucket/test2.txt to test2.txt + download: s3://amzn-s3-demo-bucket/test1.txt to test1.txt + download: s3://amzn-s3-demo-bucket/test2.txt to test2.txt **Example 7: Recursively copying local files to S3** @@ -71,51 +71,51 @@ When passed with the parameter ``--recursive``, the following ``cp`` command rec specified directory to a specified bucket and prefix while excluding some files by using an ``--exclude`` parameter. 
 In this example, the directory ``myDir`` has the files ``test1.txt`` and ``test2.jpg``::
 
-    aws s3 cp myDir s3://mybucket/ \
+    aws s3 cp myDir s3://amzn-s3-demo-bucket/ \
         --recursive \
         --exclude "*.jpg"
 
 Output::
 
-    upload: myDir/test1.txt to s3://mybucket/test1.txt
+    upload: myDir/test1.txt to s3://amzn-s3-demo-bucket/test1.txt
 
 **Example 8: Recursively copying S3 objects to another bucket**
 
 When passed with the parameter ``--recursive``, the following ``cp`` command recursively copies all objects under a
 specified bucket to another bucket while excluding some objects by using an ``--exclude`` parameter. In this example,
-the bucket ``mybucket`` has the objects ``test1.txt`` and ``another/test1.txt``::
+the bucket ``amzn-s3-demo-bucket`` has the objects ``test1.txt`` and ``another/test1.txt``::
 
-    aws s3 cp s3://mybucket/ s3://amzn-s3-demo-bucket2/ \
+    aws s3 cp s3://amzn-s3-demo-bucket/ s3://amzn-s3-demo-bucket2/ \
         --recursive \
         --exclude "another/*"
 
 Output::
 
-    copy: s3://mybucket/test1.txt to s3://amzn-s3-demo-bucket2/test1.txt
+    copy: s3://amzn-s3-demo-bucket/test1.txt to s3://amzn-s3-demo-bucket2/test1.txt
 
 You can combine ``--exclude`` and ``--include`` options to copy only objects that match a pattern, excluding all others::
 
-    aws s3 cp s3://mybucket/logs/ s3://amzn-s3-demo-bucket2/logs/ \
+    aws s3 cp s3://amzn-s3-demo-bucket/logs/ s3://amzn-s3-demo-bucket2/logs/ \
         --recursive \
         --exclude "*" \
         --include "*.log"
 
 Output::
 
-    copy: s3://mybucket/logs/test/test.log to s3://amzn-s3-demo-bucket2/logs/test/test.log
-    copy: s3://mybucket/logs/test3.log to s3://amzn-s3-demo-bucket2/logs/test3.log
+    copy: s3://amzn-s3-demo-bucket/logs/test/test.log to s3://amzn-s3-demo-bucket2/logs/test/test.log
+    copy: s3://amzn-s3-demo-bucket/logs/test3.log to s3://amzn-s3-demo-bucket2/logs/test3.log
 
 **Example 9: Setting the Access Control List (ACL) while copying an S3 object**
 
 The following ``cp`` command copies a single object to a specified bucket and key while setting the ACL to ``public-read-write``::
 
-    aws s3 cp s3://mybucket/test.txt s3://mybucket/test2.txt \
+    aws s3 cp s3://amzn-s3-demo-bucket/test.txt s3://amzn-s3-demo-bucket/test2.txt \
         --acl public-read-write
 
 Output::
 
-    copy: s3://mybucket/test.txt to s3://mybucket/test2.txt
+    copy: s3://amzn-s3-demo-bucket/test.txt to s3://amzn-s3-demo-bucket/test2.txt
 
 Note that if you're using the ``--acl`` option, ensure that any associated IAM policies include the
 ``"s3:PutObjectAcl"`` action::
 
@@ -138,7 +138,7 @@ Output::
                         "s3:PutObjectAcl"
                     ],
                     "Resource": [
-                        "arn:aws:s3:::mybucket/*"
+                        "arn:aws:s3:::amzn-s3-demo-bucket/*"
                     ],
                     "Effect": "Allow",
                     "Sid": "Stmt1234567891234"
@@ -152,11 +152,11 @@ Output::
 
 The following ``cp`` command illustrates the use of the ``--grants`` option to grant read access to all users identified
 by URI and full control to a specific user identified by their Canonical ID::
 
-    aws s3 cp file.txt s3://mybucket/ --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be
+    aws s3 cp file.txt s3://amzn-s3-demo-bucket/ --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be
 
 Output::
 
-    upload: file.txt to s3://mybucket/file.txt
+    upload: file.txt to s3://amzn-s3-demo-bucket/file.txt
 
 **Example 11: Uploading a local file stream to S3**
 
@@ -164,13 +164,13 @@
 
 The following ``cp`` command uploads a local file stream from standard input to a specified bucket and key::
 
-    aws s3 cp - s3://mybucket/stream.txt
+    aws s3 cp - s3://amzn-s3-demo-bucket/stream.txt
 
 **Example 12: Uploading a local file stream that is larger than 50GB to S3**
 
 The following ``cp`` command uploads a 51GB local file stream from standard input to a specified bucket and key. The
 ``--expected-size`` option must be provided, or the upload may fail when it reaches the default part limit of 10,000::
 
-    aws s3 cp - s3://mybucket/stream.txt --expected-size 54760833024
+    aws s3 cp - s3://amzn-s3-demo-bucket/stream.txt --expected-size 54760833024
 
 **Example 13: Downloading an S3 object as a local file stream**
 
@@ -178,7 +178,7 @@ The following ``cp`` command uploads a 51GB local file stream from standard inpu
 
 The following ``cp`` command downloads an S3 object locally as a stream to standard output. Downloading as a stream is not currently compatible with the ``--recursive`` parameter::
 
-    aws s3 cp s3://mybucket/stream.txt -
+    aws s3 cp s3://amzn-s3-demo-bucket/stream.txt -
 
 **Example 14: Uploading to an S3 access point**
 
diff --git a/awscli/examples/s3/ls.rst b/awscli/examples/s3/ls.rst
index decd5e168daa..e3e456134834 100644
--- a/awscli/examples/s3/ls.rst
+++ b/awscli/examples/s3/ls.rst
@@ -1,19 +1,19 @@
 **Example 1: Listing all user owned buckets**
 
-The following ``ls`` command lists all of the bucket owned by the user. In this example, the user owns the buckets ``mybucket`` and ``amzn-s3-demo-bucket2``. The timestamp is the date the bucket was created, shown in your machine's time zone. This date can change when making changes to your bucket, such as editing its bucket policy. Note if ``s3://`` is used for the path argument ````, it will list all of the buckets as well. ::
+The following ``ls`` command lists all of the buckets owned by the user. In this example, the user owns the buckets ``amzn-s3-demo-bucket`` and ``amzn-s3-demo-bucket2``. The timestamp is the date the bucket was created, shown in your machine's time zone. This date can change when making changes to your bucket, such as editing its bucket policy. Note if ``s3://`` is used for the path argument ````, it will list all of the buckets as well. ::
 
     aws s3 ls
 
 Output::
 
-    2013-07-11 17:08:50 mybucket
+    2013-07-11 17:08:50 amzn-s3-demo-bucket
     2013-07-24 14:55:44 amzn-s3-demo-bucket2
 
 **Example 2: Listing all prefixes and objects in a bucket**
 
-The following ``ls`` command lists objects and common prefixes under a specified bucket and prefix. In this example, the user owns the bucket ``mybucket`` with the objects ``test.txt`` and ``somePrefix/test.txt``. The ``LastWriteTime`` and ``Length`` are arbitrary. Note that since the ``ls`` command has no interaction with the local filesystem, the ``s3://`` URI scheme is not required to resolve ambiguity and may be omitted. ::
+The following ``ls`` command lists objects and common prefixes under a specified bucket and prefix. In this example, the user owns the bucket ``amzn-s3-demo-bucket`` with the objects ``test.txt`` and ``somePrefix/test.txt``. The ``LastWriteTime`` and ``Length`` are arbitrary. Note that since the ``ls`` command has no interaction with the local filesystem, the ``s3://`` URI scheme is not required to resolve ambiguity and may be omitted. ::
 
-    aws s3 ls s3://mybucket
+    aws s3 ls s3://amzn-s3-demo-bucket
 
 Output::
 
@@ -24,7 +24,7 @@ Output::
 
 The following ``ls`` command lists objects and common prefixes under a specified bucket and prefix. However, there are no objects nor common prefixes under the specified bucket and prefix.
:: - aws s3 ls s3://mybucket/noExistPrefix + aws s3 ls s3://amzn-s3-demo-bucket/noExistPrefix Output:: @@ -34,7 +34,7 @@ Output:: The following ``ls`` command will recursively list objects in a bucket. Rather than showing ``PRE dirname/`` in the output, all the content in a bucket will be listed in order. :: - aws s3 ls s3://mybucket \ + aws s3 ls s3://amzn-s3-demo-bucket \ --recursive Output:: @@ -54,7 +54,7 @@ Output:: The following ``ls`` command demonstrates the same command using the --human-readable and --summarize options. --human-readable displays file size in Bytes/MiB/KiB/GiB/TiB/PiB/EiB. --summarize displays the total number of objects and total size at the end of the result listing:: - aws s3 ls s3://mybucket \ + aws s3 ls s3://amzn-s3-demo-bucket \ --recursive \ --human-readable \ --summarize diff --git a/awscli/examples/s3/mb.rst b/awscli/examples/s3/mb.rst index aa1e15234bb5..c7d25f3e0a62 100644 --- a/awscli/examples/s3/mb.rst +++ b/awscli/examples/s3/mb.rst @@ -1,22 +1,22 @@ **Example 1: Create a bucket** -The following ``mb`` command creates a bucket. In this example, the user makes the bucket ``mybucket``. The bucket is +The following ``mb`` command creates a bucket. In this example, the user makes the bucket ``amzn-s3-demo-bucket``. The bucket is created in the region specified in the user's configuration file:: - aws s3 mb s3://mybucket + aws s3 mb s3://amzn-s3-demo-bucket Output:: - make_bucket: s3://mybucket + make_bucket: s3://amzn-s3-demo-bucket **Example 2: Create a bucket in the specified region** The following ``mb`` command creates a bucket in a region specified by the ``--region`` parameter. In this example, the -user makes the bucket ``mybucket`` in the region ``us-west-1``:: +user makes the bucket ``amzn-s3-demo-bucket`` in the region ``us-west-1``:: - aws s3 mb s3://mybucket \ + aws s3 mb s3://amzn-s3-demo-bucket \ --region us-west-1 Output:: - make_bucket: s3://mybucket + make_bucket: s3://amzn-s3-demo-bucket diff --git a/awscli/examples/s3/mv.rst b/awscli/examples/s3/mv.rst index 62f9860adfe1..836d5d0fcae0 100644 --- a/awscli/examples/s3/mv.rst +++ b/awscli/examples/s3/mv.rst @@ -2,55 +2,55 @@ The following ``mv`` command moves a single file to a specified bucket and key. :: - aws s3 mv test.txt s3://mybucket/test2.txt + aws s3 mv test.txt s3://amzn-s3-demo-bucket/test2.txt Output:: - move: test.txt to s3://mybucket/test2.txt + move: test.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 2: Move an object to the specified bucket and key** The following ``mv`` command moves a single s3 object to a specified bucket and key. :: - aws s3 mv s3://mybucket/test.txt s3://mybucket/test2.txt + aws s3 mv s3://amzn-s3-demo-bucket/test.txt s3://amzn-s3-demo-bucket/test2.txt Output:: - move: s3://mybucket/test.txt to s3://mybucket/test2.txt + move: s3://amzn-s3-demo-bucket/test.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 3: Move an S3 object to the local directory** The following ``mv`` command moves a single object to a specified file locally. 
:: - aws s3 mv s3://mybucket/test.txt test2.txt + aws s3 mv s3://amzn-s3-demo-bucket/test.txt test2.txt Output:: - move: s3://mybucket/test.txt to test2.txt + move: s3://amzn-s3-demo-bucket/test.txt to test2.txt **Example 4: Move an object with it's original name to the specified bucket** The following ``mv`` command moves a single object to a specified bucket while retaining its original name:: - aws s3 mv s3://mybucket/test.txt s3://amzn-s3-demo-bucket2/ + aws s3 mv s3://amzn-s3-demo-bucket/test.txt s3://amzn-s3-demo-bucket2/ Output:: - move: s3://mybucket/test.txt to s3://amzn-s3-demo-bucket2/test.txt + move: s3://amzn-s3-demo-bucket/test.txt to s3://amzn-s3-demo-bucket2/test.txt **Example 5: Move all objects and prefixes in a bucket to the local directory** When passed with the parameter ``--recursive``, the following ``mv`` command recursively moves all objects under a -specified prefix and bucket to a specified directory. In this example, the bucket ``mybucket`` has the objects +specified prefix and bucket to a specified directory. In this example, the bucket ``amzn-s3-demo-bucket`` has the objects ``test1.txt`` and ``test2.txt``. :: - aws s3 mv s3://mybucket . \ + aws s3 mv s3://amzn-s3-demo-bucket . \ --recursive Output:: - move: s3://mybucket/test1.txt to test1.txt - move: s3://mybucket/test2.txt to test2.txt + move: s3://amzn-s3-demo-bucket/test1.txt to test1.txt + move: s3://amzn-s3-demo-bucket/test2.txt to test2.txt **Example 6: Move all objects and prefixes in a bucket to the local directory, except ``.jpg`` files** @@ -58,7 +58,7 @@ When passed with the parameter ``--recursive``, the following ``mv`` command rec specified directory to a specified bucket and prefix while excluding some files by using an ``--exclude`` parameter. In this example, the directory ``myDir`` has the files ``test1.txt`` and ``test2.jpg``. :: - aws s3 mv myDir s3://mybucket/ \ + aws s3 mv myDir s3://amzn-s3-demo-bucket/ \ --recursive \ --exclude "*.jpg" @@ -70,39 +70,39 @@ Output:: When passed with the parameter ``--recursive``, the following ``mv`` command recursively moves all objects under a specified bucket to another bucket while excluding some objects by using an ``--exclude`` parameter. In this example, -the bucket ``mybucket`` has the objects ``test1.txt`` and ``another/test1.txt``. :: +the bucket ``amzn-s3-demo-bucket`` has the objects ``test1.txt`` and ``another/test1.txt``. :: - aws s3 mv s3://mybucket/ s3://amzn-s3-demo-bucket2/ \ + aws s3 mv s3://amzn-s3-demo-bucket/ s3://amzn-s3-demo-bucket2/ \ --recursive \ - --exclude "mybucket/another/*" + --exclude "amzn-s3-demo-bucket/another/*" Output:: - move: s3://mybucket/test1.txt to s3://amzn-s3-demo-bucket2/test1.txt + move: s3://amzn-s3-demo-bucket/test1.txt to s3://amzn-s3-demo-bucket2/test1.txt **Example 8: Move an object to the specified bucket and set the ACL** The following ``mv`` command moves a single object to a specified bucket and key while setting the ACL to ``public-read-write``. 
:: - aws s3 mv s3://mybucket/test.txt s3://mybucket/test2.txt \ + aws s3 mv s3://amzn-s3-demo-bucket/test.txt s3://amzn-s3-demo-bucket/test2.txt \ --acl public-read-write Output:: - move: s3://mybucket/test.txt to s3://mybucket/test2.txt + move: s3://amzn-s3-demo-bucket/test.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 9: Move a local file to the specified bucket and grant permissions** The following ``mv`` command illustrates the use of the ``--grants`` option to grant read access to all users and full control to a specific user identified by their email address. :: - aws s3 mv file.txt s3://mybucket/ \ + aws s3 mv file.txt s3://amzn-s3-demo-bucket/ \ --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=emailaddress=user@example.com Output:: - move: file.txt to s3://mybucket/file.txt + move: file.txt to s3://amzn-s3-demo-bucket/file.txt **Example 10: Move a file to an S3 access point** diff --git a/awscli/examples/s3/rb.rst b/awscli/examples/s3/rb.rst index 1abecb67ae92..a37590c40c11 100644 --- a/awscli/examples/s3/rb.rst +++ b/awscli/examples/s3/rb.rst @@ -1,24 +1,24 @@ **Example 1: Delete a bucket** -The following ``rb`` command removes a bucket. In this example, the user's bucket is ``mybucket``. Note that the bucket must be empty in order to remove:: +The following ``rb`` command removes a bucket. In this example, the user's bucket is ``amzn-s3-demo-bucket``. Note that the bucket must be empty in order to remove:: - aws s3 rb s3://mybucket + aws s3 rb s3://amzn-s3-demo-bucket Output:: - remove_bucket: mybucket + remove_bucket: amzn-s3-demo-bucket **Example 2: Force delete a bucket** The following ``rb`` command uses the ``--force`` parameter to first remove all of the objects in the bucket and then -remove the bucket itself. In this example, the user's bucket is ``mybucket`` and the objects in ``mybucket`` are +remove the bucket itself. In this example, the user's bucket is ``amzn-s3-demo-bucket`` and the objects in ``amzn-s3-demo-bucket`` are ``test1.txt`` and ``test2.txt``:: - aws s3 rb s3://mybucket \ + aws s3 rb s3://amzn-s3-demo-bucket \ --force Output:: - delete: s3://mybucket/test1.txt - delete: s3://mybucket/test2.txt - remove_bucket: mybucket \ No newline at end of file + delete: s3://amzn-s3-demo-bucket/test1.txt + delete: s3://amzn-s3-demo-bucket/test2.txt + remove_bucket: amzn-s3-demo-bucket \ No newline at end of file diff --git a/awscli/examples/s3/rm.rst b/awscli/examples/s3/rm.rst index 73cb6ce4905d..735e38995202 100644 --- a/awscli/examples/s3/rm.rst +++ b/awscli/examples/s3/rm.rst @@ -2,54 +2,54 @@ The following ``rm`` command deletes a single s3 object:: - aws s3 rm s3://mybucket/test2.txt + aws s3 rm s3://amzn-s3-demo-bucket/test2.txt Output:: - delete: s3://mybucket/test2.txt + delete: s3://amzn-s3-demo-bucket/test2.txt **Example 2: Delete all contents in a bucket** The following ``rm`` command recursively deletes all objects under a specified bucket and prefix when passed with the -parameter ``--recursive``. In this example, the bucket ``mybucket`` contains the objects ``test1.txt`` and +parameter ``--recursive``. 
In this example, the bucket ``amzn-s3-demo-bucket`` contains the objects ``test1.txt`` and ``test2.txt``:: - aws s3 rm s3://mybucket \ + aws s3 rm s3://amzn-s3-demo-bucket \ --recursive Output:: - delete: s3://mybucket/test1.txt - delete: s3://mybucket/test2.txt + delete: s3://amzn-s3-demo-bucket/test1.txt + delete: s3://amzn-s3-demo-bucket/test2.txt **Example 3: Delete all contents in a bucket, except ``.jpg`` files** The following ``rm`` command recursively deletes all objects under a specified bucket and prefix when passed with the parameter ``--recursive`` while excluding some objects by using an ``--exclude`` parameter. In this example, the bucket -``mybucket`` has the objects ``test1.txt`` and ``test2.jpg``:: +``amzn-s3-demo-bucket`` has the objects ``test1.txt`` and ``test2.jpg``:: - aws s3 rm s3://mybucket/ \ + aws s3 rm s3://amzn-s3-demo-bucket/ \ --recursive \ --exclude "*.jpg" Output:: - delete: s3://mybucket/test1.txt + delete: s3://amzn-s3-demo-bucket/test1.txt **Example 4: Delete all contents in a bucket, except objects under the specified prefix** The following ``rm`` command recursively deletes all objects under a specified bucket and prefix when passed with the parameter ``--recursive`` while excluding all objects under a particular prefix by using an ``--exclude`` parameter. In -this example, the bucket ``mybucket`` has the objects ``test1.txt`` and ``another/test.txt``:: +this example, the bucket ``amzn-s3-demo-bucket`` has the objects ``test1.txt`` and ``another/test.txt``:: - aws s3 rm s3://mybucket/ \ + aws s3 rm s3://amzn-s3-demo-bucket/ \ --recursive \ --exclude "another/*" Output:: - delete: s3://mybucket/test1.txt + delete: s3://amzn-s3-demo-bucket/test1.txt **Example 5: Delete an object from an S3 access point** diff --git a/awscli/examples/s3/sync.rst b/awscli/examples/s3/sync.rst index 1e6966678da3..bee26f6684fc 100644 --- a/awscli/examples/s3/sync.rst +++ b/awscli/examples/s3/sync.rst @@ -4,15 +4,15 @@ The following ``sync`` command syncs objects from a local directory to the speci uploading the local files to S3. A local file will require uploading if the size of the local file is different than the size of the S3 object, the last modified time of the local file is newer than the last modified time of the S3 object, or the local file does not exist under the specified bucket and prefix. In this example, the user syncs the -bucket ``mybucket`` to the local current directory. The local current directory contains the files ``test.txt`` and -``test2.txt``. The bucket ``mybucket`` contains no objects. :: +bucket ``amzn-s3-demo-bucket`` to the local current directory. The local current directory contains the files ``test.txt`` and +``test2.txt``. The bucket ``amzn-s3-demo-bucket`` contains no objects. :: - aws s3 sync . s3://mybucket + aws s3 sync . s3://amzn-s3-demo-bucket Output:: - upload: test.txt to s3://mybucket/test.txt - upload: test2.txt to s3://mybucket/test2.txt + upload: test.txt to s3://amzn-s3-demo-bucket/test.txt + upload: test2.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 2: Sync all S3 objects from the specified S3 bucket to another bucket** @@ -21,15 +21,15 @@ prefix and bucket by copying S3 objects. An S3 object will require copying if th the last modified time of the source is newer than the last modified time of the destination, or the S3 object does not exist under the specified bucket and prefix destination. -In this example, the user syncs the bucket ``mybucket`` to the bucket ``amzn-s3-demo-bucket2``. 
The bucket ``mybucket`` contains the objects ``test.txt`` and ``test2.txt``. The bucket +In this example, the user syncs the bucket ``amzn-s3-demo-bucket`` to the bucket ``amzn-s3-demo-bucket2``. The bucket ``amzn-s3-demo-bucket`` contains the objects ``test.txt`` and ``test2.txt``. The bucket ``amzn-s3-demo-bucket2`` contains no objects:: - aws s3 sync s3://mybucket s3://amzn-s3-demo-bucket2 + aws s3 sync s3://amzn-s3-demo-bucket s3://amzn-s3-demo-bucket2 Output:: - copy: s3://mybucket/test.txt to s3://amzn-s3-demo-bucket2/test.txt - copy: s3://mybucket/test2.txt to s3://amzn-s3-demo-bucket2/test2.txt + copy: s3://amzn-s3-demo-bucket/test.txt to s3://amzn-s3-demo-bucket2/test.txt + copy: s3://amzn-s3-demo-bucket/test2.txt to s3://amzn-s3-demo-bucket2/test2.txt **Example 3: Sync all S3 objects from the specified S3 bucket to the local directory** @@ -38,62 +38,62 @@ downloading S3 objects. An S3 object will require downloading if the size of the local file, the last modified time of the S3 object is newer than the last modified time of the local file, or the S3 object does not exist in the local directory. Take note that when objects are downloaded from S3, the last modified time of the local file is changed to the last modified time of the S3 object. In this example, the user syncs the -bucket ``mybucket`` to the current local directory. The bucket ``mybucket`` contains the objects ``test.txt`` and +bucket ``amzn-s3-demo-bucket`` to the current local directory. The bucket ``amzn-s3-demo-bucket`` contains the objects ``test.txt`` and ``test2.txt``. The current local directory has no files:: - aws s3 sync s3://mybucket . + aws s3 sync s3://amzn-s3-demo-bucket . Output:: - download: s3://mybucket/test.txt to test.txt - download: s3://mybucket/test2.txt to test2.txt + download: s3://amzn-s3-demo-bucket/test.txt to test.txt + download: s3://amzn-s3-demo-bucket/test2.txt to test2.txt **Example 4: Sync all local objects to the specified bucket and delete all files that do not match** The following ``sync`` command syncs objects under a specified prefix and bucket to files in a local directory by uploading the local files to S3. Because of the ``--delete`` parameter, any files existing under the specified prefix and bucket but not existing in the local directory will be deleted. In this example, the user syncs -the bucket ``mybucket`` to the local current directory. The local current directory contains the files ``test.txt`` and -``test2.txt``. The bucket ``mybucket`` contains the object ``test3.txt``:: +the bucket ``amzn-s3-demo-bucket`` to the local current directory. The local current directory contains the files ``test.txt`` and +``test2.txt``. The bucket ``amzn-s3-demo-bucket`` contains the object ``test3.txt``:: - aws s3 sync . s3://mybucket \ + aws s3 sync . s3://amzn-s3-demo-bucket \ --delete Output:: - upload: test.txt to s3://mybucket/test.txt - upload: test2.txt to s3://mybucket/test2.txt - delete: s3://mybucket/test3.txt + upload: test.txt to s3://amzn-s3-demo-bucket/test.txt + upload: test2.txt to s3://amzn-s3-demo-bucket/test2.txt + delete: s3://amzn-s3-demo-bucket/test3.txt **Example 5: Sync all local objects to the specified bucket except ``.jpg`` files** The following ``sync`` command syncs objects under a specified prefix and bucket to files in a local directory by uploading the local files to S3. Because of the ``--exclude`` parameter, all files matching the pattern -existing both in S3 and locally will be excluded from the sync. 
In this example, the user syncs the bucket ``mybucket`` +existing both in S3 and locally will be excluded from the sync. In this example, the user syncs the bucket ``amzn-s3-demo-bucket`` to the local current directory. The local current directory contains the files ``test.jpg`` and ``test2.txt``. The -bucket ``mybucket`` contains the object ``test.jpg`` of a different size than the local ``test.jpg``:: +bucket ``amzn-s3-demo-bucket`` contains the object ``test.jpg`` of a different size than the local ``test.jpg``:: - aws s3 sync . s3://mybucket \ + aws s3 sync . s3://amzn-s3-demo-bucket \ --exclude "*.jpg" Output:: - upload: test2.txt to s3://mybucket/test2.txt + upload: test2.txt to s3://amzn-s3-demo-bucket/test2.txt **Example 6: Sync all local objects to the specified bucket except specified directory files** The following ``sync`` command syncs files under a local directory to objects under a specified prefix and bucket by downloading S3 objects. This example uses the ``--exclude`` parameter flag to exclude a specified directory and S3 prefix from the ``sync`` command. In this example, the user syncs the local current directory to the bucket -``mybucket``. The local current directory contains the files ``test.txt`` and ``another/test2.txt``. The bucket -``mybucket`` contains the objects ``another/test5.txt`` and ``test1.txt``:: +``amzn-s3-demo-bucket``. The local current directory contains the files ``test.txt`` and ``another/test2.txt``. The bucket +``amzn-s3-demo-bucket`` contains the objects ``another/test5.txt`` and ``test1.txt``:: - aws s3 sync s3://mybucket/ . \ + aws s3 sync s3://amzn-s3-demo-bucket/ . \ --exclude "*another/*" Output:: - download: s3://mybucket/test1.txt to test1.txt + download: s3://amzn-s3-demo-bucket/test1.txt to test1.txt **Example 7: Sync all objects between buckets in different regions** diff --git a/awscli/examples/s3api/get-bucket-policy.rst b/awscli/examples/s3api/get-bucket-policy.rst index 606e3297fbd9..1378b20d6d20 100644 --- a/awscli/examples/s3api/get-bucket-policy.rst +++ b/awscli/examples/s3api/get-bucket-policy.rst @@ -16,9 +16,9 @@ make modifications to the file, and then use ``put-bucket-policy`` to apply the modified bucket policy. To download the bucket policy to a file, you can run:: - aws s3api get-bucket-policy --bucket mybucket --query Policy --output text > policy.json + aws s3api get-bucket-policy --bucket amzn-s3-demo-bucket --query Policy --output text > policy.json You can then modify the ``policy.json`` file as needed. Finally you can apply this modified policy back to the S3 bucket by running:: - aws s3api put-bucket-policy --bucket mybucket --policy file://policy.json + aws s3api put-bucket-policy --bucket amzn-s3-demo-bucket --policy file://policy.json