diff --git a/output/openapi/elasticsearch-openapi.json b/output/openapi/elasticsearch-openapi.json index d19325d562..453cadb413 100644 --- a/output/openapi/elasticsearch-openapi.json +++ b/output/openapi/elasticsearch-openapi.json @@ -680,7 +680,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the 
standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split 
documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "operationId": "bulk-1", "parameters": [ { @@ -731,7 +731,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or 
`write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal 
newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. 
For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the 
index do not participate in the `_bulk` request at all.", "operationId": "bulk", "parameters": [ { @@ -784,7 +784,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or 
replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data 
with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "operationId": "bulk-3", "parameters": [ { @@ -838,7 +838,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or 
`write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal 
newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. 
For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the 
index do not participate in the `_bulk` request at all.", "operationId": "bulk-2", "parameters": [ { @@ -3370,6 +3370,9 @@ ], "summary": "Clear a scrolling search", "description": "Clear the search context and results for a scrolling search.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results" + }, "operationId": "clear-scroll", "requestBody": { "$ref": "#/components/requestBodies/clear_scroll" @@ -3454,6 +3457,9 @@ ], "summary": "Clear a scrolling search", "description": "Clear the search context and results for a scrolling search.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results" + }, "operationId": "clear-scroll-1", "parameters": [ { @@ -3504,9 +3510,11 @@ "type": "object", "properties": { "succeeded": { + "description": "If `true`, all search contexts associated with the point-in-time ID were successfully closed.", "type": "boolean" }, "num_freed": { + "description": "The number of search contexts that were successfully closed.", "type": "number" } }, @@ -6430,7 +6438,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count-1", "parameters": [ { @@ -6490,7 +6498,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count", "parameters": [ { @@ -6552,7 +6560,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count-3", "parameters": [ { @@ -6615,7 +6623,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count-2", "parameters": [ { @@ -43375,7 +43383,7 @@ "type": "string" }, "reason": { - "description": "A human-readable explanation of the error, in english", + "description": "A human-readable explanation of the error, in English.", "type": "string" }, "stack_trace": { @@ -52337,18 +52345,18 @@ "type": "object", "properties": { "dynamic_templates": { - "description": "A map from the full name of fields to the name of dynamic templates.\nDefaults to an empty map.\nIf a name matches a dynamic template, then that template will be applied regardless of other match predicates defined in the template.\nIf a field is already defined in the mapping, then this parameter won’t be used.", + "description": "A map from the full name of fields to the name of dynamic templates.\nIt defaults to an empty map.\nIf a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the 
template.\nIf a field is already defined in the mapping, then this parameter won't be used.", "type": "object", "additionalProperties": { "type": "string" } }, "pipeline": { - "description": "ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", + "description": "The ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run regardless of the value of this parameter.", "type": "string" }, "require_alias": { - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "type": "boolean" } } @@ -52400,10 +52408,11 @@ "type": "object", "properties": { "require_alias": { - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "type": "boolean" }, "retry_on_conflict": { + "description": "The number of times an update should be retried in the case of a version conflict.", "type": "number" } } @@ -52424,7 +52433,7 @@ "type": "object", "properties": { "detect_noop": { - "description": "Set to false to disable setting 'result' in the response\nto 'noop' if no change to the document occurred.", + "description": "If true, the `result` in the response is set to 'noop' when no changes to the document occur.", "type": "boolean" }, "doc": { @@ -52432,21 +52441,21 @@ "type": "object" }, "doc_as_upsert": { - "description": "Set to true to use the contents of 'doc' as the value of 'upsert'", + "description": "Set to `true` to use the contents 
of `doc` as the value of `upsert`.", "type": "boolean" }, "script": { "$ref": "#/components/schemas/_types:Script" }, "scripted_upsert": { - "description": "Set to true to execute the script whether or not the document exists.", + "description": "Set to `true` to run the script whether or not the document exists.", "type": "boolean" }, "_source": { "$ref": "#/components/schemas/_global.search._types:SourceConfig" }, "upsert": { - "description": "If the document does not already exist, the contents of 'upsert' are inserted as a\nnew document. If the document exists, the 'script' is executed.", + "description": "If the document does not already exist, the contents of `upsert` are inserted as a new document.\nIf the document exists, the `script` is run.", "type": "object" } } @@ -52467,22 +52476,22 @@ ] }, "_index": { - "description": "Name of the index associated with the operation.\nIf the operation targeted a data stream, this is the backing index into which the document was written.", + "description": "The name of the index associated with the operation.\nIf the operation targeted a data stream, this is the backing index into which the document was written.", "type": "string" }, "status": { - "description": "HTTP status code returned for the operation.", + "description": "The HTTP status code returned for the operation.", "type": "number" }, "error": { "$ref": "#/components/schemas/_types:ErrorCause" }, "_primary_term": { - "description": "The primary term assigned to the document for the operation.", + "description": "The primary term assigned to the document for the operation.\nThis property is returned only for successful operations.", "type": "number" }, "result": { - "description": "Result of the operation.\nSuccessful values are `created`, `deleted`, and `updated`.", + "description": "The result of the operation.\nSuccessful values are `created`, `deleted`, and `updated`.", "type": "string" }, "_seq_no": { @@ -93436,9 +93445,11 @@ "type": "object", 
"properties": { "errors": { + "description": "If `true`, one or more of the operations in the bulk request did not complete successfully.", "type": "boolean" }, "items": { + "description": "The result of each operation in the bulk request, in the order they were submitted.", "type": "array", "items": { "type": "object", @@ -93450,6 +93461,7 @@ } }, "took": { + "description": "The length of time, in milliseconds, it took to process the bulk request.", "type": "number" }, "ingest_took": { @@ -93715,9 +93727,11 @@ "type": "object", "properties": { "succeeded": { + "description": "If `true`, the request succeeded.\nThis does not indicate whether any scrolling search requests were cleared.", "type": "boolean" }, "num_freed": { + "description": "The number of scrolling search requests cleared.", "type": "number" } }, @@ -97635,7 +97649,7 @@ "bulk#index": { "in": "path", "name": "index", - "description": "Name of the data stream, index, or index alias to perform bulk actions on.", + "description": "The name of the data stream, index, or index alias to perform bulk actions on.", "required": true, "deprecated": false, "schema": { @@ -97646,7 +97660,7 @@ "bulk#list_executed_pipelines": { "in": "query", "name": "list_executed_pipelines", - "description": "If `true`, the response will include the ingest pipelines that were executed for each index or create.", + "description": "If `true`, the response will include the ingest pipelines that were run for each index or create.", "deprecated": false, "schema": { "type": "boolean" @@ -97656,7 +97670,7 @@ "bulk#pipeline": { "in": "query", "name": "pipeline", - "description": "ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", + "description": "The pipeline identifier to use to 
preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run regardless of the value of this parameter.", "deprecated": false, "schema": { "type": "string" @@ -97666,7 +97680,7 @@ "bulk#refresh": { "in": "query", "name": "refresh", - "description": "If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes.\nValid values: `true`, `false`, `wait_for`.", + "description": "If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.\nIf `wait_for`, wait for a refresh to make this operation visible to search.\nIf `false`, do nothing with refreshes.\nValid values: `true`, `false`, `wait_for`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Refresh" @@ -97676,7 +97690,7 @@ "bulk#routing": { "in": "query", "name": "routing", - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value that is used to route operations to a specific shard.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Routing" @@ -97686,7 +97700,7 @@ "bulk#_source": { "in": "query", "name": "_source", - "description": "`true` or `false` to return the `_source` field or not, or a list of fields to return.", + "description": "Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_global.search._types:SourceConfigParam" @@ -97696,7 +97710,7 @@ "bulk#_source_excludes": { "in": "query", "name": "_source_excludes", - "description": "A comma-separated list of source fields to exclude from the response.", + "description": "A comma-separated list of 
source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Fields" @@ -97706,7 +97720,7 @@ "bulk#_source_includes": { "in": "query", "name": "_source_includes", - "description": "A comma-separated list of source fields to include in the response.", + "description": "A comma-separated list of source fields to include in the response.\nIf this parameter is specified, only these source fields are returned.\nYou can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Fields" @@ -97716,7 +97730,7 @@ "bulk#timeout": { "in": "query", "name": "timeout", - "description": "Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.", + "description": "The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards.\nThe default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing.\nThe actual wait time could be longer, particularly when multiple waits occur.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Duration" @@ -97726,7 +97740,7 @@ "bulk#wait_for_active_shards": { "in": "query", "name": "wait_for_active_shards", - "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).", + "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to `all` or any positive 
integer up to the total number of shards in the index (`number_of_replicas+1`).\nThe default is `1`, which waits for each primary shard to be active.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:WaitForActiveShards" @@ -97736,7 +97750,7 @@ "bulk#require_alias": { "in": "query", "name": "require_alias", - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "deprecated": false, "schema": { "type": "boolean" @@ -97746,7 +97760,7 @@ "bulk#require_data_stream": { "in": "query", "name": "require_data_stream", - "description": "If `true`, the request's actions must target a data stream (existing or to-be-created).", + "description": "If `true`, the request's actions must target a data stream (existing or to be created).", "deprecated": false, "schema": { "type": "boolean" @@ -98574,9 +98588,9 @@ "clear_scroll#scroll_id": { "in": "path", "name": "scroll_id", - "description": "Comma-separated list of scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.", + "description": "A comma-separated list of scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.\nIMPORTANT: Scroll IDs can be long. 
It is recommended to specify scroll IDs in the request body parameter.", "required": true, - "deprecated": false, + "deprecated": true, "schema": { "$ref": "#/components/schemas/_types:ScrollIds" }, @@ -98969,7 +98983,7 @@ "count#index": { "in": "path", "name": "index", - "description": "Comma-separated list of data streams, indices, and aliases to search.\nSupports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases to search.\nIt supports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*` or `_all`.", "required": true, "deprecated": false, "schema": { @@ -98980,7 +98994,7 @@ "count#allow_no_indices": { "in": "query", "name": "allow_no_indices", - "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.", + "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.\nFor example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", "deprecated": false, "schema": { "type": "boolean" @@ -98990,7 +99004,7 @@ "count#analyzer": { "in": "query", "name": "analyzer", - "description": "Analyzer to use for the query string.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The analyzer to use for the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": "string" @@ -99000,7 +99014,7 @@ "count#analyze_wildcard": { "in": "query", "name": "analyze_wildcard", - "description": "If `true`, 
wildcard and prefix queries are analyzed.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": "boolean" @@ -99010,7 +99024,7 @@ "count#default_operator": { "in": "query", "name": "default_operator", - "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.query_dsl:Operator" @@ -99020,7 +99034,7 @@ "count#df": { "in": "query", "name": "df", - "description": "Field to use as default where no field prefix is given in the query string.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The field to use as a default when no field prefix is given in the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": "string" @@ -99030,7 +99044,7 @@ "count#expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "deprecated": false, "schema": { "$ref": 
"#/components/schemas/_types:ExpandWildcards" @@ -99040,8 +99054,8 @@ "count#ignore_throttled": { "in": "query", "name": "ignore_throttled", - "description": "If `true`, concrete, expanded or aliased indices are ignored when frozen.", - "deprecated": false, + "description": "If `true`, concrete, expanded, or aliased indices are ignored when frozen.", + "deprecated": true, "schema": { "type": "boolean" }, @@ -99060,7 +99074,7 @@ "count#lenient": { "in": "query", "name": "lenient", - "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.", + "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": "boolean" @@ -99070,7 +99084,7 @@ "count#min_score": { "in": "query", "name": "min_score", - "description": "Sets the minimum `_score` value that documents must have to be included in the result.", + "description": "The minimum `_score` value that documents must have to be included in the result.", "deprecated": false, "schema": { "type": "number" @@ -99080,7 +99094,7 @@ "count#preference": { "in": "query", "name": "preference", - "description": "Specifies the node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nBy default, it is random.", "deprecated": false, "schema": { "type": "string" @@ -99090,7 +99104,7 @@ "count#routing": { "in": "query", "name": "routing", - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Routing" @@ -99100,7 +99114,7 @@ "count#terminate_after": { "in": "query", "name": "terminate_after", - 
"description": "Maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.", + "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.", "deprecated": false, "schema": { "type": "number" @@ -99110,7 +99124,7 @@ "count#q": { "in": "query", "name": "q", - "description": "Query in the Lucene query string syntax.", + "description": "The query in Lucene query string syntax.", "deprecated": false, "schema": { "type": "string" diff --git a/output/openapi/elasticsearch-serverless-openapi.json b/output/openapi/elasticsearch-serverless-openapi.json index 5dcd2cb903..f2becfe1f1 100644 --- a/output/openapi/elasticsearch-serverless-openapi.json +++ b/output/openapi/elasticsearch-serverless-openapi.json @@ -458,7 +458,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. 
Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target.\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters 
(`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. 
For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the 
index do not participate in the `_bulk` request at all.", "operationId": "bulk-1", "parameters": [ { @@ -509,7 +509,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target.\nAn `index` action adds or 
replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data 
with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "operationId": "bulk", "parameters": [ { @@ -562,7 +562,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or 
`write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target.\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal 
newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. 
For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the 
index do not participate in the `_bulk` request at all.", "operationId": "bulk-3", "parameters": [ { @@ -616,7 +616,7 @@ "document" ], "summary": "Bulk index or delete documents", - "description": "Performs multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Perform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target.\nAn `index` action adds or 
replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data 
with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "operationId": "bulk-2", "parameters": [ { @@ -1326,6 +1326,9 @@ ], "summary": "Clear a scrolling search", "description": "Clear the search context and results for a scrolling search.", + "externalDocs": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results" + }, "operationId": "clear-scroll", "requestBody": { "$ref": "#/components/requestBodies/clear_scroll" @@ -1410,6 +1413,9 @@ ], "summary": "Clear a scrolling search", "description": "Clear the search context and results for a scrolling search.", + "externalDocs": { + "url": 
"https://www.elastic.co/guide/en/elasticsearch/reference/current/paginate-search-results.html#scroll-search-results" + }, "operationId": "clear-scroll-1", "parameters": [ { @@ -1460,9 +1466,11 @@ "type": "object", "properties": { "succeeded": { + "description": "If `true`, all search contexts associated with the point-in-time ID were successfully closed.", "type": "boolean" }, "num_freed": { + "description": "The number of search contexts that were successfully closed.", "type": "number" } }, @@ -3164,7 +3172,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count-1", "parameters": [ { @@ -3224,7 +3232,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count", "parameters": [ { @@ -3286,7 +3294,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count-3", "parameters": [ { @@ -3349,7 +3357,7 @@ "search" ], "summary": "Count search results", - "description": "Get the number of documents matching a query.", + "description": "Get the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "operationId": "count-2", "parameters": [ { @@ -24281,7 +24289,7 @@ "type": "string" }, "reason": { - "description": "A human-readable explanation of the error, in english", + "description": "A human-readable explanation of the error, in English.", "type": "string" }, "stack_trace": { @@ -33098,18 +33106,18 @@ "type": "object", "properties": { "dynamic_templates": { - "description": "A map from the full name of fields to the name of dynamic templates.\nDefaults to an empty map.\nIf a name matches a dynamic template, then that template will be applied regardless of other match predicates defined in the template.\nIf a field is already defined in the mapping, then this parameter won’t be used.", + "description": "A map from the full name of fields to the name of dynamic templates.\nIt defaults to an empty map.\nIf a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the template.\nIf a field is already defined in the mapping, then this parameter won't be used.", "type": "object", "additionalProperties": { "type": "string" } }, "pipeline": { - "description": "ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", + "description": "The ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run 
regardless of the value of this parameter.", "type": "string" }, "require_alias": { - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "type": "boolean" } } @@ -33161,10 +33169,11 @@ "type": "object", "properties": { "require_alias": { - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "type": "boolean" }, "retry_on_conflict": { + "description": "The number of times an update should be retried in the case of a version conflict.", "type": "number" } } @@ -33185,7 +33194,7 @@ "type": "object", "properties": { "detect_noop": { - "description": "Set to false to disable setting 'result' in the response\nto 'noop' if no change to the document occurred.", + "description": "If true, the `result` in the response is set to 'noop' when no changes to the document occur.", "type": "boolean" }, "doc": { @@ -33193,21 +33202,21 @@ "type": "object" }, "doc_as_upsert": { - "description": "Set to true to use the contents of 'doc' as the value of 'upsert'", + "description": "Set to `true` to use the contents of `doc` as the value of `upsert`.", "type": "boolean" }, "script": { "$ref": "#/components/schemas/_types:Script" }, "scripted_upsert": { - "description": "Set to true to execute the script whether or not the document exists.", + "description": "Set to `true` to run the script whether or not the document exists.", "type": "boolean" }, "_source": { "$ref": "#/components/schemas/_global.search._types:SourceConfig" }, "upsert": { - "description": "If the document does not already exist, the contents of 'upsert' are inserted as a\nnew document. 
If the document exists, the 'script' is executed.", + "description": "If the document does not already exist, the contents of `upsert` are inserted as a new document.\nIf the document exists, the `script` is run.", "type": "object" } } @@ -33228,22 +33237,22 @@ ] }, "_index": { - "description": "Name of the index associated with the operation.\nIf the operation targeted a data stream, this is the backing index into which the document was written.", + "description": "The name of the index associated with the operation.\nIf the operation targeted a data stream, this is the backing index into which the document was written.", "type": "string" }, "status": { - "description": "HTTP status code returned for the operation.", + "description": "The HTTP status code returned for the operation.", "type": "number" }, "error": { "$ref": "#/components/schemas/_types:ErrorCause" }, "_primary_term": { - "description": "The primary term assigned to the document for the operation.", + "description": "The primary term assigned to the document for the operation.\nThis property is returned only for successful operations.", "type": "number" }, "result": { - "description": "Result of the operation.\nSuccessful values are `created`, `deleted`, and `updated`.", + "description": "The result of the operation.\nSuccessful values are `created`, `deleted`, and `updated`.", "type": "string" }, "_seq_no": { @@ -55983,9 +55992,11 @@ "type": "object", "properties": { "errors": { + "description": "If `true`, one or more of the operations in the bulk request did not complete successfully.", "type": "boolean" }, "items": { + "description": "The result of each operation in the bulk request, in the order they were submitted.", "type": "array", "items": { "type": "object", @@ -55997,6 +56008,7 @@ } }, "took": { + "description": "The length of time, in milliseconds, it took to process the bulk request.", "type": "number" }, "ingest_took": { @@ -56137,9 +56149,11 @@ "type": "object", "properties": { 
"succeeded": { + "description": "If `true`, the request succeeded.\nThis does not indicate whether any scrolling search requests were cleared.", "type": "boolean" }, "num_freed": { + "description": "The number of scrolling search requests cleared.", "type": "number" } }, @@ -58190,7 +58204,7 @@ "bulk#index": { "in": "path", "name": "index", - "description": "Name of the data stream, index, or index alias to perform bulk actions on.", + "description": "The name of the data stream, index, or index alias to perform bulk actions on.", "required": true, "deprecated": false, "schema": { @@ -58201,7 +58215,7 @@ "bulk#list_executed_pipelines": { "in": "query", "name": "list_executed_pipelines", - "description": "If `true`, the response will include the ingest pipelines that were executed for each index or create.", + "description": "If `true`, the response will include the ingest pipelines that were run for each index or create.", "deprecated": false, "schema": { "type": "boolean" @@ -58211,7 +58225,7 @@ "bulk#pipeline": { "in": "query", "name": "pipeline", - "description": "ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", + "description": "The pipeline identifier to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run regardless of the value of this parameter.", "deprecated": false, "schema": { "type": "string" @@ -58221,7 +58235,7 @@ "bulk#refresh": { "in": "query", "name": "refresh", - "description": "If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make 
this operation visible to search, if `false` do nothing with refreshes.\nValid values: `true`, `false`, `wait_for`.", + "description": "If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.\nIf `wait_for`, wait for a refresh to make this operation visible to search.\nIf `false`, do nothing with refreshes.\nValid values: `true`, `false`, `wait_for`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Refresh" @@ -58231,7 +58245,7 @@ "bulk#routing": { "in": "query", "name": "routing", - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value that is used to route operations to a specific shard.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Routing" @@ -58241,7 +58255,7 @@ "bulk#_source": { "in": "query", "name": "_source", - "description": "`true` or `false` to return the `_source` field or not, or a list of fields to return.", + "description": "Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_global.search._types:SourceConfigParam" @@ -58251,7 +58265,7 @@ "bulk#_source_excludes": { "in": "query", "name": "_source_excludes", - "description": "A comma-separated list of source fields to exclude from the response.", + "description": "A comma-separated list of source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Fields" @@ -58261,7 +58275,7 @@ "bulk#_source_includes": { "in": "query", "name": "_source_includes", - "description": "A comma-separated list of source fields to include in the response.", + "description": "A comma-separated list of source fields to include 
in the response.\nIf this parameter is specified, only these source fields are returned.\nYou can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Fields" @@ -58271,7 +58285,7 @@ "bulk#timeout": { "in": "query", "name": "timeout", - "description": "Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.", + "description": "The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards.\nThe default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing.\nThe actual wait time could be longer, particularly when multiple waits occur.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Duration" @@ -58281,7 +58295,7 @@ "bulk#wait_for_active_shards": { "in": "query", "name": "wait_for_active_shards", - "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).", + "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).\nThe default is `1`, which waits for each primary shard to be active.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:WaitForActiveShards" @@ -58291,7 +58305,7 @@ "bulk#require_alias": { "in": "query", "name": "require_alias", - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "deprecated": false, "schema": { "type": "boolean" @@ -58301,7 
+58315,7 @@ "bulk#require_data_stream": { "in": "query", "name": "require_data_stream", - "description": "If `true`, the request's actions must target a data stream (existing or to-be-created).", + "description": "If `true`, the request's actions must target a data stream (existing or to be created).", "deprecated": false, "schema": { "type": "boolean" @@ -58790,9 +58804,9 @@ "clear_scroll#scroll_id": { "in": "path", "name": "scroll_id", - "description": "Comma-separated list of scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.", + "description": "A comma-separated list of scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.\nIMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter.", "required": true, - "deprecated": false, + "deprecated": true, "schema": { "$ref": "#/components/schemas/_types:ScrollIds" }, @@ -58894,7 +58908,7 @@ "count#index": { "in": "path", "name": "index", - "description": "Comma-separated list of data streams, indices, and aliases to search.\nSupports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases to search.\nIt supports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*` or `_all`.", "required": true, "deprecated": false, "schema": { @@ -58905,7 +58919,7 @@ "count#allow_no_indices": { "in": "query", "name": "allow_no_indices", - "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.", + "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.\nFor example, a request targeting 
`foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", "deprecated": false, "schema": { "type": "boolean" @@ -58915,7 +58929,7 @@ "count#analyzer": { "in": "query", "name": "analyzer", - "description": "Analyzer to use for the query string.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The analyzer to use for the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": "string" @@ -58925,7 +58939,7 @@ "count#analyze_wildcard": { "in": "query", "name": "analyze_wildcard", - "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": "boolean" @@ -58935,7 +58949,7 @@ "count#default_operator": { "in": "query", "name": "default_operator", - "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types.query_dsl:Operator" @@ -58945,7 +58959,7 @@ "count#df": { "in": "query", "name": "df", - "description": "Field to use as default where no field prefix is given in the query string.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The field to use as a default when no field prefix is given in the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": 
"string" @@ -58955,7 +58969,7 @@ "count#expand_wildcards": { "in": "query", "name": "expand_wildcards", - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:ExpandWildcards" @@ -58965,8 +58979,8 @@ "count#ignore_throttled": { "in": "query", "name": "ignore_throttled", - "description": "If `true`, concrete, expanded or aliased indices are ignored when frozen.", - "deprecated": false, + "description": "If `true`, concrete, expanded, or aliased indices are ignored when frozen.", + "deprecated": true, "schema": { "type": "boolean" }, @@ -58985,7 +58999,7 @@ "count#lenient": { "in": "query", "name": "lenient", - "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.", + "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\nThis parameter can be used only when the `q` query string parameter is specified.", "deprecated": false, "schema": { "type": "boolean" @@ -58995,7 +59009,7 @@ "count#min_score": { "in": "query", "name": "min_score", - "description": "Sets the minimum `_score` value that documents must have to be included in the result.", + "description": "The minimum `_score` value that documents must have to be included in the result.", "deprecated": false, "schema": { "type": "number" @@ -59005,7 +59019,7 @@ "count#preference": { "in": "query", "name": "preference", - "description": "Specifies the 
node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nBy default, it is random.", "deprecated": false, "schema": { "type": "string" @@ -59015,7 +59029,7 @@ "count#routing": { "in": "query", "name": "routing", - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "deprecated": false, "schema": { "$ref": "#/components/schemas/_types:Routing" @@ -59025,7 +59039,7 @@ "count#terminate_after": { "in": "query", "name": "terminate_after", - "description": "Maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.", + "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.", "deprecated": false, "schema": { "type": "number" @@ -59035,7 +59049,7 @@ "count#q": { "in": "query", "name": "q", - "description": "Query in the Lucene query string syntax.", + "description": "The query in Lucene query string syntax.", "deprecated": false, "schema": { "type": "string" diff --git a/output/schema/schema.json b/output/schema/schema.json index 26a9266c72..112c3aacfd 100644 --- a/output/schema/schema.json +++ b/output/schema/schema.json @@ -466,7 +466,7 @@ "stability": "stable" } }, - "description": "Bulk index or delete documents.\nPerforms multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly 
increase indexing speed.", + "description": "Bulk index or delete documents.\nPerform multiple `index`, `create`, `delete`, and `update` actions in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and 
its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* 
Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out `elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "docId": "docs-bulk", "docTag": "document", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/docs-bulk.html", @@ -2175,10 +2175,12 @@ "stability": "stable" } }, - "description": "Clear a scrolling search.\n\nClear the search context and results for a scrolling search.", + "description": "Clear a scrolling search.\nClear the search context and results for a scrolling search.", "docId": "clear-scroll-api", "docTag": "search", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/clear-scroll-api.html", + "extDocId": "scroll-search-results", + "extDocUrl": 
"https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/paginate-search-results.html#scroll-search-results", "name": "clear_scroll", "request": { "name": "Request", @@ -2226,7 +2228,7 @@ "stability": "stable" } }, - "description": "Close a point in time.\n\nA point in time must be opened explicitly before being used in search requests.\nThe `keep_alive` parameter tells Elasticsearch how long it should persist.\nA point in time is automatically closed when the `keep_alive` period has elapsed.\nHowever, keeping points in time has a cost; close them as soon as they are no longer required for search requests.", + "description": "Close a point in time.\nA point in time must be opened explicitly before being used in search requests.\nThe `keep_alive` parameter tells Elasticsearch how long it should persist.\nA point in time is automatically closed when the `keep_alive` period has elapsed.\nHowever, keeping points in time has a cost; close them as soon as they are no longer required for search requests.", "docId": "point-in-time-api", "docTag": "search", "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/point-in-time-api.html", @@ -4151,10 +4153,16 @@ "stability": "stable" } }, - "description": "Count search results.\nGet the number of documents matching a query.", + "description": "Count search results.\nGet the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", + "docId": "search-count", "docTag": "search", - "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html", + "docUrl": "https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/search-count.html", "name": "count", + "privileges": { + "index": [ + "read" + ] + }, "request": { "name": "Request", "namespace": "_global.count" @@ -21876,7 +21884,7 @@ "namespace": "_global.bulk" }, "properties": [], - "specLocation": "_global/bulk/types.ts#L130-L130" + "specLocation": "_global/bulk/types.ts#L132-L132" }, { "kind": "interface", @@ -21891,7 +21899,7 @@ "namespace": "_global.bulk" }, "properties": [], - "specLocation": "_global/bulk/types.ts#L134-L134" + "specLocation": "_global/bulk/types.ts#L136-L136" }, { "kind": "interface", @@ -21906,7 +21914,7 @@ "namespace": "_global.bulk" }, "properties": [], - "specLocation": "_global/bulk/types.ts#L132-L132" + "specLocation": "_global/bulk/types.ts#L134-L134" }, { "kind": "interface", @@ -21928,7 +21936,7 @@ } }, { - "description": "Name of the index or index alias to perform the action on.", + "description": "The name of the index or index alias to perform the action on.", "name": "_index", "required": false, "type": { @@ -21940,7 +21948,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -21996,7 +22004,7 @@ } } ], - "specLocation": "_global/bulk/types.ts#L90-L107" + "specLocation": "_global/bulk/types.ts#L92-L109" }, { "kind": "interface", @@ -22006,7 +22014,7 @@ }, "properties": [ { - "description": "Indexes the specified document.\nIf the 
document exists, replaces the document and increments the version.\nThe following line must contain the source data to be indexed.", + "description": "Index the specified document.\nIf the document exists, it replaces the document and increments the version.\nThe following line must contain the source data to be indexed.", "name": "index", "required": false, "type": { @@ -22018,7 +22026,7 @@ } }, { - "description": "Indexes the specified document if it does not already exist.\nThe following line must contain the source data to be indexed.", + "description": "Index the specified document if it does not already exist.\nThe following line must contain the source data to be indexed.", "name": "create", "required": false, "type": { @@ -22030,7 +22038,7 @@ } }, { - "description": "Performs a partial document update.\nThe following line must contain the partial document and update options.", + "description": "Perform a partial document update.\nThe following line must contain the partial document and update options.", "name": "update", "required": false, "type": { @@ -22042,7 +22050,7 @@ } }, { - "description": "Removes the specified document from the index.", + "description": "Remove the specified document from the index.", "name": "delete", "required": false, "type": { @@ -22054,7 +22062,7 @@ } } ], - "specLocation": "_global/bulk/types.ts#L145-L167", + "specLocation": "_global/bulk/types.ts#L150-L172", "variants": { "kind": "container" } @@ -22079,7 +22087,7 @@ "name": "OperationType", "namespace": "_global.bulk" }, - "specLocation": "_global/bulk/types.ts#L83-L88" + "specLocation": "_global/bulk/types.ts#L85-L90" }, { "kind": "request", @@ -22135,7 +22143,7 @@ } } }, - "description": "Bulk index or delete documents.\nPerforms multiple indexing or delete operations in a single API call.\nThis reduces overhead and can greatly increase indexing speed.", + "description": "Bulk index or delete documents.\nPerform multiple `index`, `create`, `delete`, and `update` actions 
in a single request.\nThis reduces overhead and can greatly increase indexing speed.\n\nIf the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:\n\n* To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action.\n* To use the `index` action, you must have the `create`, `index`, or `write` index privilege.\n* To use the `delete` action, you must have the `delete` or `write` index privilege.\n* To use the `update` action, you must have the `index` or `write` index privilege.\n* To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege.\n* To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege.\n\nAutomatic data stream creation requires a matching index template with data stream enabled.\n\nThe actions are specified in the request body using a newline delimited JSON (NDJSON) structure:\n\n```\naction_and_meta_data\\n\noptional_source\\n\naction_and_meta_data\\n\noptional_source\\n\n....\naction_and_meta_data\\n\noptional_source\\n\n```\n\nThe `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API.\nA `create` action fails if a document with the same ID already exists in the target.\nAn `index` action adds or replaces a document as necessary.\n\nNOTE: Data streams support only the `create` action.\nTo update or delete a document in a data stream, you must target the backing index containing the document.\n\nAn `update` action expects that the partial doc, upsert, and script and its options are specified on the next line.\n\nA `delete` action does not expect a source on the next line and has the same semantics as the 
standard delete API.\n\nNOTE: The final line of data must end with a newline character (`\\n`).\nEach newline character may be preceded by a carriage return (`\\r`).\nWhen sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`.\nBecause this format uses literal newline characters (`\\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed.\n\nIf you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument.\n\nA note on the format: the idea here is to make processing as fast as possible.\nAs some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side.\n\nClient libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.\n\nThere is no \"correct\" number of actions to perform in a single bulk request.\nExperiment with different settings to find the optimal size for your particular workload.\nNote that Elasticsearch limits the maximum size of an HTTP request to 100mb by default so clients must ensure that no request exceeds this size.\nIt is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.\nFor instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.\n\n**Client support for bulk requests**\n\nSome of the officially supported clients provide helpers to assist with bulk requests and reindexing:\n\n* Go: Check out `esutil.BulkIndexer`\n* Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll`\n* Python: Check out 
`elasticsearch.helpers.*`\n* JavaScript: Check out `client.helpers.*`\n* .NET: Check out `BulkAllObservable`\n* PHP: Check out bulk indexing.\n\n**Submitting bulk requests with cURL**\n\nIf you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`.\nThe latter doesn't preserve newlines. For example:\n\n```\n$ cat requests\n{ \"index\" : { \"_index\" : \"test\", \"_id\" : \"1\" } }\n{ \"field1\" : \"value1\" }\n$ curl -s -H \"Content-Type: application/x-ndjson\" -XPOST localhost:9200/_bulk --data-binary \"@requests\"; echo\n{\"took\":7, \"errors\": false, \"items\":[{\"index\":{\"_index\":\"test\",\"_id\":\"1\",\"_version\":1,\"result\":\"created\",\"forced_refresh\":false}}]}\n```\n\n**Optimistic concurrency control**\n\nEach `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines.\nThe `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. 
See Optimistic concurrency control for more details.\n\n**Versioning**\n\nEach bulk item can include the version value using the `version` field.\nIt automatically follows the behavior of the index or delete operation based on the `_version` mapping.\nIt also supports the `version_type`.\n\n**Routing**\n\nEach bulk item can include the routing value using the `routing` field.\nIt automatically follows the behavior of the index or delete operation based on the `_routing` mapping.\n\nNOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template.\n\n**Wait for active shards**\n\nWhen making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request.\n\n**Refresh**\n\nControl when the changes made by this request are visible to search.\n\nNOTE: Only the shards that receive the bulk request will be affected by refresh.\nImagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.\nThe request will only wait for those three shards to refresh.\nThe other two shards that make up the index do not participate in the `_bulk` request at all.", "generics": [ { "name": "TDocument", @@ -22158,7 +22166,7 @@ }, "path": [ { - "description": "Name of the data stream, index, or index alias to perform bulk actions on.", + "description": "The name of the data stream, index, or index alias to perform bulk actions on.", "name": "index", "required": false, "type": { @@ -22172,7 +22180,7 @@ ], "query": [ { - "description": "If `true`, the response will include the ingest pipelines that were executed for each index or create.", + "description": "If `true`, the response will include the ingest pipelines that were run for each index or create.", "name": "list_executed_pipelines", "required": false, "serverDefault": false, @@ -22185,7 
+22193,7 @@ } }, { - "description": "ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", + "description": "The pipeline identifier to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run regardless of the value of this parameter.", "name": "pipeline", "required": false, "type": { @@ -22197,7 +22205,7 @@ } }, { - "description": "If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes.\nValid values: `true`, `false`, `wait_for`.", + "description": "If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search.\nIf `wait_for`, wait for a refresh to make this operation visible to search.\nIf `false`, do nothing with refreshes.\nValid values: `true`, `false`, `wait_for`.", "name": "refresh", "required": false, "serverDefault": "false", @@ -22210,7 +22218,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value that is used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -22222,7 +22230,7 @@ } }, { - "description": "`true` or `false` to return the `_source` field or not, or a list of fields to return.", + "description": "Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return.", "name": "_source", "required": false, "type": { @@ -22234,7 +22242,7 @@ } }, { - "description": "A comma-separated list 
of source fields to exclude from the response.", + "description": "A comma-separated list of source fields to exclude from the response.\nYou can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "name": "_source_excludes", "required": false, "type": { @@ -22246,7 +22254,7 @@ } }, { - "description": "A comma-separated list of source fields to include in the response.", + "description": "A comma-separated list of source fields to include in the response.\nIf this parameter is specified, only these source fields are returned.\nYou can exclude fields from this subset using the `_source_excludes` query parameter.\nIf the `_source` parameter is `false`, this parameter is ignored.", "name": "_source_includes", "required": false, "type": { @@ -22258,7 +22266,7 @@ } }, { - "description": "Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards.", + "description": "The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards.\nThe default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing.\nThe actual wait time could be longer, particularly when multiple waits occur.", "name": "timeout", "required": false, "serverDefault": "1m", @@ -22271,7 +22279,7 @@ } }, { - "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).", + "description": "The number of shard copies that must be active before proceeding with the operation.\nSet to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`).\nThe default is `1`, which waits for each primary shard to be active.", 
"name": "wait_for_active_shards", "required": false, "serverDefault": "1", @@ -22284,7 +22292,7 @@ } }, { - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "name": "require_alias", "required": false, "serverDefault": false, @@ -22297,7 +22305,7 @@ } }, { - "description": "If `true`, the request's actions must target a data stream (existing or to-be-created).", + "description": "If `true`, the request's actions must target a data stream (existing or to be created).", "name": "require_data_stream", "required": false, "serverDefault": false, @@ -22310,7 +22318,7 @@ } } ], - "specLocation": "_global/bulk/BulkRequest.ts#L32-L125" + "specLocation": "_global/bulk/BulkRequest.ts#L32-L242" }, { "kind": "response", @@ -22318,6 +22326,7 @@ "kind": "properties", "properties": [ { + "description": "If `true`, one or more of the operations in the bulk request did not complete successfully.", "name": "errors", "required": true, "type": { @@ -22329,6 +22338,7 @@ } }, { + "description": "The result of each operation in the bulk request, in the order they were submitted.", "name": "items", "required": true, "type": { @@ -22354,6 +22364,7 @@ } }, { + "description": "The length of time, in milliseconds, it took to process the bulk request.", "name": "took", "required": true, "type": { @@ -22381,7 +22392,7 @@ "name": "Response", "namespace": "_global.bulk" }, - "specLocation": "_global/bulk/BulkResponse.ts#L24-L31" + "specLocation": "_global/bulk/BulkResponse.ts#L24-L45" }, { "kind": "interface", @@ -22415,7 +22426,7 @@ } }, { - "description": "Name of the index associated with the operation.\nIf the operation targeted a data stream, this is the backing index into which the document was written.", + "description": "The name of the index associated with the operation.\nIf the operation targeted a data stream, this is the backing index into which the document was written.", 
"name": "_index", "required": true, "type": { @@ -22427,7 +22438,7 @@ } }, { - "description": "HTTP status code returned for the operation.", + "description": "The HTTP status code returned for the operation.", "name": "status", "required": true, "type": { @@ -22439,7 +22450,7 @@ } }, { - "description": "Contains additional information about the failed operation.\nThe parameter is only returned for failed operations.", + "description": "Additional information about the failed operation.\nThe property is returned only for failed operations.", "name": "error", "required": false, "type": { @@ -22451,7 +22462,7 @@ } }, { - "description": "The primary term assigned to the document for the operation.", + "description": "The primary term assigned to the document for the operation.\nThis property is returned only for successful operations.", "name": "_primary_term", "required": false, "type": { @@ -22463,7 +22474,7 @@ } }, { - "description": "Result of the operation.\nSuccessful values are `created`, `deleted`, and `updated`.", + "description": "The result of the operation.\nSuccessful values are `created`, `deleted`, and `updated`.", "name": "result", "required": false, "type": { @@ -22475,7 +22486,7 @@ } }, { - "description": "The sequence number assigned to the document for the operation.\nSequence numbers are used to ensure an older version of a document doesn’t overwrite a newer version.", + "description": "The sequence number assigned to the document for the operation.\nSequence numbers are used to ensure an older version of a document doesn't overwrite a newer version.", "name": "_seq_no", "required": false, "type": { @@ -22487,7 +22498,7 @@ } }, { - "description": "Contains shard information for the operation.", + "description": "Shard information for the operation.", "name": "_shards", "required": false, "type": { @@ -22499,7 +22510,7 @@ } }, { - "description": "The document version associated with the operation.\nThe document version is incremented each time the 
document is updated.", + "description": "The document version associated with the operation.\nThe document version is incremented each time the document is updated.\nThis property is returned only for successful actions.", "name": "_version", "required": false, "type": { @@ -22549,7 +22560,7 @@ } } ], - "specLocation": "_global/bulk/types.ts#L37-L81" + "specLocation": "_global/bulk/types.ts#L37-L83" }, { "kind": "interface", @@ -22569,7 +22580,7 @@ }, "properties": [ { - "description": "Set to false to disable setting 'result' in the response\nto 'noop' if no change to the document occurred.", + "description": "If true, the `result` in the response is set to 'noop' when no changes to the document occur.", "name": "detect_noop", "required": false, "serverDefault": true, @@ -22594,7 +22605,7 @@ } }, { - "description": "Set to true to use the contents of 'doc' as the value of 'upsert'", + "description": "Set to `true` to use the contents of `doc` as the value of `upsert`.", "name": "doc_as_upsert", "required": false, "serverDefault": false, @@ -22607,7 +22618,7 @@ } }, { - "description": "Script to execute to update the document.", + "description": "The script to run to update the document.", "name": "script", "required": false, "type": { @@ -22619,7 +22630,7 @@ } }, { - "description": "Set to true to execute the script whether or not the document exists.", + "description": "Set to `true` to run the script whether or not the document exists.", "name": "scripted_upsert", "required": false, "serverDefault": false, @@ -22632,7 +22643,7 @@ } }, { - "description": "Set to false to disable source retrieval. 
You can also specify a comma-separated\nlist of the fields you want to retrieve.", + "description": "If `false`, source retrieval is turned off.\nYou can also specify a comma-separated list of the fields you want to retrieve.", "name": "_source", "required": false, "serverDefault": "true", @@ -22645,7 +22656,7 @@ } }, { - "description": "If the document does not already exist, the contents of 'upsert' are inserted as a\nnew document. If the document exists, the 'script' is executed.", + "description": "If the document does not already exist, the contents of `upsert` are inserted as a new document.\nIf the document exists, the `script` is run.", "name": "upsert", "required": false, "type": { @@ -22657,7 +22668,7 @@ } } ], - "specLocation": "_global/bulk/types.ts#L169-L205" + "specLocation": "_global/bulk/types.ts#L174-L209" }, { "kind": "interface", @@ -22673,7 +22684,7 @@ }, "properties": [ { - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "name": "require_alias", "required": false, "serverDefault": false, @@ -22686,6 +22697,7 @@ } }, { + "description": "The number of times an update should be retried in the case of a version conflict.", "name": "retry_on_conflict", "required": false, "type": { @@ -22697,7 +22709,7 @@ } } ], - "specLocation": "_global/bulk/types.ts#L136-L143" + "specLocation": "_global/bulk/types.ts#L138-L148" }, { "kind": "interface", @@ -22713,7 +22725,7 @@ }, "properties": [ { - "description": "A map from the full name of fields to the name of dynamic templates.\nDefaults to an empty map.\nIf a name matches a dynamic template, then that template will be applied regardless of other match predicates defined in the template.\nIf a field is already defined in the mapping, then this parameter won’t be used.", + "description": "A map from the full name of fields to the name of dynamic templates.\nIt defaults to an empty map.\nIf a name 
matches a dynamic template, that template will be applied regardless of other match predicates defined in the template.\nIf a field is already defined in the mapping, then this parameter won't be used.", "name": "dynamic_templates", "required": false, "type": { @@ -22736,7 +22748,7 @@ } }, { - "description": "ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request.\nIf a final pipeline is configured it will always run, regardless of the value of this parameter.", + "description": "The ID of the pipeline to use to preprocess incoming documents.\nIf the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request.\nIf a final pipeline is configured, it will always run regardless of the value of this parameter.", "name": "pipeline", "required": false, "type": { @@ -22748,7 +22760,7 @@ } }, { - "description": "If `true`, the request’s actions must target an index alias.", + "description": "If `true`, the request's actions must target an index alias.", "name": "require_alias", "required": false, "serverDefault": false, @@ -22761,7 +22773,7 @@ } } ], - "specLocation": "_global/bulk/types.ts#L109-L128" + "specLocation": "_global/bulk/types.ts#L111-L130" }, { "kind": "request", @@ -22772,7 +22784,7 @@ "kind": "properties", "properties": [ { - "description": "Scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.", + "description": "The scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.", "name": "scroll_id", "required": false, "type": { @@ -22785,7 +22797,7 @@ } ] }, - "description": "Clear a scrolling search.\n\nClear the search context and results for a scrolling search.", + "description": "Clear a scrolling search.\nClear the search context and results for a scrolling search.", "inherits": { "type": { "name": "RequestBase", @@ -22798,7 +22810,11 
@@ }, "path": [ { - "description": "Comma-separated list of scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.", + "deprecation": { + "description": "", + "version": "7.0.0" + }, + "description": "A comma-separated list of scroll IDs to clear.\nTo clear all scroll IDs, use `_all`.\nIMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter.", "name": "scroll_id", "required": false, "type": { @@ -22811,7 +22827,7 @@ } ], "query": [], - "specLocation": "_global/clear_scroll/ClearScrollRequest.ts#L23-L59" + "specLocation": "_global/clear_scroll/ClearScrollRequest.ts#L23-L61" }, { "kind": "response", @@ -22819,6 +22835,7 @@ "kind": "properties", "properties": [ { + "description": "If `true`, the request succeeded.\nThis does not indicate whether any scrolling search requests were cleared.", "name": "succeeded", "required": true, "type": { @@ -22830,6 +22847,7 @@ } }, { + "description": "The number of scrolling search requests cleared.", "name": "num_freed", "required": true, "type": { @@ -22880,7 +22898,7 @@ "name": "Response", "namespace": "_global.clear_scroll" }, - "specLocation": "_global/clear_scroll/ClearScrollResponse.ts#L22-L36" + "specLocation": "_global/clear_scroll/ClearScrollResponse.ts#L22-L43" }, { "kind": "request", @@ -22904,7 +22922,7 @@ } ] }, - "description": "Close a point in time.\n\nA point in time must be opened explicitly before being used in search requests.\nThe `keep_alive` parameter tells Elasticsearch how long it should persist.\nA point in time is automatically closed when the `keep_alive` period has elapsed.\nHowever, keeping points in time has a cost; close them as soon as they are no longer required for search requests.", + "description": "Close a point in time.\nA point in time must be opened explicitly before being used in search requests.\nThe `keep_alive` parameter tells Elasticsearch how long it should persist.\nA point in time is automatically closed when the `keep_alive` 
period has elapsed.\nHowever, keeping points in time has a cost; close them as soon as they are no longer required for search requests.", "inherits": { "type": { "name": "RequestBase", @@ -22917,7 +22935,7 @@ }, "path": [], "query": [], - "specLocation": "_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L49" + "specLocation": "_global/close_point_in_time/ClosePointInTimeRequest.ts#L23-L48" }, { "kind": "response", @@ -22925,6 +22943,7 @@ "kind": "properties", "properties": [ { + "description": "If `true`, all search contexts associated with the point-in-time ID were successfully closed.", "name": "succeeded", "required": true, "type": { @@ -22936,6 +22955,7 @@ } }, { + "description": "The number of search contexts that were successfully closed.", "name": "num_freed", "required": true, "type": { @@ -22986,7 +23006,7 @@ "name": "Response", "namespace": "_global.close_point_in_time" }, - "specLocation": "_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L36" + "specLocation": "_global/close_point_in_time/ClosePointInTimeResponse.ts#L22-L42" }, { "kind": "request", @@ -22997,7 +23017,7 @@ "kind": "properties", "properties": [ { - "description": "Defines the search definition using the Query DSL.", + "description": "Defines the search definition using the Query DSL.\nThe query is optional, and when not provided, it will use `match_all` to count all the docs.", "name": "query", "required": false, "type": { @@ -23010,7 +23030,7 @@ } ] }, - "description": "Count search results.\nGet the number of documents matching a query.", + "description": "Count search results.\nGet the number of documents matching a query.\n\nThe query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.\nThe latter must be nested in a `query` key, which is the same as the search API.\n\nThe count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices.\n\nThe operation is broadcast across all shards.\nFor each shard ID group, a replica is chosen and the search is run against it.\nThis means that replicas increase the scalability of the count.", "inherits": { "type": { "name": "RequestBase", @@ -23023,7 +23043,7 @@ }, "path": [ { - "description": "Comma-separated list of data streams, indices, and aliases to search.\nSupports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*` or `_all`.", + "description": "A comma-separated list of data streams, indices, and aliases to search.\nIt supports wildcards (`*`).\nTo search all data streams and indices, omit this parameter or use `*` or `_all`.", "name": "index", "required": false, "type": { @@ -23037,7 +23057,7 @@ ], "query": [ { - "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.", + "description": "If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices.\nThis behavior applies even if the request targets other open indices.\nFor example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.", "name": "allow_no_indices", "required": false, "serverDefault": true, @@ -23050,7 +23070,7 @@ } }, { - "description": "Analyzer to use for the query string.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The analyzer to use for the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyzer", "required": false, "type": { @@ -23062,7 +23082,7 @@ } }, { - "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can only 
be used when the `q` query string parameter is specified.", + "description": "If `true`, wildcard and prefix queries are analyzed.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "analyze_wildcard", "required": false, "serverDefault": false, @@ -23075,9 +23095,10 @@ } }, { - "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The default operator for query string query: `AND` or `OR`.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "default_operator", "required": false, + "serverDefault": "OR", "type": { "kind": "instance_of", "type": { @@ -23087,7 +23108,7 @@ } }, { - "description": "Field to use as default where no field prefix is given in the query string.\nThis parameter can only be used when the `q` query string parameter is specified.", + "description": "The field to use as a default when no field prefix is given in the query string.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "df", "required": false, "type": { @@ -23099,7 +23120,7 @@ } }, { - "description": "Type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nSupports comma-separated values, such as `open,hidden`.", + "description": "The type of index that wildcard patterns can match.\nIf the request can target data streams, this argument determines whether wildcard expressions match hidden data streams.\nIt supports comma-separated values, such as `open,hidden`.", "name": "expand_wildcards", "required": false, "serverDefault": "open", @@ -23112,7 +23133,11 @@ } }, { - "description": "If `true`, concrete, expanded or aliased indices are ignored when frozen.", + "deprecation": { + "description": "", + "version": "7.16.0" + }, + 
"description": "If `true`, concrete, expanded, or aliased indices are ignored when frozen.", "name": "ignore_throttled", "required": false, "serverDefault": true, @@ -23138,7 +23163,7 @@ } }, { - "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.", + "description": "If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored.\nThis parameter can be used only when the `q` query string parameter is specified.", "name": "lenient", "required": false, "serverDefault": false, @@ -23151,7 +23176,7 @@ } }, { - "description": "Sets the minimum `_score` value that documents must have to be included in the result.", + "description": "The minimum `_score` value that documents must have to be included in the result.", "name": "min_score", "required": false, "type": { @@ -23163,7 +23188,7 @@ } }, { - "description": "Specifies the node or shard the operation should be performed on.\nRandom by default.", + "description": "The node or shard the operation should be performed on.\nBy default, it is random.", "name": "preference", "required": false, "type": { @@ -23175,7 +23200,7 @@ } }, { - "description": "Custom value used to route operations to a specific shard.", + "description": "A custom value used to route operations to a specific shard.", "name": "routing", "required": false, "type": { @@ -23187,7 +23212,7 @@ } }, { - "description": "Maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.", + "description": "The maximum number of documents to collect for each shard.\nIf a query reaches this limit, Elasticsearch terminates the query early.\nElasticsearch collects documents before sorting.\n\nIMPORTANT: Use with caution.\nElasticsearch applies this parameter to each shard handling the request.\nWhen possible, let 
Elasticsearch perform early termination automatically.\nAvoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.", "name": "terminate_after", "required": false, "type": { @@ -23199,7 +23224,7 @@ } }, { - "description": "Query in the Lucene query string syntax.", + "description": "The query in Lucene query string syntax.", "name": "q", "required": false, "type": { @@ -23211,7 +23236,7 @@ } } ], - "specLocation": "_global/count/CountRequest.ts#L26-L133" + "specLocation": "_global/count/CountRequest.ts#L26-L154" }, { "kind": "response", @@ -44363,7 +44388,7 @@ } } ], - "specLocation": "_types/Stats.ts#L68-L78" + "specLocation": "_types/Stats.ts#L71-L81" }, { "kind": "type_alias", @@ -44778,7 +44803,7 @@ } } ], - "specLocation": "_types/Stats.ts#L80-L90" + "specLocation": "_types/Stats.ts#L83-L93" }, { "kind": "enum", @@ -45130,7 +45155,7 @@ } } ], - "specLocation": "_types/Stats.ts#L97-L109" + "specLocation": "_types/Stats.ts#L100-L112" }, { "kind": "type_alias", @@ -45452,7 +45477,7 @@ } }, { - "description": "A human-readable explanation of the error, in english", + "description": "A human-readable explanation of the error, in English.", "name": "reason", "required": false, "type": { @@ -45671,7 +45696,7 @@ } } ], - "specLocation": "_types/Stats.ts#L118-L121" + "specLocation": "_types/Stats.ts#L121-L124" }, { "kind": "interface", @@ -45703,7 +45728,7 @@ } } ], - "specLocation": "_types/Stats.ts#L92-L95" + "specLocation": "_types/Stats.ts#L95-L98" }, { "kind": "interface", @@ -45938,7 +45963,7 @@ } } ], - "specLocation": "_types/Stats.ts#L111-L116" + "specLocation": "_types/Stats.ts#L114-L119" }, { "kind": "type_alias", @@ -46031,7 +46056,7 @@ } } ], - "specLocation": "_types/Stats.ts#L123-L128" + "specLocation": "_types/Stats.ts#L126-L131" }, { "kind": "type_alias", @@ -46640,7 +46665,7 @@ } } ], - "specLocation": "_types/Stats.ts#L130-L141" + "specLocation": "_types/Stats.ts#L133-L144" }, { "kind": 
"type_alias", @@ -47095,7 +47120,7 @@ } } ], - "specLocation": "_types/Stats.ts#L143-L159" + "specLocation": "_types/Stats.ts#L146-L162" }, { "kind": "type_alias", @@ -47984,7 +48009,7 @@ } } ], - "specLocation": "_types/Stats.ts#L161-L178" + "specLocation": "_types/Stats.ts#L164-L181" }, { "kind": "type_alias", @@ -48838,7 +48863,7 @@ } } ], - "specLocation": "_types/Stats.ts#L180-L190" + "specLocation": "_types/Stats.ts#L183-L193" }, { "kind": "type_alias", @@ -48959,7 +48984,7 @@ } } ], - "specLocation": "_types/Stats.ts#L192-L226" + "specLocation": "_types/Stats.ts#L195-L229" }, { "kind": "type_alias", @@ -49153,7 +49178,7 @@ } } ], - "specLocation": "_types/Stats.ts#L228-L233" + "specLocation": "_types/Stats.ts#L231-L236" }, { "kind": "enum", @@ -49267,7 +49292,7 @@ } } ], - "specLocation": "_types/Stats.ts#L235-L242" + "specLocation": "_types/Stats.ts#L238-L245" }, { "kind": "type_alias", @@ -49388,7 +49413,7 @@ } } ], - "specLocation": "_types/Stats.ts#L244-L250" + "specLocation": "_types/Stats.ts#L247-L253" }, { "kind": "enum", @@ -50357,7 +50382,7 @@ } } ], - "specLocation": "_types/Stats.ts#L252-L271" + "specLocation": "_types/Stats.ts#L255-L274" }, { "kind": "interface", @@ -50714,7 +50739,7 @@ } } ], - "specLocation": "_types/Stats.ts#L273-L366" + "specLocation": "_types/Stats.ts#L276-L369" }, { "kind": "type_alias", @@ -50819,6 +50844,7 @@ }, "properties": [ { + "description": "The number of shards the operation or search attempted to run on but failed.", "name": "failed", "required": true, "type": { @@ -50830,7 +50856,7 @@ } }, { - "description": "Indicates how many shards have successfully run the search.", + "description": "The number of shards the operation or search succeeded on.", "name": "successful", "required": true, "type": { @@ -50842,7 +50868,7 @@ } }, { - "description": "Indicates how many shards the search will run on overall.", + "description": "The number of shards the operation or search will run on overall.", "name": "total", 
"required": true, "type": { @@ -50879,7 +50905,7 @@ } } ], - "specLocation": "_types/Stats.ts#L54-L66" + "specLocation": "_types/Stats.ts#L54-L69" }, { "kind": "interface", @@ -51359,7 +51385,7 @@ } } ], - "specLocation": "_types/Stats.ts#L368-L395" + "specLocation": "_types/Stats.ts#L371-L398" }, { "kind": "interface", @@ -51951,7 +51977,7 @@ } } ], - "specLocation": "_types/Stats.ts#L397-L405" + "specLocation": "_types/Stats.ts#L400-L408" }, { "kind": "type_alias", @@ -52253,7 +52279,7 @@ } } ], - "specLocation": "_types/Stats.ts#L407-L412" + "specLocation": "_types/Stats.ts#L410-L415" }, { "kind": "interface", diff --git a/specification/_global/bulk/BulkRequest.ts b/specification/_global/bulk/BulkRequest.ts index 0630ce1c10..cd493230be 100644 --- a/specification/_global/bulk/BulkRequest.ts +++ b/specification/_global/bulk/BulkRequest.ts @@ -31,8 +31,115 @@ import { OperationContainer, UpdateAction } from './types' /** * Bulk index or delete documents. - * Performs multiple indexing or delete operations in a single API call. + * Perform multiple `index`, `create`, `delete`, and `update` actions in a single request. * This reduces overhead and can greatly increase indexing speed. + * + * If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias: + * + * * To use the `create` action, you must have the `create_doc`, `create`, `index`, or `write` index privilege. Data streams support only the `create` action. + * * To use the `index` action, you must have the `create`, `index`, or `write` index privilege. + * * To use the `delete` action, you must have the `delete` or `write` index privilege. + * * To use the `update` action, you must have the `index` or `write` index privilege. + * * To automatically create a data stream or index with a bulk API request, you must have the `auto_configure`, `create_index`, or `manage` index privilege. 
+ * * To make the result of a bulk operation visible to search using the `refresh` parameter, you must have the `maintenance` or `manage` index privilege. + * + * Automatic data stream creation requires a matching index template with data stream enabled. + * + * The actions are specified in the request body using a newline delimited JSON (NDJSON) structure: + * + * ``` + * action_and_meta_data\n + * optional_source\n + * action_and_meta_data\n + * optional_source\n + * .... + * action_and_meta_data\n + * optional_source\n + * ``` + * + * The `index` and `create` actions expect a source on the next line and have the same semantics as the `op_type` parameter in the standard index API. + * A `create` action fails if a document with the same ID already exists in the target. + * An `index` action adds or replaces a document as necessary. + * + * NOTE: Data streams support only the `create` action. + * To update or delete a document in a data stream, you must target the backing index containing the document. + * + * An `update` action expects that the partial doc, upsert, and script and its options are specified on the next line. + * + * A `delete` action does not expect a source on the next line and has the same semantics as the standard delete API. + * + * NOTE: The final line of data must end with a newline character (`\n`). + * Each newline character may be preceded by a carriage return (`\r`). + * When sending NDJSON data to the `_bulk` endpoint, use a `Content-Type` header of `application/json` or `application/x-ndjson`. + * Because this format uses literal newline characters (`\n`) as delimiters, make sure that the JSON actions and sources are not pretty printed. + * + * If you provide a target in the request path, it is used for any actions that don't explicitly specify an `_index` argument. + * + * A note on the format: the idea here is to make processing as fast as possible.
+ * As some of the actions are redirected to other shards on other nodes, only `action_meta_data` is parsed on the receiving node side. + * + * Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible. + * + * There is no "correct" number of actions to perform in a single bulk request. + * Experiment with different settings to find the optimal size for your particular workload. + * Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. + * It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. + * For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch. + * + * **Client support for bulk requests** + * + * Some of the officially supported clients provide helpers to assist with bulk requests and reindexing: + * + * * Go: Check out `esutil.BulkIndexer` + * * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` and `Search::Elasticsearch::Client::5_0::Scroll` + * * Python: Check out `elasticsearch.helpers.*` + * * JavaScript: Check out `client.helpers.*` + * * .NET: Check out `BulkAllObservable` + * * PHP: Check out bulk indexing. + * + * **Submitting bulk requests with cURL** + * + * If you're providing text file input to `curl`, you must use the `--data-binary` flag instead of plain `-d`. + * The latter doesn't preserve newlines.
For example: + * + * ``` + * $ cat requests + * { "index" : { "_index" : "test", "_id" : "1" } } + * { "field1" : "value1" } + * $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo + * {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} + * ``` + * + * **Optimistic concurrency control** + * + * Each `index` and `delete` action within a bulk API call may include the `if_seq_no` and `if_primary_term` parameters in their respective action and meta data lines. + * The `if_seq_no` and `if_primary_term` parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details. + * + * **Versioning** + * + * Each bulk item can include the version value using the `version` field. + * It automatically follows the behavior of the index or delete operation based on the `_version` mapping. + * It also supports the `version_type`. + * + * **Routing** + * + * Each bulk item can include the routing value using the `routing` field. + * It automatically follows the behavior of the index or delete operation based on the `_routing` mapping. + * + * NOTE: Data streams do not support custom routing unless they were created with the `allow_custom_routing` setting enabled in the template. + * + * **Wait for active shards** + * + * When making bulk calls, you can set the `wait_for_active_shards` parameter to require a minimum number of shard copies to be active before starting to process the bulk request. + * + * **Refresh** + * + * Control when the changes made by this request are visible to search. + * + * NOTE: Only the shards that receive the bulk request will be affected by refresh. + * Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen to be routed to different shards in an index with five shards.
+ * The request will only wait for those three shards to refresh. + * The other two shards that make up the index do not participate in the `_bulk` request at all. * @rest_spec_name bulk * @availability stack stability=stable * @availability serverless stability=stable visibility=public @@ -53,62 +160,72 @@ export interface Request extends RequestBase { ] path_parts: { /** - * Name of the data stream, index, or index alias to perform bulk actions on. + * The name of the data stream, index, or index alias to perform bulk actions on. */ index?: IndexName } query_parameters: { /** - * If `true`, the response will include the ingest pipelines that were executed for each index or create. + * If `true`, the response will include the ingest pipelines that were run for each index or create. * @server_default false */ list_executed_pipelines?: boolean /** - * ID of the pipeline to use to preprocess incoming documents. - * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. - * If a final pipeline is configured it will always run, regardless of the value of this parameter. + * The pipeline identifier to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string /** - * If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` do nothing with refreshes. + * If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. + * If `wait_for`, wait for a refresh to make this operation visible to search. + * If `false`, do nothing with refreshes. * Valid values: `true`, `false`, `wait_for`. 
* @server_default false */ refresh?: Refresh /** - * Custom value used to route operations to a specific shard. + * A custom value that is used to route operations to a specific shard. */ routing?: Routing /** - * `true` or `false` to return the `_source` field or not, or a list of fields to return. + * Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. */ _source?: SourceConfigParam /** * A comma-separated list of source fields to exclude from the response. + * You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_excludes?: Fields /** * A comma-separated list of source fields to include in the response. + * If this parameter is specified, only these source fields are returned. + * You can exclude fields from this subset using the `_source_excludes` query parameter. + * If the `_source` parameter is `false`, this parameter is ignored. */ _source_includes?: Fields /** - * Period each action waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. + * The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. + * The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. + * The actual wait time could be longer, particularly when multiple waits occur. * @server_default 1m */ timeout?: Duration /** * The number of shard copies that must be active before proceeding with the operation. - * Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). + * The default is `1`, which waits for each primary shard to be active. 
* @server_default 1 */ wait_for_active_shards?: WaitForActiveShards /** - * If `true`, the request’s actions must target an index alias. + * If `true`, the request's actions must target an index alias. * @server_default false */ require_alias?: boolean /** - * If `true`, the request's actions must target a data stream (existing or to-be-created). + * If `true`, the request's actions must target a data stream (existing or to be created). * @server_default false */ require_data_stream?: boolean diff --git a/specification/_global/bulk/BulkResponse.ts b/specification/_global/bulk/BulkResponse.ts index cd63b63c57..a378633c5b 100644 --- a/specification/_global/bulk/BulkResponse.ts +++ b/specification/_global/bulk/BulkResponse.ts @@ -22,9 +22,23 @@ import { long } from '@_types/Numeric' import { OperationType, ResponseItem } from './types' export class Response { + /** + * The response contains the individual results of each operation in the request. + * They are returned in the order submitted. + * The success or failure of an individual operation does not affect other operations in the request. + */ body: { + /** + * If `true`, one or more of the operations in the bulk request did not complete successfully. + */ errors: boolean + /** + * The result of each operation in the bulk request, in the order they were submitted. + */ items: SingleKeyDictionary[] + /** + * The length of time, in milliseconds, it took to process the bulk request. + */ took: long ingest_took?: long } diff --git a/specification/_global/bulk/examples/request/BulkRequestExample1.yaml b/specification/_global/bulk/examples/request/BulkRequestExample1.yaml new file mode 100644 index 0000000000..ba8fea8042 --- /dev/null +++ b/specification/_global/bulk/examples/request/BulkRequestExample1.yaml @@ -0,0 +1,17 @@ +summary: Multiple operations +# method_request: POST _bulk +description: Run `POST _bulk` to perform multiple operations. 
+# type: request +value: '{ "index" : { "_index" : "test", "_id" : "1" } } + + { "field1" : "value1" } + + { "delete" : { "_index" : "test", "_id" : "2" } } + + { "create" : { "_index" : "test", "_id" : "3" } } + + { "field1" : "value3" } + + { "update" : {"_id" : "1", "_index" : "test"} } + + { "doc" : {"field2" : "value2"} }' diff --git a/specification/_global/bulk/examples/request/BulkRequestExample2.yaml b/specification/_global/bulk/examples/request/BulkRequestExample2.yaml new file mode 100644 index 0000000000..ee4320a65c --- /dev/null +++ b/specification/_global/bulk/examples/request/BulkRequestExample2.yaml @@ -0,0 +1,26 @@ +summary: Bulk updates +# method_request: POST _bulk +description: > + When you run `POST _bulk` and use the `update` action, you can use `retry_on_conflict` as a field in the action itself (not in the extra payload line) to specify how many times an update should be retried in the case of a version conflict. +# type: request +value: + '{ "update" : {"_id" : "1", "_index" : "index1", "retry_on_conflict" : 3} } + + { "doc" : {"field" : "value"} } + + { "update" : { "_id" : "0", "_index" : "index1", "retry_on_conflict" : 3} } + + { "script" : { "source": "ctx._source.counter += params.param1", "lang" : "painless", + "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}} + + { "update" : {"_id" : "2", "_index" : "index1", "retry_on_conflict" : 3} } + + { "doc" : {"field" : "value"}, "doc_as_upsert" : true } + + { "update" : {"_id" : "3", "_index" : "index1", "_source" : true} } + + { "doc" : {"field" : "value"} } + + { "update" : {"_id" : "4", "_index" : "index1"} } + + { "doc" : {"field" : "value"}, "_source": true}' diff --git a/specification/_global/bulk/examples/request/BulkRequestExample3.yaml b/specification/_global/bulk/examples/request/BulkRequestExample3.yaml new file mode 100644 index 0000000000..5c81d99d7d --- /dev/null +++ b/specification/_global/bulk/examples/request/BulkRequestExample3.yaml @@ -0,0 +1,16 @@ +summary: Filter 
for failed operations +# method_request: POST /_bulk +description: > + To return only information about failed operations, run `POST /_bulk?filter_path=items.*.error`. +# type: request +value: '{ "update": {"_id": "5", "_index": "index1"} } + + { "doc": {"my_field": "foo"} } + + { "update": {"_id": "6", "_index": "index1"} } + + { "doc": {"my_field": "foo"} } + + { "create": {"_id": "7", "_index": "index1"} } + + { "my_field": "foo" }' diff --git a/specification/_global/bulk/examples/request/BulkRequestExample4.yaml b/specification/_global/bulk/examples/request/BulkRequestExample4.yaml new file mode 100644 index 0000000000..32bc99d57a --- /dev/null +++ b/specification/_global/bulk/examples/request/BulkRequestExample4.yaml @@ -0,0 +1,13 @@ +summary: Dynamic templates +method_request: POST /_bulk +description: > + Run `POST /_bulk` to perform a bulk request that consists of index and create actions with the `dynamic_templates` parameter. + The bulk request creates two new fields `work_location` and `home_location` with type `geo_point` according to the `dynamic_templates` parameter. + However, the `raw_location` field is created using default dynamic mapping rules, as a text field in that case since it is supplied as a string in the JSON document. 
+# type: request +value: "{ \"index\" : {\ + \ \"_index\" : \"my_index\", \"_id\" : \"1\", \"dynamic_templates\": {\"work_location\"\ + : \"geo_point\"}} }\n{ \"field\" : \"value1\", \"work_location\": \"41.12,-71.34\"\ + , \"raw_location\": \"41.12,-71.34\"}\n{ \"create\" : { \"_index\" : \"my_index\"\ + , \"_id\" : \"2\", \"dynamic_templates\": {\"home_location\": \"geo_point\"}} }\n\ + { \"field\" : \"value2\", \"home_location\": \"41.12,-71.34\"}" diff --git a/specification/_global/bulk/examples/response/BulkResponseExample1.yaml b/specification/_global/bulk/examples/response/BulkResponseExample1.yaml new file mode 100644 index 0000000000..f1f8426579 --- /dev/null +++ b/specification/_global/bulk/examples/response/BulkResponseExample1.yaml @@ -0,0 +1,27 @@ +summary: Multiple successful operations +# description: '' +# type: response +# response_code: '' +value: + "{\n \"took\": 30,\n \"errors\": false,\n \"items\": [\n {\n \ + \ \"index\": {\n \"_index\": \"test\",\n \"_id\": \"\ + 1\",\n \"_version\": 1,\n \"result\": \"created\",\n \ + \ \"_shards\": {\n \"total\": 2,\n \"successful\"\ + : 1,\n \"failed\": 0\n },\n \"status\": 201,\n\ + \ \"_seq_no\" : 0,\n \"_primary_term\": 1\n }\n \ + \ },\n {\n \"delete\": {\n \"_index\": \"test\",\n \ + \ \"_id\": \"2\",\n \"_version\": 1,\n \"result\"\ + : \"not_found\",\n \"_shards\": {\n \"total\": 2,\n \ + \ \"successful\": 1,\n \"failed\": 0\n },\n\ + \ \"status\": 404,\n \"_seq_no\" : 1,\n \"_primary_term\"\ + \ : 2\n }\n },\n {\n \"create\": {\n \"_index\"\ + : \"test\",\n \"_id\": \"3\",\n \"_version\": 1,\n \ + \ \"result\": \"created\",\n \"_shards\": {\n \"total\"\ + : 2,\n \"successful\": 1,\n \"failed\": 0\n \ + \ },\n \"status\": 201,\n \"_seq_no\" : 2,\n \ + \ \"_primary_term\" : 3\n }\n },\n {\n \"update\": {\n\ + \ \"_index\": \"test\",\n \"_id\": \"1\",\n \"\ + _version\": 2,\n \"result\": \"updated\",\n \"_shards\": {\n\ + \ \"total\": 2,\n \"successful\": 1,\n \ + \ \"failed\": 0\n },\n \"status\": 
200,\n \"\ + _seq_no\" : 3,\n \"_primary_term\" : 4\n }\n }\n ]\n}" diff --git a/specification/_global/bulk/examples/response/BulkResponseExample2.yaml b/specification/_global/bulk/examples/response/BulkResponseExample2.yaml new file mode 100644 index 0000000000..08c231547a --- /dev/null +++ b/specification/_global/bulk/examples/response/BulkResponseExample2.yaml @@ -0,0 +1,24 @@ +summary: Failed actions +description: > + If you run `POST /_bulk` with operations that update non-existent documents, the operations cannot complete successfully. + The API returns a response with an `errors` property value `true`. + The response also includes an error object for any failed operations. + The error object contains additional information about the failure, such as the error type and reason. +# type: response +# response_code: '' +value: + "{\n \"took\": 486,\n \"errors\": true,\n \"items\": [\n {\n \"\ + update\": {\n \"_index\": \"index1\",\n \"_id\": \"5\",\n \"\ + status\": 404,\n \"error\": {\n \"type\": \"document_missing_exception\"\ + ,\n \"reason\": \"[5]: document missing\",\n \"index_uuid\": \"\ + aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"\ + index1\"\n }\n }\n },\n {\n \"update\": {\n \"_index\"\ + : \"index1\",\n \"_id\": \"6\",\n \"status\": 404,\n \"error\"\ + : {\n \"type\": \"document_missing_exception\",\n \"reason\":\ + \ \"[6]: document missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\"\ + ,\n \"shard\": \"0\",\n \"index\": \"index1\"\n }\n \ + \ }\n },\n {\n \"create\": {\n \"_index\": \"index1\",\n \ + \ \"_id\": \"7\",\n \"_version\": 1,\n \"result\": \"created\"\ + ,\n \"_shards\": {\n \"total\": 2,\n \"successful\": 1,\n\ + \ \"failed\": 0\n },\n \"_seq_no\": 0,\n \"_primary_term\"\ + : 1,\n \"status\": 201\n }\n }\n ]\n}" diff --git a/specification/_global/bulk/examples/response/BulkResponseExample3.yaml b/specification/_global/bulk/examples/response/BulkResponseExample3.yaml new file mode 100644 index 0000000000..34a86b5839 
--- /dev/null +++ b/specification/_global/bulk/examples/response/BulkResponseExample3.yaml @@ -0,0 +1,14 @@ +summary: Filter for failed operations +description: > + An example response from `POST /_bulk?filter_path=items.*.error`, which returns only information about failed operations. +# type: response +# response_code: '' +value: + "{\n \"items\": [\n {\n \"update\": {\n \"error\": {\n \ + \ \"type\": \"document_missing_exception\",\n \"reason\": \"[5]: document\ + \ missing\",\n \"index_uuid\": \"aAsFqTI0Tc2W0LCWgPNrOA\",\n \"\ + shard\": \"0\",\n \"index\": \"index1\"\n }\n }\n },\n \ + \ {\n \"update\": {\n \"error\": {\n \"type\": \"document_missing_exception\"\ + ,\n \"reason\": \"[6]: document missing\",\n \"index_uuid\": \"\ + aAsFqTI0Tc2W0LCWgPNrOA\",\n \"shard\": \"0\",\n \"index\": \"\ + index1\"\n }\n }\n }\n ]\n}" diff --git a/specification/_global/bulk/types.ts b/specification/_global/bulk/types.ts index 470f1dcfdd..4310bf8ff5 100644 --- a/specification/_global/bulk/types.ts +++ b/specification/_global/bulk/types.ts @@ -40,40 +40,42 @@ export class ResponseItem { */ _id?: string | null /** - * Name of the index associated with the operation. + * The name of the index associated with the operation. * If the operation targeted a data stream, this is the backing index into which the document was written. */ _index: string /** - * HTTP status code returned for the operation. + * The HTTP status code returned for the operation. */ status: integer /** - * Contains additional information about the failed operation. - * The parameter is only returned for failed operations. + * Additional information about the failed operation. + * The property is returned only for failed operations. */ error?: ErrorCause /** * The primary term assigned to the document for the operation. + * This property is returned only for successful operations. */ _primary_term?: long /** - * Result of the operation. + * The result of the operation. 
* Successful values are `created`, `deleted`, and `updated`. */ result?: string /** * The sequence number assigned to the document for the operation. - * Sequence numbers are used to ensure an older version of a document doesn’t overwrite a newer version. + * Sequence numbers are used to ensure an older version of a document doesn't overwrite a newer version. */ _seq_no?: SequenceNumber /** - * Contains shard information for the operation. + * Shard information for the operation. */ _shards?: ShardStatistics /** * The document version associated with the operation. * The document version is incremented each time the document is updated. + * This property is returned only for successful operations. */ _version?: VersionNumber forced_refresh?: boolean @@ -93,11 +95,11 @@ export class OperationBase { */ _id?: Id /** - * Name of the index or index alias to perform the action on. + * The name of the index or index alias to perform the action on. */ _index?: IndexName /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. */ routing?: Routing if_primary_term?: long @@ -109,19 +111,19 @@ export class OperationBase { export class WriteOperation extends OperationBase { /** * A map from the full name of fields to the name of dynamic templates. - * Defaults to an empty map. - * If a name matches a dynamic template, then that template will be applied regardless of other match predicates defined in the template. - * If a field is already defined in the mapping, then this parameter won’t be used. + * It defaults to an empty map. + * If a name matches a dynamic template, that template will be applied regardless of other match predicates defined in the template. + * If a field is already defined in the mapping, then this parameter won't be used. */ dynamic_templates?: Dictionary /** - * ID of the pipeline to use to preprocess incoming documents. 
- * If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. - * If a final pipeline is configured it will always run, regardless of the value of this parameter. + * The ID of the pipeline to use to preprocess incoming documents. + * If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. + * If a final pipeline is configured, it will always run regardless of the value of this parameter. */ pipeline?: string /** - * If `true`, the request’s actions must target an index alias. + * If `true`, the request's actions must target an index alias. * @server_default false */ require_alias?: boolean @@ -135,41 +137,43 @@ export class DeleteOperation extends OperationBase {} export class UpdateOperation extends OperationBase { /** - * If `true`, the request’s actions must target an index alias. + * If `true`, the request's actions must target an index alias. * @server_default false */ require_alias?: boolean + /** + * The number of times an update should be retried in the case of a version conflict. + */ retry_on_conflict?: integer } /** @variants container */ export class OperationContainer { /** - * Indexes the specified document. - * If the document exists, replaces the document and increments the version. + * Index the specified document. + * If the document exists, it replaces the document and increments the version. * The following line must contain the source data to be indexed. */ index?: IndexOperation /** - * Indexes the specified document if it does not already exist. + * Index the specified document if it does not already exist. * The following line must contain the source data to be indexed. */ create?: CreateOperation /** - * Performs a partial document update. + * Perform a partial document update. * The following line must contain the partial document and update options. 
*/ update?: UpdateOperation /** - * Removes the specified document from the index. + * Remove the specified document from the index. */ delete?: DeleteOperation } export class UpdateAction { /** - * Set to false to disable setting 'result' in the response - * to 'noop' if no change to the document occurred. + * If `true`, the `result` in the response is set to `noop` when no changes to the document occur. * @server_default true */ detect_noop?: boolean @@ -178,28 +182,28 @@ export class UpdateAction { */ doc?: TPartialDocument /** - * Set to true to use the contents of 'doc' as the value of 'upsert' + * Set to `true` to use the contents of `doc` as the value of `upsert`. * @server_default false */ doc_as_upsert?: boolean /** - * Script to execute to update the document. + * The script to run to update the document. */ script?: Script /** - * Set to true to execute the script whether or not the document exists. + * Set to `true` to run the script whether or not the document exists. * @server_default false */ scripted_upsert?: boolean /** - * Set to false to disable source retrieval. You can also specify a comma-separated - * list of the fields you want to retrieve. + * If `false`, source retrieval is turned off. + * You can also specify a comma-separated list of the fields you want to retrieve. * @server_default true */ _source?: SourceConfig /** - * If the document does not already exist, the contents of 'upsert' are inserted as a - * new document. If the document exists, the 'script' is executed. + * If the document does not already exist, the contents of `upsert` are inserted as a new document. + * If the document exists, the `script` is run. 
*/ upsert?: TDocument } diff --git a/specification/_global/clear_scroll/ClearScrollRequest.ts b/specification/_global/clear_scroll/ClearScrollRequest.ts index 291b5d3941..ff2e53cdfd 100644 --- a/specification/_global/clear_scroll/ClearScrollRequest.ts +++ b/specification/_global/clear_scroll/ClearScrollRequest.ts @@ -22,13 +22,13 @@ import { ScrollIds } from '@_types/common' /** * Clear a scrolling search. - * * Clear the search context and results for a scrolling search. * @rest_spec_name clear_scroll * @availability stack stability=stable * @availability serverless stability=stable visibility=public * @doc_id clear-scroll-api * @doc_tag search + * @ext_doc_id scroll-search-results */ export interface Request extends RequestBase { urls: [ @@ -44,14 +44,16 @@ export interface Request extends RequestBase { ] path_parts: { /** - * Comma-separated list of scroll IDs to clear. + * A comma-separated list of scroll IDs to clear. * To clear all scroll IDs, use `_all`. + * IMPORTANT: Scroll IDs can be long. It is recommended to specify scroll IDs in the request body parameter. + * @deprecated 7.0.0 */ scroll_id?: ScrollIds } body: { /** - * Scroll IDs to clear. + * The scroll IDs to clear. * To clear all scroll IDs, use `_all`. */ scroll_id?: ScrollIds diff --git a/specification/_global/clear_scroll/ClearScrollResponse.ts b/specification/_global/clear_scroll/ClearScrollResponse.ts index 77bbe83daf..51355a00e1 100644 --- a/specification/_global/clear_scroll/ClearScrollResponse.ts +++ b/specification/_global/clear_scroll/ClearScrollResponse.ts @@ -21,7 +21,14 @@ import { integer } from '@_types/Numeric' export class Response { body: { + /** + * If `true`, the request succeeded. + * This does not indicate whether any scrolling search requests were cleared. + */ succeeded: boolean + /** + * The number of scrolling search requests cleared. 
+ */ num_freed: integer } exceptions: [ diff --git a/specification/_global/clear_scroll/examples/request/ClearScrollRequestExample1.yaml b/specification/_global/clear_scroll/examples/request/ClearScrollRequestExample1.yaml new file mode 100644 index 0000000000..cf38b01512 --- /dev/null +++ b/specification/_global/clear_scroll/examples/request/ClearScrollRequestExample1.yaml @@ -0,0 +1,8 @@ +# summary: +# method_request: DELETE /_search/scroll +description: Run `DELETE /_search/scroll` to clear the search context and results for a scrolling search. +# type: request +value: |- + { + "scroll_id": "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==" + } diff --git a/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts b/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts index 0156c29866..922650cead 100644 --- a/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts +++ b/specification/_global/close_point_in_time/ClosePointInTimeRequest.ts @@ -22,7 +22,6 @@ import { Id } from '@_types/common' /** * Close a point in time. - * * A point in time must be opened explicitly before being used in search requests. * The `keep_alive` parameter tells Elasticsearch how long it should persist. * A point in time is automatically closed when the `keep_alive` period has elapsed. diff --git a/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts b/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts index 77bbe83daf..0ec2fc39fa 100644 --- a/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts +++ b/specification/_global/close_point_in_time/ClosePointInTimeResponse.ts @@ -21,7 +21,13 @@ import { integer } from '@_types/Numeric' export class Response { body: { + /** + * If `true`, all search contexts associated with the point-in-time ID were successfully closed. + */ succeeded: boolean + /** + * The number of search contexts that were successfully closed. 
+ */ num_freed: integer } exceptions: [ diff --git a/specification/_global/close_point_in_time/examples/200_response/ClosePointInTimeResponseExample1.yaml b/specification/_global/close_point_in_time/examples/200_response/ClosePointInTimeResponseExample1.yaml new file mode 100644 index 0000000000..934f1fe814 --- /dev/null +++ b/specification/_global/close_point_in_time/examples/200_response/ClosePointInTimeResponseExample1.yaml @@ -0,0 +1,9 @@ +# summary: +description: A successful response from `DELETE /_pit`. +# type: response +# response_code: 200 +value: |- + { + "succeeded": true, + "num_freed": 3 + } diff --git a/specification/_global/close_point_in_time/examples/request/ClosePointInTimeRequestExample1.yaml b/specification/_global/close_point_in_time/examples/request/ClosePointInTimeRequestExample1.yaml new file mode 100644 index 0000000000..8173163203 --- /dev/null +++ b/specification/_global/close_point_in_time/examples/request/ClosePointInTimeRequestExample1.yaml @@ -0,0 +1,8 @@ +# summary: +# method_request: DELETE /_pit +description: Run `DELETE /_pit` to close a point-in-time. +# type: request +value: |- + { + "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==" + } diff --git a/specification/_global/count/CountRequest.ts b/specification/_global/count/CountRequest.ts index 6eb7370ff8..2921adbfcc 100644 --- a/specification/_global/count/CountRequest.ts +++ b/specification/_global/count/CountRequest.ts @@ -26,10 +26,21 @@ import { Operator } from '@_types/query_dsl/Operator' /** * Count search results. * Get the number of documents matching a query. + * + * The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. + * The latter must be nested in a `query` key, which is the same as the search API. + * + * The count API supports multi-target syntax. 
You can run a single count API search across multiple data streams and indices. + * + * The operation is broadcast across all shards. + * For each shard ID group, a replica is chosen and the search is run against it. + * This means that replicas increase the scalability of the count. * @rest_spec_name count * @availability stack stability=stable * @availability serverless stability=stable visibility=public * @doc_tag search + * @doc_id search-count + * @index_privileges read */ export interface Request extends RequestBase { urls: [ @@ -44,8 +55,8 @@ export interface Request extends RequestBase { ] path_parts: { /** - * Comma-separated list of data streams, indices, and aliases to search. - * Supports wildcards (`*`). + * A comma-separated list of data streams, indices, and aliases to search. + * It supports wildcards (`*`). * To search all data streams and indices, omit this parameter or use `*` or `_all`. */ index?: Indices @@ -54,40 +65,43 @@ export interface Request extends RequestBase { /** * If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. * This behavior applies even if the request targets other open indices. + * For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. * @server_default true */ allow_no_indices?: boolean /** - * Analyzer to use for the query string. - * This parameter can only be used when the `q` query string parameter is specified. + * The analyzer to use for the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ analyzer?: string /** * If `true`, wildcard and prefix queries are analyzed. - * This parameter can only be used when the `q` query string parameter is specified. + * This parameter can be used only when the `q` query string parameter is specified. 
* @server_default false */ analyze_wildcard?: boolean /** * The default operator for query string query: `AND` or `OR`. - * This parameter can only be used when the `q` query string parameter is specified. + * This parameter can be used only when the `q` query string parameter is specified. + * @server_default OR */ default_operator?: Operator /** - * Field to use as default where no field prefix is given in the query string. - * This parameter can only be used when the `q` query string parameter is specified. + * The field to use as a default when no field prefix is given in the query string. + * This parameter can be used only when the `q` query string parameter is specified. */ df?: string /** - * Type of index that wildcard patterns can match. + * The type of index that wildcard patterns can match. * If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. - * Supports comma-separated values, such as `open,hidden`. + * It supports comma-separated values, such as `open,hidden`. * @server_default open */ expand_wildcards?: ExpandWildcards /** - * If `true`, concrete, expanded or aliased indices are ignored when frozen. + * If `true`, concrete, expanded, or aliased indices are ignored when frozen. * @server_default true + * @deprecated 7.16.0 */ ignore_throttled?: boolean /** @@ -97,36 +111,43 @@ export interface Request extends RequestBase { ignore_unavailable?: boolean /** * If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. + * This parameter can be used only when the `q` query string parameter is specified. * @server_default false */ lenient?: boolean /** - * Sets the minimum `_score` value that documents must have to be included in the result. + * The minimum `_score` value that documents must have to be included in the result. */ min_score?: double /** - * Specifies the node or shard the operation should be performed on. 
- * Random by default. + * The node or shard the operation should be performed on. + * By default, it is random. */ preference?: string /** - * Custom value used to route operations to a specific shard. + * A custom value used to route operations to a specific shard. */ routing?: Routing /** - * Maximum number of documents to collect for each shard. + * The maximum number of documents to collect for each shard. * If a query reaches this limit, Elasticsearch terminates the query early. * Elasticsearch collects documents before sorting. + * + * IMPORTANT: Use with caution. + * Elasticsearch applies this parameter to each shard handling the request. + * When possible, let Elasticsearch perform early termination automatically. + * Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. */ terminate_after?: long /** - * Query in the Lucene query string syntax. + * The query in Lucene query string syntax. */ q?: string } body: { /** * Defines the search definition using the Query DSL. + * The query is optional, and when not provided, it will use `match_all` to count all the documents. */ query?: QueryContainer } diff --git a/specification/_global/count/examples/200_response/CountResponseExample1.yaml b/specification/_global/count/examples/200_response/CountResponseExample1.yaml new file mode 100644 index 0000000000..05f9999dbf --- /dev/null +++ b/specification/_global/count/examples/200_response/CountResponseExample1.yaml @@ -0,0 +1,14 @@ +# summary: +description: A successful response from `GET /my-index-000001/_count?q=user:kimchy`. 
+# type: response +# response_code: 200 +value: |- + { + "count": 1, + "_shards": { + "total": 1, + "successful": 1, + "skipped": 0, + "failed": 0 + } + } diff --git a/specification/_global/count/examples/request/CountRequestExample1.yaml b/specification/_global/count/examples/request/CountRequestExample1.yaml new file mode 100644 index 0000000000..2c84775812 --- /dev/null +++ b/specification/_global/count/examples/request/CountRequestExample1.yaml @@ -0,0 +1,13 @@ +# summary: +# method_request: GET /my-index-000001/_count +description: > + Run `GET /my-index-000001/_count?q=user:kimchy`. + Alternatively, run `GET /my-index-000001/_count` with the same query in the request body. + Both requests count the number of documents in `my-index-000001` with a `user.id` of `kimchy`. +# type: request +value: |- + { + "query" : { + "term" : { "user.id" : "kimchy" } + } + } diff --git a/specification/_types/Errors.ts b/specification/_types/Errors.ts index ec5dd15215..62ce8eeee6 100644 --- a/specification/_types/Errors.ts +++ b/specification/_types/Errors.ts @@ -36,7 +36,7 @@ export class ErrorCause */ type: string /** - * A human-readable explanation of the error, in english + * A human-readable explanation of the error, in English. */ reason?: string /** diff --git a/specification/_types/Stats.ts b/specification/_types/Stats.ts index 0d368edfb1..0455fca1d9 100644 --- a/specification/_types/Stats.ts +++ b/specification/_types/Stats.ts @@ -52,13 +52,16 @@ export class ClusterDetails { } export class ShardStatistics { + /** + * The number of shards the operation or search attempted to run on but failed. + */ failed: uint /** - * Indicates how many shards have successfully run the search. + * The number of shards the operation or search succeeded on. */ successful: uint /** - * Indicates how many shards the search will run on overall. + * The number of shards the operation or search will run on overall. */ total: uint failures?: ShardFailure[]