
Commit

Rename es to client in examples for consistency (#2486)
iuliaferoli authored Mar 26, 2024
1 parent 135c60a commit 794341d
Showing 12 changed files with 80 additions and 78 deletions.
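
For context, the change is purely a naming one: every example that previously bound the Elasticsearch client instance to `es` now binds it to `client`. A minimal before/after sketch of the pattern (the host URL below is a placeholder, not taken from this commit):

[source,python]
------------------------------------
from elasticsearch import Elasticsearch

# Before this commit the documentation examples read:
#   es = Elasticsearch("https://localhost:9200")
#   resp = es.info()

# After this commit the same examples read:
client = Elasticsearch("https://localhost:9200")
resp = client.info()
print(resp)
------------------------------------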
2 changes: 1 addition & 1 deletion docs/examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc
@@ -2,6 +2,6 @@

[source, python]
----
-resp = es.info()
+resp = client.info()
print(resp)
----
50 changes: 25 additions & 25 deletions docs/guide/configuration.asciidoc
@@ -20,7 +20,7 @@ If you have your own CA bundle to use you can configure via the `ca_certs` param

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
"https://...",
ca_certs="/path/to/certs.pem"
)
@@ -32,7 +32,7 @@ In Python 3.9 and earlier only the leaf certificate will be verified but in Pyth

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
"https://...",
ssl_assert_fingerprint=(
"315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3"
@@ -44,7 +44,7 @@ To disable certificate verification use the `verify_certs=False` parameter. This

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
"https://...",
verify_certs=False
)
@@ -59,7 +59,7 @@ Configuring the minimum TLS version to connect to is done via the `ssl_version`
------------------------------------
import ssl
-es = Elasticsearch(
+client = Elasticsearch(
...,
ssl_version=ssl.TLSVersion.TLSv1_2
)
@@ -72,7 +72,7 @@ Elasticsearch can be configured to authenticate clients via TLS client certifica

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
client_cert="/path/to/cert.pem",
client_key="/path/to/key.pem",
@@ -93,7 +93,7 @@ import ssl
ctx = ssl.create_default_context()
ctx.load_verify_locations(...)
-es = Elasticsearch(
+client = Elasticsearch(
...,
ssl_context=ctx
)
@@ -110,7 +110,7 @@ the `Accept-Encoding: gzip` HTTP header. By default compression is disabled.

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
http_compress=True # Enable compression!
)
@@ -130,13 +130,13 @@ Setting `request_timeout` to `None` will disable timeouts.

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
request_timeout=10 # 10 second timeout
)
# Search request will timeout in 5 seconds
-es.options(request_timeout=5).search(...)
+client.options(request_timeout=5).search(...)
------------------------------------

[discrete]
@@ -148,7 +148,7 @@ In the example below there are three different configurable timeouts for the `cl

[source,python]
------------------------------------
-es.options(
+client.options(
# Amount of time to wait for an HTTP response to start.
request_timeout=30
).cluster.health(
@@ -170,13 +170,13 @@ The maximum number of retries per request can be configured via the `max_retries

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
max_retries=5
)
# For this API request we disable retries with 'max_retries=0'
-es.options(max_retries=0).index(
+client.options(max_retries=0).index(
index="blogs",
document={
"title": "..."
@@ -191,11 +191,11 @@ Connection errors are automatically retried if retries are enabled. Retrying req

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
retry_on_timeout=True
)
-es.options(retry_on_timeout=False).info()
+client.options(retry_on_timeout=False).info()
------------------------------------

[discrete]
@@ -205,13 +205,13 @@ By default if retries are enabled `retry_on_status` is set to `(429, 502, 503, 5

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
retry_on_status=()
)
# Retry this API on '500 Internal Error' statuses
-es.options(retry_on_status=[500]).index(
+client.options(retry_on_status=[500]).index(
index="blogs",
document={
"title": "..."
@@ -228,14 +228,14 @@ A good example where this is useful is setting up or cleaning up resources in a

[source,python]
------------------------------------
-es = Elasticsearch(...)
+client = Elasticsearch(...)
# API request is robust against the index not existing:
-resp = es.options(ignore_status=404).indices.delete(index="delete-this")
+resp = client.options(ignore_status=404).indices.delete(index="delete-this")
resp.meta.status # Can be either '2XX' or '404'
# API request is robust against the index already existing:
-resp = es.options(ignore_status=[400]).indices.create(
+resp = client.options(ignore_status=[400]).indices.create(
index="create-this",
mapping={
"properties": {"field": {"type": "integer"}}
@@ -322,7 +322,7 @@ You can specify a node selector pattern via the `node_selector_class` parameter.

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
node_selector_class="round_robin"
)
@@ -337,7 +337,7 @@ from elastic_transport import NodeSelector
class CustomSelector(NodeSelector):
def select(nodes): ...
-es = Elasticsearch(
+client = Elasticsearch(
...,
node_selector_class=CustomSelector
)
@@ -374,7 +374,7 @@ class JsonSetSerializer(JsonSerializer):
return list(data)
return super().default(data)
-es = Elasticsearch(
+client = Elasticsearch(
...,
# Serializers are a mapping of 'mimetype' to Serializer class.
serializers={"application/json": JsonSetSerializer()}
@@ -397,7 +397,7 @@ For all of the built-in HTTP node implementations like `urllib3`, `requests`, an
------------------------------------
from elasticsearch import Elasticsearch
-es = Elasticsearch(
+client = Elasticsearch(
...,
node_class="requests"
)
@@ -413,7 +413,7 @@ from elastic_transport import Urllib3HttpNode
class CustomHttpNode(Urllib3HttpNode):
...
-es = Elasticsearch(
+client = Elasticsearch(
...
node_class=CustomHttpNode
)
@@ -426,7 +426,7 @@ Each node contains its own pool of HTTP connections to allow for concurrent requ

[source,python]
------------------------------------
-es = Elasticsearch(
+client = Elasticsearch(
...,
connections_per_node=5
)
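
The hunks above only rename the variable, but read in isolation they can be hard to follow. A short consolidated sketch of how the renamed configuration examples compose under the new `client` naming; the host, certificate path, and index name are placeholders and the option values are illustrative rather than recommended defaults:

[source,python]
------------------------------------
from elasticsearch import Elasticsearch

client = Elasticsearch(
    "https://localhost:9200",           # placeholder host
    ca_certs="/path/to/http_ca.crt",    # placeholder CA bundle
    request_timeout=10,                 # client-wide request timeout
    max_retries=5,                      # client-wide retry budget
    retry_on_timeout=True,
)

# Per-request overrides go through .options() on the renamed client object
resp = client.options(
    request_timeout=5,      # tighter timeout for this call only
    ignore_status=[404],    # tolerate the index not existing
).indices.delete(index="delete-this")
print(resp.meta.status)     # 2XX on success, 404 if the index was missing
------------------------------------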
12 changes: 6 additions & 6 deletions docs/guide/connecting.asciidoc
@@ -251,20 +251,20 @@ or via the per-request `.options()` method:
from elasticsearch import Elasticsearch
# Authenticate from the constructor
-es = Elasticsearch(
+client = Elasticsearch(
"https://localhost:9200",
ca_certs="/path/to/http_ca.crt",
basic_auth=("username", "password")
)
# Authenticate via the .options() method:
-es.options(
+client.options(
basic_auth=("username", "password")
).indices.get(index="*")
# You can persist the authenticated client to use
# later or use for multiple API calls:
-auth_client = es.options(api_key="api_key")
+auth_client = client.options(api_key="api_key")
for i in range(10):
auth_client.index(
index="example-index",
@@ -285,7 +285,7 @@ username and password within a tuple:
from elasticsearch import Elasticsearch
# Adds the HTTP header 'Authorization: Basic <base64 username:password>'
-es = Elasticsearch(
+client = Elasticsearch(
"https://localhost:9200",
ca_certs="/path/to/http_ca.crt",
basic_auth=("username", "password")
@@ -307,7 +307,7 @@ and https://www.elastic.co/guide/en/elasticsearch/reference/{branch}/security-ap
from elasticsearch import Elasticsearch
# Adds the HTTP header 'Authorization: Bearer token-value'
-es = Elasticsearch(
+client = Elasticsearch(
"https://localhost:9200",
bearer_auth="token-value"
)
@@ -328,7 +328,7 @@ or https://www.elastic.co/guide/en/kibana/current/api-keys.html#create-api-key[K
from elasticsearch import Elasticsearch
# Adds the HTTP header 'Authorization: ApiKey <base64 api_key.id:api_key.api_key>'
-es = Elasticsearch(
+client = Elasticsearch(
"https://localhost:9200",
ca_certs="/path/to/http_ca.crt",
api_key="api_key",
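
As with the configuration examples, only the variable name changes here; for completeness, a brief sketch of persisting an authenticated view of the renamed client (host, certificate path, and credentials are placeholders):

[source,python]
------------------------------------
from elasticsearch import Elasticsearch

client = Elasticsearch(
    "https://localhost:9200",          # placeholder host
    ca_certs="/path/to/http_ca.crt",   # placeholder CA bundle
)

# .options() returns a client carrying the credentials, reusable across calls
auth_client = client.options(basic_auth=("username", "password"))
print(auth_client.indices.get(index="*"))
------------------------------------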
10 changes: 5 additions & 5 deletions docs/guide/examples.asciidoc
@@ -22,14 +22,14 @@ To index a document, you need to specify three pieces of information: `index`,
----------------------------
from datetime import datetime
from elasticsearch import Elasticsearch
-es = Elasticsearch('https://localhost:9200')
+client = Elasticsearch('https://localhost:9200')
doc = {
'author': 'author_name',
'text': 'Interesting content...',
'timestamp': datetime.now(),
}
-resp = es.index(index="test-index", id=1, document=doc)
+resp = client.index(index="test-index", id=1, document=doc)
print(resp['result'])
----------------------------

@@ -42,7 +42,7 @@ To get a document, you need to specify its `index` and `id`:

[source,py]
----------------------------
-resp = es.get(index="test-index", id=1)
+resp = client.get(index="test-index", id=1)
print(resp['_source'])
----------------------------

@@ -55,7 +55,7 @@ You can perform the refresh operation on an index:

[source,py]
----------------------------
-es.indices.refresh(index="test-index")
+client.indices.refresh(index="test-index")
----------------------------


@@ -67,7 +67,7 @@ The `search()` method returns results that are matching a query:

[source,py]
----------------------------
-resp = es.search(index="test-index", query={"match_all": {}})
+resp = client.search(index="test-index", query={"match_all": {}})
print("Got %d Hits:" % resp['hits']['total']['value'])
for hit in resp['hits']['hits']:
print("%(timestamp)s %(author)s: %(text)s" % hit["_source"])
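
Pulling the renamed snippets of this file together, a minimal end-to-end run could look like the following; the index name, document contents, and host are taken from the examples above and are placeholders:

[source,py]
----------------------------
from datetime import datetime
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")

# Index a document, then refresh so it becomes visible to search
doc = {
    "author": "author_name",
    "text": "Interesting content...",
    "timestamp": datetime.now(),
}
client.index(index="test-index", id=1, document=doc)
client.indices.refresh(index="test-index")

# Fetch it back by id and via a match_all search
print(client.get(index="test-index", id=1)["_source"])
resp = client.search(index="test-index", query={"match_all": {}})
print("Got %d Hits:" % resp["hits"]["total"]["value"])
----------------------------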
8 changes: 4 additions & 4 deletions docs/guide/integrations.asciidoc
@@ -23,8 +23,8 @@ The opaque ID can be set via the `opaque_id` parameter via the client `.options(

[source,python]
------------------------------------
-es = Elasticsearch(...)
-es.options(opaque_id="request-id-...").search(...)
+client = Elasticsearch(...)
+client.options(opaque_id="request-id-...").search(...)
------------------------------------


@@ -41,8 +41,8 @@ If we write a script that has a type error like using `request_timeout` with a `
# script.py
from elasticsearch import Elasticsearch
-es = Elasticsearch(...)
-es.options(
+client = Elasticsearch(...)
+client.options(
request_timeout="5" # type error!
).search(...)
6 changes: 3 additions & 3 deletions docs/guide/overview.asciidoc
@@ -42,14 +42,14 @@ Simple use-case:
>>> from elasticsearch import Elasticsearch
# Connect to 'http://localhost:9200'
->>> es = Elasticsearch("http://localhost:9200")
+>>> client = Elasticsearch("http://localhost:9200")
# Datetimes will be serialized:
->>> es.index(index="my-index-000001", id=42, document={"any": "data", "timestamp": datetime.now()})
+>>> client.index(index="my-index-000001", id=42, document={"any": "data", "timestamp": datetime.now()})
{'_id': '42', '_index': 'my-index-000001', '_type': 'test-type', '_version': 1, 'ok': True}
# ...but not deserialized
->>> es.get(index="my-index-000001", id=42)['_source']
+>>> client.get(index="my-index-000001", id=42)['_source']
{'any': 'data', 'timestamp': '2013-05-12T19:45:31.804229'}
------------------------------------

