From 2e4931fb5c41d1cd7ce0757e7dea8d32ed09b82a Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 4 Dec 2024 10:20:57 +1100
Subject: [PATCH 01/45] Mute org.elasticsearch.xpack.test.rest.XPackRestIT
 test {p0=esql/60_usage/Basic ESQL usage output (telemetry) non-snapshot
 version} #117862

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index e26f21eb1492..8eb0e902135e 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -412,3 +412,6 @@ tests:
 - class: org.elasticsearch.xpack.ml.integration.RegressionIT
   method: testTwoJobsWithSameRandomizeSeedUseSameTrainingSet
   issue: https://github.com/elastic/elasticsearch/issues/117805
+- class: org.elasticsearch.xpack.test.rest.XPackRestIT
+  method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry) non-snapshot version}
+  issue: https://github.com/elastic/elasticsearch/issues/117862

From e137a149b4b02d07aa9640f1ecedf25da3a2c9ec Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 3 Dec 2024 15:43:52 -0800
Subject: [PATCH 02/45] Move common system properties for test clusters to
 default provider (#117928) (#117941)

This commit moves common system properties needed for test clusters to
a SystemPropertyProvider which can be reused and extended by serverless.
---
 .../local/AbstractLocalSpecBuilder.java       |  2 +-
 .../local/DefaultLocalClusterSpecBuilder.java |  4 +---
 .../local/DefaultSystemPropertyProvider.java  | 23 +++++++++++++++++++
 .../test/cluster/local/LocalSpecBuilder.java  |  2 +-
 4 files changed, 26 insertions(+), 5 deletions(-)
 create mode 100644 test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSystemPropertyProvider.java

diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java
index 78e3727f9de3..c3c4f3fe825e 100644
--- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java
+++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalSpecBuilder.java
@@ -214,7 +214,7 @@ public T systemProperty(String key, Supplier<String> supplier) {
         return cast(this);
     }
 
-    public T systemProperty(SystemPropertyProvider systemPropertyProvider) {
+    public T systemProperties(SystemPropertyProvider systemPropertyProvider) {
         this.systemPropertyProviders.add(systemPropertyProvider);
         return cast(this);
     }
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java
index 1d7cc76be165..a23a3ba9e453 100644
--- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java
+++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultLocalClusterSpecBuilder.java
@@ -19,10 +19,8 @@ public final class DefaultLocalClusterSpecBuilder extends AbstractLocalClusterSp
 
     public DefaultLocalClusterSpecBuilder() {
         super();
-        this.apply(
-            c -> c.systemProperty("ingest.geoip.downloader.enabled.default", "false").systemProperty("tests.testfeatures.enabled", "true")
-        );
         this.apply(new FipsEnabledClusterConfigProvider());
+        this.systemProperties(new DefaultSystemPropertyProvider());
         this.settings(new DefaultSettingsProvider());
         this.environment(new DefaultEnvironmentProvider());
         this.rolesFile(Resource.fromClasspath("default_test_roles.yml"));
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSystemPropertyProvider.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSystemPropertyProvider.java
new file mode 100644
index 000000000000..62bbd10bcf85
--- /dev/null
+++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/DefaultSystemPropertyProvider.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.test.cluster.local;
+
+import org.elasticsearch.test.cluster.SystemPropertyProvider;
+
+import java.util.Map;
+
+import static java.util.Map.entry;
+
+public class DefaultSystemPropertyProvider implements SystemPropertyProvider {
+    @Override
+    public Map<String, String> get(LocalClusterSpec.LocalNodeSpec nodeSpec) {
+        return Map.ofEntries(entry("ingest.geoip.downloader.enabled.default", "false"), entry("tests.testfeatures.enabled", "true"));
+    }
+}
diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java
index cd9f81a98cb0..1c9ac8a0af6c 100644
--- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java
+++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/LocalSpecBuilder.java
@@ -137,7 +137,7 @@ interface LocalSpecBuilder<T extends LocalSpecBuilder<?>> {
     /**
      * Register a {@link SystemPropertyProvider}.
     */
-    T systemProperty(SystemPropertyProvider systemPropertyProvider);
+    T systemProperties(SystemPropertyProvider systemPropertyProvider);
 
    /**
     * Adds an additional command line argument to node JVM arguments.
From 122f8059272c90b481669e144de8bb3003b11f44 Mon Sep 17 00:00:00 2001
From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com>
Date: Wed, 4 Dec 2024 11:06:22 +1100
Subject: [PATCH 03/45] Mute
 org.elasticsearch.xpack.security.authc.ldap.UserAttributeGroupsResolverTests
 org.elasticsearch.xpack.security.authc.ldap.UserAttributeGroupsResolverTests
 #116537

---
 muted-tests.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index 8eb0e902135e..4c542aa78591 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -415,3 +415,5 @@ tests:
 - class: org.elasticsearch.xpack.test.rest.XPackRestIT
   method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry) non-snapshot version}
   issue: https://github.com/elastic/elasticsearch/issues/117862
+- class: org.elasticsearch.xpack.security.authc.ldap.UserAttributeGroupsResolverTests
+  issue: https://github.com/elastic/elasticsearch/issues/116537

From c47162ca81b7f405317ce788ace36b3017fd4eef Mon Sep 17 00:00:00 2001
From: Stanislav Malyshev
Date: Tue, 3 Dec 2024 19:21:58 -0700
Subject: [PATCH 04/45] Fix reconstituting version string from components
 (#117213) (#117950)

* Fix reconstituting version string from components

Co-authored-by: Joe Gallo

(cherry picked from commit 28eda97ddd48fc23f6d21d3f5b8a68f977f9627f)
---
 docs/changelog/117213.yaml                    |  6 +++
 .../ingest/useragent/UserAgentProcessor.java  | 47 +++++++++----------
 .../useragent/UserAgentProcessorTests.java    | 17 +++++++
 3 files changed, 45 insertions(+), 25 deletions(-)
 create mode 100644 docs/changelog/117213.yaml

diff --git a/docs/changelog/117213.yaml b/docs/changelog/117213.yaml
new file mode 100644
index 000000000000..3b4cd0cee966
--- /dev/null
+++ b/docs/changelog/117213.yaml
@@ -0,0 +1,6 @@
+pr: 117213
+summary: Fix reconstituting version string from components
+area: Ingest Node
+type: bug
+issues:
+ - 116950

diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java
index 6224bb4d502d..742b4c8c7e8e 100644
--- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java
+++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentProcessor.java
@@ -9,6 +9,7 @@
 
 package org.elasticsearch.ingest.useragent;
 
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.DeprecationCategory;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.util.Maps;
@@ -98,19 +99,8 @@ public IngestDocument execute(IngestDocument ingestDocument) {
                     }
                     break;
                 case VERSION:
-                    StringBuilder version = new StringBuilder();
                     if (uaClient.userAgent() != null && uaClient.userAgent().major() != null) {
-                        version.append(uaClient.userAgent().major());
-                        if (uaClient.userAgent().minor() != null) {
-                            version.append(".").append(uaClient.userAgent().minor());
-                            if (uaClient.userAgent().patch() != null) {
-                                version.append(".").append(uaClient.userAgent().patch());
-                                if (uaClient.userAgent().build() != null) {
-                                    version.append(".").append(uaClient.userAgent().build());
-                                }
-                            }
-                        }
-                        uaDetails.put("version", version.toString());
+                        uaDetails.put("version", versionToString(uaClient.userAgent()));
                     }
                     break;
                 case OS:
@@ -118,20 +108,10 @@ public IngestDocument execute(IngestDocument ingestDocument) {
                         Map<String, String> osDetails = Maps.newMapWithExpectedSize(3);
                         if (uaClient.operatingSystem().name() != null) {
osDetails.put("name", uaClient.operatingSystem().name()); - StringBuilder sb = new StringBuilder(); if (uaClient.operatingSystem().major() != null) { - sb.append(uaClient.operatingSystem().major()); - if (uaClient.operatingSystem().minor() != null) { - sb.append(".").append(uaClient.operatingSystem().minor()); - if (uaClient.operatingSystem().patch() != null) { - sb.append(".").append(uaClient.operatingSystem().patch()); - if (uaClient.operatingSystem().build() != null) { - sb.append(".").append(uaClient.operatingSystem().build()); - } - } - } - osDetails.put("version", sb.toString()); - osDetails.put("full", uaClient.operatingSystem().name() + " " + sb.toString()); + String version = versionToString(uaClient.operatingSystem()); + osDetails.put("version", version); + osDetails.put("full", uaClient.operatingSystem().name() + " " + version); } uaDetails.put("os", osDetails); } @@ -163,6 +143,23 @@ public IngestDocument execute(IngestDocument ingestDocument) { return ingestDocument; } + private static String versionToString(final UserAgentParser.VersionedName version) { + final StringBuilder versionString = new StringBuilder(); + if (Strings.hasLength(version.major())) { + versionString.append(version.major()); + if (Strings.hasLength(version.minor())) { + versionString.append(".").append(version.minor()); + if (Strings.hasLength(version.patch())) { + versionString.append(".").append(version.patch()); + if (Strings.hasLength(version.build())) { + versionString.append(".").append(version.build()); + } + } + } + } + return versionString.toString(); + } + @Override public String getType() { return TYPE; diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java index d9459404987d..df023f4d1342 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/UserAgentProcessorTests.java @@ -331,4 +331,21 @@ public void testExtractDeviceTypeDisabled() { device.put("name", "Other"); assertThat(target.get("device"), is(device)); } + + // From https://github.com/elastic/elasticsearch/issues/116950 + @SuppressWarnings("unchecked") + public void testFirefoxVersion() { + Map document = new HashMap<>(); + document.put("source_field", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:128.0) Gecko/20100101 Firefox/128.0"); + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); + + processor.execute(ingestDocument); + Map data = ingestDocument.getSourceAndMetadata(); + + assertThat(data, hasKey("target_field")); + Map target = (Map) data.get("target_field"); + + assertThat(target.get("name"), is("Firefox")); + assertThat(target.get("version"), is("128.0")); + } } From 430741fb3ab0d96419c452a757733787f24b2493 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Wed, 4 Dec 2024 09:27:32 +0100 Subject: [PATCH 05/45] Fix false positive date detection with trailing dot (#116953) (#117960) --- docs/changelog/116953.yaml | 6 ++++++ .../elasticsearch/common/time/EpochTime.java | 5 ++++- .../common/time/DateFormattersTests.java | 11 +++++++++++ .../index/mapper/DynamicMappingTests.java | 17 +++++++++++++++++ 4 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/116953.yaml diff --git a/docs/changelog/116953.yaml b/docs/changelog/116953.yaml new file mode 
100644 index 000000000000..33616510d8fd --- /dev/null +++ b/docs/changelog/116953.yaml @@ -0,0 +1,6 @@ +pr: 116953 +summary: Fix false positive date detection with trailing dot +area: Mapping +type: bug +issues: + - 116946 diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index 5693ce50dbe0..c53c9d0c03df 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -246,7 +246,10 @@ public long getFrom(TemporalAccessor temporal) { .toFormatter(Locale.ROOT); // this supports milliseconds ending in dot - private static final DateTimeFormatter MILLISECONDS_FORMATTER2 = new DateTimeFormatterBuilder().append(MILLISECONDS_FORMATTER1) + private static final DateTimeFormatter MILLISECONDS_FORMATTER2 = new DateTimeFormatterBuilder().optionalStart() + .appendText(NEGATIVE_SIGN_FIELD, Map.of(-1L, "-")) // field is only created in the presence of a '-' char. + .optionalEnd() + .appendValue(UNSIGNED_MILLIS, 1, 19, SignStyle.NOT_NEGATIVE) .appendLiteral('.') .toFormatter(Locale.ROOT); diff --git a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index dfe3cf10fd49..ca1ef6cba91e 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -242,6 +242,17 @@ public void testEpochMillisParser() { assertThat(formatter.format(instant), is("-0.12345")); assertThat(Instant.from(formatter.parse(formatter.format(instant))), is(instant)); } + { + Instant instant = Instant.from(formatter.parse("12345.")); + assertThat(instant.getEpochSecond(), is(12L)); + assertThat(instant.getNano(), is(345_000_000)); + assertThat(formatter.format(instant), is("12345")); + assertThat(Instant.from(formatter.parse(formatter.format(instant))), is(instant)); + } + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("12345.0.")); + assertThat(e.getMessage(), is("failed to parse date field [12345.0.] 
with format [epoch_millis]")); + } } /** diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 5df2503a31c1..3968498b71b3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -597,6 +597,23 @@ private void doTestDefaultFloatingPointMappings(DocumentMapper mapper, XContentB assertThat(((FieldMapper) update.getRoot().getMapper("quux")).fieldType().typeName(), equalTo("float")); } + public void testDateDetectionEnabled() throws Exception { + MapperService mapperService = createMapperService(topMapping(b -> b.field("date_detection", true))); + + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> { + b.field("date", "2024-11-18"); + b.field("no_date", "128.0."); + })); + assertNotNull(doc.dynamicMappingsUpdate()); + merge(mapperService, dynamicMapping(doc.dynamicMappingsUpdate())); + + Mapper mapper = mapperService.documentMapper().mappers().getMapper("date"); + assertThat(mapper.typeName(), equalTo("date")); + + mapper = mapperService.documentMapper().mappers().getMapper("no_date"); + assertThat(mapper.typeName(), equalTo("text")); + } + public void testNumericDetectionEnabled() throws Exception { MapperService mapperService = createMapperService(topMapping(b -> b.field("numeric_detection", true))); From 7853e1270406ba2faccb76a5a18f253f6c9b1f26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 4 Dec 2024 09:43:02 +0100 Subject: [PATCH 06/45] Remove test mute for OldRepositoryAccessIT testOldRepoAccess (#117845) This has been fixed with #117649 on "main" and the changes causing this haven't been backported to 8.x. The test was muted due to occasional timeouts that are unrelated that should have been fixed in the meantime. 
---
 muted-tests.yml | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 4c542aa78591..ec7bb6ab4188 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -326,9 +326,6 @@ tests:
 - class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests
   method: testRetryPointInTime
   issue: https://github.com/elastic/elasticsearch/issues/117116
-- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT
-  method: testOldRepoAccess
-  issue: https://github.com/elastic/elasticsearch/issues/115631
 - class: org.elasticsearch.xpack.inference.InferenceRestIT
   method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint}
   issue: https://github.com/elastic/elasticsearch/issues/117027

From 6375bb02a686a669a4a884ca3e5e1e1d7e321aa5 Mon Sep 17 00:00:00 2001
From: Jan Kuipers <148754765+jan-elastic@users.noreply.github.com>
Date: Wed, 4 Dec 2024 11:05:11 +0100
Subject: [PATCH 07/45] Document ES|QL categorize limitations (#117892)
 (#117965)

* Document ES|QL categorize limitations

* Update x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java

---------

Co-authored-by: Alexander Spies
---
 .../esql/functions/description/categorize.asciidoc          | 6 ++++++
 .../xpack/esql/expression/function/grouping/Categorize.java | 6 ++++++
 2 files changed, 12 insertions(+)

diff --git a/docs/reference/esql/functions/description/categorize.asciidoc b/docs/reference/esql/functions/description/categorize.asciidoc
index a5e8e2d50757..32af0051e91c 100644
--- a/docs/reference/esql/functions/description/categorize.asciidoc
+++ b/docs/reference/esql/functions/description/categorize.asciidoc
@@ -3,3 +3,9 @@
 *Description*
 
 Groups text messages into categories of similarly formatted text values.
+
+`CATEGORIZE` has the following limitations:
+
+* can't be used within other expressions
+* can't be used with multiple groupings
+* can't be used or referenced within aggregations
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java
index ca0447ce11ec..e2c04ecb15b5 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Categorize.java
@@ -48,6 +48,12 @@ public class Categorize extends GroupingFunction implements Validatable {
     @FunctionInfo(
         returnType = "keyword",
         description = "Groups text messages into categories of similarly formatted text values.",
+        detailedDescription = """
+            `CATEGORIZE` has the following limitations:
+
+            * can't be used within other expressions
+            * can't be used with multiple groupings
+            * can't be used or referenced within aggregate functions""",
         examples = {
             @Example(
                 file = "docs",

From 3c988de6d3a90aa02ce675aec2c6704ac564ac01 Mon Sep 17 00:00:00 2001
From: Jakob Reiter
Date: Wed, 4 Dec 2024 11:56:23 +0100
Subject: [PATCH 08/45] Update troubleshooting-unstable-cluster.asciidoc
 (#117887)

Added missing word
---
 .../troubleshooting/troubleshooting-unstable-cluster.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc
index cbb35f773103..e47b85aa9954 100644
--- a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc
+++ b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc
@@ -126,7 +126,7 @@ repeatedly-dropped connections will severely affect its operation.
 The connections from the elected master node to every other node in the cluster
 are particularly important. The elected master never spontaneously closes its
 outbound connections to other nodes. Similarly, once an inbound connection is
-fully established, a node never spontaneously it unless the node is shutting
+fully established, a node never spontaneously closes it unless the node is shutting
 down.
 
 If you see a node unexpectedly leave the cluster with the `disconnected`

From 516688a36b213a4b4ca27b61941982d6f4c10bb3 Mon Sep 17 00:00:00 2001
From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com>
Date: Wed, 4 Dec 2024 12:59:42 +0200
Subject: [PATCH 09/45] Doc parsing error logging with throttling (#117828)
 (#117968)

* Throttled doc parsing error logging

* add test

* move throttler to separate class

* small changes

* refactor unittest

* fix test
---
 .../index/mapper/DocumentMapper.java          | 10 +--
 .../index/mapper/IntervalThrottler.java       | 66 +++++++++++++++++++
 .../index/mapper/DocumentMapperTests.java     | 37 +++++++++++
 .../index/mapper/IntervalThrottlerTests.java  | 27 ++++++++
 4 files changed, 136 insertions(+), 4 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/IntervalThrottler.java
 create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/IntervalThrottlerTests.java

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
index 1c9321737ab5..c56885eded38 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
@@ -21,6 +21,8 @@ import java.util.List;
 
 public class DocumentMapper {
+    static final NodeFeature INDEX_SORTING_ON_NESTED = new NodeFeature("mapper.index_sorting_on_nested");
+
     private final String type;
     private final CompressedXContent mappingSource;
     private final MappingLookup mappingLookup;
@@ -29,8 +31,6 @@ public class DocumentMapper {
     private final IndexVersion indexVersion;
     private final Logger logger;
 
-    static final NodeFeature INDEX_SORTING_ON_NESTED = new NodeFeature("mapper.index_sorting_on_nested");
-
     /**
      * Create a new {@link DocumentMapper} that holds empty mappings.
      * @param mapperService the mapper service that holds the needed components
@@ -72,9 +72,11 @@ public static DocumentMapper createEmpty(MapperService mapperService) {
             : "provided source [" + source + "] differs from mapping [" + mapping.toCompressedXContent() + "]";
     }
 
-    private void maybeLogDebug(Exception ex) {
+    private void maybeLog(Exception ex) {
         if (logger.isDebugEnabled()) {
             logger.debug("Error while parsing document: " + ex.getMessage(), ex);
+        } else if (IntervalThrottler.DOCUMENT_PARSING_FAILURE.accept()) {
+            logger.error("Error while parsing document: " + ex.getMessage(), ex);
         }
     }
 
@@ -125,7 +127,7 @@ public ParsedDocument parse(SourceToParse source) throws DocumentParsingExceptio
         try {
             return documentParser.parseDocument(source, mappingLookup);
         } catch (Exception e) {
-            maybeLogDebug(e);
+            maybeLog(e);
             throw e;
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IntervalThrottler.java b/server/src/main/java/org/elasticsearch/index/mapper/IntervalThrottler.java
new file mode 100644
index 000000000000..ffc35d9eaf76
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/mapper/IntervalThrottler.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.index.mapper;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Throttles tracked operations based on a time interval, restricting them to 1 per N seconds.
+ */
+enum IntervalThrottler {
+    DOCUMENT_PARSING_FAILURE(60);
+
+    static final int MILLISECONDS_IN_SECOND = 1000;
+
+    private final Acceptor acceptor;
+
+    IntervalThrottler(long intervalSeconds) {
+        acceptor = new Acceptor(intervalSeconds * MILLISECONDS_IN_SECOND);
+    }
+
+    /**
+     * @return true if the operation gets accepted, false if throttled.
+     */
+    boolean accept() {
+        return acceptor.accept();
+    }
+
+    // Defined separately for testing.
+    static class Acceptor {
+        private final long intervalMillis;
+        private final AtomicBoolean lastAcceptedGuard = new AtomicBoolean(false);
+        private volatile long lastAcceptedTimeMillis = 0;
+
+        Acceptor(long intervalMillis) {
+            this.intervalMillis = intervalMillis;
+        }
+
+        boolean accept() {
+            final long now = System.currentTimeMillis();
+            // Check without guarding first, to reduce contention.
+            if (now - lastAcceptedTimeMillis > intervalMillis) {
+                // Check if another concurrent operation succeeded.
+                if (lastAcceptedGuard.compareAndSet(false, true)) {
+                    try {
+                        // Repeat check under guard protection, so that only one message gets written per interval.
+                        if (now - lastAcceptedTimeMillis > intervalMillis) {
+                            lastAcceptedTimeMillis = now;
+                            return true;
+                        }
+                    } finally {
+                        // Reset guard.
+                        lastAcceptedGuard.set(false);
+                    }
+                }
+            }
+            return false;
+        }
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java
index db9fdead949d..b2ba3d60d217 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java
@@ -9,12 +9,18 @@
 
 package org.elasticsearch.index.mapper;
 
+import org.apache.logging.log4j.Level;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.LogEvent;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.core.KeywordAnalyzer;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.logging.MockAppender;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.IndexVersion;
@@ -493,4 +499,35 @@ public void testDeeplyNestedMapping() throws Exception {
             }
         }
     }
+
+    public void testParsingErrorLogging() throws Exception {
+        MockAppender appender = new MockAppender("mock_appender");
+        appender.start();
+        Logger testLogger = LogManager.getLogger(DocumentMapper.class);
+        Loggers.addAppender(testLogger, appender);
+        Level originalLogLevel = testLogger.getLevel();
+        Loggers.setLevel(testLogger, Level.ERROR);
+
+        try {
+            DocumentMapper doc = createDocumentMapper(mapping(b -> b.startObject("value").field("type", "integer").endObject()));
+
+            DocumentParsingException e = expectThrows(
+                DocumentParsingException.class,
+                () -> doc.parse(source(b -> b.field("value", "foo")))
+            );
+            assertThat(e.getMessage(), containsString("failed to parse field [value] of type [integer] in document with id '1'"));
+            LogEvent event = appender.getLastEventAndReset();
+            if (event != null) {
+                assertThat(event.getMessage().getFormattedMessage(), containsString(e.getMessage()));
+            }
+
+            e = expectThrows(DocumentParsingException.class, () -> doc.parse(source(b -> b.field("value", "foo"))));
+            assertThat(e.getMessage(), containsString("failed to parse field [value] of type [integer] in document with id '1'"));
+            assertThat(appender.getLastEventAndReset(), nullValue());
+        } finally {
+            Loggers.setLevel(testLogger, originalLogLevel);
+            Loggers.removeAppender(testLogger, appender);
+            appender.stop();
+        }
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IntervalThrottlerTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IntervalThrottlerTests.java
new file mode 100644
index 000000000000..25fd61452444
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/mapper/IntervalThrottlerTests.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.elasticsearch.test.ESTestCase;
+
+public class IntervalThrottlerTests extends ESTestCase {
+
+    public void testThrottling() throws Exception {
+        var throttler = new IntervalThrottler.Acceptor(10);
+        assertTrue(throttler.accept());
+        assertFalse(throttler.accept());
+        assertFalse(throttler.accept());
+
+        Thread.sleep(20);
+        assertTrue(throttler.accept());
+        assertFalse(throttler.accept());
+        assertFalse(throttler.accept());
+    }
+}

From 9abeeeaf12366125b7b8aef2be2109ef3587c4a6 Mon Sep 17 00:00:00 2001
From: Luke Whiting
Date: Wed, 4 Dec 2024 12:42:59 +0000
Subject: [PATCH 10/45] [CI] DocsClientYamlTestSuiteIT test
 {yaml=reference/watcher/example-watches/example-watch-clusterstatus/line_137}
 failing - (#115809) (#117354) (#117972)

* Ignore system index access errors in YAML test index cleanup method

* Remove test mute

* Swap the logic back as it was right the first time

* Resolve conflict with latest merge

* Move warning handler into its own method to reduce nesting

(cherry picked from commit cda2fe68a3a549e2443ca7137b4df431af6f2ba7)
---
 .../test/rest/ESRestTestCase.java | 40 +++++++++----------
 1 file changed, 19 insertions(+), 21 deletions(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index 40703e7b6bad..359fbdf7d022 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -26,6 +26,8 @@ import org.apache.http.ssl.SSLContextBuilder;
 import org.apache.http.ssl.SSLContexts;
 import org.apache.http.util.EntityUtils;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.Build;
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.TransportVersions;
@@ -158,6 +160,8 @@ public abstract class ESRestTestCase extends ESTestCase {
 
     private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*");
 
+    private static final Logger SUITE_LOGGER = LogManager.getLogger(ESRestTestCase.class);
+
     /**
      * Convert the entity from a {@link Response} into a map of maps.
      * Consumes the underlying HttpEntity, releasing any resources it may be holding.
@@ -1171,7 +1175,13 @@ protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOE
         }
         final Request deleteRequest = new Request("DELETE", Strings.collectionToCommaDelimitedString(indexPatterns));
         deleteRequest.addParameter("expand_wildcards", "open,closed" + (includeHidden ? ",hidden" : ""));
-        deleteRequest.setOptions(deleteRequest.getOptions().toBuilder().setWarningsHandler(ignoreAsyncSearchWarning()).build());
+
+        // If system index warning, ignore but log
+        // See: https://github.com/elastic/elasticsearch/issues/117099
+        // and: https://github.com/elastic/elasticsearch/issues/115809
+        deleteRequest.setOptions(
+            RequestOptions.DEFAULT.toBuilder().setWarningsHandler(ESRestTestCase::ignoreSystemIndexAccessWarnings)
+        );
         final Response response = adminClient().performRequest(deleteRequest);
         try (InputStream is = response.getEntity().getContent()) {
             assertTrue((boolean) XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true).get("acknowledged"));
@@ -1184,28 +1194,16 @@ protected static void wipeAllIndices(boolean preserveSecurityIndices) throws IOE
         }
     }
 
-    // Make warnings handler that ignores the .async-search warning since .async-search may randomly appear when async requests are slow
-    // See: https://github.com/elastic/elasticsearch/issues/117099
-    protected static WarningsHandler ignoreAsyncSearchWarning() {
-        return new WarningsHandler() {
-            @Override
-            public boolean warningsShouldFailRequest(List<String> warnings) {
-                if (warnings.isEmpty()) {
-                    return false;
-                }
-                return warnings.equals(
-                    List.of(
-                        "this request accesses system indices: [.async-search], "
-                            + "but in a future major version, direct access to system indices will be prevented by default"
-                    )
-                ) == false;
+    private static boolean ignoreSystemIndexAccessWarnings(List<String> warnings) {
+        for (String warning : warnings) {
+            if (warning.startsWith("this request accesses system indices:")) {
+                SUITE_LOGGER.warn("Ignoring system index access warning during test cleanup: {}", warning);
+            } else {
+                return true;
             }
+        }
 
-            @Override
-            public String toString() {
-                return "ignore .async-search warning";
-            }
-        };
+        return false;
     }
 
     protected static void wipeDataStreams() throws IOException {

From 13b296120d485f3b0d7ad43a899f8008a735c308 Mon Sep 17 00:00:00 2001
From: Luke Whiting
Date: Wed, 4 Dec 2024 13:39:55 +0000
Subject: [PATCH 11/45] Watcher history index has too many indexed fields -
 (#71479) (#117701) (#117979)

* Exclude result.input.chain from watcher history index mappings

* Update docs/changelog/117701.yaml

* Fixup text now fields are disabled higher up the chain

* Revert priority change
---
 docs/changelog/117701.yaml                                | 6 ++++++
 .../support/WatcherIndexTemplateRegistryField.java        | 3 ++-
 .../src/main/resources/watch-history-no-ilm.json          | 9 +++++++++
 .../src/main/resources/watch-history.json                 | 9 +++++++++
 .../test/integration/HistoryIntegrationTests.java         | 6 +++---
 5 files changed, 29 insertions(+), 4 deletions(-)
 create mode 100644 docs/changelog/117701.yaml

diff --git a/docs/changelog/117701.yaml b/docs/changelog/117701.yaml
new file mode 100644
index 000000000000..5a72bdeb143e
--- /dev/null
+++ b/docs/changelog/117701.yaml
@@ -0,0 +1,6 @@
+pr: 117701
+summary: Watcher history index has too many indexed fields -
+area: Watcher
+type: bug
+issues:
+ - 71479

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java
index 20dcb84dffe3..098549029e0c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherIndexTemplateRegistryField.java
@@ -22,8 +22,9 @@ public final class WatcherIndexTemplateRegistryField {
     // version 14: move watch history to data stream
     // version 15: remove watches and triggered watches, these are now system indices
     // version 16: change watch history ILM policy
+    // version 17: exclude input chain from indexing
     // Note: if you change this, also inform the kibana team around the watcher-ui
-    public static final int INDEX_TEMPLATE_VERSION = 16;
+    public static final int INDEX_TEMPLATE_VERSION = 17;
     public static final String HISTORY_TEMPLATE_NAME = ".watch-history-" + INDEX_TEMPLATE_VERSION;
     public static final String HISTORY_TEMPLATE_NAME_NO_ILM = ".watch-history-no-ilm-" + INDEX_TEMPLATE_VERSION;
     public static final String[] TEMPLATE_NAMES = new String[] { HISTORY_TEMPLATE_NAME };
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json b/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json
index 2eed69c7c58e..da459cda1346 100644
--- a/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json
+++ b/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json
@@ -54,6 +54,15 @@
             "enabled": false
           }
         }
+      },
+      {
+        "disabled_result_input_chain_fields": {
+          "path_match": "result.input.chain",
+          "mapping": {
+            "type": "object",
+            "enabled": false
+          }
+        }
       }
     ],
     "dynamic": false,
diff --git a/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json b/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json
index 19e4dc022daa..2abf6570d1f8 100644
--- a/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json
+++ b/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json
@@ -55,6 +55,15 @@
             "enabled": false
           }
         }
+      },
+      {
+        "disabled_result_input_chain_fields": {
+          "path_match": "result.input.chain",
+          "mapping": {
+            "type": "object",
+            "enabled": false
+          }
+        }
       }
     ],
     "dynamic": false,
diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java
index 0070554d99d2..1bcdd060994c 100644
--- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java
+++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java
@@ -130,7 +130,7 @@ public void testFailedInputResultWithDotsInFieldNameGetsStored() throws Exceptio
             String chainedPath = SINGLE_MAPPING_NAME
                 + ".properties.result.properties.input.properties.chain.properties.chained.properties.search"
                 + ".properties.request.properties.body.enabled";
-            assertThat(source.getValue(chainedPath), is(false));
+            assertThat(source.getValue(chainedPath), nullValue());
         } else {
             String path = SINGLE_MAPPING_NAME
                 + ".properties.result.properties.input.properties.search.properties.request.properties.body.enabled";
@@ -168,11 +168,11 @@ public void testPayloadInputWithDotsInFieldNameWorks() throws Exception {
             XContentType.JSON
         );
 
-        // lets make sure the body fields are disabled
+        // let's make sure the body fields are disabled or, in the case of chained, the whole object is not indexed
         if (useChained) {
            String path = SINGLE_MAPPING_NAME
                 + ".properties.result.properties.input.properties.chain.properties.chained.properties.payload.enabled";
-            assertThat(source.getValue(path), is(false));
+            assertThat(source.getValue(path), nullValue());
         } else {
             String path = SINGLE_MAPPING_NAME + ".properties.result.properties.input.properties.payload.enabled";
             assertThat(source.getValue(path), is(false));

From 2272d4818aaa19cc63584b6ce7dcfec9e553eb41 Mon Sep 17 00:00:00 2001
From: kosabogi <105062005+kosabogi@users.noreply.github.com>
Date: Wed, 4 Dec 2024 14:49:39 +0100
Subject: [PATCH 12/45] Updates minimum_number_of_allocations description
 (#117746) (#117984)

---
 docs/reference/ml/ml-shared.asciidoc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc
index d01047eac981..4948db48664e 100644
--- a/docs/reference/ml/ml-shared.asciidoc
+++ b/docs/reference/ml/ml-shared.asciidoc
@@ -18,7 +18,8 @@ end::adaptive-allocation-max-number[]
 
 tag::adaptive-allocation-min-number[]
 Specifies the minimum number of allocations to scale to.
-If set, it must be greater than or equal to `1`.
+If set, it must be greater than or equal to `0`.
+If not defined, the deployment scales to `0`.
 end::adaptive-allocation-min-number[]
 
 tag::aggregations[]

From 232bfe1a6abbc9b88404b34820631df1a287572b Mon Sep 17 00:00:00 2001
From: Keith Massey
Date: Wed, 4 Dec 2024 08:56:47 -0600
Subject: [PATCH 13/45] Removing unnecessary state from DataStreamReindexTask
 (#117942) (#117988)

---
 ...indexDataStreamPersistentTaskExecutor.java |  9 ++---
 .../task/ReindexDataStreamTask.java           | 35 ++++++++++---------
 2 files changed, 24 insertions(+), 20 deletions(-)

diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java
index bcd1ae314ac3..095a310e5844 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java
@@ -53,7 +53,6 @@ protected ReindexDataStreamTask createTask(
             params.startTime(),
             params.totalIndices(),
             params.totalIndicesToBeUpgraded(),
-            threadPool,
             id,
             type,
             action,
@@ -76,9 +75,11 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask
         List<Index> indicesToBeReindexed = indices.stream()
             .filter(index -> clusterService.state().getMetadata().index(index).getCreationVersion().isLegacyIndexVersion())
             .toList();
-        reindexDataStreamTask.setPendingIndices(indicesToBeReindexed.stream().map(Index::getName).toList());
+        reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size());
         for (Index index : indicesToBeReindexed) {
+            reindexDataStreamTask.incrementInProgressIndicesCount();
             // TODO This is just a placeholder. This is where the real data stream reindex logic will go
+            reindexDataStreamTask.reindexSucceeded();
         }
 
         completeSuccessfulPersistentTask(reindexDataStreamTask);
@@ -89,12 +90,12 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask
     }
 
     private void completeSuccessfulPersistentTask(ReindexDataStreamTask persistentTask) {
-        persistentTask.reindexSucceeded();
+        persistentTask.allReindexesCompleted();
         threadPool.schedule(persistentTask::markAsCompleted, getTimeToLive(persistentTask), threadPool.generic());
     }
 
     private void completeFailedPersistentTask(ReindexDataStreamTask persistentTask, Exception e) {
-        persistentTask.reindexFailed(e);
+        persistentTask.taskFailed(e);
         threadPool.schedule(() -> persistentTask.markAsFailed(e), getTimeToLive(persistentTask), threadPool.generic());
     }
 
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java
index 2ae244679659..068579a37edb 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java
@@ -12,29 +12,27 @@ import org.elasticsearch.core.Tuple;
 import org.elasticsearch.persistent.AllocatedPersistentTask;
 import org.elasticsearch.tasks.TaskId;
-import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
 
 public class ReindexDataStreamTask extends AllocatedPersistentTask {
     public static final String TASK_NAME = "reindex-data-stream";
     private final long persistentTaskStartTime;
     private final int totalIndices;
     private final int totalIndicesToBeUpgraded;
-    private final ThreadPool threadPool;
     private boolean complete = false;
     private Exception exception;
-    private List<String> inProgress = new ArrayList<>();
-    private List<String> pending = List.of();
+    private AtomicInteger inProgress = new AtomicInteger(0);
+    private AtomicInteger pending = new AtomicInteger();
    private List<Tuple<String, Exception>> errors = new ArrayList<>();
 
     public ReindexDataStreamTask(
         long persistentTaskStartTime,
         int totalIndices,
         int totalIndicesToBeUpgraded,
-        ThreadPool threadPool,
         long id,
         String type,
         String action,
@@ -46,7 +44,6 @@ public ReindexDataStreamTask(
         this.persistentTaskStartTime = persistentTaskStartTime;
         this.totalIndices = totalIndices;
         this.totalIndicesToBeUpgraded = totalIndicesToBeUpgraded;
-        this.threadPool = threadPool;
     }
 
     @Override
@@ -57,30 +54,36 @@ public ReindexDataStreamStatus getStatus() {
             totalIndicesToBeUpgraded,
             complete,
             exception,
-            inProgress.size(),
-            pending.size(),
+            inProgress.get(),
+            pending.get(),
             errors
         );
     }
 
-    public void reindexSucceeded() {
+    public void allReindexesCompleted() {
         this.complete = true;
     }
 
-    public void reindexFailed(Exception e) {
+    public void taskFailed(Exception e) {
         this.complete = true;
         this.exception = e;
     }
 
-    public void setInProgressIndices(List<String> inProgressIndices) {
-        this.inProgress = inProgressIndices;
+    public void reindexSucceeded() {
+        inProgress.decrementAndGet();
+    }
+
+    public void reindexFailed(String index, Exception error) {
+        this.errors.add(Tuple.tuple(index, error));
+        inProgress.decrementAndGet();
     }
 
-    public void setPendingIndices(List<String> pendingIndices) {
-        this.pending = pendingIndices;
+    public void incrementInProgressIndicesCount() {
+        inProgress.incrementAndGet();
+        pending.decrementAndGet();
     }
 
-    public void addErrorIndex(String index, Exception error) {
-        this.errors.add(Tuple.tuple(index, error));
+    public void setPendingIndicesCount(int size) {
+        pending.set(size);
     }
 }

From 285ab013ce6a317af220c1ce5042e101dd1390e3 Mon Sep 17 00:00:00 2001
From: Keith Massey
Date: Wed, 4 Dec 2024 09:04:50 -0600
Subject: [PATCH 14/45] Muting SparseVectorQueryBuilderTests.testToQuery
 (#117998)

---
 muted-tests.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/muted-tests.yml b/muted-tests.yml
index ec7bb6ab4188..488629ce8ea7 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -414,3 +414,6 @@ tests:
   issue: https://github.com/elastic/elasticsearch/issues/117862
 - class: org.elasticsearch.xpack.security.authc.ldap.UserAttributeGroupsResolverTests
   issue: https://github.com/elastic/elasticsearch/issues/116537
+- class: org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilderTests
+  method: testToQuery
+  issue: https://github.com/elastic/elasticsearch/issues/117998

From c888ce7e7e477d46ef7a9a654e85859afeda2805 Mon Sep 17 00:00:00 2001
From: Luigi Dell'Aquila
Date: Wed, 4 Dec 2024 16:49:36 +0100
Subject: [PATCH 15/45] ES|QL fix telemetry tests after promoting CATEGORIZE
 (#117878) (#117915)

---
 muted-tests.yml                                            | 3 ---
 .../resources/rest-api-spec/test/esql/60_usage.yml         | 2 +-
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/muted-tests.yml b/muted-tests.yml
index 488629ce8ea7..7c82b1f0c1b9 100644
--- a/muted-tests.yml
+++ b/muted-tests.yml
@@ -409,9 +409,6 @@ tests:
 - class: org.elasticsearch.xpack.ml.integration.RegressionIT
   method: testTwoJobsWithSameRandomizeSeedUseSameTrainingSet
   issue: https://github.com/elastic/elasticsearch/issues/117805
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry) non-snapshot version}
-  issue: https://github.com/elastic/elasticsearch/issues/117862
 - class: org.elasticsearch.xpack.security.authc.ldap.UserAttributeGroupsResolverTests
   issue: https://github.com/elastic/elasticsearch/issues/116537
 - class: org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilderTests
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
index f7dd979540af..c23b44c00bd1 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml
@@ -163,4 +163,4 @@ setup:
     - match: {esql.functions.cos: $functions_cos}
     - gt: {esql.functions.to_long: $functions_to_long}
     - match: {esql.functions.coalesce: $functions_coalesce}
-    - length: {esql.functions: 118} # check the "sister" test above for a likely update to the same esql.functions length check
+    - length: {esql.functions: 119} # check the "sister" test above for a likely update to the same esql.functions length check

From 13149b833579c2f1fd4478de62b8738f427fc31c Mon Sep 17 00:00:00 2001
From: elasticsearchmachine
Date: Wed, 4 Dec 2024 16:00:34 +0000
Subject: [PATCH 16/45] Bump versions after 7.17.26 release

---
 .buildkite/pipelines/intake.yml                            |  2 +-
 .buildkite/pipelines/periodic-packaging.yml                |  6 +++---
 .buildkite/pipelines/periodic.yml                          | 10 +++++-----
 .ci/bwcVersions                                            |  2 +-
 .ci/snapshotBwcVersions                                    |  2 +-
 server/src/main/java/org/elasticsearch/Version.java        |  1 +
 .../resources/org/elasticsearch/TransportVersions.csv      |  1 +
 .../org/elasticsearch/index/IndexVersions.csv              |  1 +
 8 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml
index a80a529ca2e9..5981d0020f11 100644
--- a/.buildkite/pipelines/intake.yml
+++ b/.buildkite/pipelines/intake.yml
@@ -56,7 +56,7 @@ steps:
     timeout_in_minutes: 300
     matrix:
       setup:
-        BWC_VERSION: ["7.17.26", "8.16.2", "8.17.0", "8.18.0"]
+        BWC_VERSION: ["7.17.27", "8.16.2", "8.17.0", "8.18.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml
index 11066f4321e6..40c832e7cda2 100644
--- a/.buildkite/pipelines/periodic-packaging.yml
+++ b/.buildkite/pipelines/periodic-packaging.yml
@@ -304,8 +304,8 @@ steps:
     env:
       BWC_VERSION: 7.16.3
 
-  - label: "{{matrix.image}} / 7.17.26 / packaging-tests-upgrade"
-    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.26
+  - label: "{{matrix.image}} / 7.17.27 / packaging-tests-upgrade"
+    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.27
     timeout_in_minutes: 300
     matrix:
       setup:
@@ -318,7 +318,7 @@ steps:
       machineType: custom-16-32768
      buildDirectory: /dev/shm/bk
     env:
-      BWC_VERSION: 7.17.26
+      BWC_VERSION: 7.17.27
 
  - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade"
    command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1
diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml
index acf27c75040d..ca9e0ede3966 100644
--- a/.buildkite/pipelines/periodic.yml
+++ b/.buildkite/pipelines/periodic.yml
@@ -325,8 +325,8 @@ steps:
         - signal_reason: agent_stop
           limit: 3
 
-  - label: 7.17.26 / bwc
-    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.26#bwcTest
+  - label: 7.17.27 / bwc
+    command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.27#bwcTest
     timeout_in_minutes: 300
     agents:
       provider: gcp
@@ -335,7 +335,7 @@ steps:
       buildDirectory: /dev/shm/bk
       preemptible: true
     env:
-      BWC_VERSION: 7.17.26
+      BWC_VERSION: 7.17.27
     retry:
       automatic:
         - exit_status: "-1"
@@ -771,7 +771,7 @@ steps:
       setup:
         ES_RUNTIME_JAVA:
           - openjdk17
-        BWC_VERSION: ["7.17.26", "8.16.2", "8.17.0", "8.18.0"]
+        BWC_VERSION: ["7.17.27", "8.16.2", "8.17.0", "8.18.0"]
     agents:
       provider: gcp
      image: family/elasticsearch-ubuntu-2004
@@ -819,7 +819,7 @@ steps:
           - openjdk21
          - openjdk22
          - openjdk23
-        BWC_VERSION: ["7.17.26", "8.16.2", "8.17.0", "8.18.0"]
+        BWC_VERSION: ["7.17.27", "8.16.2", "8.17.0", "8.18.0"]
     agents:
       provider: gcp
       image: family/elasticsearch-ubuntu-2004
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index a9f9b659a71a..aa9ac9776dcc 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -16,7 +16,7 @@ BWC_VERSION:
   - "7.14.2"
   - "7.15.2"
   - "7.16.3"
-  - "7.17.26"
+  - "7.17.27"
   - "8.0.1"
   - "8.1.3"
   - "8.2.3"
diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions
index f309556b9d9f..e8ee5a6fa75e 100644
--- a/.ci/snapshotBwcVersions
+++ b/.ci/snapshotBwcVersions
@@ -1,5 +1,5 @@
 BWC_VERSION:
-  - "7.17.26"
+  - "7.17.27"
   - "8.16.2"
   - "8.17.0"
   - "8.18.0"
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index 90b0e1b34961..fcc3d097f3fb 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -127,6 +127,7 @@ public class Version implements VersionId<Version>, ToXContentFragment {
     public static final Version V_7_17_24 = new Version(7_17_24_99);
     public static final Version V_7_17_25 = new Version(7_17_25_99);
     public static final Version V_7_17_26 = new Version(7_17_26_99);
+    public static final Version V_7_17_27 = new Version(7_17_27_99);
     public static final Version V_8_0_0 = new Version(8_00_00_99);
     public static final Version V_8_0_1 = new Version(8_00_01_99);
 
diff --git a/server/src/main/resources/org/elasticsearch/TransportVersions.csv b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
index 678cf5086659..2cdb0a4879f8 100644
--- a/server/src/main/resources/org/elasticsearch/TransportVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/TransportVersions.csv
@@ -73,6 +73,7 @@
 7.17.23,7172399
 7.17.24,7172499
 7.17.25,7172599
+7.17.26,7172699
 8.0.0,8000099
 8.0.1,8000199
 8.1.0,8010099
diff --git a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
index 332dff2bd83b..58d0db5f6c3e 100644
--- a/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
+++ b/server/src/main/resources/org/elasticsearch/index/IndexVersions.csv
@@ -73,6 +73,7 @@
 7.17.23,7172399
 7.17.24,7172499
 7.17.25,7172599
+7.17.26,7172699
 8.0.0,8000099
 8.0.1,8000199
 8.1.0,8010099

From 70da167e52b453df7e76e1a690a12da20ee912ad Mon Sep 17 00:00:00 2001
From: Keith Massey
Date: Wed, 4 Dec 2024 10:20:05 -0600
Subject: [PATCH 17/45] Moving the data stream reindex task to x-pack
 (#117927) (#117936)

---
 .../datastreams/DataStreamsPlugin.java        | 65 +-------------
 x-pack/plugin/migrate/build.gradle            | 24 +++++
 .../ReindexDataStreamTransportActionIT.java   | 20 ++---
 .../xpack/migrate/MigratePlugin.java          | 90 +++++++++++++++++++
 .../action}/ReindexDataStreamAction.java      | 10 +--
 .../ReindexDataStreamTransportAction.java     | 19 ++--
 ...indexDataStreamPersistentTaskExecutor.java | 10 +--
 .../ReindexDataStreamPersistentTaskState.java | 10 +--
 .../task/ReindexDataStreamStatus.java         | 10 +--
 .../migrate}/task/ReindexDataStreamTask.java  | 10 +--
 .../task/ReindexDataStreamTaskParams.java     | 10 +--
 .../ReindexDataStreamResponseTests.java       | 12 ++-
 ...dexDataStreamPersistentTaskStateTests.java | 10 +--
 .../task/ReindexDataStreamStatusTests.java    | 10 +--
 .../ReindexDataStreamTaskParamsTests.java     | 10 +--
 15 files changed, 173 insertions(+), 147 deletions(-)
 create mode 100644 x-pack/plugin/migrate/build.gradle
 rename {modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate}/action/ReindexDataStreamTransportActionIT.java (89%)
 create mode 100644 x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java
 rename {server/src/main/java/org/elasticsearch/action/datastreams => x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action}/ReindexDataStreamAction.java (88%)
 rename {modules/data-streams/src/main/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate}/action/ReindexDataStreamTransportAction.java (81%)
 rename {modules/data-streams/src/main/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamPersistentTaskExecutor.java (92%)
 rename {modules/data-streams/src/main/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamPersistentTaskState.java (82%)
 rename {modules/data-streams/src/main/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamStatus.java (87%)
x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamStatus.java (87%) rename {modules/data-streams/src/main/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamTask.java (85%) rename {modules/data-streams/src/main/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamTaskParams.java (88%) rename {server/src/test/java/org/elasticsearch/action/datastreams => x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action}/ReindexDataStreamResponseTests.java (76%) rename {modules/data-streams/src/test/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamPersistentTaskStateTests.java (74%) rename {modules/data-streams/src/test/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamStatusTests.java (92%) rename {modules/data-streams/src/test/java/org/elasticsearch/datastreams => x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate}/task/ReindexDataStreamTaskParamsTests.java (86%) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 2f3b63d27ca3..cb7445705537 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -19,23 +19,19 @@ import org.elasticsearch.action.datastreams.MigrateToDataStreamAction; import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; import org.elasticsearch.action.datastreams.PromoteDataStreamAction; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction; import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycleAction; import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction; import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.action.CreateDataStreamTransportAction; @@ -44,7 +40,6 @@ import org.elasticsearch.datastreams.action.MigrateToDataStreamTransportAction; import org.elasticsearch.datastreams.action.ModifyDataStreamsTransportAction; import org.elasticsearch.datastreams.action.PromoteDataStreamTransportAction; -import org.elasticsearch.datastreams.action.ReindexDataStreamTransportAction; import org.elasticsearch.datastreams.action.TransportGetDataStreamsAction; import 
org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; @@ -78,27 +73,14 @@ import org.elasticsearch.datastreams.rest.RestMigrateToDataStreamAction; import org.elasticsearch.datastreams.rest.RestModifyDataStreamsAction; import org.elasticsearch.datastreams.rest.RestPromoteDataStreamAction; -import org.elasticsearch.datastreams.task.ReindexDataStreamPersistentTaskExecutor; -import org.elasticsearch.datastreams.task.ReindexDataStreamPersistentTaskState; -import org.elasticsearch.datastreams.task.ReindexDataStreamStatus; -import org.elasticsearch.datastreams.task.ReindexDataStreamTask; -import org.elasticsearch.datastreams.task.ReindexDataStreamTaskParams; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.index.IndexSettingProvider; -import org.elasticsearch.persistent.PersistentTaskParams; -import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.HealthPlugin; -import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.ParseField; import java.io.IOException; import java.time.Clock; @@ -111,7 +93,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; -public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlugin, PersistentTaskPlugin { +public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlugin { public static final Setting<TimeValue> TIME_SERIES_POLL_INTERVAL = Setting.timeSetting( "time_series.poll_interval", @@ -262,7 +244,6 @@ public Collection<?> createComponents(PluginServices services) { actions.add(new ActionHandler<>(PutDataStreamOptionsAction.INSTANCE, TransportPutDataStreamOptionsAction.class)); actions.add(new ActionHandler<>(DeleteDataStreamOptionsAction.INSTANCE, TransportDeleteDataStreamOptionsAction.class)); } - actions.add(new ActionHandler<>(ReindexDataStreamAction.INSTANCE, ReindexDataStreamTransportAction.class)); return actions; } @@ -321,48 +302,4 @@ public void close() throws IOException { public Collection<HealthIndicatorService> getHealthIndicatorServices() { return List.of(dataStreamLifecycleHealthIndicatorService.get()); } - - @Override - public List<NamedXContentRegistry.Entry> getNamedXContent() { - return List.of( - new NamedXContentRegistry.Entry( - PersistentTaskState.class, - new ParseField(ReindexDataStreamPersistentTaskState.NAME), - ReindexDataStreamPersistentTaskState::fromXContent - ), - new NamedXContentRegistry.Entry( - PersistentTaskParams.class, - new ParseField(ReindexDataStreamTaskParams.NAME), - ReindexDataStreamTaskParams::fromXContent - ) - ); - } - - @Override - public List<NamedWriteableRegistry.Entry> getNamedWriteables() { - return List.of( - new NamedWriteableRegistry.Entry( - PersistentTaskState.class, - ReindexDataStreamPersistentTaskState.NAME, - ReindexDataStreamPersistentTaskState::new - ), - new NamedWriteableRegistry.Entry( - PersistentTaskParams.class, - ReindexDataStreamTaskParams.NAME, - ReindexDataStreamTaskParams::new - ), - new NamedWriteableRegistry.Entry(Task.Status.class, ReindexDataStreamStatus.NAME,
ReindexDataStreamStatus::new) - ); - } - - @Override - public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor( - ClusterService clusterService, - ThreadPool threadPool, - Client client, - SettingsModule settingsModule, - IndexNameExpressionResolver expressionResolver - ) { - return List.of(new ReindexDataStreamPersistentTaskExecutor(client, clusterService, ReindexDataStreamTask.TASK_NAME, threadPool)); - } } diff --git a/x-pack/plugin/migrate/build.gradle b/x-pack/plugin/migrate/build.gradle new file mode 100644 index 000000000000..87ea7a07ab41 --- /dev/null +++ b/x-pack/plugin/migrate/build.gradle @@ -0,0 +1,24 @@ +apply plugin: 'elasticsearch.internal-es-plugin' +apply plugin: 'elasticsearch.internal-cluster-test' + +esplugin { + name 'x-pack-migrate' + description 'Elasticsearch Expanded Pack Plugin - Index and Data Stream Migration' + classname 'org.elasticsearch.xpack.migrate.MigratePlugin' + extendedPlugins = ['x-pack-core'] + hasNativeController false + requiresKeystore true +} +base { + archivesName = 'x-pack-migrate' +} + +dependencies { + compileOnly project(path: xpackModule('core')) + testImplementation(testArtifact(project(xpackModule('core')))) + testImplementation project(xpackModule('ccr')) + testImplementation project(':modules:data-streams') + testImplementation project(path: ':modules:reindex') +} + +addQaCheckDependencies(project) diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportActionIT.java b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java similarity index 89% rename from modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportActionIT.java rename to x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java index fdc96892d4b2..3b68fc9995b5 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportActionIT.java +++ b/x-pack/plugin/migrate/src/internalClusterTest/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportActionIT.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
*/ -package org.elasticsearch.datastreams.action; +package org.elasticsearch.xpack.migrate.action; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -17,21 +15,21 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.datastreams.CreateDataStreamAction; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamRequest; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.datastreams.DataStreamsPlugin; -import org.elasticsearch.datastreams.task.ReindexDataStreamTask; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.migrate.MigratePlugin; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamRequest; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamResponse; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask; import java.util.Collection; import java.util.List; @@ -48,7 +46,7 @@ public class ReindexDataStreamTransportActionIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return List.of(DataStreamsPlugin.class); + return List.of(DataStreamsPlugin.class, MigratePlugin.class); } public void testNonExistentDataStream() { diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java new file mode 100644 index 000000000000..118cd69ece4d --- /dev/null +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/MigratePlugin.java @@ -0,0 +1,90 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.migrate; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamTransportAction; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamPersistentTaskExecutor; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamPersistentTaskState; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamStatus; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTaskParams; + +import java.util.ArrayList; +import java.util.List; + +public class MigratePlugin extends Plugin implements ActionPlugin, PersistentTaskPlugin { + + @Override + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>(); + actions.add(new ActionHandler<>(ReindexDataStreamAction.INSTANCE, ReindexDataStreamTransportAction.class)); + return actions; + } + + @Override + public List<NamedXContentRegistry.Entry> getNamedXContent() { + return List.of( + new NamedXContentRegistry.Entry( + PersistentTaskState.class, + new ParseField(ReindexDataStreamPersistentTaskState.NAME), + ReindexDataStreamPersistentTaskState::fromXContent + ), + new NamedXContentRegistry.Entry( + PersistentTaskParams.class, + new ParseField(ReindexDataStreamTaskParams.NAME), + ReindexDataStreamTaskParams::fromXContent + ) + ); + } + + @Override + public List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return List.of( + new NamedWriteableRegistry.Entry( + PersistentTaskState.class, + ReindexDataStreamPersistentTaskState.NAME, + ReindexDataStreamPersistentTaskState::new + ), + new NamedWriteableRegistry.Entry( + PersistentTaskParams.class, + ReindexDataStreamTaskParams.NAME, + ReindexDataStreamTaskParams::new + ), + new NamedWriteableRegistry.Entry(Task.Status.class, ReindexDataStreamStatus.NAME, ReindexDataStreamStatus::new) + ); + } + + @Override + public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor( + ClusterService clusterService, + ThreadPool threadPool, + Client client, + SettingsModule settingsModule, + IndexNameExpressionResolver expressionResolver + ) { + return List.of(new ReindexDataStreamPersistentTaskExecutor(client, clusterService, ReindexDataStreamTask.TASK_NAME, threadPool)); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/ReindexDataStreamAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java similarity index 88% rename from server/src/main/java/org/elasticsearch/action/datastreams/ReindexDataStreamAction.java rename to
x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java index 814c512c43be..1785e6971f82 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/ReindexDataStreamAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. */ -package org.elasticsearch.action.datastreams; +package org.elasticsearch.xpack.migrate.action; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java similarity index 81% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportAction.java rename to x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java index 0a86985c6c7b..d532b001f5aa 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/ReindexDataStreamTransportAction.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java @@ -1,32 +1,29 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
*/ -package org.elasticsearch.datastreams.action; +package org.elasticsearch.xpack.migrate.action; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamRequest; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.datastreams.task.ReindexDataStreamTask; -import org.elasticsearch.datastreams.task.ReindexDataStreamTaskParams; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamRequest; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamResponse; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTask; +import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTaskParams; /* * This transport action creates a new persistent task for reindexing the source data stream given in the request. On successful creation diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java similarity index 92% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java rename to x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java index 095a310e5844..126b806d7a36 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskExecutor.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
*/ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskState.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskState.java similarity index 82% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskState.java rename to x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskState.java index d6f32a3d34a7..130a8f7ce372 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskState.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskState.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. */ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatus.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java similarity index 87% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatus.java rename to x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java index 10dfded853a1..358062550b50 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatus.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatus.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
*/ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java similarity index 85% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java rename to x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java index 068579a37edb..72ddb87e9dea 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTask.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTask.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. */ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.AllocatedPersistentTask; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParams.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTaskParams.java similarity index 88% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParams.java rename to x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTaskParams.java index 5efbc6b67221..0f26713a7518 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParams.java +++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTaskParams.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
*/ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; diff --git a/server/src/test/java/org/elasticsearch/action/datastreams/ReindexDataStreamResponseTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamResponseTests.java similarity index 76% rename from server/src/test/java/org/elasticsearch/action/datastreams/ReindexDataStreamResponseTests.java rename to x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamResponseTests.java index fe839c28aab8..06844577c4e3 100644 --- a/server/src/test/java/org/elasticsearch/action/datastreams/ReindexDataStreamResponseTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamResponseTests.java @@ -1,21 +1,19 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. */ -package org.elasticsearch.action.datastreams; +package org.elasticsearch.xpack.migrate.action; -import org.elasticsearch.action.datastreams.ReindexDataStreamAction.ReindexDataStreamResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.ReindexDataStreamResponse; import java.io.IOException; import java.util.Map; diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskStateTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskStateTests.java similarity index 74% rename from modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskStateTests.java rename to x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskStateTests.java index 7cd95bca7a12..140558b997e4 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamPersistentTaskStateTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskStateTests.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
+ * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. */ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatusTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java similarity index 92% rename from modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatusTests.java rename to x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java index 8f0fabc2ce7e..d81e9d35cd49 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamStatusTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamStatusTests.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. */ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParamsTests.java b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTaskParamsTests.java similarity index 86% rename from modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParamsTests.java rename to x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTaskParamsTests.java index 55098bf4a68d..fc39b5d8cb70 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/task/ReindexDataStreamTaskParamsTests.java +++ b/x-pack/plugin/migrate/src/test/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamTaskParamsTests.java @@ -1,13 +1,11 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
*/ -package org.elasticsearch.datastreams.task; +package org.elasticsearch.xpack.migrate.task; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable; From edf4419780514ead2573af49c09a6787ea1174e8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 4 Dec 2024 08:40:43 -0800 Subject: [PATCH 18/45] Make Murmur3Hasher#update public (#117961) (#118002) Closes #117883 --- .../java/org/elasticsearch/common/hash/Murmur3Hasher.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/common/hash/Murmur3Hasher.java b/server/src/main/java/org/elasticsearch/common/hash/Murmur3Hasher.java index 817587771d79..aec28484138f 100644 --- a/server/src/main/java/org/elasticsearch/common/hash/Murmur3Hasher.java +++ b/server/src/main/java/org/elasticsearch/common/hash/Murmur3Hasher.java @@ -40,7 +40,12 @@ public void update(byte[] inputBytes) { update(inputBytes, 0, inputBytes.length); } - private void update(byte[] inputBytes, int offset, int length) { + /** + * Similar to {@link #update(byte[])}, but processes a specific portion of the input bytes + * starting from the given {@code offset} for the specified {@code length}. + * @see #update(byte[]) + */ + public void update(byte[] inputBytes, int offset, int length) { if (remainderLength + length >= remainder.length) { if (remainderLength > 0) { // fill rest of remainder from inputBytes and hash remainder From 4230d72c2205dc9db43497e5ccfab6a0e7d3464c Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 4 Dec 2024 08:42:34 -0800 Subject: [PATCH 19/45] Acquire stats searcher for data stream stats (#117953) (#118006) Here, we only need to extract the minimum and maximum values of the timestamp field; therefore, using a stats searcher should suffice. This is important for frozen indices. 
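As a rough sketch of the shard-level work this enables (the searcher acquisition and PointValues call appear in the diff below; the LongPoint decoding step is an assumption about code outside the shown hunk):

    long maxTimestamp = 0L;
    try (Engine.Searcher searcher = indexShard.acquireSearcher(ReadOnlyEngine.FIELD_RANGE_SEARCH_SOURCE)) {
        // Only the packed BKD bounds of the timestamp field are read, never individual
        // documents, so a stats searcher is sufficient even against frozen indices.
        IndexReader reader = searcher.getIndexReader();
        byte[] maxPackedValue = PointValues.getMaxPackedValue(reader, DataStream.TIMESTAMP_FIELD_NAME);
        if (maxPackedValue != null) {
            maxTimestamp = LongPoint.decodeDimension(maxPackedValue, 0);
        }
    }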
--- docs/changelog/117953.yaml | 5 +++++ .../datastreams/action/DataStreamsStatsTransportAction.java | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/117953.yaml diff --git a/docs/changelog/117953.yaml b/docs/changelog/117953.yaml new file mode 100644 index 000000000000..62f0218b1cdc --- /dev/null +++ b/docs/changelog/117953.yaml @@ -0,0 +1,5 @@ +pr: 117953 +summary: Acquire stats searcher for data stream stats +area: Data streams +type: bug +issues: [] diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java index 1b0b0aa6abeb..1d3b1b676282 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DataStreamsStatsTransportAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.ReadOnlyEngine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.indices.IndicesService; @@ -130,7 +131,7 @@ protected void shardOperation( DataStream dataStream = indexAbstraction.getParentDataStream(); assert dataStream != null; long maxTimestamp = 0L; - try (Engine.Searcher searcher = indexShard.acquireSearcher("data_stream_stats")) { + try (Engine.Searcher searcher = indexShard.acquireSearcher(ReadOnlyEngine.FIELD_RANGE_SEARCH_SOURCE)) { IndexReader indexReader = searcher.getIndexReader(); byte[] maxPackedValue = PointValues.getMaxPackedValue(indexReader, DataStream.TIMESTAMP_FIELD_NAME); if (maxPackedValue != null) { From 20957d6e1e39ce7183fef8661c7ed09fdbc95943 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 4 Dec 2024 18:28:37 +0100 Subject: [PATCH 20/45] Refactor how we build the final aggregations in GlobalOrdinalsStringTermsAggregator (#117627) (#118009) This change refactors the method #buildAggregations(LongArray owningBucketOrds) so it is specific to the collection strategy.
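In outline, each collection strategy now owns result building (a simplified sketch; the full generic signatures are in the diff below):

    abstract static class CollectionStrategy implements Releasable {
        // Each strategy builds its own InternalAggregation[] directly, replacing the
        // forEach(owningBucketOrd, consumer) hook that ResultStrategy#buildAggregations
        // previously drove.
        abstract InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException;
    }

This lets DenseGlobalOrds assume a single owning bucket ordinal while RemapGlobalOrds iterates its LongKeyedBucketOrds per owning bucket, instead of both funneling through one generic loop.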
--- .../GlobalOrdinalsStringTermsAggregator.java | 261 +++++++++--------- 1 file changed, 133 insertions(+), 128 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 84dd2e7b1e52..037870016a5f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -20,12 +20,10 @@ import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; -import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; @@ -103,14 +101,14 @@ public GlobalOrdinalsStringTermsAggregator( this.valueCount = valuesSupplier.get().getValueCount(); this.acceptedGlobalOrdinals = acceptedOrds; if (remapGlobalOrds) { - this.collectionStrategy = new RemapGlobalOrds(cardinality, excludeDeletedDocs); + this.collectionStrategy = new RemapGlobalOrds<>(this.resultStrategy, cardinality, excludeDeletedDocs); } else { this.collectionStrategy = cardinality.map(estimate -> { if (estimate > 1) { // This is a 500 class error, because we should never be able to reach it. throw new AggregationExecutionException("Dense ords don't know how to collect from many buckets"); } - return new DenseGlobalOrds(excludeDeletedDocs); + return new DenseGlobalOrds<>(this.resultStrategy, excludeDeletedDocs); }); } } @@ -192,7 +190,13 @@ public void collect(int doc, long owningBucketOrd) throws IOException { @Override public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { - return resultStrategy.buildAggregations(owningBucketOrds); + if (valueCount == 0) { // no context in this reader + return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> resultStrategy.buildNoValuesResult(owningBucketOrds.get(ordIdx)) + ); + } + return collectionStrategy.buildAggregations(owningBucketOrds); } @Override @@ -399,8 +403,8 @@ private void mapSegmentCountsToGlobalCounts(LongUnaryOperator mapping) throws IO * The {@link GlobalOrdinalsStringTermsAggregator} uses one of these * to collect the global ordinals by calling * {@link CollectionStrategy#collectGlobalOrd} for each global ordinal - * that it hits and then calling {@link CollectionStrategy#forEach} - * once to iterate on the results. + * that it hits and then calling {@link CollectionStrategy#buildAggregations} + * to generate the results. */ abstract static class CollectionStrategy implements Releasable { /** @@ -436,15 +440,9 @@ abstract static class CollectionStrategy implements Releasable { abstract long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd); /** - * Iterate all of the buckets. Implementations take into account - * the {@link BucketCountThresholds}. 
In particular, - * if the {@link BucketCountThresholds#getMinDocCount()} is 0 then - * they'll make sure to iterate a bucket even if it was never - * {{@link #collectGlobalOrd collected}. - * If {@link BucketCountThresholds#getMinDocCount()} is not 0 then - * they'll skip all global ords that weren't collected. + * Create the aggregation result */ - abstract void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException; + abstract InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException; } interface BucketInfoConsumer { @@ -455,12 +453,17 @@ interface BucketInfoConsumer { * {@linkplain CollectionStrategy} that just uses the global ordinal as the * bucket ordinal. */ - class DenseGlobalOrds extends CollectionStrategy { + class DenseGlobalOrds< + R extends InternalAggregation, + B extends InternalMultiBucketAggregation.InternalBucket, + TB extends InternalMultiBucketAggregation.InternalBucket> extends CollectionStrategy { private final boolean excludeDeletedDocs; + private final ResultStrategy collectionStrategy; - DenseGlobalOrds(boolean excludeDeletedDocs) { + DenseGlobalOrds(ResultStrategy collectionStrategy, boolean excludeDeletedDocs) { this.excludeDeletedDocs = excludeDeletedDocs; + this.collectionStrategy = collectionStrategy; } @Override @@ -490,9 +493,7 @@ long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) { return globalOrd; } - @Override - void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { - assert owningBucketOrd == 0; + private void collect(BucketInfoConsumer consumer) throws IOException { if (excludeDeletedDocs) { forEachExcludeDeletedDocs(consumer); } else { @@ -516,7 +517,7 @@ private void forEachAllowDeletedDocs(BucketInfoConsumer consumer) throws IOExcep * Excludes deleted docs in the results by cross-checking with liveDocs. 
*/ private void forEachExcludeDeletedDocs(BucketInfoConsumer consumer) throws IOException { - try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) { + try (LongHash accepted = new LongHash(20, bigArrays())) { for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) { LeafReader reader = ctx.reader(); Bits liveDocs = reader.getLiveDocs(); @@ -547,6 +548,55 @@ private void forEachExcludeDeletedDocs(BucketInfoConsumer consumer) throws IOExc @Override public void close() {} + + @Override + InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { + assert owningBucketOrds.size() == 1 && owningBucketOrds.get(0) == 0; + try ( + LongArray otherDocCount = bigArrays().newLongArray(1, true); + ObjectArray topBucketsPreOrd = collectionStrategy.buildTopBucketsPerOrd(1) + ) { + GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd; + final int size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize()); + try (ObjectArrayPriorityQueue ordered = collectionStrategy.buildPriorityQueue(size)) { + BucketUpdater updater = collectionStrategy.bucketUpdater(0, lookupGlobalOrd); + collect(new BucketInfoConsumer() { + TB spare = null; + + @Override + public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { + otherDocCount.increment(0, docCount); + if (docCount >= bucketCountThresholds.getShardMinDocCount()) { + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = collectionStrategy.buildEmptyTemporaryBucket(); + } + updater.updateBucket(spare, globalOrd, bucketOrd, docCount); + spare = ordered.insertWithOverflow(spare); + } + } + }); + + // Get the top buckets + topBucketsPreOrd.set(0, collectionStrategy.buildBuckets((int) ordered.size())); + for (int i = (int) ordered.size() - 1; i >= 0; --i) { + checkRealMemoryCBForInternalBucket(); + B bucket = collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); + topBucketsPreOrd.get(0)[i] = bucket; + otherDocCount.increment(0, -bucket.getDocCount()); + } + } + collectionStrategy.buildSubAggs(topBucketsPreOrd); + return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( + Math.toIntExact(owningBucketOrds.size()), + ordIdx -> collectionStrategy.buildResult( + owningBucketOrds.get(ordIdx), + otherDocCount.get(ordIdx), + topBucketsPreOrd.get(ordIdx) + ) + ); + } + } } /** @@ -555,13 +605,22 @@ public void close() {} * {@link DenseGlobalOrds} when collecting every ordinal, but significantly * less when collecting only a few. 
*/ - private class RemapGlobalOrds extends CollectionStrategy { + private class RemapGlobalOrds< + R extends InternalAggregation, + B extends InternalMultiBucketAggregation.InternalBucket, + TB extends InternalMultiBucketAggregation.InternalBucket> extends CollectionStrategy { private final LongKeyedBucketOrds bucketOrds; private final boolean excludeDeletedDocs; + private final ResultStrategy collectionStrategy; - private RemapGlobalOrds(CardinalityUpperBound cardinality, boolean excludeDeletedDocs) { + private RemapGlobalOrds( + ResultStrategy collectionStrategy, + CardinalityUpperBound cardinality, + boolean excludeDeletedDocs + ) { bucketOrds = LongKeyedBucketOrds.buildForValueRange(bigArrays(), cardinality, 0, valueCount - 1); this.excludeDeletedDocs = excludeDeletedDocs; + this.collectionStrategy = collectionStrategy; } @Override @@ -593,30 +652,14 @@ long globalOrdToBucketOrd(long owningBucketOrd, long globalOrd) { return bucketOrds.find(owningBucketOrd, globalOrd); } - @Override - void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { + private void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException { if (excludeDeletedDocs) { - forEachExcludeDeletedDocs(owningBucketOrd, consumer); - } else { - forEachAllowDeletedDocs(owningBucketOrd, consumer); - } - } - - void forEachAllowDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { - if (bucketCountThresholds.getMinDocCount() == 0) { + forEachExcludeDeletedDocs(owningBucketOrd); + } else if (bucketCountThresholds.getMinDocCount() == 0) { for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) { - if (false == acceptedGlobalOrdinals.test(globalOrd)) { - continue; - } - addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, null); - } - } else { - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); - while (ordsEnum.next()) { - if (false == acceptedGlobalOrdinals.test(ordsEnum.value())) { - continue; + if (acceptedGlobalOrdinals.test(globalOrd)) { + bucketOrds.add(owningBucketOrd, globalOrd); } - consumer.accept(ordsEnum.value(), ordsEnum.ord(), bucketDocCount(ordsEnum.ord())); } } } @@ -624,9 +667,9 @@ void forEachAllowDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) /** * Excludes deleted docs in the results by cross-checking with liveDocs. 
*/ - void forEachExcludeDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException { + private void forEachExcludeDeletedDocs(long owningBucketOrd) throws IOException { assert bucketCountThresholds.getMinDocCount() == 0; - try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) { + try (LongHash accepted = new LongHash(20, bigArrays())) { for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) { LeafReader reader = ctx.reader(); Bits liveDocs = reader.getLiveDocs(); @@ -642,7 +685,8 @@ void forEachExcludeDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer if (false == acceptedGlobalOrdinals.test(globalOrd)) { continue; } - addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, accepted); + bucketOrds.add(owningBucketOrd, globalOrd); + accepted.add(globalOrd); } } } @@ -651,110 +695,71 @@ void forEachExcludeDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer } } - private void addBucketForMinDocCountZero( - long owningBucketOrd, - long globalOrd, - BucketInfoConsumer consumer, - @Nullable LongHash accepted - ) throws IOException { - /* - * Use `add` instead of `find` here to assign an ordinal - * even if the global ord wasn't found so we can build - * sub-aggregations without trouble even though we haven't - * hit any documents for them. This is wasteful, but - * settings minDocCount == 0 is wasteful in general..... - */ - long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd); - long docCount; - if (bucketOrd < 0) { - bucketOrd = -1 - bucketOrd; - docCount = bucketDocCount(bucketOrd); - } else { - docCount = 0; - } - assert globalOrd >= 0; - consumer.accept(globalOrd, bucketOrd, docCount); - if (accepted != null) { - accepted.add(globalOrd); - } - } - @Override public void close() { bucketOrds.close(); } - } - - /** - * Strategy for building results. 
- */ - abstract class ResultStrategy< - R extends InternalAggregation, - B extends InternalMultiBucketAggregation.InternalBucket, - TB extends InternalMultiBucketAggregation.InternalBucket> implements Releasable { - - private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { - if (valueCount == 0) { // no context in this reader - return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( - Math.toIntExact(owningBucketOrds.size()), - ordIdx -> buildNoValuesResult(owningBucketOrds.get(ordIdx)) - ); - } + @Override + InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOException { try ( LongArray otherDocCount = bigArrays().newLongArray(owningBucketOrds.size(), true); - ObjectArray topBucketsPreOrd = buildTopBucketsPerOrd(owningBucketOrds.size()) + ObjectArray topBucketsPreOrd = collectionStrategy.buildTopBucketsPerOrd(owningBucketOrds.size()) ) { GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd; for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) { - final int size; - if (bucketCountThresholds.getMinDocCount() == 0) { - // if minDocCount == 0 then we can end up with more buckets then maxBucketOrd() returns - size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize()); - } else { - size = (int) Math.min(maxBucketOrd(), bucketCountThresholds.getShardSize()); - } - try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { - final long finalOrdIdx = ordIdx; - final long owningBucketOrd = owningBucketOrds.get(ordIdx); - BucketUpdater updater = bucketUpdater(owningBucketOrd, lookupGlobalOrd); - collectionStrategy.forEach(owningBucketOrd, new BucketInfoConsumer() { - TB spare = null; - - @Override - public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { - otherDocCount.increment(finalOrdIdx, docCount); - if (docCount >= bucketCountThresholds.getShardMinDocCount()) { - if (spare == null) { - checkRealMemoryCBForInternalBucket(); - spare = buildEmptyTemporaryBucket(); - } - updater.updateBucket(spare, globalOrd, bucketOrd, docCount); - spare = ordered.insertWithOverflow(spare); - } + long owningBucketOrd = owningBucketOrds.get(ordIdx); + collectZeroDocEntriesIfNeeded(owningBucketOrds.get(ordIdx)); + int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize()); + try (ObjectArrayPriorityQueue ordered = collectionStrategy.buildPriorityQueue(size)) { + BucketUpdater updater = collectionStrategy.bucketUpdater(owningBucketOrd, lookupGlobalOrd); + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); + TB spare = null; + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCount.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; } - }); - + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = collectionStrategy.buildEmptyTemporaryBucket(); + } + updater.updateBucket(spare, ordsEnum.value(), ordsEnum.ord(), docCount); + spare = ordered.insertWithOverflow(spare); + } // Get the top buckets - topBucketsPreOrd.set(ordIdx, buildBuckets((int) ordered.size())); + topBucketsPreOrd.set(ordIdx, collectionStrategy.buildBuckets((int) ordered.size())); for (int i = (int) ordered.size() - 1; i >= 0; --i) { checkRealMemoryCBForInternalBucket(); - B bucket = convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); + B bucket = 
collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); topBucketsPreOrd.get(ordIdx)[i] = bucket; otherDocCount.increment(ordIdx, -bucket.getDocCount()); } } } - - buildSubAggs(topBucketsPreOrd); - + collectionStrategy.buildSubAggs(topBucketsPreOrd); return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( Math.toIntExact(owningBucketOrds.size()), - ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCount.get(ordIdx), topBucketsPreOrd.get(ordIdx)) + ordIdx -> collectionStrategy.buildResult( + owningBucketOrds.get(ordIdx), + otherDocCount.get(ordIdx), + topBucketsPreOrd.get(ordIdx) + ) ); } } + } + + /** + * Strategy for building results. + */ + abstract class ResultStrategy< + R extends InternalAggregation, + B extends InternalMultiBucketAggregation.InternalBucket, + TB extends InternalMultiBucketAggregation.InternalBucket> implements Releasable { + /** * Short description of the collection mechanism added to the profile * output to help with debugging. @@ -776,7 +781,7 @@ public void accept(long globalOrd, long bucketOrd, long docCount) throws IOExcep * Update fields in {@code spare} to reflect information collected for * this bucket ordinal. */ - abstract BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) throws IOException; + abstract BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd); /** * Build a {@link PriorityQueue} to sort the buckets. After we've @@ -858,7 +863,7 @@ OrdBucket buildEmptyTemporaryBucket() { } @Override - BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) throws IOException { + BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) { return (spare, globalOrd, bucketOrd, docCount) -> { spare.globalOrd = globalOrd; spare.bucketOrd = bucketOrd; From 4254ca376fb1948daba730315ff0a988374d1c3c Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Wed, 4 Dec 2024 13:00:59 -0500 Subject: [PATCH 21/45] Kderusso/sparse vector ci failure (#117930) (#118014) * Fix CI failure in SparseVectorQueryBuilderTests --- muted-tests.yml | 3 --- .../xpack/core/ml/search/SparseVectorQueryBuilderTests.java | 6 ++++++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/muted-tests.yml b/muted-tests.yml index 7c82b1f0c1b9..a09952e87831 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -411,6 +411,3 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/117805 - class: org.elasticsearch.xpack.security.authc.ldap.UserAttributeGroupsResolverTests issue: https://github.com/elastic/elasticsearch/issues/116537 -- class: org.elasticsearch.xpack.core.ml.search.SparseVectorQueryBuilderTests - method: testToQuery - issue: https://github.com/elastic/elasticsearch/issues/117998 diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java index b5296bef05b7..7774b29bfc97 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/search/SparseVectorQueryBuilderTests.java @@ -232,6 +232,12 @@ public void testToQuery() throws IOException { private void testDoToQuery(SparseVectorQueryBuilder queryBuilder, SearchExecutionContext context) throws IOException { Query query = 
queryBuilder.doToQuery(context); + + // test query builder can randomly have no vectors, which rewrites to a MatchNoneQuery - nothing more to do in this case. + if (query instanceof MatchNoDocsQuery) { + return; + } + assertTrue(query instanceof SparseVectorQueryWrapper); var sparseQuery = (SparseVectorQueryWrapper) query; if (queryBuilder.shouldPruneTokens()) { From ffea5a65fcffe98acb843db4430562d3166c6760 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 4 Dec 2024 14:38:01 -0500 Subject: [PATCH 22/45] Indicate that rescore isn't allowed with retrievers, yet (#118019) (#118022) --- docs/reference/search/retriever.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/retriever.asciidoc b/docs/reference/search/retriever.asciidoc index b90b7e312c79..cb04d4fb6fbf 100644 --- a/docs/reference/search/retriever.asciidoc +++ b/docs/reference/search/retriever.asciidoc @@ -765,11 +765,11 @@ clauses in a <>. [[retriever-restrictions]] ==== Restrictions on search parameters when specifying a retriever -When a retriever is specified as part of a search, the following elements are not allowed at the top-level. -Instead they are only allowed as elements of specific retrievers: +When a retriever is specified as part of a search, the following elements are not allowed at the top-level: * <> * <> * <> * <> * <> +* <> From 570a8cb9f77216ac6daed626a21b0593096d27de Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 4 Dec 2024 15:35:18 -0500 Subject: [PATCH 23/45] ESQL: Limit size of query (#117898) (#118024) Queries bigger than a megabyte tend to take a lot of memory. In the worst case it's an astounding amount of memory. --- docs/changelog/117898.yaml | 5 +++++ .../xpack/esql/heap_attack/HeapAttackIT.java | 21 +++++++++++++++++++ .../xpack/esql/parser/EsqlParser.java | 17 ++++++++++++++- .../xpack/esql/analysis/ParsingTests.java | 8 +++++++ 4 files changed, 50 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/117898.yaml diff --git a/docs/changelog/117898.yaml b/docs/changelog/117898.yaml new file mode 100644 index 000000000000..c60061abc49f --- /dev/null +++ b/docs/changelog/117898.yaml @@ -0,0 +1,5 @@ +pr: 117898 +summary: Limit size of query +area: ES|QL +type: bug +issues: [] diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java index 8b9176a346e3..ace3db377664 100644 --- a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java +++ b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java @@ -194,6 +194,13 @@ private void assertCircuitBreaks(ThrowingRunnable r) throws IOException { ); } + private void assertParseFailure(ThrowingRunnable r) throws IOException { + ResponseException e = expectThrows(ResponseException.class, r); + Map<?, ?> map = responseAsMap(e.getResponse()); + logger.info("expected parse failure {}", map); + assertMap(map, matchesMap().entry("status", 400).entry("error", matchesMap().extraOk().entry("type", "parsing_exception"))); + } + private Response sortByManyLongs(int count) throws IOException { logger.info("sorting by {} longs", count); return query(makeSortByManyLongs(count).toString(), null); @@ -318,6 +325,13 @@ public void testManyConcatFromRow() throws IOException { assertManyStrings(resp,
strings); } + /** + * Fails to parse a huge huge query. + */ + public void testHugeHugeManyConcatFromRow() throws IOException { + assertParseFailure(() -> manyConcat("ROW a=9999, b=9999, c=9999, d=9999, e=9999", 50000)); + } + /** * Tests that generate many moderately long strings. */ @@ -378,6 +392,13 @@ public void testManyRepeatFromRow() throws IOException { assertManyStrings(resp, strings); } + /** + * Fails to parse a huge huge query. + */ + public void testHugeHugeManyRepeatFromRow() throws IOException { + assertParseFailure(() -> manyRepeat("ROW a = 99", 100000)); + } + /** * Tests that generate many moderately long strings. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java index 620a25e0170e..2e55b4df1e22 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java @@ -33,6 +33,15 @@ public class EsqlParser { private static final Logger log = LogManager.getLogger(EsqlParser.class); + /** + * Maximum number of characters in an ESQL query. Antlr may parse the entire + * query into tokens to make the choices, buffering the world. There's a lot we + * can do in the grammar to prevent that, but let's be paranoid and assume we'll + * fail at preventing antlr from slurping in the world. Instead, let's make sure + * that the world just isn't that big. + */ + public static final int MAX_LENGTH = 1_000_000; + private EsqlConfig config = new EsqlConfig(); public EsqlConfig config() { @@ -60,8 +69,14 @@ private T invokeParser( Function parseFunction, BiFunction result ) { + if (query.length() > MAX_LENGTH) { + throw new org.elasticsearch.xpack.esql.core.ParsingException( + "ESQL statement is too large [{} characters > {}]", + query.length(), + MAX_LENGTH + ); + } try { - // new CaseChangingCharStream() EsqlBaseLexer lexer = new EsqlBaseLexer(CharStreams.fromString(query)); lexer.removeErrorListeners(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java index 3cafd42b731f..68529e99c6b1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/ParsingTests.java @@ -103,6 +103,14 @@ public void testInlineCast() throws IOException { logger.info("Wrote to file: {}", file); } + public void testTooBigQuery() { + StringBuilder query = new StringBuilder("FROM foo | EVAL a = a"); + while (query.length() < EsqlParser.MAX_LENGTH) { + query.append(", a = CONCAT(a, a)"); + } + assertEquals("-1:0: ESQL statement is too large [1000011 characters > 1000000]", error(query.toString())); + } + private String functionName(EsqlFunctionRegistry registry, Expression functionCall) { for (FunctionDefinition def : registry.listFunctions()) { if (functionCall.getClass().equals(def.clazz())) { From 44044be213f6dc937b61c2c392a598dbe81bd3b9 Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 5 Dec 2024 09:15:25 +1100 Subject: [PATCH 24/45] Mute org.elasticsearch.xpack.esql.plugin.ClusterRequestTests testFallbackIndicesOptions #117937 --- muted-tests.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/muted-tests.yml b/muted-tests.yml 
index a09952e87831..5ad274212528 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -411,3 +411,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/117805 - class: org.elasticsearch.xpack.security.authc.ldap.UserAttributeGroupsResolverTests issue: https://github.com/elastic/elasticsearch/issues/116537 +- class: org.elasticsearch.xpack.esql.plugin.ClusterRequestTests + method: testFallbackIndicesOptions + issue: https://github.com/elastic/elasticsearch/issues/117937 From 171d7c4c8febb8db6d44e3f8f7642e700935efbd Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 4 Dec 2024 23:31:50 +0100 Subject: [PATCH 25/45] Remove some dead code from SearchPhase and friends (#116645) (#118032) The separate `onFailure` is unnecessary; just fail the phase like we do elsewhere. Also make the utility method static. --- .../action/search/AbstractSearchAsyncAction.java | 9 +-------- .../elasticsearch/action/search/ExpandSearchPhase.java | 8 ++++++-- .../action/search/MockSearchPhaseContext.java | 5 ----- 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 72f17569b78d..96aa459fd998 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -764,7 +764,7 @@ void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connecti * @see #onShardFailure(int, SearchShardTarget, Exception) * @see #onShardResult(SearchPhaseResult, SearchShardIterator) */ - final void onPhaseDone() { // as a tribute to @kimchy aka. finishHim() + private void onPhaseDone() { // as a tribute to @kimchy aka. finishHim() executeNextPhase(this, this::getNextPhase); } @@ -792,13 +792,6 @@ public final void execute(Runnable command) { executor.execute(command); } - /** - * Notifies the top-level listener of the provided exception - */ - public void onFailure(Exception e) { - listener.onFailure(e); - } - /** * Builds a request for the initial search phase.
* diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 8feed2aea00b..e8d94c32bdcc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -102,7 +102,7 @@ private void doRun() { for (InnerHitBuilder innerHitBuilder : innerHitBuilders) { MultiSearchResponse.Item item = it.next(); if (item.isFailure()) { - context.onPhaseFailure(this, "failed to expand hits", item.getFailure()); + phaseFailure(item.getFailure()); return; } SearchHits innerHits = item.getResponse().getHits(); @@ -119,7 +119,11 @@ private void doRun() { } } onPhaseDone(); - }, context::onFailure)); + }, this::phaseFailure)); + } + + private void phaseFailure(Exception ex) { + context.onPhaseFailure(this, "failed to expand hits", ex); } private static SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) { diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 484b3c6b386f..7a38858d8477 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -154,11 +154,6 @@ protected void executePhaseOnShard( }, shardIt); } - @Override - public void onFailure(Exception e) { - Assert.fail("should not be called"); - } - @Override public void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection, OriginalIndices originalIndices) { releasedSearchContexts.add(contextId); From e1304593b2115e4b233b8287a564d245c1f381eb Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 4 Dec 2024 22:55:29 +0000 Subject: [PATCH 26/45] Add option to store `sparse_vector` outside `_source` (#117917) (#118018) This PR introduces an option for `sparse_vector` to store its values separately from `_source` by using term vectors. This capability is primarily needed by the semantic text field.
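For reference, the retrieval mechanism is plain Lucene: at index time the feature value is encoded into the term frequency of a term-vector-enabled field, and at fetch time it is decoded back from the per-document term vector. Below is a minimal, self-contained sketch of that round trip. It is illustrative only and not part of this PR: the class name is made up, and it assumes it sits in the org.elasticsearch.index.mapper.vectors package so it can call the package-private XFeatureField.decodeFeatureValue introduced further down.

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class TermVectorRoundTripSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory(); IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            Document doc = new Document();
            // storeTermVectors=true is what the new `store` mapping parameter turns on
            doc.add(new XFeatureField("ml.tokens", "running", 2.0f, true));
            doc.add(new XFeatureField("ml.tokens", "race", 7.0f, true));
            writer.addDocument(doc);
            writer.commit();
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                // read doc 0's term vector and decode each term's feature value from its frequency
                var terms = reader.termVectors().get(0, "ml.tokens");
                TermsEnum termsEnum = terms.iterator();
                PostingsEnum postings = null;
                while (termsEnum.next() != null) {
                    postings = termsEnum.postings(postings);
                    postings.nextDoc();
                    float value = XFeatureField.decodeFeatureValue(postings.freq());
                    // prints "race => 7.0" then "running => 2.0" (term vector terms iterate in sorted order)
                    System.out.println(termsEnum.term().utf8ToString() + " => " + value);
                }
            }
        }
    }
}

Because the encoding keeps only the top 17 bits of Float.floatToIntBits (sign, exponent, and 8 mantissa bits; see MAX_FREQ and decodeFeatureValue in the fork below), the decoded value is exact only when the low 15 bits of the float's bit pattern are zero, as for the small integral values used in the tests.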
--- docs/changelog/117917.yaml | 5 + .../mapping/types/sparse-vector.asciidoc | 17 ++ .../test/search.vectors/90_sparse_vector.yml | 117 ++++++++++++ .../index/mapper/MapperFeatures.java | 4 +- .../vectors/SparseVectorFieldMapper.java | 155 ++++++++++++++- .../index/mapper/vectors/XFeatureField.java | 177 ++++++++++++++++++ .../vectors/SparseVectorFieldMapperTests.java | 135 +++++++++++-- .../vectors/SparseVectorFieldTypeTests.java | 4 +- .../mapper/SemanticTextFieldMapperTests.java | 4 +- 9 files changed, 589 insertions(+), 29 deletions(-) create mode 100644 docs/changelog/117917.yaml create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/vectors/XFeatureField.java diff --git a/docs/changelog/117917.yaml b/docs/changelog/117917.yaml new file mode 100644 index 000000000000..b6dc90f6b903 --- /dev/null +++ b/docs/changelog/117917.yaml @@ -0,0 +1,5 @@ +pr: 117917 +summary: Add option to store `sparse_vector` outside `_source` +area: Mapping +type: feature +issues: [] diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index b24f65fcf97c..22d4644ede49 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -26,6 +26,23 @@ PUT my-index See <> for a complete example on adding documents to a `sparse_vector` mapped field using ELSER. +[[sparse-vectors-params]] +==== Parameters for `sparse_vector` fields + +The following parameters are accepted by `sparse_vector` fields: + +[horizontal] + +<>:: + +Indicates whether the field value should be stored and retrievable independently of the <> field. +Accepted values: true or false (default). +The field's data is stored using term vectors, a disk-efficient structure compared to the original JSON input. +The input map can be retrieved during a search request via the <>. +To benefit from reduced disk usage, you must either: + * Exclude the field from <>. + * Use <>. 
+ [[index-multi-value-sparse-vectors]] ==== Multi-value sparse vectors diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml index 2505e6d7e353..0b65a69bf500 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml @@ -472,3 +472,120 @@ - match: _source.ml.tokens: {} + +--- +"stored sparse_vector": + + - requires: + cluster_features: [ "mapper.sparse_vector.store_support" ] + reason: "sparse_vector supports store parameter" + + - do: + indices.create: + index: test + body: + mappings: + properties: + ml.tokens: + type: sparse_vector + store: true + + - match: { acknowledged: true } + - do: + index: + index: test + id: "1" + body: + ml: + tokens: + running: 2 + good: 3 + run: 5 + race: 7 + for: 9 + + - match: { result: "created" } + + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + fields: [ "ml.tokens" ] + + - length: { hits.hits.0.fields.ml\\.tokens: 1 } + - length: { hits.hits.0.fields.ml\\.tokens.0: 5 } + - match: { hits.hits.0.fields.ml\\.tokens.0.running: 2.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.good: 3.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.run: 5.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.race: 7.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.for: 9.0 } + +--- +"stored sparse_vector synthetic source": + + - requires: + cluster_features: [ "mapper.source.mode_from_index_setting", "mapper.sparse_vector.store_support" ] + reason: "sparse_vector supports store parameter" + + - do: + indices.create: + index: test + body: + settings: + index: + mapping.source.mode: synthetic + mappings: + properties: + ml.tokens: + type: sparse_vector + store: true + + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + body: + ml: + tokens: + running: 2 + good: 3 + run: 5 + race: 7 + for: 9 + + - match: { result: "created" } + + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + fields: [ "ml.tokens" ] + + - match: + hits.hits.0._source: { + ml: { + tokens: { + running: 2.0, + good: 3.0, + run: 5.0, + race: 7.0, + for: 9.0 + } + } + } + + - length: { hits.hits.0.fields.ml\\.tokens: 1 } + - length: { hits.hits.0.fields.ml\\.tokens.0: 5 } + - match: { hits.hits.0.fields.ml\\.tokens.0.running: 2.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.good: 3.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.run: 5.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.race: 7.0 } + - match: { hits.hits.0.fields.ml\\.tokens.0.for: 9.0 } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 1f310287ef74..193312477dd0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -56,6 +56,7 @@ public Set getFeatures() { ); public static final NodeFeature META_FETCH_FIELDS_ERROR_CODE_CHANGED = new NodeFeature("meta_fetch_fields_error_code_changed"); + public static final NodeFeature SPARSE_VECTOR_STORE_SUPPORT = new NodeFeature("mapper.sparse_vector.store_support"); @Override public Set getTestFeatures() { @@ -68,7 +69,8 @@ public Set getTestFeatures() { 
MapperService.LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT, DocumentParser.FIX_PARSING_SUBOBJECTS_FALSE_DYNAMIC_FALSE, CONSTANT_KEYWORD_SYNTHETIC_SOURCE_WRITE_FIX, - META_FETCH_FIELDS_ERROR_CODE_CHANGED + META_FETCH_FIELDS_ERROR_CODE_CHANGED, + SPARSE_VECTOR_STORE_SUPPORT ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java index d0a8dfae4f24..62740b0fc380 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java @@ -11,6 +11,12 @@ import org.apache.lucene.document.FeatureField; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.TermVectors; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; @@ -25,14 +31,22 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; +import java.util.stream.Stream; import static org.elasticsearch.index.query.AbstractQueryBuilder.DEFAULT_BOOST; @@ -52,8 +66,12 @@ public class SparseVectorFieldMapper extends FieldMapper { static final IndexVersion NEW_SPARSE_VECTOR_INDEX_VERSION = IndexVersions.NEW_SPARSE_VECTOR; static final IndexVersion SPARSE_VECTOR_IN_FIELD_NAMES_INDEX_VERSION = IndexVersions.SPARSE_VECTOR_IN_FIELD_NAMES_SUPPORT; - public static class Builder extends FieldMapper.Builder { + private static SparseVectorFieldMapper toType(FieldMapper in) { + return (SparseVectorFieldMapper) in; + } + public static class Builder extends FieldMapper.Builder { + private final Parameter stored = Parameter.storeParam(m -> toType(m).fieldType().isStored(), false); private final Parameter> meta = Parameter.metaParam(); public Builder(String name) { @@ -62,14 +80,14 @@ public Builder(String name) { @Override protected Parameter[] getParameters() { - return new Parameter[] { meta }; + return new Parameter[] { stored, meta }; } @Override public SparseVectorFieldMapper build(MapperBuilderContext context) { return new SparseVectorFieldMapper( leafName(), - new SparseVectorFieldType(context.buildFullName(leafName()), meta.getValue()), + new SparseVectorFieldType(context.buildFullName(leafName()), stored.getValue(), meta.getValue()), builderParams(this, context) ); } @@ -87,8 +105,8 @@ public SparseVectorFieldMapper build(MapperBuilderContext context) { public static final class SparseVectorFieldType extends MappedFieldType { 
- public SparseVectorFieldType(String name, Map meta) { - super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); + public SparseVectorFieldType(String name, boolean isStored, Map meta) { + super(name, true, isStored, false, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); } @Override @@ -103,6 +121,9 @@ public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext @Override public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + if (isStored()) { + return new SparseVectorValueFetcher(name()); + } return SourceValueFetcher.identity(name(), context, format); } @@ -135,6 +156,14 @@ private SparseVectorFieldMapper(String simpleName, MappedFieldType mappedFieldTy super(simpleName, mappedFieldType, builderParams); } + @Override + protected SyntheticSourceSupport syntheticSourceSupport() { + if (fieldType().isStored()) { + return new SyntheticSourceSupport.Native(new SparseVectorSyntheticFieldLoader(fullPath(), leafName())); + } + return super.syntheticSourceSupport(); + } + @Override public Map indexAnalyzers() { return Map.of(mappedFieldType.name(), Lucene.KEYWORD_ANALYZER); @@ -189,9 +218,9 @@ public void parse(DocumentParserContext context) throws IOException { // based on recommendations from this paper: https://arxiv.org/pdf/2305.18494.pdf IndexableField currentField = context.doc().getByKey(key); if (currentField == null) { - context.doc().addWithKey(key, new FeatureField(fullPath(), feature, value)); - } else if (currentField instanceof FeatureField && ((FeatureField) currentField).getFeatureValue() < value) { - ((FeatureField) currentField).setFeatureValue(value); + context.doc().addWithKey(key, new XFeatureField(fullPath(), feature, value, fieldType().isStored())); + } else if (currentField instanceof XFeatureField && ((XFeatureField) currentField).getFeatureValue() < value) { + ((XFeatureField) currentField).setFeatureValue(value); } } else { throw new IllegalArgumentException( @@ -219,4 +248,114 @@ protected String contentType() { return CONTENT_TYPE; } + private static class SparseVectorValueFetcher implements ValueFetcher { + private final String fieldName; + private TermVectors termVectors; + + private SparseVectorValueFetcher(String fieldName) { + this.fieldName = fieldName; + } + + @Override + public void setNextReader(LeafReaderContext context) { + try { + termVectors = context.reader().termVectors(); + } catch (IOException exc) { + throw new UncheckedIOException(exc); + } + } + + @Override + public List fetchValues(Source source, int doc, List ignoredValues) throws IOException { + if (termVectors == null) { + return List.of(); + } + var terms = termVectors.get(doc, fieldName); + if (terms == null) { + return List.of(); + } + + var termsEnum = terms.iterator(); + PostingsEnum postingsScratch = null; + Map result = new LinkedHashMap<>(); + while (termsEnum.next() != null) { + postingsScratch = termsEnum.postings(postingsScratch); + postingsScratch.nextDoc(); + result.put(termsEnum.term().utf8ToString(), XFeatureField.decodeFeatureValue(postingsScratch.freq())); + assert postingsScratch.nextDoc() == DocIdSetIterator.NO_MORE_DOCS; + } + return List.of(result); + } + + @Override + public StoredFieldsSpec storedFieldsSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + } + + private static class SparseVectorSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + private final String fullPath; + private final String leafName; + + private TermsEnum termsDocEnum; + + private 
SparseVectorSyntheticFieldLoader(String fullPath, String leafName) { + this.fullPath = fullPath; + this.leafName = leafName; + } + + @Override + public Stream> storedFieldLoaders() { + return Stream.of(); + } + + @Override + public DocValuesLoader docValuesLoader(LeafReader leafReader, int[] docIdsInLeaf) throws IOException { + var fieldInfos = leafReader.getFieldInfos().fieldInfo(fullPath); + if (fieldInfos == null || fieldInfos.hasVectors() == false) { + return null; + } + return docId -> { + var terms = leafReader.termVectors().get(docId, fullPath); + if (terms == null) { + return false; + } + termsDocEnum = terms.iterator(); + if (termsDocEnum.next() == null) { + termsDocEnum = null; + return false; + } + return true; + }; + } + + @Override + public boolean hasValue() { + return termsDocEnum != null; + } + + @Override + public void write(XContentBuilder b) throws IOException { + assert termsDocEnum != null; + PostingsEnum reuse = null; + b.startObject(leafName); + do { + reuse = termsDocEnum.postings(reuse); + reuse.nextDoc(); + b.field(termsDocEnum.term().utf8ToString(), XFeatureField.decodeFeatureValue(reuse.freq())); + } while (termsDocEnum.next() != null); + b.endObject(); + } + + @Override + public String fieldName() { + return leafName; + } + + @Override + public void reset() { + termsDocEnum = null; + } + } + } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/XFeatureField.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/XFeatureField.java new file mode 100644 index 000000000000..5f4afb4a86ac --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/XFeatureField.java @@ -0,0 +1,177 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.elasticsearch.index.mapper.vectors; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; + +/** + * This class is forked from the Lucene {@link FeatureField} implementation to enable support for storing term vectors. + * It should be removed once apache/lucene#14034 becomes available. 
+ */ +public final class XFeatureField extends Field { + private static final FieldType FIELD_TYPE = new FieldType(); + private static final FieldType FIELD_TYPE_STORE_TERM_VECTORS = new FieldType(); + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + + FIELD_TYPE_STORE_TERM_VECTORS.setTokenized(false); + FIELD_TYPE_STORE_TERM_VECTORS.setOmitNorms(true); + FIELD_TYPE_STORE_TERM_VECTORS.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + FIELD_TYPE_STORE_TERM_VECTORS.setStoreTermVectors(true); + } + + private float featureValue; + + /** + * Create a feature. + * + * @param fieldName The name of the field to store the information into. All features may be + * stored in the same field. + * @param featureName The name of the feature, eg. 'pagerank`. It will be indexed as a term. + * @param featureValue The value of the feature, must be a positive, finite, normal float. + */ + public XFeatureField(String fieldName, String featureName, float featureValue) { + this(fieldName, featureName, featureValue, false); + } + + /** + * Create a feature. + * + * @param fieldName The name of the field to store the information into. All features may be + * stored in the same field. + * @param featureName The name of the feature, eg. 'pagerank`. It will be indexed as a term. + * @param featureValue The value of the feature, must be a positive, finite, normal float. + */ + public XFeatureField(String fieldName, String featureName, float featureValue, boolean storeTermVectors) { + super(fieldName, featureName, storeTermVectors ? FIELD_TYPE_STORE_TERM_VECTORS : FIELD_TYPE); + setFeatureValue(featureValue); + } + + /** + * Update the feature value of this field. + */ + public void setFeatureValue(float featureValue) { + if (Float.isFinite(featureValue) == false) { + throw new IllegalArgumentException( + "featureValue must be finite, got: " + featureValue + " for feature " + fieldsData + " on field " + name + ); + } + if (featureValue < Float.MIN_NORMAL) { + throw new IllegalArgumentException( + "featureValue must be a positive normal float, got: " + + featureValue + + " for feature " + + fieldsData + + " on field " + + name + + " which is less than the minimum positive normal float: " + + Float.MIN_NORMAL + ); + } + this.featureValue = featureValue; + } + + @Override + public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) { + FeatureTokenStream stream; + if (reuse instanceof FeatureTokenStream) { + stream = (FeatureTokenStream) reuse; + } else { + stream = new FeatureTokenStream(); + } + + int freqBits = Float.floatToIntBits(featureValue); + stream.setValues((String) fieldsData, freqBits >>> 15); + return stream; + } + + /** + * This is useful if you have multiple features sharing a name and you want to take action to + * deduplicate them. + * + * @return the feature value of this field. 
+ */ + public float getFeatureValue() { + return featureValue; + } + + private static final class FeatureTokenStream extends TokenStream { + private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class); + private final TermFrequencyAttribute freqAttribute = addAttribute(TermFrequencyAttribute.class); + private boolean used = true; + private String value = null; + private int freq = 0; + + private FeatureTokenStream() {} + + /** + * Sets the values + */ + void setValues(String value, int freq) { + this.value = value; + this.freq = freq; + } + + @Override + public boolean incrementToken() { + if (used) { + return false; + } + clearAttributes(); + termAttribute.append(value); + freqAttribute.setTermFrequency(freq); + used = true; + return true; + } + + @Override + public void reset() { + used = false; + } + + @Override + public void close() { + value = null; + } + } + + static final int MAX_FREQ = Float.floatToIntBits(Float.MAX_VALUE) >>> 15; + + static float decodeFeatureValue(float freq) { + if (freq > MAX_FREQ) { + // This is never used in practice but callers of the SimScorer API might + // occasionally call it on eg. Float.MAX_VALUE to compute the max score + // so we need to be consistent. + return Float.MAX_VALUE; + } + int tf = (int) freq; // lossless + int featureBits = tf << 15; + return Float.intBitsToFloat(featureBits); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java index 8caab46ef33a..2bb351ae6495 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java @@ -11,19 +11,26 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; -import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperTestCase; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.search.lookup.Source; import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -33,18 +40,25 @@ import java.io.IOException; import java.util.Arrays; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import static org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper.NEW_SPARSE_VECTOR_INDEX_VERSION; import static 
org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper.PREVIOUS_SPARSE_VECTOR_INDEX_VERSION; +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class SparseVectorFieldMapperTests extends MapperTestCase { @Override protected Object getSampleValueForDocument() { - return Map.of("ten", 10, "twenty", 20); + Map map = new LinkedHashMap<>(); + map.put("ten", 10f); + map.put("twenty", 20f); + return map; } @Override @@ -92,14 +106,18 @@ public void testDefaults() throws Exception { List fields = doc1.rootDoc().getFields("field"); assertEquals(2, fields.size()); - assertThat(fields.get(0), Matchers.instanceOf(FeatureField.class)); - FeatureField featureField1 = null; - FeatureField featureField2 = null; + if (IndexVersion.current().luceneVersion().major == 10) { + // TODO: Update to use Lucene's FeatureField after upgrading to Lucene 10.1. + assertThat(IndexVersion.current().luceneVersion().minor, equalTo(0)); + } + assertThat(fields.get(0), Matchers.instanceOf(XFeatureField.class)); + XFeatureField featureField1 = null; + XFeatureField featureField2 = null; for (IndexableField field : fields) { if (field.stringValue().equals("ten")) { - featureField1 = (FeatureField) field; + featureField1 = (XFeatureField) field; } else if (field.stringValue().equals("twenty")) { - featureField2 = (FeatureField) field; + featureField2 = (XFeatureField) field; } else { throw new UnsupportedOperationException(); } @@ -116,14 +134,14 @@ public void testDotInFieldName() throws Exception { List fields = parsedDocument.rootDoc().getFields("field"); assertEquals(2, fields.size()); - assertThat(fields.get(0), Matchers.instanceOf(FeatureField.class)); - FeatureField featureField1 = null; - FeatureField featureField2 = null; + assertThat(fields.get(0), Matchers.instanceOf(XFeatureField.class)); + XFeatureField featureField1 = null; + XFeatureField featureField2 = null; for (IndexableField field : fields) { if (field.stringValue().equals("foo.bar")) { - featureField1 = (FeatureField) field; + featureField1 = (XFeatureField) field; } else if (field.stringValue().equals("foobar")) { - featureField2 = (FeatureField) field; + featureField2 = (XFeatureField) field; } else { throw new UnsupportedOperationException(); } @@ -171,13 +189,13 @@ public void testHandlesMultiValuedFields() throws MapperParsingException, IOExce })); // then validate that the generate document stored both values appropriately and we have only the max value stored - FeatureField barField = ((FeatureField) doc1.rootDoc().getByKey("foo.field\\.bar")); + XFeatureField barField = ((XFeatureField) doc1.rootDoc().getByKey("foo.field\\.bar")); assertEquals(20, barField.getFeatureValue(), 1); - FeatureField storedBarField = ((FeatureField) doc1.rootDoc().getFields("foo.field").get(1)); + XFeatureField storedBarField = ((XFeatureField) doc1.rootDoc().getFields("foo.field").get(1)); assertEquals(20, storedBarField.getFeatureValue(), 1); - assertEquals(3, doc1.rootDoc().getFields().stream().filter((f) -> f instanceof FeatureField).count()); + assertEquals(3, doc1.rootDoc().getFields().stream().filter((f) -> f instanceof XFeatureField).count()); } public void testCannotBeUsedInMultiFields() { @@ -192,6 +210,53 @@ public void testCannotBeUsedInMultiFields() { assertThat(e.getMessage(), containsString("Field [feature] of type [sparse_vector] can't be used in multifields")); } + public void 
testStoreIsNotUpdateable() throws IOException { + var mapperService = createMapperService(fieldMapping(this::minimalMapping)); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("field") + .field("type", "sparse_vector") + .field("store", true) + .endObject() + .endObject() + .endObject() + .endObject(); + var exc = expectThrows( + Exception.class, + () -> mapperService.merge("_doc", new CompressedXContent(Strings.toString(mapping)), MapperService.MergeReason.MAPPING_UPDATE) + ); + assertThat(exc.getMessage(), containsString("Cannot update parameter [store]")); + } + + @SuppressWarnings("unchecked") + public void testValueFetcher() throws Exception { + for (boolean store : new boolean[] { true, false }) { + var mapperService = createMapperService(fieldMapping(store ? this::minimalStoreMapping : this::minimalMapping)); + var mapper = mapperService.documentMapper(); + try (Directory directory = newDirectory()) { + RandomIndexWriter iw = new RandomIndexWriter(random(), directory); + var sourceToParse = source(this::writeField); + ParsedDocument doc1 = mapper.parse(sourceToParse); + iw.addDocument(doc1.rootDoc()); + iw.close(); + try (DirectoryReader reader = wrapInMockESDirectoryReader(DirectoryReader.open(directory))) { + LeafReader leafReader = getOnlyLeafReader(reader); + var searchContext = createSearchExecutionContext(mapperService, new IndexSearcher(leafReader)); + var fieldType = mapper.mappers().getFieldType("field"); + var valueFetcher = fieldType.valueFetcher(searchContext, null); + valueFetcher.setNextReader(leafReader.getContext()); + + var source = Source.fromBytes(sourceToParse.source()); + var result = valueFetcher.fetchValues(source, 0, List.of()); + assertThat(result.size(), equalTo(1)); + assertThat(result.get(0), instanceOf(Map.class)); + assertThat(toFloats((Map) result.get(0)), equalTo(toFloats((Map) source.source().get("field")))); + } + } + } + } + @Override protected Object generateRandomInputValue(MappedFieldType ft) { assumeFalse("Test implemented in a follow up", true); @@ -205,7 +270,29 @@ protected boolean allowsNullValues() { @Override protected SyntheticSourceSupport syntheticSourceSupport(boolean syntheticSource) { - throw new AssumptionViolatedException("not supported"); + boolean withStore = randomBoolean(); + return new SyntheticSourceSupport() { + @Override + public boolean preservesExactSource() { + return withStore == false; + } + + @Override + public SyntheticSourceExample example(int maxValues) { + return new SyntheticSourceExample(getSampleValueForDocument(), getSampleValueForDocument(), b -> { + if (withStore) { + minimalStoreMapping(b); + } else { + minimalMapping(b); + } + }); + } + + @Override + public List invalidExample() { + return List.of(); + } + }; } @Override @@ -276,4 +363,20 @@ public void testSparseVectorUnsupportedIndex() throws Exception { }))); assertThat(e.getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE_8X)); } + + /** + * Handles float/double conversion when reading/writing with xcontent by converting all numbers to floats. 
+ */ + private Map toFloats(Map value) { + // preserve order + Map result = new LinkedHashMap<>(); + for (var entry : value.entrySet()) { + if (entry.getValue() instanceof Number num) { + result.put(entry.getKey(), num.floatValue()); + } else { + throw new IllegalArgumentException("Expected Number, got: " + entry.getValue().getClass().getSimpleName()); + } + } + return result; + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java index 4627d4d87195..0dbe3817c3e8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java @@ -18,13 +18,13 @@ public class SparseVectorFieldTypeTests extends FieldTypeTestCase { public void testDocValuesDisabled() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); + MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", false, Collections.emptyMap()); assertFalse(fieldType.hasDocValues()); expectThrows(IllegalArgumentException.class, () -> fieldType.fielddataBuilder(FieldDataContext.noRuntimeFields("test"))); } public void testIsNotAggregatable() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); + MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", false, Collections.emptyMap()); assertFalse(fieldType.isAggregatable()); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java index 18096ebee4f0..ef70dbe401e5 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapperTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.inference.mapper; -import org.apache.lucene.document.FeatureField; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexableField; @@ -47,6 +46,7 @@ import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper; +import org.elasticsearch.index.mapper.vectors.XFeatureField; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; import org.elasticsearch.inference.Model; @@ -1130,7 +1130,7 @@ private static void assertChildLeafNestedDocument( private static void assertSparseFeatures(LuceneDocument doc, String fieldName, int expectedCount) { int count = 0; for (IndexableField field : doc.getFields()) { - if (field instanceof FeatureField featureField) { + if (field instanceof XFeatureField featureField) { assertThat(featureField.name(), equalTo(fieldName)); ++count; } From b931c7c798b8ecd3d7358ee8b1b1b982f2bb6910 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Wed, 4 Dec 2024 20:39:36 -0500 Subject: [PATCH 27/45] ESQL Date Nanos Addition and Subtraction (#116839) (#117848) Resolves #109995 This adds support and tests for addition and subtraction of date
nanos with periods and durations. It does not include support for date_diff, which is a separate ticket (#109999). The bulk of the PR is testing, the actual date math is all handled by library functions. --------- Co-authored-by: Elastic Machine --- .../esql/functions/kibana/definition/add.json | 72 ++++ .../esql/functions/kibana/definition/sub.json | 72 ++++ .../esql/functions/types/add.asciidoc | 4 + .../esql/functions/types/sub.asciidoc | 4 + .../xpack/esql/core/type/DataType.java | 8 + .../src/main/resources/date_nanos.csv-spec | 401 ++++++++++++++++++ .../arithmetic/AddDateNanosEvaluator.java | 142 +++++++ .../arithmetic/SubDateNanosEvaluator.java | 142 +++++++ .../xpack/esql/action/EsqlCapabilities.java | 4 + .../predicate/operator/arithmetic/Add.java | 34 +- .../DateTimeArithmeticOperation.java | 42 +- .../predicate/operator/arithmetic/Sub.java | 28 +- .../esql/type/EsqlDataTypeConverter.java | 7 +- .../xpack/esql/analysis/AnalyzerTests.java | 6 +- .../xpack/esql/analysis/VerifierTests.java | 4 +- .../expression/function/TestCaseSupplier.java | 95 ++++- .../operator/arithmetic/AddTests.java | 55 ++- .../operator/arithmetic/SubTests.java | 72 +++- .../esql/type/EsqlDataTypeConverterTests.java | 21 +- 19 files changed, 1152 insertions(+), 61 deletions(-) create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDateNanosEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDateNanosEvaluator.java diff --git a/docs/reference/esql/functions/kibana/definition/add.json b/docs/reference/esql/functions/kibana/definition/add.json index bd9fbf4d4f9e..cfb4755a93d5 100644 --- a/docs/reference/esql/functions/kibana/definition/add.json +++ b/docs/reference/esql/functions/kibana/definition/add.json @@ -40,6 +40,42 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { @@ -58,6 +94,24 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { @@ -256,6 +310,24 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." 
+ } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/kibana/definition/sub.json b/docs/reference/esql/functions/kibana/definition/sub.json index e10e5a662c8c..608b5eb1009a 100644 --- a/docs/reference/esql/functions/kibana/definition/sub.json +++ b/docs/reference/esql/functions/kibana/definition/sub.json @@ -40,6 +40,60 @@ "variadic" : false, "returnType" : "date" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, + { + "params" : [ + { + "name" : "lhs", + "type" : "date_period", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." + } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { @@ -220,6 +274,24 @@ "variadic" : false, "returnType" : "long" }, + { + "params" : [ + { + "name" : "lhs", + "type" : "time_duration", + "optional" : false, + "description" : "A numeric value or a date time value." + }, + { + "name" : "rhs", + "type" : "date_nanos", + "optional" : false, + "description" : "A numeric value or a date time value." 
+ } + ], + "variadic" : false, + "returnType" : "date_nanos" + }, { "params" : [ { diff --git a/docs/reference/esql/functions/types/add.asciidoc b/docs/reference/esql/functions/types/add.asciidoc index 54d1aec463c1..e47a0d81f27e 100644 --- a/docs/reference/esql/functions/types/add.asciidoc +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -7,7 +7,10 @@ lhs | rhs | result date | date_period | date date | time_duration | date +date_nanos | date_period | date_nanos +date_nanos | time_duration | date_nanos date_period | date | date +date_period | date_nanos | date_nanos date_period | date_period | date_period double | double | double double | integer | double @@ -19,6 +22,7 @@ long | double | double long | integer | long long | long | long time_duration | date | date +time_duration | date_nanos | date_nanos time_duration | time_duration | time_duration unsigned_long | unsigned_long | unsigned_long |=== diff --git a/docs/reference/esql/functions/types/sub.asciidoc b/docs/reference/esql/functions/types/sub.asciidoc index c3ded301ebe6..dca56026071e 100644 --- a/docs/reference/esql/functions/types/sub.asciidoc +++ b/docs/reference/esql/functions/types/sub.asciidoc @@ -7,6 +7,9 @@ lhs | rhs | result date | date_period | date date | time_duration | date +date_nanos | date_period | date_nanos +date_nanos | time_duration | date_nanos +date_period | date_nanos | date_nanos date_period | date_period | date_period double | double | double double | integer | double @@ -17,6 +20,7 @@ integer | long | long long | double | double long | integer | long long | long | long +time_duration | date_nanos | date_nanos time_duration | time_duration | time_duration unsigned_long | unsigned_long | unsigned_long |=== diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index 1c65dd386667..a63571093ba5 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -415,6 +415,14 @@ public static boolean isDateTimeOrTemporal(DataType t) { return isDateTime(t) || isTemporalAmount(t); } + public static boolean isDateTimeOrNanosOrTemporal(DataType t) { + return isDateTime(t) || isTemporalAmount(t) || t == DATE_NANOS; + } + + public static boolean isMillisOrNanos(DataType t) { + return t == DATETIME || t == DATE_NANOS; + } + public static boolean areCompatible(DataType left, DataType right) { if (left == right) { return true; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index eeeec6ae55aa..cf6236afb801 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -458,3 +458,404 @@ yr:date_nanos | mo:date_nanos | mn:date_nanos 2023-01-01T00:00:00.000000000Z | 2023-10-01T00:00:00.000000000Z | 2023-10-23T12:10:00.000000000Z | 2023-10-23T12:15:03.360000000Z 2023-01-01T00:00:00.000000000Z | 2023-10-01T00:00:00.000000000Z | 2023-10-23T12:10:00.000000000Z | 2023-10-23T12:15:03.360000000Z ; + +Add date nanos +required_capability: date_nanos_add_subtract + +FROM date_nanos +| WHERE millis > "2020-01-01" +| EVAL mo = nanos + 1 month, hr = nanos + 1 hour, dy = nanos - 4 days, mn = nanos - 2 minutes +| SORT millis DESC +| KEEP 
mo, hr, dy, mn; + +mo:date_nanos | hr:date_nanos | dy:date_nanos | mn:date_nanos +2023-11-23T13:55:01.543123456Z | 2023-10-23T14:55:01.543123456Z | 2023-10-19T13:55:01.543123456Z | 2023-10-23T13:53:01.543123456Z +2023-11-23T13:53:55.832987654Z | 2023-10-23T14:53:55.832987654Z | 2023-10-19T13:53:55.832987654Z | 2023-10-23T13:51:55.832987654Z +2023-11-23T13:52:55.015787878Z | 2023-10-23T14:52:55.015787878Z | 2023-10-19T13:52:55.015787878Z | 2023-10-23T13:50:55.015787878Z +2023-11-23T13:51:54.732102837Z | 2023-10-23T14:51:54.732102837Z | 2023-10-19T13:51:54.732102837Z | 2023-10-23T13:49:54.732102837Z +2023-11-23T13:33:34.937193000Z | 2023-10-23T14:33:34.937193000Z | 2023-10-19T13:33:34.937193000Z | 2023-10-23T13:31:34.937193000Z +2023-11-23T12:27:28.948000000Z | 2023-10-23T13:27:28.948000000Z | 2023-10-19T12:27:28.948000000Z | 2023-10-23T12:25:28.948000000Z +2023-11-23T12:15:03.360103847Z | 2023-10-23T13:15:03.360103847Z | 2023-10-19T12:15:03.360103847Z | 2023-10-23T12:13:03.360103847Z +2023-11-23T12:15:03.360103847Z | 2023-10-23T13:15:03.360103847Z | 2023-10-19T12:15:03.360103847Z | 2023-10-23T12:13:03.360103847Z +; + +datePlusPeriod +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T01:01:01.000123456Z") +| eval plus = dt + 4 years + 3 months + 2 weeks + 1 day; + +dt:date_nanos | plus:date_nanos +2100-01-01T01:01:01.000123456Z | 2104-04-16T01:01:01.000123456Z +; + +datePlusPeriodFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") | eval then = 4 years + 3 months + 2 weeks + 1 day + n | keep then; + +then:date_nanos +2057-07-19T00:00:00.000123456Z +; + +datePlusMixedPeriodsFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-01T00:00:00.000123456Z") +| eval then = 4 years + 3 months + 1 year + 2 weeks + 1 month + 1 day + 1 week + 1 day + n +| keep then; + +then:date_nanos +2058-08-24T00:00:00.000123456Z +; + +datePlusSumOfPeriodsFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") | eval then = (4 years + 3 months + 2 weeks + 1 day) + n | keep then; + +then:date_nanos +2057-07-19T00:00:00.000123456Z +; + +datePlusNegatedPeriod +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2104-04-16T01:01:01.000123456Z") +| eval plus = dt + (-(4 years + 3 months + 2 weeks + 1 day)); + +dt:date_nanos | plus:date_nanos +2104-04-16T01:01:01.000123456Z | 2100-01-01T01:01:01.000123456Z +; + +dateMinusPeriod +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2104-04-16T01:01:01.000123456Z") +| eval minus = dt - 4 years - 3 months - 2 weeks - 1 day; + +dt:date_nanos | minus:date_nanos +2104-04-16T01:01:01.000123456Z | 2100-01-01T01:01:01.000123456Z +; + +dateMinusPeriodFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2057-07-19T00:00:00.000123456Z") | eval then = -4 years - 3 months - 2 weeks - 1 day + n | keep then; + +then:date_nanos +2053-04-04T00:00:00.000123456Z +; + +dateMinusSumOfNegativePeriods +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") | eval then = n - (-4 years - 3 months - 2 weeks - 1 day)| keep then; + +then:date_nanos +2057-07-19T00:00:00.000123456Z +; + +dateMinusPeriodsFromLeftMultipleEvals +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") +| eval x = -4 years + n +| eval y = -3 months + x, then = y + (-2 weeks - 1 
day) +| keep then; + +then:date_nanos +2048-12-20T00:00:00.000123456Z +; + +datePlusDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T00:00:00.000123456Z") +| eval plus = dt + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:date_nanos | plus:date_nanos +2100-01-01T00:00:00.000123456Z | 2100-01-01T01:01:01.001123456Z +; + +datePlusDurationFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") | eval then = 1 hour + 1 minute + 1 second + 1 milliseconds + n | keep then; + +then:date_nanos +2053-04-04T01:01:01.001123456Z +; + +datePlusMixedDurationsFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") +| eval then = 1 hour + 1 minute + 2 hour + 1 second + 2 minute + 1 milliseconds + 2 second + 2 millisecond + n +| keep then; + +then:date_nanos +2053-04-04T03:03:03.003123456Z +; + +datePlusSumOfDurationsFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") | eval then = (1 hour + 1 minute + 1 second + 1 milliseconds) + n | keep then; + +then:date_nanos +2053-04-04T01:01:01.001123456Z +; + +datePlusNegatedDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T01:01:01.001123456Z") +| eval plus = dt + (-(1 hour + 1 minute + 1 second + 1 milliseconds)); + +dt:date_nanos | plus:date_nanos +2100-01-01T01:01:01.001123456Z | 2100-01-01T00:00:00.000123456Z +; + +datePlusNull +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T01:01:01.001123456Z") +| eval plus_post = dt + null, plus_pre = null + dt; + +dt:date_nanos | plus_post:date_nanos | plus_pre:date_nanos +2100-01-01T01:01:01.001123456Z | null | null +; + +datePlusNullAndDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T01:01:01.001123456Z") +| eval plus_post = dt + null + 1 hour, plus_pre = 1 second + null + dt; + +dt:date_nanos | plus_post:date_nanos | plus_pre:date_nanos +2100-01-01T01:01:01.001123456Z | null | null +; + +datePlusNullAndPeriod +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T01:01:01.001123456Z") +| eval plus_post = dt + null + 2 years, plus_pre = 3 weeks + null + dt; + +dt:date_nanos | plus_post:date_nanos | plus_pre:date_nanos +2100-01-01T01:01:01.001123456Z | null | null +; + +datePlusQuarter +required_capability: date_nanos_add_subtract + +required_capability: timespan_abbreviations +row dt = to_date_nanos("2100-01-01T01:01:01.000123456Z") +| eval plusQuarter = dt + 2 quarters +; + +dt:date_nanos | plusQuarter:date_nanos +2100-01-01T01:01:01.000123456Z | 2100-07-01T01:01:01.000123456Z +; + +datePlusAbbreviatedDurations +required_capability: timespan_abbreviations +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T00:00:00.000123456Z") +| eval plusDurations = dt + 1 h + 2 min + 2 sec + 1 s + 4 ms +; + +dt:date_nanos | plusDurations:date_nanos +2100-01-01T00:00:00.000123456Z | 2100-01-01T01:02:03.004123456Z +; + +datePlusAbbreviatedPeriods +required_capability: timespan_abbreviations +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T00:00:00.000123456Z") +| eval plusDurations = dt + 0 yr + 1y + 2 q + 3 mo + 4 w + 3 d +; + +dt:date_nanos | plusDurations:date_nanos +2100-01-01T00:00:00.000123456Z | 2101-11-01T00:00:00.000123456Z +; + + +dateMinusDuration +required_capability: 
date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T01:01:01.001123456Z") +| eval minus = dt - 1 hour - 1 minute - 1 second - 1 milliseconds; + +dt:date_nanos | minus:date_nanos +2100-01-01T01:01:01.001123456Z | 2100-01-01T00:00:00.000123456Z +; + +dateMinusDurationFromLeft +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T01:01:01.001123456Z") | eval then = -1 hour - 1 minute - 1 second - 1 milliseconds + n | keep then; + +then:date_nanos +2053-04-04T00:00:00.000123456Z +; + +dateMinusSumOfNegativeDurations +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T00:00:00.000123456Z") | eval then = n - (-1 hour - 1 minute - 1 second - 1 milliseconds) | keep then; + +then:date_nanos +2053-04-04T01:01:01.001123456Z +; + +dateMinusDurationsFromLeftMultipleEvals +required_capability: date_nanos_add_subtract + +row n = to_date_nanos("2053-04-04T04:03:02.001123456Z") +| eval x = -4 hour + n +| eval y = -3 minute + x, then = y + (-2 second - 1 millisecond) +| keep then +; + +then:date_nanos +2053-04-04T00:00:00.000123456Z +; + +dateMinusNull +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2053-04-04T04:03:02.001123456Z") +| eval minus = dt - null +; + +dt:date_nanos | minus:date_nanos +2053-04-04T04:03:02.001123456Z | null +; + +dateMinusNullAndPeriod +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2053-04-04T04:03:02.001123456Z") +| eval minus = dt - null - 4 minutes +; + +dt:date_nanos | minus:date_nanos +2053-04-04T04:03:02.001123456Z | null +; + +dateMinusNullAndDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2053-04-04T04:03:02.001123456Z") +| eval minus = dt - 6 days - null +; + +dt:date_nanos | minus:date_nanos +2053-04-04T04:03:02.001123456Z | null +; + +datePlusPeriodAndDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T00:00:00.000123456Z") +| eval plus = dt + 4 years + 3 months + 2 weeks + 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:date_nanos | plus:date_nanos +2100-01-01T00:00:00.000123456Z | 2104-04-16T01:01:01.001123456Z +; + +dateMinusPeriodAndDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2104-04-16T01:01:01.001123456Z") +| eval minus = dt - 4 years - 3 months - 2 weeks - 1 day - 1 hour - 1 minute - 1 second - 1 milliseconds; + +dt:date_nanos |minus:date_nanos +2104-04-16T01:01:01.001123456Z |2100-01-01T00:00:00.000123456Z +; + +datePlusPeriodMinusDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2100-01-01T01:01:01.001123456Z") +| eval plus = dt + 4 years + 3 months + 2 weeks + 1 day - 1 hour - 1 minute - 1 second - 1 milliseconds; + +dt:date_nanos | plus:date_nanos +2100-01-01T01:01:01.001123456Z | 2104-04-16T00:00:00.000123456Z +; + +datePlusDurationMinusPeriod +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos("2104-04-16T00:00:00.000123456Z") +| eval plus = dt - 4 years - 3 months - 2 weeks - 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:date_nanos | plus:date_nanos +2104-04-16T00:00:00.000123456Z | 2100-01-01T01:01:01.001123456Z +; + +dateMathArithmeticOverflow from addition +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos(9223372036854775807) +| eval plus = dt + 1 day +| keep plus; + +warning:Line 2:15: evaluation of [dt + 1 day] failed, treating result as null. Only first 20 failures recorded. 
+warning:Line 2:15: java.time.DateTimeException: Date nanos out of range. Must be between 1970-01-01T00:00:00Z and 2262-04-11T23:47:16.854775807 +plus:date_nanos +null +; + +dateNanosSubtractionBefore1970 +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos(0::long) +| eval minus = dt - 1 day +| keep minus; + +warning:Line 2:16: evaluation of [dt - 1 day] failed, treating result as null. Only first 20 failures recorded. +warning:Line 2:16: java.time.DateTimeException: Date nanos out of range. Must be between 1970-01-01T00:00:00Z and 2262-04-11T23:47:16.854775807 +minus:date_nanos +null +; + +dateMathDateException +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos(0::long) +| eval plus = dt + 2147483647 years +| keep plus; + +warning:Line 2:15: evaluation of [dt + 2147483647 years] failed, treating result as null. Only first 20 failures recorded. +warning:Line 2:15: java.time.DateTimeException: Invalid value for Year (valid values -999999999 - 999999999): 2147485617 +plus:date_nanos +null +; + +dateMathNegatedPeriod +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos(0::long) +| eval plus = -(-1 year) + dt +| keep plus; + +plus:date_nanos +1971-01-01T00:00:00.000Z +; + +dateMathNegatedDuration +required_capability: date_nanos_add_subtract + +row dt = to_date_nanos(0::long) +| eval plus = -(-1 second) + dt +| keep plus; + +plus:date_nanos +1970-01-01T00:00:01.000Z +; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDateNanosEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDateNanosEvaluator.java new file mode 100644 index 000000000000..fe80536ea5d0 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDateNanosEvaluator.java @@ -0,0 +1,142 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; + +import java.lang.ArithmeticException; +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.time.DateTimeException; +import java.time.temporal.TemporalAmount; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Add}. + * This class is generated. Do not edit it. 
+ */ +public final class AddDateNanosEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final EvalOperator.ExpressionEvaluator dateNanos; + + private final TemporalAmount temporalAmount; + + private final DriverContext driverContext; + + private Warnings warnings; + + public AddDateNanosEvaluator(Source source, EvalOperator.ExpressionEvaluator dateNanos, + TemporalAmount temporalAmount, DriverContext driverContext) { + this.source = source; + this.dateNanos = dateNanos; + this.temporalAmount = temporalAmount; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (LongBlock dateNanosBlock = (LongBlock) dateNanos.eval(page)) { + LongVector dateNanosVector = dateNanosBlock.asVector(); + if (dateNanosVector == null) { + return eval(page.getPositionCount(), dateNanosBlock); + } + return eval(page.getPositionCount(), dateNanosVector); + } + } + + public LongBlock eval(int positionCount, LongBlock dateNanosBlock) { + try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + if (dateNanosBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (dateNanosBlock.getValueCount(p) != 1) { + if (dateNanosBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendLong(Add.processDateNanos(dateNanosBlock.getLong(dateNanosBlock.getFirstValueIndex(p)), this.temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public LongBlock eval(int positionCount, LongVector dateNanosVector) { + try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendLong(Add.processDateNanos(dateNanosVector.getLong(p), this.temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "AddDateNanosEvaluator[" + "dateNanos=" + dateNanos + ", temporalAmount=" + temporalAmount + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(dateNanos); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory dateNanos; + + private final TemporalAmount temporalAmount; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory dateNanos, + TemporalAmount temporalAmount) { + this.source = source; + this.dateNanos = dateNanos; + this.temporalAmount = temporalAmount; + } + + @Override + public AddDateNanosEvaluator get(DriverContext context) { + return new AddDateNanosEvaluator(source, dateNanos.get(context), temporalAmount, context); + } + + @Override + public String toString() { + return "AddDateNanosEvaluator[" + "dateNanos=" + dateNanos + ", temporalAmount=" + temporalAmount + "]"; + } + } +} 
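The generated AddDateNanosEvaluator above and the SubDateNanosEvaluator in the next file differ only in the scalar helper they call per row (Add.processDateNanos versus Sub.processDateNanos). Both helpers route the arithmetic through a ZonedDateTime pinned to UTC because Instant cannot apply calendar-based amounts: Instant.plus(Period.ofYears(1)) throws UnsupportedTemporalTypeException, while ZonedDateTime handles both Period and Duration. The following is a minimal standalone sketch of that round trip, not part of the patch; the class name and the toInstant/toLong helpers are simplified stand-ins for org.elasticsearch.common.time.DateUtils.toInstant and DateUtils.toLong, which additionally validate the 1970-2262 range and throw IllegalArgumentException, the exception processDateNanos re-wraps into the DateTimeException seen in the csv-spec warnings above.

import java.time.Duration;
import java.time.Instant;
import java.time.Period;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.temporal.TemporalAmount;

public class DateNanosMathSketch {
    // Stand-in for DateUtils.toInstant(long): split an epoch-nanosecond count
    // into the seconds/nanos pair that Instant expects.
    static Instant toInstant(long dateNanos) {
        return Instant.ofEpochSecond(dateNanos / 1_000_000_000L, dateNanos % 1_000_000_000L);
    }

    // Stand-in for DateUtils.toLong(Instant): back to epoch nanoseconds. This
    // sketch overflows with ArithmeticException outside the representable range,
    // where the real method throws IllegalArgumentException with the bad value.
    static long toLong(Instant instant) {
        return Math.addExact(Math.multiplyExact(instant.getEpochSecond(), 1_000_000_000L), instant.getNano());
    }

    // Same shape as Add.processDateNanos: go through ZonedDateTime at UTC so
    // calendar-based amounts (months, years) work; Instant.plus would reject them.
    static long plus(long dateNanos, TemporalAmount amount) {
        return toLong(Instant.from(ZonedDateTime.ofInstant(toInstant(dateNanos), ZoneOffset.UTC).plus(amount)));
    }

    public static void main(String[] args) {
        long epochNanos = 0L; // 1970-01-01T00:00:00Z
        System.out.println(toInstant(plus(epochNanos, Period.ofYears(1))));      // 1971-01-01T00:00:00Z
        System.out.println(toInstant(plus(epochNanos, Duration.ofSeconds(1))));  // 1970-01-01T00:00:01Z
    }
}

Running main prints 1971-01-01T00:00:00Z and 1970-01-01T00:00:01Z, which matches the dateMathNegatedPeriod and dateMathNegatedDuration expectations in the csv-spec tests above.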
diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDateNanosEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDateNanosEvaluator.java new file mode 100644 index 000000000000..3b6f4c1046d4 --- /dev/null +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDateNanosEvaluator.java @@ -0,0 +1,142 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; + +import java.lang.ArithmeticException; +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.time.DateTimeException; +import java.time.temporal.TemporalAmount; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.compute.operator.Warnings; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.core.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Sub}. + * This class is generated. Do not edit it. + */ +public final class SubDateNanosEvaluator implements EvalOperator.ExpressionEvaluator { + private final Source source; + + private final EvalOperator.ExpressionEvaluator dateNanos; + + private final TemporalAmount temporalAmount; + + private final DriverContext driverContext; + + private Warnings warnings; + + public SubDateNanosEvaluator(Source source, EvalOperator.ExpressionEvaluator dateNanos, + TemporalAmount temporalAmount, DriverContext driverContext) { + this.source = source; + this.dateNanos = dateNanos; + this.temporalAmount = temporalAmount; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (LongBlock dateNanosBlock = (LongBlock) dateNanos.eval(page)) { + LongVector dateNanosVector = dateNanosBlock.asVector(); + if (dateNanosVector == null) { + return eval(page.getPositionCount(), dateNanosBlock); + } + return eval(page.getPositionCount(), dateNanosVector); + } + } + + public LongBlock eval(int positionCount, LongBlock dateNanosBlock) { + try(LongBlock.Builder result = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + if (dateNanosBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (dateNanosBlock.getValueCount(p) != 1) { + if (dateNanosBlock.getValueCount(p) > 1) { + warnings().registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + try { + result.appendLong(Sub.processDateNanos(dateNanosBlock.getLong(dateNanosBlock.getFirstValueIndex(p)), this.temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + public LongBlock eval(int positionCount, LongVector dateNanosVector) { + try(LongBlock.Builder result = 
driverContext.blockFactory().newLongBlockBuilder(positionCount)) { + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendLong(Sub.processDateNanos(dateNanosVector.getLong(p), this.temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings().registerException(e); + result.appendNull(); + } + } + return result.build(); + } + } + + @Override + public String toString() { + return "SubDateNanosEvaluator[" + "dateNanos=" + dateNanos + ", temporalAmount=" + temporalAmount + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(dateNanos); + } + + private Warnings warnings() { + if (warnings == null) { + this.warnings = Warnings.createWarnings( + driverContext.warningsMode(), + source.source().getLineNumber(), + source.source().getColumnNumber(), + source.text() + ); + } + return warnings; + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory dateNanos; + + private final TemporalAmount temporalAmount; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory dateNanos, + TemporalAmount temporalAmount) { + this.source = source; + this.dateNanos = dateNanos; + this.temporalAmount = temporalAmount; + } + + @Override + public SubDateNanosEvaluator get(DriverContext context) { + return new SubDateNanosEvaluator(source, dateNanos.get(context), temporalAmount, context); + } + + @Override + public String toString() { + return "SubDateNanosEvaluator[" + "dateNanos=" + dateNanos + ", temporalAmount=" + temporalAmount + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 7aa2fd92b164..9fad9123944f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -338,6 +338,10 @@ public enum Cap { */ LEAST_GREATEST_FOR_DATENANOS(), + /** + * Support add and subtract on date nanos + */ + DATE_NANOS_ADD_SUBTRACT(), /** * Support for date_trunc function on date nanos type */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java index 8f8d885ee379..9d34410e8a16 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -21,7 +22,9 @@ import java.io.IOException; import java.time.DateTimeException; import java.time.Duration; +import java.time.Instant; import java.time.Period; +import java.time.ZonedDateTime; import java.time.temporal.TemporalAmount; import static org.elasticsearch.xpack.esql.core.util.DateUtils.asDateTime; @@ -33,7 +36,7 @@ public class Add extends DateTimeArithmeticOperation implements 
BinaryComparison public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Add", Add::new); @FunctionInfo( - returnType = { "double", "integer", "long", "date_period", "datetime", "time_duration", "unsigned_long" }, + returnType = { "double", "integer", "long", "date_nanos", "date_period", "datetime", "time_duration", "unsigned_long" }, description = "Add two numbers together. " + "If either field is <> then the result is `null`." ) public Add( @@ -41,12 +44,12 @@ public Add( @Param( name = "lhs", description = "A numeric value or a date time value.", - type = { "double", "integer", "long", "date_period", "datetime", "time_duration", "unsigned_long" } + type = { "double", "integer", "long", "date_nanos", "date_period", "datetime", "time_duration", "unsigned_long" } ) Expression left, @Param( name = "rhs", description = "A numeric value or a date time value.", - type = { "double", "integer", "long", "date_period", "datetime", "time_duration", "unsigned_long" } + type = { "double", "integer", "long", "date_nanos", "date_period", "datetime", "time_duration", "unsigned_long" } ) Expression right ) { super( @@ -58,7 +61,8 @@ public Add( AddLongsEvaluator.Factory::new, AddUnsignedLongsEvaluator.Factory::new, AddDoublesEvaluator.Factory::new, - AddDatetimesEvaluator.Factory::new + AddDatetimesEvaluator.Factory::new, + AddDateNanosEvaluator.Factory::new ); } @@ -70,7 +74,8 @@ private Add(StreamInput in) throws IOException { AddLongsEvaluator.Factory::new, AddUnsignedLongsEvaluator.Factory::new, AddDoublesEvaluator.Factory::new, - AddDatetimesEvaluator.Factory::new + AddDatetimesEvaluator.Factory::new, + AddDateNanosEvaluator.Factory::new ); } @@ -130,6 +135,25 @@ static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount return asMillis(asDateTime(datetime).plus(temporalAmount)); } + @Evaluator(extraName = "DateNanos", warnExceptions = { ArithmeticException.class, DateTimeException.class }) + static long processDateNanos(long dateNanos, @Fixed TemporalAmount temporalAmount) { + // Instant.plus behaves differently from ZonedDateTime.plus, but DateUtils generally works with instants. + try { + return DateUtils.toLong( + Instant.from( + ZonedDateTime.ofInstant(DateUtils.toInstant(dateNanos), org.elasticsearch.xpack.esql.core.util.DateUtils.UTC) + .plus(temporalAmount) + ) + ); + } catch (IllegalArgumentException e) { + /* + toLong will throw IllegalArgumentException for out of range dates, but that includes the actual value which we want + to avoid returning here. + */ + throw new DateTimeException("Date nanos out of range. 
Must be between 1970-01-01T00:00:00Z and 2262-04-11T23:47:16.854775807"); + } + } + @Override public Period fold(Period left, Period right) { return left.plus(right); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index d407dd8bf7de..8bb166fac60b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -22,10 +22,11 @@ import java.util.Collection; import static org.elasticsearch.xpack.esql.core.type.DataType.DATETIME; +import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_NANOS; import static org.elasticsearch.xpack.esql.core.type.DataType.DATE_PERIOD; import static org.elasticsearch.xpack.esql.core.type.DataType.TIME_DURATION; -import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTime; -import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTimeOrNanosOrTemporal; +import static org.elasticsearch.xpack.esql.core.type.DataType.isMillisOrNanos; import static org.elasticsearch.xpack.esql.core.type.DataType.isNull; import static org.elasticsearch.xpack.esql.core.type.DataType.isTemporalAmount; @@ -35,7 +36,8 @@ interface DatetimeArithmeticEvaluator { ExpressionEvaluator.Factory apply(Source source, ExpressionEvaluator.Factory expressionEvaluator, TemporalAmount temporalAmount); } - private final DatetimeArithmeticEvaluator datetimes; + private final DatetimeArithmeticEvaluator millisEvaluator; + private final DatetimeArithmeticEvaluator nanosEvaluator; DateTimeArithmeticOperation( Source source, @@ -46,10 +48,12 @@ interface DatetimeArithmeticEvaluator { BinaryEvaluator longs, BinaryEvaluator ulongs, BinaryEvaluator doubles, - DatetimeArithmeticEvaluator datetimes + DatetimeArithmeticEvaluator millisEvaluator, + DatetimeArithmeticEvaluator nanosEvaluator ) { super(source, left, right, op, ints, longs, ulongs, doubles); - this.datetimes = datetimes; + this.millisEvaluator = millisEvaluator; + this.nanosEvaluator = nanosEvaluator; } DateTimeArithmeticOperation( @@ -59,19 +63,22 @@ interface DatetimeArithmeticEvaluator { BinaryEvaluator longs, BinaryEvaluator ulongs, BinaryEvaluator doubles, - DatetimeArithmeticEvaluator datetimes + DatetimeArithmeticEvaluator millisEvaluator, + DatetimeArithmeticEvaluator nanosEvaluator ) throws IOException { super(in, op, ints, longs, ulongs, doubles); - this.datetimes = datetimes; + this.millisEvaluator = millisEvaluator; + this.nanosEvaluator = nanosEvaluator; } @Override protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) { return TypeResolutions.isType( e, - t -> t.isNumeric() || DataType.isDateTimeOrTemporal(t) || DataType.isNull(t), + t -> t.isNumeric() || DataType.isDateTimeOrNanosOrTemporal(t) || DataType.isNull(t), sourceText(), paramOrdinal, + "date_nanos", "datetime", "numeric" ); @@ -86,11 +93,11 @@ protected TypeResolution checkCompatibility() { // - one argument is a DATETIME and the other a (foldable) TemporalValue, or // - both arguments are TemporalValues (so we can fold them), or // - one argument is NULL 
and the other one a DATETIME. - if (isDateTimeOrTemporal(leftType) || isDateTimeOrTemporal(rightType)) { + if (isDateTimeOrNanosOrTemporal(leftType) || isDateTimeOrNanosOrTemporal(rightType)) { if (isNull(leftType) || isNull(rightType)) { return TypeResolution.TYPE_RESOLVED; } - if ((isDateTime(leftType) && isTemporalAmount(rightType)) || (isTemporalAmount(leftType) && isDateTime(rightType))) { + if ((isMillisOrNanos(leftType) && isTemporalAmount(rightType)) || (isTemporalAmount(leftType) && isMillisOrNanos(rightType))) { return TypeResolution.TYPE_RESOLVED; } if (isTemporalAmount(leftType) && isTemporalAmount(rightType) && leftType == rightType) { @@ -171,7 +178,20 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { temporalAmountArgument = left(); } - return datetimes.apply(source(), toEvaluator.apply(datetimeArgument), (TemporalAmount) temporalAmountArgument.fold()); + return millisEvaluator.apply(source(), toEvaluator.apply(datetimeArgument), (TemporalAmount) temporalAmountArgument.fold()); + } else if (dataType() == DATE_NANOS) { + // One of the arguments has to be a date_nanos and the other a temporal amount. + Expression dateNanosArgument; + Expression temporalAmountArgument; + if (left().dataType() == DATE_NANOS) { + dateNanosArgument = left(); + temporalAmountArgument = right(); + } else { + dateNanosArgument = right(); + temporalAmountArgument = left(); + } + + return nanosEvaluator.apply(source(), toEvaluator.apply(dateNanosArgument), (TemporalAmount) temporalAmountArgument.fold()); } else { return super.toEvaluator(toEvaluator); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java index 27f5579129cc..e072619e6772 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.xpack.esql.core.expression.Expression; @@ -22,7 +23,9 @@ import java.io.IOException; import java.time.DateTimeException; import java.time.Duration; +import java.time.Instant; import java.time.Period; +import java.time.ZonedDateTime; import java.time.temporal.TemporalAmount; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; @@ -61,7 +64,8 @@ public Sub( SubLongsEvaluator.Factory::new, SubUnsignedLongsEvaluator.Factory::new, SubDoublesEvaluator.Factory::new, - SubDatetimesEvaluator.Factory::new + SubDatetimesEvaluator.Factory::new, + SubDateNanosEvaluator.Factory::new ); } @@ -73,7 +77,8 @@ private Sub(StreamInput in) throws IOException { SubLongsEvaluator.Factory::new, SubUnsignedLongsEvaluator.Factory::new, SubDoublesEvaluator.Factory::new, - SubDatetimesEvaluator.Factory::new + SubDatetimesEvaluator.Factory::new, + SubDateNanosEvaluator.Factory::new ); } @@ -143,6 +148,25 @@ static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount return asMillis(asDateTime(datetime).minus(temporalAmount)); } + @Evaluator(extraName = "DateNanos", warnExceptions = { ArithmeticException.class, 
DateTimeException.class }) + static long processDateNanos(long dateNanos, @Fixed TemporalAmount temporalAmount) { + // Instant.plus behaves differently from ZonedDateTime.plus, but DateUtils generally works with instants. + try { + return DateUtils.toLong( + Instant.from( + ZonedDateTime.ofInstant(DateUtils.toInstant(dateNanos), org.elasticsearch.xpack.esql.core.util.DateUtils.UTC) + .minus(temporalAmount) + ) + ); + } catch (IllegalArgumentException e) { + /* + toLong will throw IllegalArgumentException for out of range dates, but that includes the actual value which we want + to avoid returning here. + */ + throw new DateTimeException("Date nanos out of range. Must be between 1970-01-01T00:00:00Z and 2262-04-11T23:47:16.854775807"); + } + } + @Override public Period fold(Period left, Period right) { return left.minus(right); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java index 93fba06aab98..1eabf3ff7633 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverter.java @@ -80,7 +80,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.UNSIGNED_LONG; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTime; -import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTimeOrNanosOrTemporal; import static org.elasticsearch.xpack.esql.core.type.DataType.isNullOrDatePeriod; import static org.elasticsearch.xpack.esql.core.type.DataType.isNullOrTemporalAmount; import static org.elasticsearch.xpack.esql.core.type.DataType.isNullOrTimeDuration; @@ -380,10 +380,13 @@ public static DataType commonType(DataType left, DataType right) { if (right == NULL) { return left; } - if (isDateTimeOrTemporal(left) || isDateTimeOrTemporal(right)) { + if (isDateTimeOrNanosOrTemporal(left) || isDateTimeOrNanosOrTemporal(right)) { if ((isDateTime(left) && isNullOrTemporalAmount(right)) || (isNullOrTemporalAmount(left) && isDateTime(right))) { return DATETIME; } + if ((left == DATE_NANOS && isNullOrTemporalAmount(right)) || (isNullOrTemporalAmount(left) && right == DATE_NANOS)) { + return DATE_NANOS; + } if (isNullOrTimeDuration(left) && isNullOrTimeDuration(right)) { return TIME_DURATION; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 24c836f3a150..8875e1fbd995 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -2016,14 +2016,14 @@ public void testImplicitCasting() { assertThat( e.getMessage(), - containsString("first argument of [concat(\"2024\", \"-04\", \"-01\") + 1 day] must be [datetime or numeric]") + containsString("first argument of [concat(\"2024\", \"-04\", \"-01\") + 1 day] must be [date_nanos, datetime or numeric]") ); e = expectThrows(VerificationException.class, () -> analyze(""" from test | eval x = to_string(null) - 1 day """)); - assertThat(e.getMessage(), containsString("first argument of [to_string(null) - 1 day] 
must be [datetime or numeric]")); + assertThat(e.getMessage(), containsString("first argument of [to_string(null) - 1 day] must be [date_nanos, datetime or numeric]")); e = expectThrows(VerificationException.class, () -> analyze(""" from test | eval x = concat("2024", "-04", "-01") + "1 day" @@ -2031,7 +2031,7 @@ public void testImplicitCasting() { assertThat( e.getMessage(), - containsString("first argument of [concat(\"2024\", \"-04\", \"-01\") + \"1 day\"] must be [datetime or numeric]") + containsString("first argument of [concat(\"2024\", \"-04\", \"-01\") + \"1 day\"] must be [date_nanos, datetime or numeric]") ); e = expectThrows(VerificationException.class, () -> analyze(""" diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 2f274564ea95..74e2de114172 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -56,11 +56,11 @@ public class VerifierTests extends ESTestCase { public void testIncompatibleTypesInMathOperation() { assertEquals( - "1:40: second argument of [a + c] must be [datetime or numeric], found value [c] type [keyword]", + "1:40: second argument of [a + c] must be [date_nanos, datetime or numeric], found value [c] type [keyword]", error("row a = 1, b = 2, c = \"xxx\" | eval y = a + c") ); assertEquals( - "1:40: second argument of [a - c] must be [datetime or numeric], found value [c] type [keyword]", + "1:40: second argument of [a - c] must be [date_nanos, datetime or numeric], found value [c] type [keyword]", error("row a = 1, b = 2, c = \"xxx\" | eval y = a - c") ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index d78dfd3141a0..816c9ef6f352 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -1113,31 +1113,83 @@ public static List dateCases(long min, long max) { * */ public static List dateNanosCases() { - return List.of( - new TypedDataSupplier("<1970-01-01T00:00:00.000000000Z>", () -> 0L, DataType.DATE_NANOS), - new TypedDataSupplier("", () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11), DataType.DATE_NANOS), - new TypedDataSupplier( - "", - () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE), - DataType.DATE_NANOS - ), - new TypedDataSupplier( - "", - () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE), - DataType.DATE_NANOS - ) - ); + return dateNanosCases(Instant.EPOCH, DateUtils.MAX_NANOSECOND_INSTANT); + } + + /** + * Generate cases for {@link DataType#DATE_NANOS}. 
+ * + */ + public static List<TypedDataSupplier> dateNanosCases(Instant minValue, Instant maxValue) { + // maximum nanosecond date in ES is 2262-04-11T23:47:16.854775807Z + Instant twentyOneHundred = Instant.parse("2100-01-01T00:00:00Z"); + Instant twentyTwoHundred = Instant.parse("2200-01-01T00:00:00Z"); + Instant twentyTwoFifty = Instant.parse("2250-01-01T00:00:00Z"); + + List<TypedDataSupplier> cases = new ArrayList<>(); + if (minValue.isAfter(Instant.EPOCH) == false) { + cases.add( + new TypedDataSupplier("<1970-01-01T00:00:00.000000000Z>", () -> DateUtils.toLong(Instant.EPOCH), DataType.DATE_NANOS) + ); + } + + Instant lower = Instant.EPOCH.isBefore(minValue) ? minValue : Instant.EPOCH; + Instant upper = twentyOneHundred.isAfter(maxValue) ? maxValue : twentyOneHundred; + if (upper.isAfter(lower)) { + cases.add( + new TypedDataSupplier( + "<21st century date nanos>", + () -> DateUtils.toLong(ESTestCase.randomInstantBetween(lower, upper)), + DataType.DATE_NANOS + ) + ); + } + + Instant lower2 = twentyOneHundred.isBefore(minValue) ? minValue : twentyOneHundred; + Instant upper2 = twentyTwoHundred.isAfter(maxValue) ? maxValue : twentyTwoHundred; + if (upper2.isAfter(lower2)) { + cases.add( + new TypedDataSupplier( + "<22nd century date nanos>", + () -> DateUtils.toLong(ESTestCase.randomInstantBetween(lower2, upper2)), + DataType.DATE_NANOS + ) + ); + } + + Instant lower3 = twentyTwoHundred.isBefore(minValue) ? minValue : twentyTwoHundred; + Instant upper3 = twentyTwoFifty.isAfter(maxValue) ? maxValue : twentyTwoFifty; + if (upper3.isAfter(lower3)) { + cases.add( + new TypedDataSupplier( + "<23rd century date nanos>", + () -> DateUtils.toLong(ESTestCase.randomInstantBetween(lower3, upper3)), + DataType.DATE_NANOS + ) + ); + } + return cases; } public static List<TypedDataSupplier> datePeriodCases() { + return datePeriodCases(-1000, -13, -32, 1000, 13, 32); + } + + public static List<TypedDataSupplier> datePeriodCases(int yearMin, int monthMin, int dayMin, int yearMax, int monthMax, int dayMax) { + final int yMin = Math.max(yearMin, -1000); + final int mMin = Math.max(monthMin, -13); + final int dMin = Math.max(dayMin, -32); + final int yMax = Math.min(yearMax, 1000); + final int mMax = Math.min(monthMax, 13); + final int dMax = Math.min(dayMax, 32); return List.of( new TypedDataSupplier("", () -> Period.ZERO, DataType.DATE_PERIOD, true), new TypedDataSupplier( "", () -> Period.of( - ESTestCase.randomIntBetween(-1000, 1000), - ESTestCase.randomIntBetween(-13, 13), - ESTestCase.randomIntBetween(-32, 32) + ESTestCase.randomIntBetween(yMin, yMax), + ESTestCase.randomIntBetween(mMin, mMax), + ESTestCase.randomIntBetween(dMin, dMax) ), DataType.DATE_PERIOD, true @@ -1146,11 +1198,18 @@ public static List<TypedDataSupplier> datePeriodCases() { } public static List<TypedDataSupplier> timeDurationCases() { + return timeDurationCases(-604800000, 604800000); + } + + public static List<TypedDataSupplier> timeDurationCases(long minValue, long maxValue) { + // plus/minus 7 days by default, with caller limits + final long min = Math.max(minValue, -604800000L); + final long max = Math.min(maxValue, 604800000L); return List.of( new TypedDataSupplier("", () -> Duration.ZERO, DataType.TIME_DURATION, true), new TypedDataSupplier( "", - () -> Duration.ofMillis(ESTestCase.randomLongBetween(-604800000L, 604800000L)), // plus/minus 7 days + () -> Duration.ofMillis(ESTestCase.randomLongBetween(min, max)), DataType.TIME_DURATION, true ) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java index 8c31b4a65dd1..abfb634d5f30 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -18,7 +19,9 @@ import java.math.BigInteger; import java.time.Duration; +import java.time.Instant; import java.time.Period; +import java.time.ZonedDateTime; import java.time.temporal.TemporalAmount; import java.util.ArrayList; import java.util.List; @@ -26,6 +29,7 @@ import java.util.function.BiFunction; import java.util.function.BinaryOperator; import java.util.function.Supplier; +import java.util.function.ToLongBiFunction; import static org.elasticsearch.xpack.esql.core.util.DateUtils.asDateTime; import static org.elasticsearch.xpack.esql.core.util.DateUtils.asMillis; @@ -148,14 +152,14 @@ public static Iterable parameters() { BinaryOperator result = (lhs, rhs) -> { try { - return addDatesAndTemporalAmount(lhs, rhs); + return addDatesAndTemporalAmount(lhs, rhs, AddTests::addMillis); } catch (ArithmeticException e) { return null; } }; BiFunction> warnings = (lhs, rhs) -> { try { - addDatesAndTemporalAmount(lhs.data(), rhs.data()); + addDatesAndTemporalAmount(lhs.data(), rhs.data(), AddTests::addMillis); return List.of(); } catch (ArithmeticException e) { return List.of( @@ -186,6 +190,37 @@ public static Iterable parameters() { true ) ); + + BinaryOperator nanosResult = (lhs, rhs) -> { + try { + return addDatesAndTemporalAmount(lhs, rhs, AddTests::addNanos); + } catch (ArithmeticException e) { + return null; + } + }; + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + nanosResult, + DataType.DATE_NANOS, + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.datePeriodCases(0, 0, 0, 10, 13, 32), + startsWith("AddDateNanosEvaluator[dateNanos=Attribute[channel=0], temporalAmount="), + warnings, + true + ) + ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + nanosResult, + DataType.DATE_NANOS, + TestCaseSupplier.dateNanosCases(), + TestCaseSupplier.timeDurationCases(0, 604800000L), + startsWith("AddDateNanosEvaluator[dateNanos=Attribute[channel=0], temporalAmount="), + warnings, + true + ) + ); + suppliers.addAll(TestCaseSupplier.dateCases().stream().mapMulti((tds, consumer) -> { consumer.accept( new TestCaseSupplier( @@ -284,7 +319,7 @@ public static Iterable parameters() { private static String addErrorMessageString(boolean includeOrdinal, List> validPerPosition, List types) { try { - return typeErrorMessage(includeOrdinal, validPerPosition, types, (a, b) -> "datetime or numeric"); + return typeErrorMessage(includeOrdinal, validPerPosition, types, (a, b) -> "date_nanos, datetime or numeric"); } catch (IllegalStateException e) { // This means all the positional args were okay, so the expected error is from the combination return "[+] has arguments with incompatible types [" + types.get(0).typeName() + "] and [" + types.get(1).typeName() + "]"; @@ -292,7 +327,7 @@ private static String 
addErrorMessageString(boolean includeOrdinal, List adder) { // this weird casting dance makes the expected value lambda symmetric Long date; TemporalAmount period; @@ -303,9 +338,21 @@ private static Object addDatesAndTemporalAmount(Object lhs, Object rhs) { date = (Long) rhs; period = (TemporalAmount) lhs; } + return adder.applyAsLong(date, period); + } + + private static long addMillis(Long date, TemporalAmount period) { return asMillis(asDateTime(date).plus(period)); } + private static long addNanos(Long date, TemporalAmount period) { + return DateUtils.toLong( + Instant.from( + ZonedDateTime.ofInstant(DateUtils.toInstant(date), org.elasticsearch.xpack.esql.core.util.DateUtils.UTC).plus(period) + ) + ); + } + @Override protected Expression build(Source source, List args) { return new Add(source, args.get(0), args.get(1)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java index 39d55d1ba0b5..1338299b3a12 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java @@ -10,16 +10,23 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.expression.function.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.hamcrest.Matchers; import java.time.Duration; +import java.time.Instant; import java.time.Period; +import java.time.ZonedDateTime; +import java.time.temporal.TemporalAmount; import java.util.List; +import java.util.function.BinaryOperator; import java.util.function.Supplier; +import java.util.function.ToLongBiFunction; import static org.elasticsearch.xpack.esql.EsqlTestUtils.randomLiteral; import static org.elasticsearch.xpack.esql.core.util.DateUtils.asDateTime; @@ -28,6 +35,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; public class SubTests extends AbstractScalarFunctionTestCase { public SubTests(@Name("TestCase") Supplier testCaseSupplier) { @@ -117,13 +125,44 @@ public static Iterable parameters() { return new TestCaseSupplier.TestCase( List.of( new TestCaseSupplier.TypedData(lhs, DataType.DATETIME, "lhs"), - new TestCaseSupplier.TypedData(rhs, DataType.DATE_PERIOD, "rhs") + new TestCaseSupplier.TypedData(rhs, DataType.DATE_PERIOD, "rhs").forceLiteral() ), - "SubDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + Matchers.startsWith("SubDatetimesEvaluator[datetime=Attribute[channel=0], temporalAmount="), DataType.DATETIME, equalTo(asMillis(asDateTime(lhs).minus(rhs))) ); })); + + BinaryOperator nanosResult = (lhs, rhs) -> { + try { + return subtractDatesAndTemporalAmount(lhs, rhs, SubTests::subtractNanos); + } catch (ArithmeticException e) { + return null; + } + }; + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + nanosResult, + 
DataType.DATE_NANOS, + TestCaseSupplier.dateNanosCases(Instant.parse("1985-01-01T00:00:00Z"), DateUtils.MAX_NANOSECOND_INSTANT), + TestCaseSupplier.datePeriodCases(0, 0, 0, 10, 13, 32), + startsWith("SubDateNanosEvaluator[dateNanos=Attribute[channel=0], temporalAmount="), + (l, r) -> List.of(), + true + ) + ); + suppliers.addAll( + TestCaseSupplier.forBinaryNotCasting( + nanosResult, + DataType.DATE_NANOS, + TestCaseSupplier.dateNanosCases(Instant.parse("1985-01-01T00:00:00Z"), DateUtils.MAX_NANOSECOND_INSTANT), + TestCaseSupplier.timeDurationCases(0, 604800000L), + startsWith("SubDateNanosEvaluator[dateNanos=Attribute[channel=0], temporalAmount="), + (l, r) -> List.of(), + true + ) + ); + suppliers.add(new TestCaseSupplier("Period - Period", List.of(DataType.DATE_PERIOD, DataType.DATE_PERIOD), () -> { Period lhs = (Period) randomLiteral(DataType.DATE_PERIOD).value(); Period rhs = (Period) randomLiteral(DataType.DATE_PERIOD).value(); @@ -143,9 +182,9 @@ public static Iterable parameters() { TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase( List.of( new TestCaseSupplier.TypedData(lhs, DataType.DATETIME, "lhs"), - new TestCaseSupplier.TypedData(rhs, DataType.TIME_DURATION, "rhs") + new TestCaseSupplier.TypedData(rhs, DataType.TIME_DURATION, "rhs").forceLiteral() ), - "SubDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + Matchers.startsWith("SubDatetimesEvaluator[datetime=Attribute[channel=0], temporalAmount="), DataType.DATETIME, equalTo(asMillis(asDateTime(lhs).minus(rhs))) ); @@ -164,6 +203,7 @@ public static Iterable parameters() { equalTo(lhs.minus(rhs)) ); })); + // exact math arithmetic exceptions suppliers.add( arithmeticExceptionOverflowCase( @@ -210,7 +250,7 @@ public static Iterable parameters() { return original.getData().get(nullPosition == 0 ? 1 : 0).type(); } return original.expectedType(); - }, (nullPosition, nullData, original) -> original); + }, (nullPosition, nullData, original) -> nullData.isForceLiteral() ? 
equalTo("LiteralsEvaluator[lit=null]") : original); suppliers.add(new TestCaseSupplier("MV", List.of(DataType.INTEGER, DataType.INTEGER), () -> { // Ensure we don't have an overflow @@ -236,4 +276,26 @@ public static Iterable parameters() { protected Expression build(Source source, List args) { return new Sub(source, args.get(0), args.get(1)); } + + private static Object subtractDatesAndTemporalAmount(Object lhs, Object rhs, ToLongBiFunction subtract) { + // this weird casting dance makes the expected value lambda symmetric + Long date; + TemporalAmount period; + if (lhs instanceof Long) { + date = (Long) lhs; + period = (TemporalAmount) rhs; + } else { + date = (Long) rhs; + period = (TemporalAmount) lhs; + } + return subtract.applyAsLong(date, period); + } + + private static long subtractNanos(Long date, TemporalAmount period) { + return DateUtils.toLong( + Instant.from( + ZonedDateTime.ofInstant(DateUtils.toInstant(date), org.elasticsearch.xpack.esql.core.util.DateUtils.UTC).minus(period) + ) + ); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java index b2228b5543ef..54aa8d7696ff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeConverterTests.java @@ -45,7 +45,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.UNSUPPORTED; import static org.elasticsearch.xpack.esql.core.type.DataType.VERSION; import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTime; -import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.core.type.DataType.isDateTimeOrNanosOrTemporal; import static org.elasticsearch.xpack.esql.core.type.DataType.isString; import static org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter.commonType; @@ -84,14 +84,18 @@ public void testCommonTypeStrings() { } public void testCommonTypeDateTimeIntervals() { - List DATE_TIME_INTERVALS = Arrays.stream(DataType.values()).filter(DataType::isDateTimeOrTemporal).toList(); + List DATE_TIME_INTERVALS = Arrays.stream(DataType.values()).filter(DataType::isDateTimeOrNanosOrTemporal).toList(); for (DataType dataType1 : DATE_TIME_INTERVALS) { for (DataType dataType2 : DataType.values()) { if (dataType2 == NULL) { assertEqualsCommonType(dataType1, NULL, dataType1); - } else if (isDateTimeOrTemporal(dataType2)) { - if (isDateTime(dataType1) || isDateTime(dataType2)) { + } else if (isDateTimeOrNanosOrTemporal(dataType2)) { + if ((dataType1 == DATE_NANOS && dataType2 == DATETIME) || (dataType1 == DATETIME && dataType2 == DATE_NANOS)) { + assertNullCommonType(dataType1, dataType2); + } else if (isDateTime(dataType1) || isDateTime(dataType2)) { assertEqualsCommonType(dataType1, dataType2, DATETIME); + } else if (dataType1 == DATE_NANOS || dataType2 == DATE_NANOS) { + assertEqualsCommonType(dataType1, dataType2, DATE_NANOS); } else if (dataType1 == dataType2) { assertEqualsCommonType(dataType1, dataType2, dataType1); } else { @@ -145,7 +149,6 @@ public void testCommonTypeMiscellaneous() { UNSUPPORTED, OBJECT, SOURCE, - DATE_NANOS, DOC_DATA_TYPE, TSID_DATA_TYPE, PARTIAL_AGG, @@ -169,12 +172,12 @@ public void testCommonTypeMiscellaneous() { } private static void assertEqualsCommonType(DataType dataType1, DataType dataType2, DataType commonType) 
{ - assertEquals(commonType, commonType(dataType1, dataType2)); - assertEquals(commonType, commonType(dataType2, dataType1)); + assertEquals("Expected " + commonType + " for " + dataType1 + " and " + dataType2, commonType, commonType(dataType1, dataType2)); + assertEquals("Expected " + commonType + " for " + dataType1 + " and " + dataType2, commonType, commonType(dataType2, dataType1)); } private static void assertNullCommonType(DataType dataType1, DataType dataType2) { - assertNull(commonType(dataType1, dataType2)); - assertNull(commonType(dataType2, dataType1)); + assertNull("Expected null for " + dataType1 + " and " + dataType2, commonType(dataType1, dataType2)); + assertNull("Expected null for " + dataType1 + " and " + dataType2, commonType(dataType2, dataType1)); } } From 36d8307abd5fa5609336e23cd5d353f3873a6f08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Thu, 5 Dec 2024 07:50:46 +0100 Subject: [PATCH 28/45] CreateClassLoaderEntitlement + extensions to parse logic (#117754) (#117978) --- .../policy/CreateClassLoaderEntitlement.java | 16 ++++ .../runtime/policy/PolicyParser.java | 81 ++++++++++++------- .../policy/PolicyParserFailureTests.java | 7 +- .../runtime/policy/PolicyParserTests.java | 39 +++++++++ 4 files changed, 109 insertions(+), 34 deletions(-) create mode 100644 libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java new file mode 100644 index 000000000000..708e0b87711f --- /dev/null +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java @@ -0,0 +1,16 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.runtime.policy; + +public class CreateClassLoaderEntitlement implements Entitlement { + @ExternalEntitlement + public CreateClassLoaderEntitlement() {} + +} diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java index ea6603af9992..0d1a7c14ece4 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyParser.java @@ -19,22 +19,43 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; - -import static org.elasticsearch.entitlement.runtime.policy.PolicyParserException.newPolicyParserException; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.entitlement.runtime.policy.PolicyParserException.newPolicyParserException; /** * A parser to parse policy files for entitlements. */ public class PolicyParser { - protected static final String entitlementPackageName = Entitlement.class.getPackage().getName(); + private static final Map<String, Class<? extends Entitlement>> EXTERNAL_ENTITLEMENTS = Stream.of(FileEntitlement.class, CreateClassLoaderEntitlement.class) + .collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity())); protected final XContentParser policyParser; protected final String policyName; + static String getEntitlementTypeName(Class<? extends Entitlement> entitlementClass) { + var entitlementClassName = entitlementClass.getSimpleName(); + + if (entitlementClassName.endsWith("Entitlement") == false) { + throw new IllegalArgumentException( + entitlementClassName + " is not a valid Entitlement class name. 
A valid class name must end with 'Entitlement'" + ); + } + + var strippedClassName = entitlementClassName.substring(0, entitlementClassName.indexOf("Entitlement")); + return Arrays.stream(strippedClassName.split("(?=\\p{Lu})")) + .filter(Predicate.not(String::isEmpty)) + .map(s -> s.toLowerCase(Locale.ROOT)) + .collect(Collectors.joining("_")); + } + public PolicyParser(InputStream inputStream, String policyName) throws IOException { this.policyParser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, Objects.requireNonNull(inputStream)); this.policyName = policyName; @@ -67,18 +88,23 @@ protected Scope parseScope(String scopeName) throws IOException { } List<Entitlement> entitlements = new ArrayList<>(); while (policyParser.nextToken() != XContentParser.Token.END_ARRAY) { - if (policyParser.currentToken() != XContentParser.Token.START_OBJECT) { - throw newPolicyParserException(scopeName, "expected object "); - } - if (policyParser.nextToken() != XContentParser.Token.FIELD_NAME) { + if (policyParser.currentToken() == XContentParser.Token.VALUE_STRING) { + String entitlementType = policyParser.text(); + Entitlement entitlement = parseEntitlement(scopeName, entitlementType); + entitlements.add(entitlement); + } else if (policyParser.currentToken() == XContentParser.Token.START_OBJECT) { + if (policyParser.nextToken() != XContentParser.Token.FIELD_NAME) { + throw newPolicyParserException(scopeName, "expected object "); + } + String entitlementType = policyParser.currentName(); + Entitlement entitlement = parseEntitlement(scopeName, entitlementType); + entitlements.add(entitlement); + if (policyParser.nextToken() != XContentParser.Token.END_OBJECT) { + throw newPolicyParserException(scopeName, "expected closing object"); + } + } else { throw newPolicyParserException(scopeName, "expected object "); } - String entitlementType = policyParser.currentName(); - Entitlement entitlement = parseEntitlement(scopeName, entitlementType); - entitlements.add(entitlement); - if (policyParser.nextToken() != XContentParser.Token.END_OBJECT) { - throw newPolicyParserException(scopeName, "expected closing object"); - } } return new Scope(scopeName, entitlements); } catch (IOException ioe) { @@ -87,34 +113,29 @@ protected Scope parseScope(String scopeName) throws IOException { } protected Entitlement parseEntitlement(String scopeName, String entitlementType) throws IOException { - Class<?> entitlementClass; - try { - entitlementClass = Class.forName( - entitlementPackageName - + "." - + Character.toUpperCase(entitlementType.charAt(0)) - + entitlementType.substring(1) - + "Entitlement" - ); - } catch (ClassNotFoundException cnfe) { - throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]"); - } - if (Entitlement.class.isAssignableFrom(entitlementClass) == false) { + Class<?> entitlementClass = EXTERNAL_ENTITLEMENTS.get(entitlementType); + + if (entitlementClass == null) { throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]"); } + Constructor<?> entitlementConstructor = entitlementClass.getConstructors()[0]; ExternalEntitlement entitlementMetadata = entitlementConstructor.getAnnotation(ExternalEntitlement.class); if (entitlementMetadata == null) { throw newPolicyParserException(scopeName, "unknown entitlement type [" + entitlementType + "]"); } - if (policyParser.nextToken() != XContentParser.Token.START_OBJECT) { - throw newPolicyParserException(scopeName, entitlementType, "expected entitlement parameters"); + Class<?>[] parameterTypes = entitlementConstructor.getParameterTypes(); + String[] parametersNames = entitlementMetadata.parameterNames(); + + if (parameterTypes.length != 0 || parametersNames.length != 0) { + if (policyParser.nextToken() != XContentParser.Token.START_OBJECT) { + throw newPolicyParserException(scopeName, entitlementType, "expected entitlement parameters"); + } } + Map<String, Object> parsedValues = policyParser.map(); - Class<?>[] parameterTypes = entitlementConstructor.getParameterTypes(); - String[] parametersNames = entitlementMetadata.parameterNames(); Object[] parameterValues = new Object[parameterTypes.length]; for (int parameterIndex = 0; parameterIndex < parameterTypes.length; ++parameterIndex) { String parameterName = parametersNames[parameterIndex]; diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java index de8280ea87fe..7eb2b1fb476b 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserFailureTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.test.ESTestCase; import java.io.ByteArrayInputStream; -import java.io.IOException; import java.nio.charset.StandardCharsets; public class PolicyParserFailureTests extends ESTestCase { @@ -26,7 +25,7 @@ public void testParserSyntaxFailures() { assertEquals("[1:1] policy parsing error for [test-failure-policy.yaml]: expected object ", ppe.getMessage()); } - public void testEntitlementDoesNotExist() throws IOException { + public void testEntitlementDoesNotExist() { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - does_not_exist: {} @@ -38,7 +37,7 @@ public void testEntitlementDoesNotExist() throws IOException { ); } - public void testEntitlementMissingParameter() throws IOException { + public void testEntitlementMissingParameter() { PolicyParserException ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - file: {} @@ -61,7 +60,7 @@ public void testEntitlementMissingParameter() throws IOException { ); } - public void testEntitlementExtraneousParameter() throws IOException { + public void testEntitlementExtraneousParameter() { PolicyParserException 
ppe = expectThrows(PolicyParserException.class, () -> new PolicyParser(new ByteArrayInputStream(""" entitlement-module-name: - file: diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java index 40016b2e3027..a514cfe41889 100644 --- a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyParserTests.java @@ -11,11 +11,31 @@ import org.elasticsearch.test.ESTestCase; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.List; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + public class PolicyParserTests extends ESTestCase { + private static class TestWrongEntitlementName implements Entitlement {} + + public void testGetEntitlementTypeName() { + assertEquals("create_class_loader", PolicyParser.getEntitlementTypeName(CreateClassLoaderEntitlement.class)); + + var ex = expectThrows(IllegalArgumentException.class, () -> PolicyParser.getEntitlementTypeName(TestWrongEntitlementName.class)); + assertThat( + ex.getMessage(), + equalTo("TestWrongEntitlementName is not a valid Entitlement class name. A valid class name must end with 'Entitlement'") + ); + } + public void testPolicyBuilder() throws IOException { Policy parsedPolicy = new PolicyParser(PolicyParserTests.class.getResourceAsStream("test-policy.yaml"), "test-policy.yaml") .parsePolicy(); @@ -25,4 +45,23 @@ public void testPolicyBuilder() throws IOException { ); assertEquals(parsedPolicy, builtPolicy); } + + public void testParseCreateClassloader() throws IOException { + Policy parsedPolicy = new PolicyParser(new ByteArrayInputStream(""" + entitlement-module-name: + - create_class_loader + """.getBytes(StandardCharsets.UTF_8)), "test-policy.yaml").parsePolicy(); + Policy builtPolicy = new Policy( + "test-policy.yaml", + List.of(new Scope("entitlement-module-name", List.of(new CreateClassLoaderEntitlement()))) + ); + assertThat( + parsedPolicy.scopes, + contains( + both(transformedMatch((Scope scope) -> scope.name, equalTo("entitlement-module-name"))).and( + transformedMatch(scope -> scope.entitlements, contains(instanceOf(CreateClassLoaderEntitlement.class))) + ) + ) + ); + } } From 3e57a57b283b19a72649845e1185de404ac8ddba Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Thu, 5 Dec 2024 10:46:40 +0200 Subject: [PATCH 29/45] [8.x] Fix for propagating filters from compound to inner retrievers (#117914) (#118046) * Fix for propagating filters from compound to inner retrievers (#117914) * Update RRFRetrieverBuilderIT.java --- docs/changelog/117914.yaml | 5 ++ .../retriever/CompoundRetrieverBuilder.java | 32 +++++--- .../search/retriever/KnnRetrieverBuilder.java | 3 +- .../retriever/RankDocsRetrieverBuilder.java | 16 +--- .../RankDocsRetrieverBuilderTests.java | 7 +- .../vectors/TestQueryVectorBuilderPlugin.java | 8 +- .../TestCompoundRetrieverBuilder.java | 10 ++- .../retriever/QueryRuleRetrieverBuilder.java | 15 +++- .../TextSimilarityRankRetrieverBuilder.java | 7 +- .../xpack/rank/rrf/RRFRetrieverBuilderIT.java | 38 +++++++++- .../xpack/rank/rrf/RRFFeatures.java | 6 ++ 
.../xpack/rank/rrf/RRFRetrieverBuilder.java | 7 +- ...rrf_retriever_search_api_compatibility.yml | 74 +++++++++++++++++++ 13 files changed, 180 insertions(+), 48 deletions(-) create mode 100644 docs/changelog/117914.yaml diff --git a/docs/changelog/117914.yaml b/docs/changelog/117914.yaml new file mode 100644 index 000000000000..da58ed7bb04b --- /dev/null +++ b/docs/changelog/117914.yaml @@ -0,0 +1,5 @@ +pr: 117914 +summary: Fix for propagating filters from compound to inner retrievers +area: Ranking +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index db839de9f573..2ab6395db73b 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportMultiSearchAction; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.rest.RestStatus; @@ -46,6 +47,8 @@ */ public abstract class CompoundRetrieverBuilder> extends RetrieverBuilder { + public static final NodeFeature INNER_RETRIEVERS_FILTER_SUPPORT = new NodeFeature("inner_retrievers_filter_support"); + public record RetrieverSource(RetrieverBuilder retriever, SearchSourceBuilder source) {} protected final int rankWindowSize; @@ -64,9 +67,9 @@ public T addChild(RetrieverBuilder retrieverBuilder) { /** * Returns a clone of the original retriever, replacing the sub-retrievers with - * the provided {@code newChildRetrievers}. + * the provided {@code newChildRetrievers} and the filters with the {@code newPreFilterQueryBuilders}. */ - protected abstract T clone(List newChildRetrievers); + protected abstract T clone(List newChildRetrievers, List newPreFilterQueryBuilders); /** * Combines the provided {@code rankResults} to return the final top documents. @@ -85,13 +88,25 @@ public final RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOExceptio } // Rewrite prefilters - boolean hasChanged = false; + // We eagerly rewrite prefilters, because some of the innerRetrievers + // could be compound too, so we want to propagate all the necessary filter information to them + // and have it available as part of their own rewrite step var newPreFilters = rewritePreFilters(ctx); - hasChanged |= newPreFilters != preFilterQueryBuilders; + if (newPreFilters != preFilterQueryBuilders) { + return clone(innerRetrievers, newPreFilters); + } + boolean hasChanged = false; // Rewrite retriever sources List newRetrievers = new ArrayList<>(); for (var entry : innerRetrievers) { + // we propagate the filters only for compound retrievers as they won't be attached through + // the createSearchSourceBuilder. 
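The rewrite contract in this CompoundRetrieverBuilder hunk is the heart of the fix: the retriever first rewrites its own prefilters eagerly, returning a clone so the rewrite loop re-enters with the filters in their final form, and only then (in the loop continuing below) pushes those filters down into compound children, while leaf children receive them later through createSearchSourceBuilder. A simplified control-flow sketch, using stand-in types and plain strings instead of the real RetrieverBuilder and QueryBuilder APIs:

```java
import java.util.List;

interface Retriever {
    boolean isCompound();

    List<String> preFilters(); // stands in for getPreFilterQueryBuilders()

    Retriever rewrite();
}

class Compound implements Retriever {
    final List<Retriever> children;
    final List<String> preFilters;

    Compound(List<Retriever> children, List<String> preFilters) {
        this.children = children;
        this.preFilters = preFilters;
    }

    @Override
    public boolean isCompound() {
        return true;
    }

    @Override
    public List<String> preFilters() {
        return preFilters;
    }

    @Override
    public Retriever rewrite() {
        // Phase 1: rewrite this level's filters eagerly; returning a clone makes
        // the caller re-enter rewrite() with the filters already in final form.
        List<String> rewritten = rewriteFilters(preFilters);
        if (rewritten != preFilters) {
            return new Compound(children, rewritten);
        }
        // Phase 2: push filters into compound children only; leaf children get
        // them via createSearchSourceBuilder instead, and skipping them here
        // avoids attaching the same filter twice across re-entrant rewrites.
        for (Retriever child : children) {
            if (child.isCompound() && preFilters.isEmpty() == false) {
                child.preFilters().addAll(preFilters);
            }
        }
        return this;
    }

    private static List<String> rewriteFilters(List<String> filters) {
        return filters; // no-op here; the real code delegates to rewritePreFilters(ctx)
    }
}
```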
+ // We could remove this check, but we would end up adding the same filters + // multiple times in case an inner retriever rewrites itself, when we re-enter to rewrite + if (entry.retriever.isCompound() && false == preFilterQueryBuilders.isEmpty()) { + entry.retriever.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); + } RetrieverBuilder newRetriever = entry.retriever.rewrite(ctx); if (newRetriever != entry.retriever) { newRetrievers.add(new RetrieverSource(newRetriever, null)); @@ -106,7 +121,7 @@ public final RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOExceptio } } if (hasChanged) { - return clone(newRetrievers); + return clone(newRetrievers, newPreFilters); } // execute searches @@ -166,12 +181,7 @@ public void onFailure(Exception e) { }); }); - return new RankDocsRetrieverBuilder( - rankWindowSize, - newRetrievers.stream().map(s -> s.retriever).toList(), - results::get, - newPreFilters - ); + return new RankDocsRetrieverBuilder(rankWindowSize, newRetrievers.stream().map(s -> s.retriever).toList(), results::get); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java index 8be9a78dae15..f1464c41ca3b 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/KnnRetrieverBuilder.java @@ -184,8 +184,7 @@ public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { ll.onResponse(null); })); }); - var rewritten = new KnnRetrieverBuilder(this, () -> toSet.get(), null); - return rewritten; + return new KnnRetrieverBuilder(this, () -> toSet.get(), null); } return super.rewrite(ctx); } diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java index 02f890f51d01..4d3f3fefd446 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilder.java @@ -33,19 +33,13 @@ public class RankDocsRetrieverBuilder extends RetrieverBuilder { final List sources; final Supplier rankDocs; - public RankDocsRetrieverBuilder( - int rankWindowSize, - List sources, - Supplier rankDocs, - List preFilterQueryBuilders - ) { + public RankDocsRetrieverBuilder(int rankWindowSize, List sources, Supplier rankDocs) { this.rankWindowSize = rankWindowSize; this.rankDocs = rankDocs; if (sources == null || sources.isEmpty()) { throw new IllegalArgumentException("sources must not be null or empty"); } this.sources = sources; - this.preFilterQueryBuilders = preFilterQueryBuilders; } @Override @@ -73,10 +67,6 @@ private boolean sourceShouldRewrite(QueryRewriteContext ctx) throws IOException @Override public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { assert false == sourceShouldRewrite(ctx) : "retriever sources should be rewritten first"; - var rewrittenFilters = rewritePreFilters(ctx); - if (rewrittenFilters != preFilterQueryBuilders) { - return new RankDocsRetrieverBuilder(rankWindowSize, sources, rankDocs, rewrittenFilters); - } return this; } @@ -94,7 +84,7 @@ public QueryBuilder topDocsQuery() { boolQuery.should(query); } } - // ignore prefilters of this level, they are already propagated to children + // ignore prefilters of this level, they were already propagated to children return 
boolQuery; } @@ -133,7 +123,7 @@ public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder } else { rankQuery = new RankDocsQueryBuilder(rankDocResults, null, false); } - // ignore prefilters of this level, they are already propagated to children + // ignore prefilters of this level, they were already propagated to children searchSourceBuilder.query(rankQuery); if (sourceHasMinScore()) { searchSourceBuilder.minScore(this.minScore() == null ? Float.MIN_VALUE : this.minScore()); diff --git a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java index af6782c45dce..ccf33c0b71b6 100644 --- a/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/retriever/RankDocsRetrieverBuilderTests.java @@ -95,12 +95,7 @@ private List preFilters(QueryRewriteContext queryRewriteContext) t } private RankDocsRetrieverBuilder createRandomRankDocsRetrieverBuilder(QueryRewriteContext queryRewriteContext) throws IOException { - return new RankDocsRetrieverBuilder( - randomIntBetween(1, 100), - innerRetrievers(queryRewriteContext), - rankDocsSupplier(), - preFilters(queryRewriteContext) - ); + return new RankDocsRetrieverBuilder(randomIntBetween(1, 100), innerRetrievers(queryRewriteContext), rankDocsSupplier()); } public void testExtractToSearchSourceBuilder() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java index c47c8c16f6a2..5733a51bb7e9 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java @@ -27,9 +27,9 @@ /** * A SearchPlugin to exercise query vector builder */ -class TestQueryVectorBuilderPlugin implements SearchPlugin { +public class TestQueryVectorBuilderPlugin implements SearchPlugin { - static class TestQueryVectorBuilder implements QueryVectorBuilder { + public static class TestQueryVectorBuilder implements QueryVectorBuilder { private static final String NAME = "test_query_vector_builder"; private static final ParseField QUERY_VECTOR = new ParseField("query_vector"); @@ -47,11 +47,11 @@ static class TestQueryVectorBuilder implements QueryVectorBuilder { private List vectorToBuild; - TestQueryVectorBuilder(List vectorToBuild) { + public TestQueryVectorBuilder(List vectorToBuild) { this.vectorToBuild = vectorToBuild; } - TestQueryVectorBuilder(float[] expected) { + public TestQueryVectorBuilder(float[] expected) { this.vectorToBuild = new ArrayList<>(expected.length); for (float f : expected) { vectorToBuild.add(f); diff --git a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java index 9f199aa7f3ef..4a5f280c10a9 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/retriever/TestCompoundRetrieverBuilder.java @@ -10,6 +10,7 @@ package org.elasticsearch.search.retriever; import org.apache.lucene.search.ScoreDoc; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.rank.RankDoc; 
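The test-framework hunk continuing below shows what every subclass now has to do: carry the prefilters through clone() alongside the children, so that a rewrite cannot silently drop them. Reduced to its essentials (stand-in types with string-valued children and filters, not the real API):

```java
import java.util.List;

abstract class CompoundSketch<T extends CompoundSketch<T>> {
    final List<String> children;
    List<String> preFilters;

    CompoundSketch(List<String> children, List<String> preFilters) {
        this.children = children;
        this.preFilters = preFilters;
    }

    // was: clone(List newChildren) -- which reset the filters on every rewrite
    protected abstract T clone(List<String> newChildren, List<String> newPreFilters);
}

class TestCompoundSketch extends CompoundSketch<TestCompoundSketch> {
    TestCompoundSketch(List<String> children, List<String> preFilters) {
        super(children, preFilters);
    }

    @Override
    protected TestCompoundSketch clone(List<String> newChildren, List<String> newPreFilters) {
        // both halves of the state round-trip through the clone
        return new TestCompoundSketch(newChildren, newPreFilters);
    }
}
```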
import org.elasticsearch.xcontent.XContentBuilder; @@ -23,16 +24,17 @@ public class TestCompoundRetrieverBuilder extends CompoundRetrieverBuilder(), rankWindowSize); + this(new ArrayList<>(), rankWindowSize, new ArrayList<>()); } - TestCompoundRetrieverBuilder(List childRetrievers, int rankWindowSize) { + TestCompoundRetrieverBuilder(List childRetrievers, int rankWindowSize, List preFilterQueryBuilders) { super(childRetrievers, rankWindowSize); + this.preFilterQueryBuilders = preFilterQueryBuilders; } @Override - protected TestCompoundRetrieverBuilder clone(List newChildRetrievers) { - return new TestCompoundRetrieverBuilder(newChildRetrievers, rankWindowSize); + protected TestCompoundRetrieverBuilder clone(List newChildRetrievers, List newPreFilterQueryBuilders) { + return new TestCompoundRetrieverBuilder(newChildRetrievers, rankWindowSize, newPreFilterQueryBuilders); } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java index 4eb4d5c8a741..6ea8afd3e51c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/retriever/QueryRuleRetrieverBuilder.java @@ -110,12 +110,14 @@ public QueryRuleRetrieverBuilder( Map matchCriteria, List retrieverSource, int rankWindowSize, - String retrieverName + String retrieverName, + List preFilterQueryBuilders ) { super(retrieverSource, rankWindowSize); this.rulesetIds = rulesetIds; this.matchCriteria = matchCriteria; this.retrieverName = retrieverName; + this.preFilterQueryBuilders = preFilterQueryBuilders; } @Override @@ -156,8 +158,15 @@ public void doToXContent(XContentBuilder builder, Params params) throws IOExcept } @Override - protected QueryRuleRetrieverBuilder clone(List newChildRetrievers) { - return new QueryRuleRetrieverBuilder(rulesetIds, matchCriteria, newChildRetrievers, rankWindowSize, retrieverName); + protected QueryRuleRetrieverBuilder clone(List newChildRetrievers, List newPreFilterQueryBuilders) { + return new QueryRuleRetrieverBuilder( + rulesetIds, + matchCriteria, + newChildRetrievers, + rankWindowSize, + retrieverName, + newPreFilterQueryBuilders + ); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index d8d85993ac90..0cc1a300a02b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -129,7 +129,10 @@ public TextSimilarityRankRetrieverBuilder( } @Override - protected TextSimilarityRankRetrieverBuilder clone(List newChildRetrievers) { + protected TextSimilarityRankRetrieverBuilder clone( + List newChildRetrievers, + List newPreFilterQueryBuilders + ) { return new TextSimilarityRankRetrieverBuilder( newChildRetrievers, inferenceId, @@ -138,7 +141,7 @@ protected TextSimilarityRankRetrieverBuilder clone(List newChil rankWindowSize, minScore, retrieverName, - preFilterQueryBuilders + 
newPreFilterQueryBuilders ); } diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java index ab4b2b34e5b9..e4e06b503100 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; +import org.elasticsearch.search.vectors.TestQueryVectorBuilderPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.xcontent.XContentBuilder; @@ -57,7 +58,6 @@ public class RRFRetrieverBuilderIT extends ESIntegTestCase { protected static String INDEX = "test_index"; - protected static final String ID_FIELD = "_id"; protected static final String DOC_FIELD = "doc"; protected static final String TEXT_FIELD = "text"; protected static final String VECTOR_FIELD = "vector"; @@ -743,6 +743,42 @@ public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder expectThrows(UnsupportedOperationException.class, () -> client().prepareSearch(INDEX).setSource(source).get()); } + public void testRRFFiltersPropagatedToKnnQueryVectorBuilder() { + final int rankWindowSize = 100; + final int rankConstant = 10; + SearchSourceBuilder source = new SearchSourceBuilder(); + // this would match all docs, but the top-level filter narrows it to doc 7 + StandardRetrieverBuilder standardRetriever = new StandardRetrieverBuilder(QueryBuilders.matchAllQuery()); + // this too will retrieve just doc 7 + KnnRetrieverBuilder knnRetriever = new KnnRetrieverBuilder( + "vector", + null, + new TestQueryVectorBuilderPlugin.TestQueryVectorBuilder(new float[] { 3 }), + 10, + 10, + null + ); + source.retriever( + new RRFRetrieverBuilder( + Arrays.asList( + new CompoundRetrieverBuilder.RetrieverSource(standardRetriever, null), + new CompoundRetrieverBuilder.RetrieverSource(knnRetriever, null) + ), + rankWindowSize, + rankConstant + ) + ); + source.retriever().getPreFilterQueryBuilders().add(QueryBuilders.boolQuery().must(QueryBuilders.termQuery(DOC_FIELD, "doc_7"))); + source.size(10); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + ElasticsearchAssertions.assertResponse(req, resp -> { + assertNull(resp.pointInTimeId()); + assertNotNull(resp.getHits().getTotalHits()); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + assertThat(resp.getHits().getHits()[0].getId(), equalTo("doc_7")); + }); + } + public void testRewriteOnce() { final float[] vector = new float[] { 1 }; AtomicInteger numAsyncCalls = new AtomicInteger(); diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java index bbc0f622724a..bb61fa951948 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFFeatures.java @@ -12,6 +12,7 @@ import java.util.Set; +import static org.elasticsearch.search.retriever.CompoundRetrieverBuilder.INNER_RETRIEVERS_FILTER_SUPPORT;
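The static import above wires the new node feature into RRFFeatures: the hunk that follows exposes INNER_RETRIEVERS_FILTER_SUPPORT via getTestFeatures(), which is what lets the YAML REST test at the end of this patch declare cluster_features: 'inner_retrievers_filter_support' and be skipped on nodes that predate the fix. The pattern, sketched with simplified stand-ins for the features API:

```java
import java.util.Set;

record NodeFeature(String id) {} // stand-in for org.elasticsearch.features.NodeFeature

interface FeatureSpecification {
    Set<NodeFeature> getFeatures();

    default Set<NodeFeature> getTestFeatures() {
        return Set.of();
    }
}

class RrfFeaturesSketch implements FeatureSpecification {
    static final NodeFeature INNER_RETRIEVERS_FILTER_SUPPORT = new NodeFeature("inner_retrievers_filter_support");

    @Override
    public Set<NodeFeature> getFeatures() {
        return Set.of(); // the production feature set is unchanged by this patch
    }

    @Override
    public Set<NodeFeature> getTestFeatures() {
        // advertised so YAML tests can require 'inner_retrievers_filter_support'
        return Set.of(INNER_RETRIEVERS_FILTER_SUPPORT);
    }
}
```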
import static org.elasticsearch.xpack.rank.rrf.RRFRetrieverBuilder.RRF_RETRIEVER_COMPOSITION_SUPPORTED; /** @@ -23,4 +24,9 @@ public class RRFFeatures implements FeatureSpecification { public Set getFeatures() { return Set.of(RRFRetrieverBuilder.RRF_RETRIEVER_SUPPORTED, RRF_RETRIEVER_COMPOSITION_SUPPORTED); } + + @Override + public Set getTestFeatures() { + return Set.of(INNER_RETRIEVERS_FILTER_SUPPORT); + } } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java index 792ff4eac389..f1171b74f746 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.util.Maps; import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.rank.RankBuilder; import org.elasticsearch.search.rank.RankDoc; @@ -108,8 +109,10 @@ public String getName() { } @Override - protected RRFRetrieverBuilder clone(List newRetrievers) { - return new RRFRetrieverBuilder(newRetrievers, this.rankWindowSize, this.rankConstant); + protected RRFRetrieverBuilder clone(List newRetrievers, List newPreFilterQueryBuilders) { + RRFRetrieverBuilder clone = new RRFRetrieverBuilder(newRetrievers, this.rankWindowSize, this.rankConstant); + clone.preFilterQueryBuilders = newPreFilterQueryBuilders; + return clone; } @Override diff --git a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml index 42c01f0b9636..cb30542d8000 100644 --- a/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml +++ b/x-pack/plugin/rank-rrf/src/yamlRestTest/resources/rest-api-spec/test/rrf/700_rrf_retriever_search_api_compatibility.yml @@ -1071,3 +1071,77 @@ setup: - match: { hits.hits.2.inner_hits.nested_data_field.hits.total.value: 0 } - match: { hits.hits.2.inner_hits.nested_vector_field.hits.total.value: 0 } + + +--- +"rrf retriever with filters to be passed to nested rrf retrievers": + - requires: + cluster_features: 'inner_retrievers_filter_support' + reason: 'requires fix for properly propagating filters to nested sub-retrievers' + + - do: + search: + _source: false + index: test + body: + retriever: + { + rrf: + { + filter: { + term: { + keyword: "technology" + } + }, + retrievers: [ + { + rrf: { + retrievers: [ + { + # this should only return docs 3 and 5 due to top level filter + standard: { + query: { + knn: { + field: vector, + query_vector: [ 4.0 ], + k: 3 + } + } + } }, + { + # this should return no docs as no docs match both biology and technology + standard: { + query: { + term: { + keyword: "biology" + } + } + } + } + ], + rank_window_size: 10, + rank_constant: 10 + } + }, + # this should only return doc 5 + { + standard: { + query: { + term: { + text: "term5" + } + } + } + } + ], + rank_window_size: 10, + rank_constant: 10 + } + } + size: 10 + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "3" } + + From 
fa7ac36f9c4a76aecaa1b8b38e1e88bd3d47214e Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Thu, 5 Dec 2024 10:32:26 +0100 Subject: [PATCH 30/45] [8.x] Only aggregations require at least one shard request (#115314) (#115794) * Only aggregations require at least one shard request (#115314) * unskipping shards only when aggs * Update docs/changelog/115314.yaml * fixed more tests * null check for searchRequest.source() (cherry picked from commit 7f573c6c28fb42e89d8bb76d6764dc681c239e06) * applying #115774 * skipped test * fixed test --------- Co-authored-by: Elastic Machine --- docs/changelog/115314.yaml | 5 +++ .../datastreams/TSDBIndexingIT.java | 2 +- .../org/elasticsearch/search/CCSDuelIT.java | 4 ++- .../test/multi_cluster/70_skip_shards.yml | 12 +++---- .../multi_cluster/90_index_name_query.yml | 4 +-- rest-api-spec/build.gradle | 1 + .../search/ccs/CrossClusterSearchIT.java | 4 +-- .../search/profile/query/QueryProfilerIT.java | 6 +++- .../search/stats/FieldUsageStatsIT.java | 12 ++++--- .../action/search/TransportSearchAction.java | 4 ++- .../search/CrossClusterAsyncSearchIT.java | 33 ++++++++++++------- .../mapper/SearchIdleTests.java | 10 ++---- .../rrf/RRFRankCoordinatorCanMatchIT.java | 5 +-- .../rank/rrf/RRFRankShardCanMatchIT.java | 5 +-- ...pshotsCanMatchOnCoordinatorIntegTests.java | 12 +++---- .../checkpoint/TransformCCSCanMatchIT.java | 6 ++-- .../oldrepos/OldRepositoryAccessIT.java | 4 +-- 17 files changed, 72 insertions(+), 57 deletions(-) create mode 100644 docs/changelog/115314.yaml diff --git a/docs/changelog/115314.yaml b/docs/changelog/115314.yaml new file mode 100644 index 000000000000..76ac12d58fcf --- /dev/null +++ b/docs/changelog/115314.yaml @@ -0,0 +1,5 @@ +pr: 115314 +summary: Only aggregations require at least one shard request +area: Search +type: enhancement +issues: [] diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index c404029fa409..68049a750c3b 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -412,7 +412,7 @@ public void testSkippingShards() throws Exception { assertResponse(client().search(searchRequest), searchResponse -> { ElasticsearchAssertions.assertNoSearchHits(searchResponse); assertThat(searchResponse.getTotalShards(), equalTo(2)); - assertThat(searchResponse.getSkippedShards(), equalTo(1)); + assertThat(searchResponse.getSkippedShards(), equalTo(2)); assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); }); } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 5dde1d664402..79cdc1047aec 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; 
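The heart of this patch lives in the TransportSearchAction hunk further down: whether the can_match phase must leave at least one shard unskipped now depends solely on whether the request carries aggregations, because only an aggregation needs a shard to produce an empty reduced result; a plain hit query that matches nothing can skip every shard, which is why assertions throughout this patch move from skipped == total - 1 to skipped == total. A sketch of that decision with simplified stand-ins for the request types:

```java
// Source and SearchRequestSketch are simplified stand-ins for the real classes.
record Source(boolean hasAggregations) {}

record SearchRequestSketch(Source source) {}

final class CanMatchDecision {
    // mirrors: searchRequest.source() != null && searchRequest.source().aggregations() != null
    static boolean requireAtLeastOneMatch(SearchRequestSketch request) {
        return request.source() != null && request.source().hasAggregations();
    }

    public static void main(String[] args) {
        // pure hit query matching nothing: every shard can be skipped
        System.out.println(requireAtLeastOneMatch(new SearchRequestSketch(new Source(false)))); // false
        // aggregations present: keep one shard so the response carries empty aggs
        System.out.println(requireAtLeastOneMatch(new SearchRequestSketch(new Source(true))));  // true
    }
}
```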
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -580,13 +581,14 @@ public void testSortByField() throws Exception { public void testSortByFieldOneClusterHasNoResults() throws Exception { assumeMultiClusterSetup(); - // set to a value greater than the number of shards to avoid differences due to the skipping of shards + // setting aggs to avoid differences due to the skipping of shards when matching none SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? REMOTE_INDEX_NAME : INDEX_NAME)); sourceBuilder.sort("type.keyword", SortOrder.ASC); sourceBuilder.sort("creationDate", SortOrder.DESC); sourceBuilder.sort("user.keyword", SortOrder.ASC); + sourceBuilder.aggregation(AggregationBuilders.max("max").field("creationDate")); CheckedConsumer responseChecker = response -> { assertHits(response); int size = response.evaluateArraySize("hits.hits"); diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml index 92ae11c712b2..f392ae6d0941 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/70_skip_shards.yml @@ -166,8 +166,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a terms query @@ -183,8 +182,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a prefix query @@ -200,8 +198,7 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } # check that skipped when we don't match the alias with a wildcard query @@ -217,7 +214,6 @@ - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - # When all shards are skipped current logic returns 1 to produce a valid search result - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml index a60a1b0d812e..be2ce033b123 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/90_index_name_query.yml @@ -81,7 +81,7 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { 
_shards.skipped : 2} - match: { _shards.failed: 0 } - do: @@ -98,5 +98,5 @@ teardown: - match: { hits.total.value: 0 } - match: { _shards.total: 2 } - match: { _shards.successful: 2 } - - match: { _shards.skipped : 1} + - match: { _shards.skipped : 2} - match: { _shards.failed: 0 } diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index e1b51a3e1a6a..c390919a11af 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -86,6 +86,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search/240_date_nanos/doc value fields are working as expected across date and date_nanos fields", "Fetching docvalues field multiple times is no longer allowed") task.skipTest("search/110_field_collapsing/field collapsing and rescore", "#107779 Field collapsing is compatible with rescore in 8.15") task.skipTest("indices.create/11_basic_with_types/Create index with mappings", "Empty mapping creation has changed") + task.skipTest("search/140_pre_filter_search_shards/pre_filter_shard_size with shards that have no hit", "#115314 we skip all shards unless the query has aggs, therefore the _shards.skipped counts do not match") task.replaceValueInMatch("_type", "_doc") task.addAllowedWarningRegex("\\[types removal\\].*") diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 6c28efe839ff..233c8a3e1fe5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -214,7 +214,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // with DFS_QUERY_THEN_FETCH, the local shards are never skipped assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -224,7 +224,7 @@ assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (clusters.isCcsMinimizeRoundtrips()) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index 6993f24b895e..bda0d6f7e88b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -68,7 +68,11 @@ public void testProfileQuery() throws Exception { prepareSearch().setQuery(q).setTrackTotalHits(true).setProfile(true).setSearchType(SearchType.QUERY_THEN_FETCH), response -> { assertNotNull("Profile response element should not be null",
response.getProfileResults()); - assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + if (response.getSkippedShards() == response.getSuccessfulShards()) { + assertEquals(0, response.getProfileResults().size()); + } else { + assertThat("Profile response should not be an empty array", response.getProfileResults().size(), not(0)); + } for (Map.Entry shard : response.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java index 140afd6b269b..3d5120226ebe 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java @@ -158,11 +158,15 @@ public void testFieldUsageStats() throws ExecutionException, InterruptedExceptio assertTrue(stats.hasField("date_field")); assertEquals(Set.of(UsageContext.POINTS), stats.get("date_field").keySet()); - // can_match does not enter search stats - // there is a special case though where we have no hit but we need to get at least one search response in order - // to produce a valid search result with all the aggs etc., so we hit one of the two shards + + long expectedShards = 2L * numShards; + if (numShards == 1) { + // with 1 shard and setPreFilterShardSize(1) we don't perform can_match phase but instead directly query the shard + expectedShards += 1; + } + assertEquals( - (2 * numShards) + 1, + expectedShards, indicesAdmin().prepareStats("test") .clear() .setSearch(true) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 30ca30c7aec7..190d949602aa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -1457,6 +1457,8 @@ public SearchPhase newSearchPhase( SearchResponse.Clusters clusters ) { if (preFilter) { + // only for aggs we need to contact shards even if there are no matches + boolean requireAtLeastOneMatch = searchRequest.source() != null && searchRequest.source().aggregations() != null; return new CanMatchPreFilterSearchPhase( logger, searchTransportService, @@ -1468,7 +1470,7 @@ public SearchPhase newSearchPhase( shardIterators, timeProvider, task, - true, + requireAtLeastOneMatch, searchService.getCoordinatorRewriteContextProvider(timeProvider::absoluteStartMillis), listener.delegateFailureAndWrap( (l, iters) -> newSearchPhase( diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index 9d83f88a043e..3b5647da1399 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -274,6 +274,8 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except boolean dfs = randomBoolean(); if 
(dfs) { request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } else { + request.getSearchRequest().searchType(SearchType.QUERY_THEN_FETCH); } RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp").from(100).to(2000); request.getSearchRequest().source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); @@ -285,23 +287,32 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except try { responseId = response.getId(); assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); SearchResponse.Clusters clusters = response.getSearchResponse().getClusters(); assertThat(clusters.getTotal(), equalTo(2)); - assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); - + if (dfs) { + assertTrue("search cluster results should be marked as partial", clusters.hasPartialResults()); + } else { + assertFalse( + "search cluster results should not be marked as partial as all shards are skipped", + clusters.hasPartialResults() + ); + } SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + if (dfs) { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); + } else { + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + } SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.RUNNING)); } finally { response.decRef(); } - - SearchListenerPlugin.waitSearchStarted(); + if (dfs) { + SearchListenerPlugin.waitSearchStarted(); + } SearchListenerPlugin.allowQueryPhase(); waitForSearchTasksToFinish(); @@ -331,7 +342,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -341,7 +352,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } @@ -377,7 +388,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except // no skipped shards locally when DFS_QUERY_THEN_FETCH is used assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); } else { - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards)); } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); 
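Every dropped "- 1" in these assertions is the same change seen from the test side: without the forced per-search query, a request that matches nothing and carries no aggs skips all shards during can_match and finishes immediately, which is also why the QUERY_THEN_FETCH branch above no longer observes a RUNNING cluster and why the profiler test earlier tolerates an empty profile. On illustrative numbers:

```java
// Illustrative skipped-shard accounting for a 3-shard index and a query
// matching no documents (no aggregations in the request).
public class SkippedShardAccounting {
    public static void main(String[] args) {
        int totalShards = 3;
        int skippedBefore = totalShards - 1; // one shard was force-queried to build a valid empty response
        int skippedAfter = totalShards;      // now every shard is skipped outright
        System.out.println("before: skipped " + skippedBefore + " of " + totalShards);
        System.out.println("after:  skipped " + skippedAfter + " of " + totalShards);
    }
}
```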
assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); @@ -387,7 +398,7 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); if (minimizeRoundtrips) { - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } else { assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); } diff --git a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java index 2da4e2802bdb..9eb792428537 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java +++ b/x-pack/plugin/mapper-constant-keyword/src/test/java/org/elasticsearch/xpack/constantkeyword/mapper/SearchIdleTests.java @@ -42,7 +42,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; public class SearchIdleTests extends ESSingleNodeTestCase { @@ -133,8 +132,7 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept // WHEN assertResponse(search("test*", "constant_keyword", randomAlphaOfLength(5), 5), searchResponse -> { assertEquals(RestStatus.OK, searchResponse.status()); - // NOTE: we need an empty result from at least one shard - assertEquals(idleIndexShardsCount + activeIndexShardsCount - 1, searchResponse.getSkippedShards()); + assertEquals(idleIndexShardsCount + activeIndexShardsCount, searchResponse.getSkippedShards()); assertEquals(0, searchResponse.getFailedShards()); assertEquals(0, searchResponse.getHits().getHits().length); }); @@ -144,12 +142,8 @@ public void testSearchIdleConstantKeywordMatchNoIndex() throws InterruptedExcept assertIdleShardsRefreshStats(beforeStatsResponse, afterStatsResponse); - // If no shards match the can match phase then at least one shard gets queries for an empty response. - // However, this affects the search idle stats. 
List active = Arrays.stream(afterStatsResponse.getShards()).filter(s -> s.isSearchIdle() == false).toList(); - assertThat(active, hasSize(1)); - assertThat(active.get(0).getShardRouting().getIndexName(), equalTo("test1")); - assertThat(active.get(0).getShardRouting().id(), equalTo(0)); + assertThat(active, hasSize(0)); } public void testSearchIdleConstantKeywordMatchOneIndex() throws InterruptedException { diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java index 445aeaa375e1..467668f008b0 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorCanMatchIT.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.Strings; import org.elasticsearch.index.IndexSettings; @@ -206,10 +207,10 @@ public void testCanMatchCoordinator() throws Exception { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java index 084ccc88bee3..09fe8d1b7ad6 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardCanMatchIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.rank.rrf; +import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.search.SearchType; @@ -199,10 +200,10 @@ public void testCanMatchShard() throws IOException { ) .setSize(5), response -> { - assertNull(response.getHits().getTotalHits()); + assertEquals(new TotalHits(0, TotalHits.Relation.EQUAL_TO), response.getHits().getTotalHits()); assertEquals(0, response.getHits().getHits().length); assertEquals(5, response.getSuccessfulShards()); - assertEquals(4, response.getSkippedShards()); + assertEquals(5, response.getSkippedShards()); } ); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java index 6e4f8daf9a38..d3df2b4ef970 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsCanMatchOnCoordinatorIntegTests.java @@ -385,11 +385,9 @@ public void testSearchableSnapshotShardsAreSkippedBySearchRequestWithoutQuerying } } else { assertResponse(client().search(request), newSearchResponse -> { - // When all shards are skipped, at least one of them should be queried in order to - // provide a proper search response. - assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount - 1)); - assertThat(newSearchResponse.getFailedShards(), equalTo(1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getSuccessfulShards(), equalTo(indexOutsideSearchRangeShardCount)); + assertThat(newSearchResponse.getFailedShards(), equalTo(0)); assertThat(newSearchResponse.getTotalShards(), equalTo(indexOutsideSearchRangeShardCount)); }); @@ -749,9 +747,7 @@ public void testQueryPhaseIsExecutedInAnAvailableNodeWhenAllShardsCanBeSkipped() // All the regular index searches succeeded assertThat(newSearchResponse.getSuccessfulShards(), equalTo(totalShards)); assertThat(newSearchResponse.getFailedShards(), equalTo(0)); - // We have to query at least one node to construct a valid response, and we pick - // a shard that's available in order to construct the search response - assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards - 1)); + assertThat(newSearchResponse.getSkippedShards(), equalTo(totalShards)); assertThat(newSearchResponse.getTotalShards(), equalTo(totalShards)); assertThat(newSearchResponse.getHits().getTotalHits().value, equalTo(0L)); }); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java index a7f7b5bd3edd..208da4177fd4 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCCSCanMatchIT.java @@ -197,15 +197,13 @@ public void testSearchAction_RangeQueryThatMatchesNoShards() throws ExecutionExc QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents true, 0, - // All but 2 shards are skipped. TBH I don't know why this 2 shards are not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 2 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); testSearchAction( QueryBuilders.rangeQuery("@timestamp").from(100_000_000), // This query matches no documents false, 0, - // All but 1 shards are skipped. 
TBH I don't know why this 1 shard is not skipped - oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards - 1 + oldLocalNumShards + newLocalNumShards + oldRemoteNumShards + newRemoteNumShards ); } diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index c8c72855eaf7..629aa8ebdb6b 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -488,8 +488,8 @@ private void assertDocs( logger.info(searchResponse); assertEquals(0, searchResponse.getHits().getTotalHits().value); assertEquals(numberOfShards, searchResponse.getSuccessfulShards()); - // When all shards are skipped, at least one of them is queried in order to provide a proper search response. - assertEquals(numberOfShards - 1, searchResponse.getSkippedShards()); + int expectedSkips = numberOfShards == 1 ? 0 : numberOfShards; + assertEquals(expectedSkips, searchResponse.getSkippedShards()); } finally { searchResponse.decRef(); } From 41770b7c5e4d96eb525aa07662b8ec2228ded695 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Thu, 5 Dec 2024 10:19:16 +0000 Subject: [PATCH 31/45] Collapse transport versions for 8.16 (#117991) (#118053) --- .../geoip/EnterpriseGeoIpTaskState.java | 2 +- .../ingest/geoip/GeoIpTaskState.java | 2 +- .../ingest/geoip/IngestGeoIpMetadata.java | 4 +- .../geoip/direct/DatabaseConfiguration.java | 4 +- .../elasticsearch/ElasticsearchException.java | 4 +- .../org/elasticsearch/TransportVersions.java | 73 +------------------ .../TransportGetAllocationStatsAction.java | 4 +- .../stats/NodesStatsRequestParameters.java | 4 +- .../create/CreateSnapshotRequest.java | 4 +- .../stats/ClusterStatsNodeResponse.java | 13 +--- .../stats/RemoteClusterStatsRequest.java | 4 +- .../admin/cluster/stats/SearchUsageStats.java | 6 +- .../stats/TransportClusterStatsAction.java | 4 +- .../get/GetComponentTemplateAction.java | 6 +- .../get/GetComposableIndexTemplateAction.java | 6 +- .../post/SimulateIndexTemplateResponse.java | 6 +- .../action/bulk/BulkItemResponse.java | 4 +- .../action/bulk/BulkRequest.java | 4 +- .../action/bulk/BulkResponse.java | 4 +- .../bulk/IndexDocFailureStoreStatus.java | 4 +- .../action/bulk/SimulateBulkRequest.java | 11 +-- .../datastreams/GetDataStreamAction.java | 8 +- .../FieldCapabilitiesIndexResponse.java | 8 +- .../action/index/IndexRequest.java | 8 +- .../action/index/IndexResponse.java | 8 +- .../action/search/OpenPointInTimeRequest.java | 4 +- .../search/OpenPointInTimeResponse.java | 2 +- .../action/search/SearchContextId.java | 6 +- .../action/search/SearchContextIdForNode.java | 8 +- .../TransportOpenPointInTimeAction.java | 3 +- .../action/support/IndicesOptions.java | 10 +-- .../cluster/health/ClusterIndexHealth.java | 4 +- .../cluster/health/ClusterShardHealth.java | 4 +- .../cluster/health/ClusterStateHealth.java | 4 +- .../cluster/metadata/DataStream.java | 11 ++- .../metadata/InferenceFieldMetadata.java | 7 +- .../cluster/routing/RoutingTable.java | 8 +- .../common/io/stream/StreamInput.java | 7 +- .../common/io/stream/StreamOutput.java | 9 +-- .../index/engine/CommitStats.java | 4 +- .../index/mapper/NodeMappingStats.java | 4 +- .../index/query/IntervalsSourceProvider.java | 4 +- .../index/query/RankDocsQueryBuilder.java | 8 +- 
.../index/search/stats/SearchStats.java | 4 +- .../inference/EmptySecretSettings.java | 2 +- .../inference/ModelConfigurations.java | 4 +- .../ingest/EnterpriseGeoIpTask.java | 2 +- .../elasticsearch/search/DocValueFormat.java | 4 +- .../elasticsearch/search/rank/RankDoc.java | 2 +- .../search/vectors/ExactKnnQueryBuilder.java | 6 +- .../vectors/KnnScoreDocQueryBuilder.java | 6 +- .../snapshots/RegisteredPolicySnapshots.java | 4 +- .../elasticsearch/TransportVersionTests.java | 2 +- .../NodesStatsRequestParametersTests.java | 2 +- .../cluster/stats/SearchUsageStatsTests.java | 2 +- .../common/io/stream/AbstractStreamTests.java | 15 ++-- .../DataStreamLifecycleFeatureSetUsage.java | 4 +- .../core/enrich/action/EnrichStatsAction.java | 4 +- .../ilm/IndexLifecycleExplainResponse.java | 4 +- .../core/ilm/SearchableSnapshotAction.java | 8 +- .../action/DeleteInferenceEndpointAction.java | 4 +- .../action/GetInferenceModelAction.java | 4 +- .../ml/MachineLearningFeatureSetUsage.java | 4 +- .../CreateTrainedModelAssignmentAction.java | 4 +- .../StartTrainedModelDeploymentAction.java | 8 +- .../UpdateTrainedModelDeploymentAction.java | 4 +- .../core/ml/calendars/ScheduledEvent.java | 4 +- .../inference/assignment/AssignmentStats.java | 4 +- .../assignment/TrainedModelAssignment.java | 4 +- .../trainedmodel/LearningToRankConfig.java | 3 +- .../core/ml/job/config/DetectionRule.java | 4 +- .../ConfigurableClusterPrivileges.java | 2 +- .../MachineLearningFeatureSetUsageTests.java | 2 +- .../rules/QueryRulesetListItem.java | 6 +- ...setsActionResponseBWCSerializingTests.java | 3 +- ...lesetActionRequestBWCSerializingTests.java | 2 +- ...esetActionResponseBWCSerializingTests.java | 2 +- .../xpack/esql/core/type/EsField.java | 2 +- .../xpack/esql/core/util/PlanStreamInput.java | 2 +- .../esql/core/util/PlanStreamOutput.java | 2 +- .../compute/operator/AggregationOperator.java | 4 +- .../compute/operator/DriverProfile.java | 4 +- .../compute/operator/DriverSleeps.java | 4 +- .../xpack/esql/action/EsqlExecutionInfo.java | 4 +- .../xpack/esql/action/EsqlQueryResponse.java | 4 +- .../esql/action/EsqlResolveFieldsAction.java | 2 +- .../esql/enrich/ResolvedEnrichPolicy.java | 6 +- .../function/UnsupportedAttribute.java | 6 +- .../function/aggregate/AggregateFunction.java | 8 +- .../function/aggregate/CountDistinct.java | 6 +- .../function/aggregate/FromPartial.java | 6 +- .../function/aggregate/Percentile.java | 6 +- .../expression/function/aggregate/Rate.java | 6 +- .../function/aggregate/ToPartial.java | 6 +- .../expression/function/aggregate/Top.java | 6 +- .../function/aggregate/WeightedAvg.java | 6 +- .../xpack/esql/index/EsIndex.java | 4 +- .../xpack/esql/io/stream/PlanStreamInput.java | 6 +- .../esql/io/stream/PlanStreamOutput.java | 6 +- .../esql/plan/physical/AggregateExec.java | 2 +- .../xpack/esql/plugin/ComputeResponse.java | 4 +- .../xpack/esql/plugin/DataNodeRequest.java | 4 +- .../xpack/esql/plugin/RemoteClusterPlan.java | 4 +- .../esql/querydsl/query/SingleValueQuery.java | 4 +- .../xpack/esql/session/Configuration.java | 4 +- .../esql/plugin/ClusterRequestTests.java | 6 +- .../SentenceBoundaryChunkingSettings.java | 6 +- .../WordBoundaryChunkingSettings.java | 2 +- .../rank/random/RandomRankBuilder.java | 2 +- .../textsimilarity/TextSimilarityRankDoc.java | 2 +- .../AlibabaCloudSearchService.java | 2 +- .../AlibabaCloudSearchServiceSettings.java | 2 +- ...aCloudSearchCompletionServiceSettings.java | 2 +- ...babaCloudSearchCompletionTaskSettings.java | 2 +- 
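This last patch is mechanical but worth decoding: every per-feature TransportVersion constant minted during 8.16 development is deleted, and each onOrAfter(FEATURE_X) wire-format check in the files listed here collapses onto the released V_8_16_0 constant. The premise of the collapse is that 8.16 shipped with all of those features, so for released versions the two checks accept exactly the same peers. The gating pattern, with a simplified stand-in for TransportVersion and purely illustrative ids:

```java
// Simplified stand-in for org.elasticsearch.TransportVersion; ids are illustrative.
record TransportVersionSketch(int id) {
    boolean onOrAfter(TransportVersionSketch other) {
        return id >= other.id;
    }
}

final class VersionGating {
    // during development each feature minted its own id...
    static final TransportVersionSketch INGEST_GEO_DATABASE_PROVIDERS = new TransportVersionSketch(8_760_00_0);
    // ...which the release collapses onto a single constant
    static final TransportVersionSketch V_8_16_0 = new TransportVersionSketch(8_772_00_0);

    public static void main(String[] args) {
        TransportVersionSketch peer = new TransportVersionSketch(8_800_00_0); // a post-8.16 node
        // after the collapse, serialization code branches on the release constant only
        System.out.println(peer.onOrAfter(V_8_16_0));                       // true: new wire format
        System.out.println(peer.onOrAfter(INGEST_GEO_DATABASE_PROVIDERS));  // true: same outcome as before
    }
}
```

The DatabaseConfiguration hunk that follows shows the rewrite applied in both the read and write paths.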
...aCloudSearchEmbeddingsServiceSettings.java | 2 +- ...babaCloudSearchEmbeddingsTaskSettings.java | 2 +- ...ibabaCloudSearchRerankServiceSettings.java | 2 +- .../AlibabaCloudSearchRerankTaskSettings.java | 2 +- ...ibabaCloudSearchSparseServiceSettings.java | 2 +- .../AlibabaCloudSearchSparseTaskSettings.java | 2 +- .../rerank/CohereRerankServiceSettings.java | 4 +- .../elastic/ElasticInferenceService.java | 2 +- ...erviceSparseEmbeddingsServiceSettings.java | 2 +- .../ElasticsearchInternalServiceSettings.java | 14 ++-- .../ibmwatsonx/IbmWatsonxService.java | 2 +- .../IbmWatsonxEmbeddingsServiceSettings.java | 2 +- .../ltr/LearningToRankRescorerBuilder.java | 2 +- .../xpack/rank/rrf/RRFRankDoc.java | 6 +- .../xpack/security/authc/ApiKeyService.java | 8 +- .../authz/store/NativeRolesStore.java | 4 +- .../RolesBackwardsCompatibilityIT.java | 10 +-- 131 files changed, 259 insertions(+), 414 deletions(-) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java index c4d0aef0183e..c128af69009b 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java @@ -123,7 +123,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER; + return TransportVersions.V_8_16_0; } @Override diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index 47ca79e3cb3b..96525d427d3e 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -44,7 +44,7 @@ public class GeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable { private static boolean includeSha256(TransportVersion version) { - return version.isPatchFrom(TransportVersions.V_8_15_0) || version.onOrAfter(TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER); + return version.onOrAfter(TransportVersions.V_8_15_0); } private static final ParseField DATABASES = new ParseField("databases"); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java index b6e73f3f33f7..a50fe7dee900 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java @@ -69,7 +69,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER; + return TransportVersions.V_8_16_0; } public Map getDatabases() { @@ -138,7 +138,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER; + return TransportVersions.V_8_16_0; } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java index 
a26364f9305e..aa48c73cf1d7 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/DatabaseConfiguration.java @@ -138,7 +138,7 @@ public DatabaseConfiguration(StreamInput in) throws IOException { } private static Provider readProvider(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { return in.readNamedWriteable(Provider.class); } else { // prior to the above version, everything was always a maxmind, so this half of the if is logical @@ -154,7 +154,7 @@ public static DatabaseConfiguration parse(XContentParser parser, String id) { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeString(name); - if (out.getTransportVersion().onOrAfter(TransportVersions.INGEST_GEO_DATABASE_PROVIDERS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeNamedWriteable(provider); } else { if (provider instanceof Maxmind maxmind) { diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index a349e50a6b51..981aafa5424f 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1929,13 +1929,13 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.ingest.IngestPipelineException.class, org.elasticsearch.ingest.IngestPipelineException::new, 182, - TransportVersions.INGEST_PIPELINE_EXCEPTION_ADDED + TransportVersions.V_8_16_0 ), INDEX_RESPONSE_WRAPPER_EXCEPTION( IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus.class, IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus::new, 183, - TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE + TransportVersions.V_8_16_0 ); final Class<? extends ElasticsearchException> exceptionClass; diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5545563bbd84..2811c841db7a 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -104,78 +104,7 @@ static TransportVersion def(int id) { public static final TransportVersion V_8_14_0 = def(8_636_00_1); public static final TransportVersion V_8_15_0 = def(8_702_00_2); public static final TransportVersion V_8_15_2 = def(8_702_00_3); - public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15 = def(8_702_00_4); - public static final TransportVersion ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS = def(8_703_00_0); - public static final TransportVersion INFERENCE_ADAPTIVE_ALLOCATIONS = def(8_704_00_0); - public static final TransportVersion INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN = def(8_705_00_0); - public static final TransportVersion ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED = def(8_706_00_0); - public static final TransportVersion ENRICH_CACHE_STATS_SIZE_ADDED = def(8_707_00_0); - public static final TransportVersion ENTERPRISE_GEOIP_DOWNLOADER = def(8_708_00_0); - public static final TransportVersion NODES_STATS_ENUM_SET = def(8_709_00_0); - public static final TransportVersion MASTER_NODE_METRICS = def(8_710_00_0); - public static final TransportVersion SEGMENT_LEVEL_FIELDS_STATS =
def(8_711_00_0); - public static final TransportVersion ML_ADD_DETECTION_RULE_PARAMS = def(8_712_00_0); - public static final TransportVersion FIX_VECTOR_SIMILARITY_INNER_HITS = def(8_713_00_0); - public static final TransportVersion INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN = def(8_714_00_0); - public static final TransportVersion ESQL_ATTRIBUTE_CACHED_SERIALIZATION = def(8_715_00_0); - public static final TransportVersion REGISTER_SLM_STATS = def(8_716_00_0); - public static final TransportVersion ESQL_NESTED_UNSUPPORTED = def(8_717_00_0); - public static final TransportVersion ESQL_SINGLE_VALUE_QUERY_SOURCE = def(8_718_00_0); - public static final TransportVersion ESQL_ORIGINAL_INDICES = def(8_719_00_0); - public static final TransportVersion ML_INFERENCE_EIS_INTEGRATION_ADDED = def(8_720_00_0); - public static final TransportVersion INGEST_PIPELINE_EXCEPTION_ADDED = def(8_721_00_0); - public static final TransportVersion ZDT_NANOS_SUPPORT_BROKEN = def(8_722_00_0); - public static final TransportVersion REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES = def(8_723_00_0); - public static final TransportVersion RANDOM_RERANKER_RETRIEVER = def(8_724_00_0); - public static final TransportVersion ESQL_PROFILE_SLEEPS = def(8_725_00_0); - public static final TransportVersion ZDT_NANOS_SUPPORT = def(8_726_00_0); - public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); - public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); - public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); - public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); - public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); - public static final TransportVersion REPOSITORIES_TELEMETRY = def(8_732_00_0); - public static final TransportVersion ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED = def(8_733_00_0); - public static final TransportVersion FIELD_CAPS_RESPONSE_INDEX_MODE = def(8_734_00_0); - public static final TransportVersion GET_DATA_STREAMS_VERBOSE = def(8_735_00_0); - public static final TransportVersion ESQL_ADD_INDEX_MODE_CONCRETE_INDICES = def(8_736_00_0); - public static final TransportVersion UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH = def(8_737_00_0); - public static final TransportVersion ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS = def(8_738_00_0); - public static final TransportVersion CCS_TELEMETRY_STATS = def(8_739_00_0); - public static final TransportVersion GLOBAL_RETENTION_TELEMETRY = def(8_740_00_0); - public static final TransportVersion ROUTING_TABLE_VERSION_REMOVED = def(8_741_00_0); - public static final TransportVersion ML_SCHEDULED_EVENT_TIME_SHIFT_CONFIGURATION = def(8_742_00_0); - public static final TransportVersion SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS = def(8_743_00_0); - public static final TransportVersion ML_INFERENCE_IBM_WATSONX_EMBEDDINGS_ADDED = def(8_744_00_0); - public static final TransportVersion BULK_INCREMENTAL_STATE = def(8_745_00_0); - public static final TransportVersion FAILURE_STORE_STATUS_IN_INDEX_RESPONSE = def(8_746_00_0); - public static final TransportVersion ESQL_AGGREGATION_OPERATOR_STATUS_FINISH_NANOS = def(8_747_00_0); - public static final TransportVersion ML_TELEMETRY_MEMORY_ADDED = def(8_748_00_0); - public static final TransportVersion ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE = def(8_749_00_0); - public static final TransportVersion SEMANTIC_TEXT_SEARCH_INFERENCE_ID = def(8_750_00_0); - public static final TransportVersion 
ML_INFERENCE_CHUNKING_SETTINGS = def(8_751_00_0); - public static final TransportVersion SEMANTIC_QUERY_INNER_HITS = def(8_752_00_0); - public static final TransportVersion RETAIN_ILM_STEP_INFO = def(8_753_00_0); - public static final TransportVersion ADD_DATA_STREAM_OPTIONS = def(8_754_00_0); - public static final TransportVersion CCS_REMOTE_TELEMETRY_STATS = def(8_755_00_0); - public static final TransportVersion ESQL_CCS_EXECUTION_INFO = def(8_756_00_0); - public static final TransportVersion REGEX_AND_RANGE_INTERVAL_QUERIES = def(8_757_00_0); - public static final TransportVersion RRF_QUERY_REWRITE = def(8_758_00_0); - public static final TransportVersion SEARCH_FAILURE_STATS = def(8_759_00_0); - public static final TransportVersion INGEST_GEO_DATABASE_PROVIDERS = def(8_760_00_0); - public static final TransportVersion DATE_TIME_DOC_VALUES_LOCALES = def(8_761_00_0); - public static final TransportVersion FAST_REFRESH_RCO = def(8_762_00_0); - public static final TransportVersion TEXT_SIMILARITY_RERANKER_QUERY_REWRITE = def(8_763_00_0); - public static final TransportVersion SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS = def(8_764_00_0); - public static final TransportVersion RETRIEVERS_TELEMETRY_ADDED = def(8_765_00_0); - public static final TransportVersion ESQL_CACHED_STRING_SERIALIZATION = def(8_766_00_0); - public static final TransportVersion CHUNK_SENTENCE_OVERLAP_SETTING_ADDED = def(8_767_00_0); - public static final TransportVersion OPT_IN_ESQL_CCS_EXECUTION_INFO = def(8_768_00_0); - public static final TransportVersion QUERY_RULE_TEST_API = def(8_769_00_0); - public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0); - public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); - public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); - public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); + public static final TransportVersion V_8_16_0 = def(8_772_00_1); public static final TransportVersion ADD_COMPATIBILITY_VERSIONS_TO_NODE_INFO_BACKPORT_8_16 = def(8_772_00_2); public static final TransportVersion SKIP_INNER_HITS_SEARCH_SOURCE_BACKPORT_8_16 = def(8_772_00_3); public static final TransportVersion QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16 = def(8_772_00_4); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java index e14f229f17ac..d929fb457d5d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportGetAllocationStatsAction.java @@ -118,7 +118,7 @@ public Request(TimeValue masterNodeTimeout, TaskId parentTaskId, EnumSet public Request(StreamInput in) throws IOException { super(in); - this.metrics = in.getTransportVersion().onOrAfter(TransportVersions.MASTER_NODE_METRICS) + this.metrics = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? 
in.readEnumSet(Metric.class) : EnumSet.of(Metric.ALLOCATIONS, Metric.FS); } @@ -127,7 +127,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0); super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.MASTER_NODE_METRICS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeEnumSet(metrics); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java index d34bc3ec0dc2..c5e8f37ed3a9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParameters.java @@ -117,7 +117,7 @@ public static Metric get(String name) { } public static void writeSetTo(StreamOutput out, EnumSet<Metric> metrics) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.NODES_STATS_ENUM_SET)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeEnumSet(metrics); } else { out.writeCollection(metrics, (output, metric) -> output.writeString(metric.metricName)); @@ -125,7 +125,7 @@ public static void writeSetTo(StreamOutput out, EnumSet<Metric> metrics) throws } public static EnumSet<Metric> readSetFrom(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.NODES_STATS_ENUM_SET)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { return in.readEnumSet(Metric.class); } else { return in.readCollection((i) -> EnumSet.noneOf(Metric.class), (is, out) -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 9c9467db40de..b6ced0662330 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -118,7 +118,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { waitForCompletion = in.readBoolean(); partial = in.readBoolean(); userMetadata = in.readGenericMap(); - uuid = in.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS) ? in.readOptionalString() : null; + uuid = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ?
in.readOptionalString() : null; } @Override @@ -136,7 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(waitForCompletion); out.writeBoolean(partial); out.writeGenericMap(userMetadata); - if (out.getTransportVersion().onOrAfter(TransportVersions.REGISTER_SLM_STATS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalString(uuid); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index f99baa855404..abeb73e5d8c3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -44,14 +44,11 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { } else { searchUsageStats = new SearchUsageStats(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats = RepositoryUsageStats.readFrom(in); - } else { - repositoryUsageStats = RepositoryUsageStats.EMPTY; - } - if (in.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { ccsMetrics = new CCSTelemetrySnapshot(in); } else { + repositoryUsageStats = RepositoryUsageStats.EMPTY; ccsMetrics = new CCSTelemetrySnapshot(); } } @@ -118,12 +115,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats.writeTo(out); } - if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { repositoryUsageStats.writeTo(out); - } // else just drop these stats, ok for bwc - if (out.getTransportVersion().onOrAfter(TransportVersions.CCS_TELEMETRY_STATS)) { ccsMetrics.writeTo(out); - } + } // else just drop these stats, ok for bwc } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java index 47843a91351e..6c3c5cbb50ec 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RemoteClusterStatsRequest.java @@ -36,9 +36,9 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().onOrAfter(TransportVersions.CCS_REMOTE_TELEMETRY_STATS) + assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) : "RemoteClusterStatsRequest is not supported by the remote cluster"; - if (out.getTransportVersion().before(TransportVersions.CCS_REMOTE_TELEMETRY_STATS)) { + if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) { throw new UnsupportedOperationException("RemoteClusterStatsRequest is not supported by the remote cluster"); } super.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java index 0f6c56fd21bd..a6e80b5efd08 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java 
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java @@ -22,8 +22,8 @@ import java.util.Map; import java.util.Objects; -import static org.elasticsearch.TransportVersions.RETRIEVERS_TELEMETRY_ADDED; import static org.elasticsearch.TransportVersions.V_8_12_0; +import static org.elasticsearch.TransportVersions.V_8_16_0; /** * Holds a snapshot of the search usage statistics. @@ -71,7 +71,7 @@ public SearchUsageStats(StreamInput in) throws IOException { this.sections = in.readMap(StreamInput::readLong); this.totalSearchCount = in.readVLong(); this.rescorers = in.getTransportVersion().onOrAfter(V_8_12_0) ? in.readMap(StreamInput::readLong) : Map.of(); - this.retrievers = in.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED) ? in.readMap(StreamInput::readLong) : Map.of(); + this.retrievers = in.getTransportVersion().onOrAfter(V_8_16_0) ? in.readMap(StreamInput::readLong) : Map.of(); } @Override @@ -83,7 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(V_8_12_0)) { out.writeMap(rescorers, StreamOutput::writeLong); } - if (out.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED)) { + if (out.getTransportVersion().onOrAfter(V_8_16_0)) { out.writeMap(retrievers, StreamOutput::writeLong); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 33e3a5ea049e..fa201b613057 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -75,8 +75,6 @@ import java.util.function.BooleanSupplier; import java.util.stream.Collectors; -import static org.elasticsearch.TransportVersions.CCS_REMOTE_TELEMETRY_STATS; - /** * Transport action implementing _cluster/stats API. 
*/ @@ -461,7 +459,7 @@ protected void sendItemRequest(String clusterAlias, ActionListener { - if (connection.getTransportVersion().before(CCS_REMOTE_TELEMETRY_STATS)) { + if (connection.getTransportVersion().before(TransportVersions.V_8_16_0)) { responseListener.onResponse(null); } else { remoteClusterClient.execute(connection, TransportRemoteClusterStatsAction.REMOTE_TYPE, remoteRequest, responseListener); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index c6d990e5a1d6..f729455edcc2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -131,8 +131,7 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { in.readOptionalWriteable(DataStreamGlobalRetention::read); } } @@ -190,8 +189,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index a47f89030cc6..67f87476ea6a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -132,8 +132,7 @@ public Response(StreamInput in) throws IOException { } else { rolloverConfiguration = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { in.readOptionalWriteable(DataStreamGlobalRetention::read); } } @@ -191,8 +190,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(null); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 
064c24cf4afa..80e6fbfe051a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -82,8 +82,7 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { rolloverConfiguration = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RolloverConfiguration::new) : null; - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && in.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { in.readOptionalWriteable(DataStreamGlobalRetention::read); } } @@ -104,8 +103,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeOptionalWriteable(rolloverConfiguration); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) - && out.getTransportVersion().before(TransportVersions.REMOVE_GLOBAL_RETENTION_FROM_TEMPLATES)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(null); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index c0ceab139ff1..6ea2045315c3 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -205,7 +205,7 @@ public Failure(StreamInput in) throws IOException { seqNo = in.readZLong(); term = in.readVLong(); aborted = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.read(in); } else { failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN; @@ -223,7 +223,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(seqNo); out.writeVLong(term); out.writeBoolean(aborted); - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index f62b2f48fa2f..91caebc420ff 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -98,7 +98,7 @@ public BulkRequest(StreamInput in) throws IOException { for (DocWriteRequest<?> request : requests) { indices.add(Objects.requireNonNull(request.index(), "request index must not be null")); } - if (in.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState = new BulkRequest.IncrementalState(in); } else { incrementalState = BulkRequest.IncrementalState.EMPTY; @@ -454,7 +454,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(requests, DocWriteRequest::writeDocumentRequest); refreshPolicy.writeTo(out);
out.writeTimeValue(timeout); - if (out.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index ec7a08007de9..12d3aa67ca9b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -46,7 +46,7 @@ public BulkResponse(StreamInput in) throws IOException { responses = in.readArray(BulkItemResponse::new, BulkItemResponse[]::new); tookInMillis = in.readVLong(); ingestTookInMillis = in.readZLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState = new BulkRequest.IncrementalState(in); } else { incrementalState = BulkRequest.IncrementalState.EMPTY; @@ -151,7 +151,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeArray(responses); out.writeVLong(tookInMillis); out.writeZLong(ingestTookInMillis); - if (out.getTransportVersion().onOrAfter(TransportVersions.BULK_INCREMENTAL_STATE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { incrementalState.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java b/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java index cb83d693a415..7367dfa1d53f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/IndexDocFailureStoreStatus.java @@ -124,7 +124,7 @@ public ExceptionWithFailureStoreStatus(BulkItemResponse.Failure failure) { public ExceptionWithFailureStoreStatus(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.fromId(in.readByte()); } else { failureStoreStatus = NOT_APPLICABLE_OR_UNKNOWN; @@ -134,7 +134,7 @@ public ExceptionWithFailureStoreStatus(StreamInput in) throws IOException { @Override protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeByte(failureStoreStatus.getId()); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java index cc7fd431d809..290d342e9dc1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/SimulateBulkRequest.java @@ -135,14 +135,11 @@ public SimulateBulkRequest( public SimulateBulkRequest(StreamInput in) throws IOException { super(in); this.pipelineSubstitutions = (Map<String, Map<String, Object>>) in.readGenericValue(); - if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.componentTemplateSubstitutions =
(Map<String, Map<String, Object>>) in.readGenericValue(); - } else { - componentTemplateSubstitutions = Map.of(); - } - if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS)) { this.indexTemplateSubstitutions = (Map<String, Map<String, Object>>) in.readGenericValue(); } else { + componentTemplateSubstitutions = Map.of(); indexTemplateSubstitutions = Map.of(); } if (in.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) { @@ -156,10 +153,8 @@ public SimulateBulkRequest(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeGenericValue(pipelineSubstitutions); - if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_COMPONENT_TEMPLATES_SUBSTITUTIONS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeGenericValue(componentTemplateSubstitutions); - } - if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS)) { out.writeGenericValue(indexTemplateSubstitutions); } if (out.getTransportVersion().onOrAfter(TransportVersions.SIMULATE_MAPPING_ADDITION)) { diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index c1cf0fa7aab4..93c40ad18cc8 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -112,7 +112,7 @@ public Request(StreamInput in) throws IOException { } else { this.includeDefaults = false; } - if (in.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.verbose = in.readBoolean(); } else { this.verbose = false; @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) { out.writeBoolean(includeDefaults); } - if (out.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeBoolean(verbose); } } @@ -275,7 +275,7 @@ public DataStreamInfo( in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null, in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readMap(Index::new, IndexProperties::new) : Map.of(), in.getTransportVersion().onOrAfter(V_8_11_X) ? in.readBoolean() : true, - in.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE) ? in.readOptionalVLong() : null + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ?
in.readOptionalVLong() : null ); } @@ -328,7 +328,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(indexSettingsValues); out.writeBoolean(templatePreferIlmValue); } - if (out.getTransportVersion().onOrAfter(TransportVersions.GET_DATA_STREAMS_VERBOSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalVLong(maximumTimestamp); } } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index d16100a64713..6f510ad26f5e 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -62,7 +62,7 @@ public FieldCapabilitiesIndexResponse( } else { this.indexMappingHash = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.indexMode = IndexMode.readFrom(in); } else { this.indexMode = IndexMode.STANDARD; @@ -77,7 +77,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(MAPPING_HASH_VERSION)) { out.writeOptionalString(indexMappingHash); } - if (out.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { IndexMode.writeTo(indexMode, out); } } @@ -105,7 +105,7 @@ static List<FieldCapabilitiesIndexResponse> readList(StreamInput input) throws I private static void collectCompressedResponses(StreamInput input, int groups, ArrayList<FieldCapabilitiesIndexResponse> responses) throws IOException { final CompressedGroup[] compressedGroups = new CompressedGroup[groups]; - final boolean readIndexMode = input.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE); + final boolean readIndexMode = input.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); for (int i = 0; i < groups; i++) { final String[] indices = input.readStringArray(); final IndexMode indexMode = readIndexMode ?
IndexMode.readFrom(input) : IndexMode.STANDARD; @@ -179,7 +179,7 @@ private static void writeCompressedResponses(StreamOutput output, Map { o.writeCollection(fieldCapabilitiesIndexResponses, (oo, r) -> oo.writeString(r.indexName)); var first = fieldCapabilitiesIndexResponses.get(0); - if (output.getTransportVersion().onOrAfter(TransportVersions.FIELD_CAPS_RESPONSE_INDEX_MODE)) { + if (output.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { IndexMode.writeTo(first.indexMode, o); } o.writeString(first.indexMappingHash); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index c0811e7424b0..5254c6fd06db 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -205,10 +205,8 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { in.readZLong(); // obsolete normalisedBytesParsed } - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { in.readBoolean(); // obsolete originatesFromUpdateByScript - } - if (in.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { in.readBoolean(); // obsolete originatesFromUpdateByDoc } } @@ -789,10 +787,8 @@ private void writeBody(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeZLong(-1); // obsolete normalisedBytesParsed } - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_SCRIPT_ORIGIN)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeBoolean(false); // obsolete originatesFromUpdateByScript - } - if (out.getTransportVersion().onOrAfter(TransportVersions.INDEX_REQUEST_UPDATE_BY_DOC_ORIGIN)) { out.writeBoolean(false); // obsolete originatesFromUpdateByDoc } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 8d1bdf227e24..7c45de890517 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -46,7 +46,7 @@ public IndexResponse(ShardId shardId, StreamInput in) throws IOException { } else { executedPipelines = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.read(in); } else { failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN; @@ -60,7 +60,7 @@ public IndexResponse(StreamInput in) throws IOException { } else { executedPipelines = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus = IndexDocFailureStoreStatus.read(in); } else { failureStoreStatus = IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN; @@ -126,7 +126,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalCollection(executedPipelines, 
StreamOutput::writeString); } - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus.writeTo(out); } } @@ -137,7 +137,7 @@ public void writeThin(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalCollection(executedPipelines, StreamOutput::writeString); } - if (out.getTransportVersion().onOrAfter(TransportVersions.FAILURE_STORE_STATUS_IN_INDEX_RESPONSE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { failureStoreStatus.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index 969ba2ad983c..d68e2ce1b02b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -63,7 +63,7 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { this.indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); } - if (in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.allowPartialSearchResults = in.readBoolean(); } } @@ -82,7 +82,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { out.writeOptionalWriteable(indexFilter); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeBoolean(allowPartialSearchResults); } else if (allowPartialSearchResults) { throw new IOException("[allow_partial_search_results] is not supported on nodes with version " + out.getTransportVersion()); diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index 3c830c8ed9dc..b3ffc564d848 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -47,7 +47,7 @@ public OpenPointInTimeResponse( @Override public void writeTo(StreamOutput out) throws IOException { out.writeBytesReference(pointInTimeId); - if (out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(totalShards); out.writeVInt(successfulShards); out.writeVInt(failedShards); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index ca810bb88653..c2f1510341fb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -63,14 +63,14 @@ public static BytesReference encode( TransportVersion version, ShardSearchFailure[] shardFailures ) { - assert shardFailures.length == 0 || version.onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT) + assert shardFailures.length == 0 || 
version.onOrAfter(TransportVersions.V_8_16_0) : "[allow_partial_search_results] cannot be enabled on a cluster that has not been fully upgraded to version [" - + TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT + + TransportVersions.V_8_16_0.toReleaseVersion() + "] or higher."; try (var out = new BytesStreamOutput()) { out.setTransportVersion(version); TransportVersion.writeVersion(version, out); - boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + boolean allowNullContextId = out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); int shardSize = searchPhaseResults.size() + (allowNullContextId ? shardFailures.length : 0); out.writeVInt(shardSize); for (var searchResult : searchPhaseResults) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java index 7509a7b0fed0..f91a9d09f4bb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java @@ -37,7 +37,7 @@ public final class SearchContextIdForNode implements Writeable { } SearchContextIdForNode(StreamInput in) throws IOException { - boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + boolean allowNull = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); this.node = allowNull ? in.readOptionalString() : in.readString(); this.clusterAlias = in.readOptionalString(); this.searchContextId = allowNull ? in.readOptionalWriteable(ShardSearchContextId::new) : new ShardSearchContextId(in); @@ -45,7 +45,7 @@ public final class SearchContextIdForNode implements Writeable { @Override public void writeTo(StreamOutput out) throws IOException { - boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT); + boolean allowNull = out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0); if (allowNull) { out.writeOptionalString(node); } else { @@ -53,7 +53,7 @@ public void writeTo(StreamOutput out) throws IOException { // We should never set a null node if the cluster is not fully upgraded to a version that can handle it. throw new IOException( "Cannot write null node value to a node in version " - + out.getTransportVersion() + + out.getTransportVersion().toReleaseVersion() + ". The target node must be specified to retrieve the ShardSearchContextId." ); } @@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException { // We should never set a null search context id if the cluster is not fully upgraded to a version that can handle it. throw new IOException( "Cannot write null search context ID to a node in version " - + out.getTransportVersion() + + out.getTransportVersion().toReleaseVersion() + ". A valid search context ID is required to identify the shard's search context in this version." 
); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index ae3d1e4a5118..75dbfaf1f962 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -104,8 +104,7 @@ public TransportOpenPointInTimeAction( protected void doExecute(Task task, OpenPointInTimeRequest request, ActionListener<OpenPointInTimeResponse> listener) { final ClusterState clusterState = clusterService.state(); // Check if all the nodes in this cluster know about the service - if (request.allowPartialSearchResults() - && clusterState.getMinTransportVersion().before(TransportVersions.ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT)) { + if (request.allowPartialSearchResults() && clusterState.getMinTransportVersion().before(TransportVersions.V_8_16_0)) { listener.onFailure( new ElasticsearchStatusException( format( diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 85889d8398cb..ebbd47336e3d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -982,12 +982,11 @@ public void writeIndicesOptions(StreamOutput out) throws IOException { states.add(WildcardStates.HIDDEN); } out.writeEnumSet(states); - if (out.getTransportVersion() - .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (out.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { out.writeBoolean(includeRegularIndices()); out.writeBoolean(includeFailureIndices()); } - if (out.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { selectorOptions.writeTo(out); } } @@ -1010,8 +1009,7 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti .ignoreThrottled(options.contains(Option.IGNORE_THROTTLED)) .build(); SelectorOptions selectorOptions = SelectorOptions.DEFAULT; - if (in.getTransportVersion() - .between(TransportVersions.V_8_14_0, TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) { // Reading from an older node, which will be sending two booleans that we must read out and ignore.
var includeData = in.readBoolean(); var includeFailures = in.readBoolean(); @@ -1023,7 +1021,7 @@ public static IndicesOptions readIndicesOptions(StreamInput in) throws IOExcepti selectorOptions = SelectorOptions.FAILURES; } } - if (in.getTransportVersion().onOrAfter(TransportVersions.CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { selectorOptions = SelectorOptions.read(in); } return new IndicesOptions( diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java index b6c1defe91a7..9cf567c21966 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterIndexHealth.java @@ -111,7 +111,7 @@ public ClusterIndexHealth(final StreamInput in) throws IOException { unassignedShards = in.readVInt(); status = ClusterHealthStatus.readFrom(in); shards = in.readMapValues(ClusterShardHealth::new, ClusterShardHealth::getShardId); - if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { unassignedPrimaryShards = in.readVInt(); } else { unassignedPrimaryShards = 0; @@ -203,7 +203,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(unassignedShards); out.writeByte(status.value()); out.writeMapValues(shards); - if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(unassignedPrimaryShards); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java index 63863542564c..f512acb6e04d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterShardHealth.java @@ -96,7 +96,7 @@ public ClusterShardHealth(final StreamInput in) throws IOException { initializingShards = in.readVInt(); unassignedShards = in.readVInt(); primaryActive = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { unassignedPrimaryShards = in.readVInt(); } else { unassignedPrimaryShards = 0; @@ -167,7 +167,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(initializingShards); out.writeVInt(unassignedShards); out.writeBoolean(primaryActive); - if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(unassignedPrimaryShards); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java index 579429b5d51d..31f275e29c36 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterStateHealth.java @@ -120,7 +120,7 @@ public ClusterStateHealth(final StreamInput in) throws IOException { status = ClusterHealthStatus.readFrom(in); indices = 
in.readMapValues(ClusterIndexHealth::new, ClusterIndexHealth::getIndex); activeShardsPercent = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { unassignedPrimaryShards = in.readVInt(); } else { unassignedPrimaryShards = 0; @@ -212,7 +212,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeByte(status.value()); out.writeMapValues(indices); out.writeDouble(activeShardsPercent); - if (out.getTransportVersion().onOrAfter(TransportVersions.UNASSIGNED_PRIMARY_COUNT_ON_CLUSTER_HEALTH)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(unassignedPrimaryShards); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 4dcc7c73c280..979434950cf7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -71,6 +71,7 @@ public final class DataStream implements SimpleDiffable, ToXContentO public static final FeatureFlag FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store"); public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0; public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.V_8_14_0; + public static final TransportVersion ADD_DATA_STREAM_OPTIONS_VERSION = TransportVersions.V_8_16_0; public static boolean isFailureStoreFeatureFlagEnabled() { return FAILURE_STORE_FEATURE_FLAG.isEnabled(); @@ -200,9 +201,7 @@ public static DataStream read(StreamInput in) throws IOException { : null; // This boolean flag has been moved in data stream options var failureStoreEnabled = in.getTransportVersion() - .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS) - ? in.readBoolean() - : false; + .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.V_8_16_0) ? in.readBoolean() : false; var failureIndices = in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(); @@ -216,7 +215,7 @@ public static DataStream read(StreamInput in) throws IOException { .setAutoShardingEvent(in.readOptionalWriteable(DataStreamAutoShardingEvent::new)); } DataStreamOptions dataStreamOptions; - if (in.getTransportVersion().onOrAfter(TransportVersions.ADD_DATA_STREAM_OPTIONS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { dataStreamOptions = in.readOptionalWriteable(DataStreamOptions::read); } else { // We cannot distinguish if failure store was explicitly disabled or not. 
Given that failure store @@ -1077,7 +1076,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(lifecycle); } if (out.getTransportVersion() - .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.ADD_DATA_STREAM_OPTIONS)) { + .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) { out.writeBoolean(isFailureStoreEnabled()); } if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) { @@ -1093,7 +1092,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(failureIndices.rolloverOnWrite); out.writeOptionalWriteable(failureIndices.autoShardingEvent); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_DATA_STREAM_OPTIONS)) { + if (out.getTransportVersion().onOrAfter(DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) { out.writeOptionalWriteable(dataStreamOptions.isEmpty() ? null : dataStreamOptions); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java index 271c60e829a8..8917d5a9cbbb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/InferenceFieldMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; @@ -23,8 +24,6 @@ import java.util.List; import java.util.Objects; -import static org.elasticsearch.TransportVersions.SEMANTIC_TEXT_SEARCH_INFERENCE_ID; - /** * Contains inference field data for fields. 
* As inference is done in the coordinator node to avoid re-doing it at shard / replica level, the coordinator needs to check for the need @@ -56,7 +55,7 @@ public InferenceFieldMetadata(String name, String inferenceId, String searchInfe public InferenceFieldMetadata(StreamInput input) throws IOException { this.name = input.readString(); this.inferenceId = input.readString(); - if (input.getTransportVersion().onOrAfter(SEMANTIC_TEXT_SEARCH_INFERENCE_ID)) { + if (input.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.searchInferenceId = input.readString(); } else { this.searchInferenceId = this.inferenceId; @@ -68,7 +67,7 @@ public InferenceFieldMetadata(StreamInput input) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(inferenceId); - if (out.getTransportVersion().onOrAfter(SEMANTIC_TEXT_SEARCH_INFERENCE_ID)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeString(searchInferenceId); } out.writeStringArray(sourceFields); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 790b8e4ab75f..60cf6b10417f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -317,7 +317,7 @@ public static Diff readDiffFrom(StreamInput in) throws IOException public static RoutingTable readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); - if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) { + if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) { in.readLong(); // previously 'version', unused in all applicable versions so any number will do } int size = in.readVInt(); @@ -331,7 +331,7 @@ public static RoutingTable readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) { + if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) { out.writeLong(0); // previously 'version', unused in all applicable versions so any number will do } out.writeCollection(indicesRouting.values()); @@ -349,7 +349,7 @@ private static class RoutingTableDiff implements Diff { new DiffableUtils.DiffableValueReader<>(IndexRoutingTable::readFrom, IndexRoutingTable::readDiffFrom); RoutingTableDiff(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) { + if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) { in.readLong(); // previously 'version', unused in all applicable versions so any number will do } indicesRouting = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), DIFF_VALUE_READER); @@ -366,7 +366,7 @@ public RoutingTable apply(RoutingTable part) { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersions.ROUTING_TABLE_VERSION_REMOVED)) { + if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) { out.writeLong(0); // previously 'version', unused in all applicable versions so any number will do } indicesRouting.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java 
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index 644cc6bb6992..e07861ba0543 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -908,11 +908,8 @@ public final Instant readOptionalInstant() throws IOException {
     private ZonedDateTime readZonedDateTime() throws IOException {
         final String timeZoneId = readString();
         final Instant instant;
-        if (getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) {
-            // epoch seconds can be negative, but it was incorrectly first written as vlong
-            boolean zlong = getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT);
-            long seconds = zlong ? readZLong() : readVLong();
-            instant = Instant.ofEpochSecond(seconds, readInt());
+        if (getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
+            instant = Instant.ofEpochSecond(readZLong(), readInt());
         } else {
             instant = Instant.ofEpochMilli(readLong());
         }
diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
index d724e5ea25ca..6738af32f04d 100644
--- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
+++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -768,13 +768,8 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep
             final ZonedDateTime zonedDateTime = (ZonedDateTime) v;
             o.writeString(zonedDateTime.getZone().getId());
             Instant instant = zonedDateTime.toInstant();
-            if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT_BROKEN)) {
-                // epoch seconds can be negative, but it was incorrectly first written as vlong
-                if (o.getTransportVersion().onOrAfter(TransportVersions.ZDT_NANOS_SUPPORT)) {
-                    o.writeZLong(instant.getEpochSecond());
-                } else {
-                    o.writeVLong(instant.getEpochSecond());
-                }
+            if (o.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
+                o.writeZLong(instant.getEpochSecond());
                 o.writeInt(instant.getNano());
             } else {
                 o.writeLong(instant.toEpochMilli());
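Note on the readZLong/readVLong distinction collapsed above: zig-zag encoding (ZLong) keeps small negative values small on the wire, while an unsigned varint (VLong) cannot represent a negative epoch second sensibly, which is why the broken intermediate constant existed. A sketch of the two transforms, for illustration only (not code from this patch):

    // ZigZag maps ..., -2, -1, 0, 1, 2, ... to 3, 1, 0, 2, 4, ... so pre-1970
    // epoch seconds still encode compactly; feeding a negative long to a
    // plain unsigned varint writer was the bug the ZDT_NANOS_SUPPORT_BROKEN
    // constant worked around.
    static long zigZagEncode(long v) {
        return (v << 1) ^ (v >> 63);   // -1 -> 1, 1 -> 2
    }

    static long zigZagDecode(long v) {
        return (v >>> 1) ^ -(v & 1);   // inverse of the above
    }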
diff --git a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java
index a871524b45e9..520174a4b363 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java
@@ -46,7 +46,7 @@ public CommitStats(SegmentInfos segmentInfos) {
         generation = in.readLong();
         id = in.readOptionalString();
         numDocs = in.readInt();
-        numLeaves = in.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS) ? in.readVInt() : 0;
+        numLeaves = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readVInt() : 0;
     }

     @Override
@@ -100,7 +100,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(generation);
         out.writeOptionalString(id);
         out.writeInt(numDocs);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeVInt(numLeaves);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
index 56210a292995..10b085654039 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/NodeMappingStats.java
@@ -52,7 +52,7 @@ public NodeMappingStats() {
     public NodeMappingStats(StreamInput in) throws IOException {
         totalCount = in.readVLong();
         totalEstimatedOverhead = in.readVLong();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
            totalSegments = in.readVLong();
            totalSegmentFields = in.readVLong();
         }
@@ -93,7 +93,7 @@ public long getTotalSegmentFields() {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(totalCount);
         out.writeVLong(totalEstimatedOverhead);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.SEGMENT_LEVEL_FIELDS_STATS)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeVLong(totalSegments);
             out.writeVLong(totalSegmentFields);
         }
diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
index 647e45d1beda..6ae0c4872cfa 100644
--- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
+++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
@@ -825,7 +825,7 @@ public String getWriteableName() {

         @Override
         public TransportVersion getMinimalSupportedVersion() {
-            return TransportVersions.REGEX_AND_RANGE_INTERVAL_QUERIES;
+            return TransportVersions.V_8_16_0;
         }

         @Override
@@ -1129,7 +1129,7 @@ public String getWriteableName() {

         @Override
         public TransportVersion getMinimalSupportedVersion() {
-            return TransportVersions.REGEX_AND_RANGE_INTERVAL_QUERIES;
+            return TransportVersions.V_8_16_0;
         }

         @Override
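The getMinimalSupportedVersion changes here and below keep the same cutoff once the named 8.16 constants are consolidated: the transport layer consults this value before serializing a versioned named writeable to an older node. A sketch of the contract, using a hypothetical writeable that is not from this patch:

    // A VersionedNamedWriteable advertises the oldest wire version that can
    // decode it; payloads are not sent to nodes below that version.
    public class ExampleIntervalsSource implements VersionedNamedWriteable {
        @Override
        public String getWriteableName() {
            return "example_intervals_source";  // hypothetical registry key
        }

        @Override
        public TransportVersion getMinimalSupportedVersion() {
            return TransportVersions.V_8_16_0;  // first release able to parse this
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // payload omitted in this sketch
        }
    }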
diff --git a/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java
index 33077697a2ce..889fa40b79aa 100644
--- a/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/index/query/RankDocsQueryBuilder.java
@@ -25,8 +25,6 @@
 import java.util.Map;
 import java.util.Objects;

-import static org.elasticsearch.TransportVersions.RRF_QUERY_REWRITE;
-
 public class RankDocsQueryBuilder extends AbstractQueryBuilder<RankDocsQueryBuilder> {

     public static final String NAME = "rank_docs_query";
@@ -44,7 +42,7 @@ public RankDocsQueryBuilder(RankDoc[] rankDocs, QueryBuilder[] queryBuilders, bo
     public RankDocsQueryBuilder(StreamInput in) throws IOException {
         super(in);
         this.rankDocs = in.readArray(c -> c.readNamedWriteable(RankDoc.class), RankDoc[]::new);
-        if (in.getTransportVersion().onOrAfter(RRF_QUERY_REWRITE)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             this.queryBuilders = in.readOptionalArray(c -> c.readNamedWriteable(QueryBuilder.class), QueryBuilder[]::new);
             this.onlyRankDocs = in.readBoolean();
         } else {
@@ -85,7 +83,7 @@ public RankDoc[] rankDocs() {
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeArray(StreamOutput::writeNamedWriteable, rankDocs);
-        if (out.getTransportVersion().onOrAfter(RRF_QUERY_REWRITE)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeOptionalArray(StreamOutput::writeNamedWriteable, queryBuilders);
             out.writeBoolean(onlyRankDocs);
         }
@@ -145,6 +143,6 @@ protected int doHashCode() {

     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.RANK_DOCS_RETRIEVER;
+        return TransportVersions.V_8_16_0;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
index ff514091979c..8b19d72ccc09 100644
--- a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
+++ b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java
@@ -105,7 +105,7 @@ private Stats(StreamInput in) throws IOException {
             suggestTimeInMillis = in.readVLong();
             suggestCurrent = in.readVLong();

-            if (in.getTransportVersion().onOrAfter(TransportVersions.SEARCH_FAILURE_STATS)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 queryFailure = in.readVLong();
                 fetchFailure = in.readVLong();
             }
@@ -129,7 +129,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeVLong(suggestTimeInMillis);
             out.writeVLong(suggestCurrent);

-            if (out.getTransportVersion().onOrAfter(TransportVersions.SEARCH_FAILURE_STATS)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeVLong(queryFailure);
                 out.writeVLong(fetchFailure);
             }
diff --git a/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java b/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java
index 9c666bd4a35f..ee38273f13da 100644
--- a/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java
+++ b/server/src/main/java/org/elasticsearch/inference/EmptySecretSettings.java
@@ -44,7 +44,7 @@ public String getWriteableName() {

     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.ML_INFERENCE_EIS_INTEGRATION_ADDED;
+        return TransportVersions.V_8_16_0;
     }

     @Override
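The RankDocsQueryBuilder hunks above rely on the optional-array helpers, which prepend a presence flag so a null array round-trips. A two-line sketch of the paired calls:

    // Writer and reader must agree: writeOptionalArray emits a boolean
    // presence flag, then each element through the NamedWriteableRegistry.
    out.writeOptionalArray(StreamOutput::writeNamedWriteable, queryBuilders);
    QueryBuilder[] restored = in.readOptionalArray(c -> c.readNamedWriteable(QueryBuilder.class), QueryBuilder[]::new);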
diff --git a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
index ebf32f041155..53ce0bab6361 100644
--- a/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
+++ b/server/src/main/java/org/elasticsearch/inference/ModelConfigurations.java
@@ -121,7 +121,7 @@ public ModelConfigurations(StreamInput in) throws IOException {
         this.service = in.readString();
         this.serviceSettings = in.readNamedWriteable(ServiceSettings.class);
         this.taskSettings = in.readNamedWriteable(TaskSettings.class);
-        this.chunkingSettings = in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_CHUNKING_SETTINGS)
+        this.chunkingSettings = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)
             ? in.readOptionalNamedWriteable(ChunkingSettings.class)
             : null;
     }
@@ -133,7 +133,7 @@ public ModelConfigurations(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(service);
         out.writeNamedWriteable(serviceSettings);
         out.writeNamedWriteable(taskSettings);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_CHUNKING_SETTINGS)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeOptionalNamedWriteable(chunkingSettings);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
index e696c38b9f01..ff6a687da9b4 100644
--- a/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
+++ b/server/src/main/java/org/elasticsearch/ingest/EnterpriseGeoIpTask.java
@@ -64,7 +64,7 @@ public String getWriteableName() {

     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.ENTERPRISE_GEOIP_DOWNLOADER;
+        return TransportVersions.V_8_16_0;
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java
index a1e8eb25f478..f8d161ef1f5e 100644
--- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java
+++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java
@@ -263,7 +263,7 @@ private DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resol

         private DateTime(StreamInput in) throws IOException {
             String formatterPattern = in.readString();
-            Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES)
+            Locale locale = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)
                 ? LocaleUtils.parse(in.readString())
                 : DateFieldMapper.DEFAULT_LOCALE;
             String zoneId = in.readString();
@@ -297,7 +297,7 @@ public static DateTime readFrom(StreamInput in) throws IOException {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(formatter.pattern());
-            if (out.getTransportVersion().onOrAfter(TransportVersions.DATE_TIME_DOC_VALUES_LOCALES)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeString(formatter.locale().toString());
             }
             out.writeString(timeZone.getId());
diff --git a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java
index 9ab14aa9362b..d4127836a4e4 100644
--- a/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java
+++ b/server/src/main/java/org/elasticsearch/search/rank/RankDoc.java
@@ -44,7 +44,7 @@ public String getWriteableName() {

     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.RANK_DOCS_RETRIEVER;
+        return TransportVersions.V_8_16_0;
     }

     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java
index c8670a8dfeec..77d708432cf2 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java
@@ -55,8 +55,7 @@ public ExactKnnQueryBuilder(StreamInput in) throws IOException {
             this.query = VectorData.fromFloats(in.readFloatArray());
         }
         this.field = in.readString();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
-            || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
             this.vectorSimilarity = in.readOptionalFloat();
         } else {
             this.vectorSimilarity = null;
@@ -88,8 +87,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
             out.writeFloatArray(query.asFloatVector());
         }
         out.writeString(field);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
-            || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
             out.writeOptionalFloat(vectorSimilarity);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
index f52addefc8b1..b5ba97906f0e 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
@@ -71,8 +71,7 @@ public KnnScoreDocQueryBuilder(StreamInput in) throws IOException {
             this.fieldName = null;
             this.queryVector = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
-            || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
             this.vectorSimilarity = in.readOptionalFloat();
         } else {
             this.vectorSimilarity = null;
@@ -116,8 +115,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
                 out.writeBoolean(false);
             }
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
-            || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
             out.writeOptionalFloat(vectorSimilarity);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java
index f34b87669747..231894875b7f 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java
@@ -101,7 +101,7 @@ public String getWriteableName() {

     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.REGISTER_SLM_STATS;
+        return TransportVersions.V_8_16_0;
     }

     @Override
@@ -171,7 +171,7 @@ public void writeTo(StreamOutput out) throws IOException {

         @Override
         public TransportVersion getMinimalSupportedVersion() {
-            return TransportVersions.REGISTER_SLM_STATS;
+            return TransportVersions.V_8_16_0;
         }
     }
diff --git a/server/src/test/java/org/elasticsearch/TransportVersionTests.java b/server/src/test/java/org/elasticsearch/TransportVersionTests.java
index 6c2cc5c1f4cc..08b12cec2e17 100644
--- a/server/src/test/java/org/elasticsearch/TransportVersionTests.java
+++ b/server/src/test/java/org/elasticsearch/TransportVersionTests.java
@@ -211,7 +211,7 @@ public void testDenseTransportVersions() {
         Set<TransportVersion> missingVersions = new TreeSet<>();
         TransportVersion previous = null;
         for (var tv : TransportVersions.getAllVersions()) {
-            if (tv.before(TransportVersions.V_8_15_2)) {
+            if (tv.before(TransportVersions.V_8_16_0)) {
                 continue;
             }
             if (previous == null) {
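The two-clause checks removed in the vector-similarity hunks existed because the fix first shipped in a development transport version and was also carried by 8.15 patch releases. Once the named constant is folded into the release versions, every released version on or after V_8_15_0 understands the field, so the disjunction reduces to a single predicate. A sketch, under that assumption:

    // Before consolidation: two ways a peer could understand the field.
    boolean oldCheck = in.getTransportVersion().onOrAfter(TransportVersions.FIX_VECTOR_SIMILARITY_INNER_HITS)
        || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_0);
    // After consolidation: one check covers 8.15.x patches and 8.16+ alike.
    boolean newCheck = in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0);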
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java
index f37b1d1b4171..cfdbfdfbfcf8 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestParametersTests.java
@@ -23,7 +23,7 @@ public class NodesStatsRequestParametersTests extends ESTestCase {

     public void testReadWriteMetricSet() {
-        for (var version : List.of(TransportVersions.V_8_15_0, TransportVersions.NODES_STATS_ENUM_SET)) {
+        for (var version : List.of(TransportVersions.V_8_15_0, TransportVersions.V_8_16_0)) {
             var randSet = randomSubsetOf(Metric.ALL);
             var metricsOut = randSet.isEmpty() ? EnumSet.noneOf(Metric.class) : EnumSet.copyOf(randSet);
             try {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java
index 89ccd4ab63d7..46b757407e6a 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStatsTests.java
@@ -199,7 +199,7 @@ public void testSerializationBWC() throws IOException {
                 randomQueryUsage(QUERY_TYPES.size()),
                 version.onOrAfter(TransportVersions.V_8_12_0) ? randomRescorerUsage(RESCORER_TYPES.size()) : Map.of(),
                 randomSectionsUsage(SECTIONS.size()),
-                version.onOrAfter(TransportVersions.RETRIEVERS_TELEMETRY_ADDED) ? randomRetrieversUsage(RETRIEVERS.size()) : Map.of(),
+                version.onOrAfter(TransportVersions.V_8_16_0) ? randomRetrieversUsage(RETRIEVERS.size()) : Map.of(),
                 randomLongBetween(0, Long.MAX_VALUE)
             );
             assertSerialization(testInstance, version);
diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
index d2b6d0a6ec6d..afaa7a9a3288 100644
--- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
+++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java
@@ -11,6 +11,7 @@

 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.CheckedBiConsumer;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -53,8 +54,6 @@

 import static java.time.Instant.ofEpochSecond;
 import static java.time.ZonedDateTime.ofInstant;
-import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT;
-import static org.elasticsearch.TransportVersions.ZDT_NANOS_SUPPORT_BROKEN;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasToString;
@@ -729,15 +728,11 @@ public void testReadAfterReachingEndOfStream() throws IOException {
     }

     public void testZonedDateTimeSerialization() throws IOException {
-        checkZonedDateTimeSerialization(ZDT_NANOS_SUPPORT);
-    }
-
-    public void testZonedDateTimeMillisBwcSerializationV1() throws IOException {
-        checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT_BROKEN));
+        checkZonedDateTimeSerialization(TransportVersions.V_8_16_0);
     }

     public void testZonedDateTimeMillisBwcSerialization() throws IOException {
-        checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(ZDT_NANOS_SUPPORT));
+        checkZonedDateTimeSerialization(TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_16_0));
     }

     public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOException {
@@ -745,12 +740,12 @@ public void checkZonedDateTimeSerialization(TransportVersion tv) throws IOExcept
         assertGenericRoundtrip(ofInstant(ofEpochSecond(1), randomZone()), tv);
         // just want to test a large number that will use 5+ bytes
         long maxEpochSecond = Integer.MAX_VALUE;
-        long minEpochSecond = tv.between(ZDT_NANOS_SUPPORT_BROKEN, ZDT_NANOS_SUPPORT) ? 0 : Integer.MIN_VALUE;
+        long minEpochSecond = Integer.MIN_VALUE;
         assertGenericRoundtrip(ofInstant(ofEpochSecond(maxEpochSecond), randomZone()), tv);
         assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond)), randomZone()), tv);
         assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 1_000_000), randomZone()), tv);
         assertGenericRoundtrip(ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_000_000), randomZone()), tv);
-        if (tv.onOrAfter(ZDT_NANOS_SUPPORT)) {
+        if (tv.onOrAfter(TransportVersions.V_8_16_0)) {
             assertGenericRoundtrip(
                 ofInstant(ofEpochSecond(randomLongBetween(minEpochSecond, maxEpochSecond), 999_999_999), randomZone()),
                 tv
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
index 7a31888a440c..a61a86eea710 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
@@ -111,7 +111,7 @@ public LifecycleStats(
         }

         public static LifecycleStats read(StreamInput in) throws IOException {
-            if (in.getTransportVersion().onOrAfter(TransportVersions.GLOBAL_RETENTION_TELEMETRY)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 return new LifecycleStats(
                     in.readVLong(),
                     in.readBoolean(),
@@ -139,7 +139,7 @@ public static LifecycleStats read(StreamInput in) throws IOException {

         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            if (out.getTransportVersion().onOrAfter(TransportVersions.GLOBAL_RETENTION_TELEMETRY)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeVLong(dataStreamsWithLifecyclesCount);
                 out.writeBoolean(defaultRolloverUsed);
                 dataRetentionStats.writeTo(out);
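The test changes above all exercise the same round-trip shape: serialize at a pinned wire version and assert the old-version default. A hypothetical test in that style (writeStats and readStats are stand-ins, not real methods):

    public void testFailureCountsDefaultOnOldWire() throws IOException {
        TransportVersion oldVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_16_0);
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setTransportVersion(oldVersion);
            writeStats(out);  // hypothetical writer under test
            StreamInput in = out.bytes().streamInput();
            in.setTransportVersion(oldVersion);
            // fields gated on V_8_16_0 were never written, so the reader must
            // fall back to its documented default rather than desync
            assertEquals(0L, readStats(in).queryFailure());  // hypothetical accessor
        }
    }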
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
index 0457de6edcc9..36322ed6c6cb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
@@ -209,7 +209,7 @@ public CacheStats(StreamInput in) throws IOException {
                 in.readVLong(),
                 in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? in.readLong() : -1,
                 in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? in.readLong() : -1,
-                in.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_STATS_SIZE_ADDED) ? in.readLong() : -1
+                in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readLong() : -1
             );
         }

@@ -237,7 +237,7 @@ public void writeTo(StreamOutput out) throws IOException {
                 out.writeLong(hitsTimeInMillis);
                 out.writeLong(missesTimeInMillis);
             }
-            if (out.getTransportVersion().onOrAfter(TransportVersions.ENRICH_CACHE_STATS_SIZE_ADDED)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeLong(cacheSizeInBytes);
             }
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
index 33402671a223..5d635c97d9c8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
@@ -328,7 +328,7 @@ public IndexLifecycleExplainResponse(StreamInput in) throws IOException {
         } else {
             indexCreationDate = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.RETAIN_ILM_STEP_INFO)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             previousStepInfo = in.readOptionalBytesReference();
         } else {
             previousStepInfo = null;
@@ -379,7 +379,7 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
             out.writeOptionalLong(indexCreationDate);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.RETAIN_ILM_STEP_INFO)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeOptionalBytesReference(previousStepInfo);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java
index c06dcc0f083d..da64df2672bd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java
@@ -8,6 +8,7 @@

 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexAbstraction;
@@ -32,7 +33,6 @@
 import java.util.List;
 import java.util.Objects;

-import static org.elasticsearch.TransportVersions.ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE;
 import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY;
 import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_SNAPSHOT_NAME_SETTING_KEY;
 import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY;
@@ -102,9 +102,7 @@ public SearchableSnapshotAction(String snapshotRepository) {
     public SearchableSnapshotAction(StreamInput in) throws IOException {
         this.snapshotRepository = in.readString();
         this.forceMergeIndex = in.readBoolean();
-        this.totalShardsPerNode = in.getTransportVersion().onOrAfter(ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE)
-            ? in.readOptionalInt()
-            : null;
+        this.totalShardsPerNode = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalInt() : null;
     }

     boolean isForceMergeIndex() {
@@ -424,7 +422,7 @@ public String getWriteableName() {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(snapshotRepository);
         out.writeBoolean(forceMergeIndex);
-        if (out.getTransportVersion().onOrAfter(ILM_ADD_SEARCHABLE_SNAPSHOT_TOTAL_SHARDS_PER_NODE)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeOptionalInt(totalShardsPerNode);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java
index 226fe3630b38..c3f991a8b4e1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/DeleteInferenceEndpointAction.java
@@ -127,7 +127,7 @@ public Response(StreamInput in) throws IOException {
                 pipelineIds = Set.of();
             }

-            if (in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 indexes = in.readCollectionAsSet(StreamInput::readString);
                 dryRunMessage = in.readOptionalString();
             } else {
@@ -143,7 +143,7 @@ public void writeTo(StreamOutput out) throws IOException {
             if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) {
                 out.writeCollection(pipelineIds, StreamOutput::writeString);
             }
-            if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_DONT_DELETE_WHEN_SEMANTIC_TEXT_EXISTS)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeCollection(indexes, StreamOutput::writeString);
                 out.writeOptionalString(dryRunMessage);
             }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java
index ea0462d0f103..ba3d417d0267 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java
@@ -63,7 +63,7 @@ public Request(StreamInput in) throws IOException {
             this.inferenceEntityId = in.readString();
             this.taskType = TaskType.fromStream(in);
             if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)
-                || in.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) {
+                || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_16_0)) {
                 this.persistDefaultConfig = in.readBoolean();
             } else {
                 this.persistDefaultConfig = PERSIST_DEFAULT_CONFIGS;
@@ -89,7 +89,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeString(inferenceEntityId);
             taskType.writeTo(out);
             if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)
-                || out.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) {
+                || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_16_0)) {
                 out.writeBoolean(this.persistDefaultConfig);
             }
         }
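GetInferenceModelAction keeps its named mainline constant because that change landed after 8.16 branched and was separately backported; only the dedicated BACKPORT_8_16 constant collapses into isPatchFrom(V_8_16_0). The resulting predicate shape, sketched:

    // "New enough mainline" OR "a patch release of the 8.16 line that
    // received the backport" - the two clauses cover disjoint version ranges.
    boolean understands = version.onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)
        || version.isPatchFrom(TransportVersions.V_8_16_0);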
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
index 0645299dfc30..8c4611f05e72 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
@@ -66,7 +66,7 @@ public MachineLearningFeatureSetUsage(StreamInput in) throws IOException {
         this.analyticsUsage = in.readGenericMap();
         this.inferenceUsage = in.readGenericMap();
         this.nodeCount = in.readInt();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.ML_TELEMETRY_MEMORY_ADDED)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             this.memoryUsage = in.readGenericMap();
         } else {
             this.memoryUsage = Map.of();
@@ -86,7 +86,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeGenericMap(analyticsUsage);
         out.writeGenericMap(inferenceUsage);
         out.writeInt(nodeCount);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.ML_TELEMETRY_MEMORY_ADDED)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeGenericMap(memoryUsage);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java
index c6976ab4b513..2aedb4634753 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentAction.java
@@ -47,7 +47,7 @@ public Request(StartTrainedModelDeploymentAction.TaskParams taskParams, Adaptive
         public Request(StreamInput in) throws IOException {
             super(in);
             this.taskParams = new StartTrainedModelDeploymentAction.TaskParams(in);
-            if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
             } else {
                 this.adaptiveAllocationsSettings = null;
@@ -63,7 +63,7 @@ public ActionRequestValidationException validate() {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             taskParams.writeTo(out);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeOptionalWriteable(adaptiveAllocationsSettings);
             }
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
index b298d486c9e0..1bf92262b30f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java
@@ -169,7 +169,7 @@ public Request(StreamInput in) throws IOException {
             modelId = in.readString();
             timeout = in.readTimeValue();
             waitForState = in.readEnum(AllocationStatus.State.class);
-            if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 numberOfAllocations = in.readOptionalVInt();
             } else {
                 numberOfAllocations = in.readVInt();
@@ -189,7 +189,7 @@ public Request(StreamInput in) throws IOException {
             } else {
                 this.deploymentId = modelId;
             }
-            if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
             } else {
                 this.adaptiveAllocationsSettings = null;
@@ -297,7 +297,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeString(modelId);
             out.writeTimeValue(timeout);
             out.writeEnum(waitForState);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeOptionalVInt(numberOfAllocations);
             } else {
                 out.writeVInt(numberOfAllocations);
@@ -313,7 +313,7 @@ public void writeTo(StreamOutput out) throws IOException {
             if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
                 out.writeString(deploymentId);
             }
-            if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeOptionalWriteable(adaptiveAllocationsSettings);
             }
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java
index cb578fdb157d..2018c9526ec8 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateTrainedModelDeploymentAction.java
@@ -87,7 +87,7 @@ public Request(String deploymentId) {
         public Request(StreamInput in) throws IOException {
             super(in);
             deploymentId = in.readString();
-            if (in.getTransportVersion().before(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) {
                 numberOfAllocations = in.readVInt();
                 adaptiveAllocationsSettings = null;
                 isInternal = false;
@@ -134,7 +134,7 @@ public AdaptiveAllocationsSettings getAdaptiveAllocationsSettings() {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeString(deploymentId);
-            if (out.getTransportVersion().before(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+            if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
                 out.writeVInt(numberOfAllocations);
             } else {
                 out.writeOptionalVInt(numberOfAllocations);
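UpdateTrainedModelDeploymentAction above changes the wire type of numberOfAllocations from a required vint to an optional vint; reader and writer must branch on the same version or the stream desyncs. Sketch:

    // Old peers (< 8.16) expect a mandatory value; new peers accept null.
    if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
        out.writeVInt(numberOfAllocations);           // legacy format
    } else {
        out.writeOptionalVInt(numberOfAllocations);   // nullable since 8.16
    }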
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java
index b007c1da451f..742daa1bf613 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEvent.java
@@ -115,7 +115,7 @@ public ScheduledEvent(StreamInput in) throws IOException {
         description = in.readString();
         startTime = in.readInstant();
         endTime = in.readInstant();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.ML_SCHEDULED_EVENT_TIME_SHIFT_CONFIGURATION)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             skipResult = in.readBoolean();
             skipModelUpdate = in.readBoolean();
             forceTimeShift = in.readOptionalInt();
@@ -204,7 +204,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(description);
         out.writeInstant(startTime);
         out.writeInstant(endTime);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.ML_SCHEDULED_EVENT_TIME_SHIFT_CONFIGURATION)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeBoolean(skipResult);
             out.writeBoolean(skipModelUpdate);
             out.writeOptionalInt(forceTimeShift);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
index 858d97bf6f95..31b513eea161 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
@@ -483,7 +483,7 @@ public AssignmentStats(StreamInput in) throws IOException {
         } else {
             deploymentId = modelId;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
         } else {
             adaptiveAllocationsSettings = null;
@@ -666,7 +666,7 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeString(deploymentId);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
            out.writeOptionalWriteable(adaptiveAllocationsSettings);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
index efd07cceae09..249e27d6f25e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java
@@ -178,7 +178,7 @@ public TrainedModelAssignment(StreamInput in) throws IOException {
         } else {
             this.maxAssignedAllocations = totalCurrentAllocations();
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             this.adaptiveAllocationsSettings = in.readOptionalWriteable(AdaptiveAllocationsSettings::new);
         } else {
             this.adaptiveAllocationsSettings = null;
@@ -382,7 +382,7 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeVInt(maxAssignedAllocations);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeOptionalWriteable(adaptiveAllocationsSettings);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
index 9929e59a9c80..a4d7c9c7fa08 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearningToRankConfig.java
@@ -41,7 +41,6 @@ public class LearningToRankConfig extends RegressionConfig implements Rewriteable<LearningToRankConfig> {

     public static final ParseField NAME = new ParseField("learning_to_rank");
-    static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersions.LTR_SERVERLESS_RELEASE;
     public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values");
     public static final ParseField FEATURE_EXTRACTORS = new ParseField("feature_extractors");
     public static final ParseField DEFAULT_PARAMS = new ParseField("default_params");
@@ -226,7 +225,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() {

     @Override
     public TransportVersion getMinimalSupportedTransportVersion() {
-        return MIN_SUPPORTED_TRANSPORT_VERSION;
+        return TransportVersions.V_8_16_0;
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java
index eb952a7dc7e5..4bdced325311 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java
@@ -68,7 +68,7 @@ public DetectionRule(StreamInput in) throws IOException {
         actions = in.readEnumSet(RuleAction.class);
         scope = new RuleScope(in);
         conditions = in.readCollectionAsList(RuleCondition::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.ML_ADD_DETECTION_RULE_PARAMS)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             params = new RuleParams(in);
         } else {
             params = new RuleParams();
@@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeEnumSet(actions);
         scope.writeTo(out);
         out.writeCollection(conditions);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.ML_ADD_DETECTION_RULE_PARAMS)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             params.writeTo(out);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
index b93aa079a28d..148fdf21fd2d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
@@ -82,7 +82,7 @@ public static ConfigurableClusterPrivilege[] readArray(StreamInput in) throws IO
      * Utility method to write an array of {@link ConfigurableClusterPrivilege} objects to a {@link StreamOutput}
      */
     public static void writeArray(StreamOutput out, ConfigurableClusterPrivilege[] privileges) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeArray(WRITER, privileges);
         } else {
             out.writeArray(
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java
index 87d658c6f983..e9ec8dfe8ee5 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsageTests.java
@@ -57,7 +57,7 @@ protected MachineLearningFeatureSetUsage mutateInstance(MachineLearningFeatureSe

     @Override
     protected MachineLearningFeatureSetUsage mutateInstanceForVersion(MachineLearningFeatureSetUsage instance, TransportVersion version) {
-        if (version.before(TransportVersions.ML_TELEMETRY_MEMORY_ADDED)) {
+        if (version.before(TransportVersions.V_8_16_0)) {
             return new MachineLearningFeatureSetUsage(
                 instance.available(),
                 instance.enabled(),
diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java
index 3a61c848d381..d694b2681ee8 100644
--- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java
@@ -68,8 +68,7 @@ public QueryRulesetListItem(StreamInput in) throws IOException {
             this.criteriaTypeToCountMap = Map.of();
         }
         TransportVersion streamTransportVersion = in.getTransportVersion();
-        if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15)
-            || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
+        if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
             || streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) {
             this.ruleTypeToCountMap = in.readMap(m -> in.readEnum(QueryRule.QueryRuleType.class), StreamInput::readInt);
         } else {
@@ -104,8 +103,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeMap(criteriaTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt);
         }
         TransportVersion streamTransportVersion = out.getTransportVersion();
-        if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15)
-            || streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
+        if (streamTransportVersion.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
             || streamTransportVersion.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) {
             out.writeMap(ruleTypeToCountMap, StreamOutput::writeEnum, StreamOutput::writeInt);
         }
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java
index 27d5e240534b..c822dd123d3f 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/ListQueryRulesetsActionResponseBWCSerializingTests.java
@@ -59,8 +59,7 @@ protected ListQueryRulesetsAction.Response mutateInstanceForVersion(
         ListQueryRulesetsAction.Response instance,
         TransportVersion version
     ) {
-        if (version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_15)
-            || version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
+        if (version.isPatchFrom(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES_BACKPORT_8_16)
             || version.onOrAfter(TransportVersions.QUERY_RULES_LIST_INCLUDES_TYPES)) {
             return instance;
         } else if (version.onOrAfter(QueryRulesetListItem.EXPANDED_RULESET_COUNT_TRANSPORT_VERSION)) {
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java
index 7041de1106b5..8582ee1bd8d2 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionRequestBWCSerializingTests.java
@@ -51,6 +51,6 @@ protected TestQueryRulesetAction.Request mutateInstanceForVersion(TestQueryRules

     @Override
     protected List<TransportVersion> bwcVersions() {
-        return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.QUERY_RULE_TEST_API)).collect(Collectors.toList());
+        return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.V_8_16_0)).collect(Collectors.toList());
     }
 }
diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java
index a6562fb7b52a..142310ac4033 100644
--- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java
+++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/rules/action/TestQueryRulesetActionResponseBWCSerializingTests.java
@@ -47,6 +47,6 @@ protected TestQueryRulesetAction.Response mutateInstanceForVersion(TestQueryRule

     @Override
     protected List<TransportVersion> bwcVersions() {
-        return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.QUERY_RULE_TEST_API)).collect(Collectors.toList());
+        return getAllBWCVersions().stream().filter(v -> v.onOrAfter(TransportVersions.V_8_16_0)).collect(Collectors.toList());
    }
 }
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java
index 47dadcbb11de..73e2d5ec626a 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java
@@ -72,7 +72,7 @@ public EsField(StreamInput in) throws IOException {

     private DataType readDataType(StreamInput in) throws IOException {
         String name = readCachedStringWithVersionCheck(in);
-        if (in.getTransportVersion().before(TransportVersions.ESQL_NESTED_UNSUPPORTED) && name.equalsIgnoreCase("NESTED")) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_16_0) && name.equalsIgnoreCase("NESTED")) {
             /*
              * The "nested" data type existed in older versions of ESQL but was
              * entirely used to filter mappings away. Those versions will still
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java
index e8ccae342900..b570a50535a5 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java
@@ -52,7 +52,7 @@ public interface PlanStreamInput {
     String readCachedString() throws IOException;

     static String readCachedStringWithVersionCheck(StreamInput planStreamInput) throws IOException {
-        if (planStreamInput.getTransportVersion().before(TransportVersions.ESQL_CACHED_STRING_SERIALIZATION)) {
+        if (planStreamInput.getTransportVersion().before(TransportVersions.V_8_16_0)) {
             return planStreamInput.readString();
         }
         return ((PlanStreamInput) planStreamInput).readCachedString();
diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java
index fb4af33d2fd6..a5afcb5fa29a 100644
--- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java
+++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java
@@ -37,7 +37,7 @@ public interface PlanStreamOutput {
     void writeCachedString(String field) throws IOException;

     static void writeCachedStringWithVersionCheck(StreamOutput planStreamOutput, String string) throws IOException {
-        if (planStreamOutput.getTransportVersion().before(TransportVersions.ESQL_CACHED_STRING_SERIALIZATION)) {
+        if (planStreamOutput.getTransportVersion().before(TransportVersions.V_8_16_0)) {
             planStreamOutput.writeString(string);
         } else {
             ((PlanStreamOutput) planStreamOutput).writeCachedString(string);
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java
index 9338077a5557..f57f450c7ee3 100644
--- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/AggregationOperator.java
@@ -219,7 +219,7 @@ public Status(long aggregationNanos, long aggregationFinishNanos, int pagesProce

         protected Status(StreamInput in) throws IOException {
             aggregationNanos = in.readVLong();
-            if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_AGGREGATION_OPERATOR_STATUS_FINISH_NANOS)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 aggregationFinishNanos = in.readOptionalVLong();
             } else {
                 aggregationFinishNanos = null;
@@ -230,7 +230,7 @@ protected Status(StreamInput in) throws IOException {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeVLong(aggregationNanos);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_AGGREGATION_OPERATOR_STATUS_FINISH_NANOS)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
                 out.writeOptionalVLong(aggregationFinishNanos);
             }
             out.writeVInt(pagesProcessed);
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java
index d98613f1817a..c071b5055df7 100644
--- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverProfile.java
@@ -79,7 +79,7 @@ public DriverProfile(
     }

     public DriverProfile(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             this.startMillis = in.readVLong();
             this.stopMillis = in.readVLong();
         } else {
@@ -101,7 +101,7 @@ public DriverProfile(StreamInput in) throws IOException {

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeVLong(startMillis);
             out.writeVLong(stopMillis);
         }
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java
index 01e9a73c4fb5..d8856ebedb80 100644
--- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverSleeps.java
@@ -76,7 +76,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     static final int RECORDS = 10;

     public static DriverSleeps read(StreamInput in) throws IOException {
-        if (in.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) {
             return empty();
         }
         return new DriverSleeps(
@@ -88,7 +88,7 @@ public static DriverSleeps read(StreamInput in) throws IOException {

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().before(TransportVersions.ESQL_PROFILE_SLEEPS)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) {
             return;
         }
         out.writeMap(counts, StreamOutput::writeVLong);
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java
index ba7a7e826684..52170dfb0525 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlExecutionInfo.java
@@ -107,7 +107,7 @@ public EsqlExecutionInfo(StreamInput in) throws IOException {
             clusterList.forEach(c -> m.put(c.getClusterAlias(), c));
             this.clusterInfo = m;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.OPT_IN_ESQL_CCS_EXECUTION_INFO)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             this.includeCCSMetadata = in.readBoolean();
         } else {
             this.includeCCSMetadata = false;
@@ -124,7 +124,7 @@ public void writeTo(StreamOutput out) throws IOException {
         } else {
             out.writeCollection(Collections.emptyList());
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.OPT_IN_ESQL_CCS_EXECUTION_INFO)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
             out.writeBoolean(includeCCSMetadata);
         }
     }
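A note on why each hunk in this patch edits the reader and the writer together: a one-sided gate silently corrupts the stream. Sketch of the failure mode, using the includeCCSMetadata flag above:

    // If only the writer were gated, an 8.15 reader would consume this
    // boolean as the first byte of whatever field follows, and every
    // subsequent read would be misaligned. Gating both sides on the same
    // version keeps the byte streams in lockstep.
    if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
        out.writeBoolean(includeCCSMetadata);  // mirrored by the reader's gate
    }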
77aed298baea..dc0e9fd1fb06 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -113,7 +113,7 @@ static EsqlQueryResponse deserialize(BlockStreamInput in) throws IOException { } boolean columnar = in.readBoolean(); EsqlExecutionInfo executionInfo = null; - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { executionInfo = in.readOptionalWriteable(EsqlExecutionInfo::new); } return new EsqlQueryResponse(columns, pages, profile, columnar, asyncExecutionId, isRunning, isAsync, executionInfo); @@ -132,7 +132,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(profile); } out.writeBoolean(columnar); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(executionInfo); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResolveFieldsAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResolveFieldsAction.java index f7e6793fc4fb..f7fd991a9ef1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResolveFieldsAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResolveFieldsAction.java @@ -58,7 +58,7 @@ void executeRemoteRequest( ActionListener remoteListener ) { remoteClient.getConnection(remoteRequest, remoteListener.delegateFailure((l, conn) -> { - var remoteAction = conn.getTransportVersion().onOrAfter(TransportVersions.ESQL_ORIGINAL_INDICES) + var remoteAction = conn.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? 
RESOLVE_REMOTE_TYPE : TransportFieldCapabilitiesAction.REMOTE_TYPE; remoteClient.execute(conn, remoteAction, remoteRequest, l); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java index e891089aa55b..64595e776a96 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java @@ -35,8 +35,7 @@ public ResolvedEnrichPolicy(StreamInput in) throws IOException { } private static Reader getEsFieldReader(StreamInput in) { - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) - || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_2)) { return EsField::readFrom; } return EsField::new; @@ -56,8 +55,7 @@ public void writeTo(StreamOutput out) throws IOException { */ (o, v) -> { var field = new EsField(v.getName(), v.getDataType(), v.getProperties(), v.isAggregatable(), v.isAlias()); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) - || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_2)) { field.writeTo(o); } else { field.writeContent(o); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java index d372eddb961a..089f6db373c5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java @@ -81,8 +81,7 @@ private UnsupportedAttribute(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), readCachedStringWithVersionCheck(in), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) - || in.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2) ? EsField.readFrom(in) : new UnsupportedEsField(in), + in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_2) ? 
EsField.readFrom(in) : new UnsupportedEsField(in), in.readOptionalString(), NameId.readFrom((PlanStreamInput) in) ); @@ -93,8 +92,7 @@ public void writeTo(StreamOutput out) throws IOException { if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { Source.EMPTY.writeTo(out); writeCachedStringWithVersionCheck(out, name()); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) - || out.getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_2)) { field().writeTo(out); } else { field().writeContent(out); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java index 87efccfc90ab..265b08de5556 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AggregateFunction.java @@ -53,10 +53,8 @@ protected AggregateFunction(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteableCollectionAsList(Expression.class) : emptyList() ); @@ -66,7 +64,7 @@ protected AggregateFunction(StreamInput in) throws IOException { public final void writeTo(StreamOutput out) throws IOException { Source.EMPTY.writeTo(out); out.writeNamedWriteable(field); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeNamedWriteable(filter); out.writeNamedWriteableCollection(parameters); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java index 5ae162f1fbb1..f80333d83d6c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java @@ -146,10 +146,8 @@ private CountDistinct(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? 
in.readNamedWriteableCollectionAsList(Expression.class) : nullSafeList(in.readOptionalNamedWriteable(Expression.class)) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/FromPartial.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/FromPartial.java index 0f9037a28d7d..a67b87c7617c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/FromPartial.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/FromPartial.java @@ -58,10 +58,8 @@ private FromPartial(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteableCollectionAsList(Expression.class).get(0) : in.readNamedWriteable(Expression.class) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java index febd9f28b229..0d57267da1e2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Percentile.java @@ -92,10 +92,8 @@ private Percentile(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteableCollectionAsList(Expression.class).get(0) : in.readNamedWriteable(Expression.class) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java index b7b04658f8d5..87ac9b77a682 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Rate.java @@ -74,10 +74,8 @@ public Rate(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? 
in.readNamedWriteableCollectionAsList(Expression.class) : nullSafeList(in.readNamedWriteable(Expression.class), in.readOptionalNamedWriteable(Expression.class)) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java index cffac616b3c8..a2856f60e4c5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/ToPartial.java @@ -80,10 +80,8 @@ private ToPartial(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteableCollectionAsList(Expression.class).get(0) : in.readNamedWriteable(Expression.class) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java index e0a7da806b3a..40777b4d78dc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Top.java @@ -81,10 +81,8 @@ private Top(StreamInput in) throws IOException { super( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteableCollectionAsList(Expression.class) : asList(in.readNamedWriteable(Expression.class), in.readNamedWriteable(Expression.class)) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvg.java index dbcc50cea3b9..49c68d002440 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvg.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/WeightedAvg.java @@ -68,10 +68,8 @@ private WeightedAvg(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readNamedWriteable(Expression.class), - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) - ? in.readNamedWriteable(Expression.class) - : Literal.TRUE, - in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PER_AGGREGATE_FILTER) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? 
in.readNamedWriteable(Expression.class) : Literal.TRUE, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readNamedWriteableCollectionAsList(Expression.class).get(0) : in.readNamedWriteable(Expression.class) ); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java index ce52b3a7611b..ee51a6f391a6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java @@ -50,7 +50,7 @@ public void writeTo(StreamOutput out) throws IOException { @SuppressWarnings("unchecked") private static Map readIndexNameWithModes(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ADD_INDEX_MODE_CONCRETE_INDICES)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { return in.readMap(IndexMode::readFrom); } else { Set indices = (Set) in.readGenericValue(); @@ -60,7 +60,7 @@ private static Map readIndexNameWithModes(StreamInput in) thr } private static void writeIndexNameWithModes(Map concreteIndices, StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ADD_INDEX_MODE_CONCRETE_INDICES)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeMap(concreteIndices, (o, v) -> IndexMode.writeTo(v, out)); } else { out.writeGenericValue(concreteIndices.keySet()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java index 47e5b9acfbf9..948fd1c68354 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java @@ -182,8 +182,7 @@ public NameId mapNameId(long l) { @Override @SuppressWarnings("unchecked") public A readAttributeWithCache(CheckedFunction constructor) throws IOException { - if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ATTRIBUTE_CACHED_SERIALIZATION) - || getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2)) { + if (getTransportVersion().onOrAfter(TransportVersions.V_8_15_2)) { // it's safe to cast to int, since the max value for this is {@link PlanStreamOutput#MAX_SERIALIZED_ATTRIBUTES} int cacheId = Math.toIntExact(readZLong()); if (cacheId < 0) { @@ -222,8 +221,7 @@ private void cacheAttribute(int id, Attribute attr) { @SuppressWarnings("unchecked") public A readEsFieldWithCache() throws IOException { - if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) - || getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2)) { + if (getTransportVersion().onOrAfter(TransportVersions.V_8_15_2)) { // it's safe to cast to int, since the max value for this is {@link PlanStreamOutput#MAX_SERIALIZED_ATTRIBUTES} int cacheId = Math.toIntExact(readZLong()); if (cacheId < 0) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index 615c4266620c..63d95c21d7d9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -154,8 +154,7 @@ public void writeCachedBlock(Block block) throws IOException { @Override public boolean writeAttributeCacheHeader(Attribute attribute) throws IOException { - if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ATTRIBUTE_CACHED_SERIALIZATION) - || getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2)) { + if (getTransportVersion().onOrAfter(TransportVersions.V_8_15_2)) { Integer cacheId = attributeIdFromCache(attribute); if (cacheId != null) { writeZLong(cacheId); @@ -186,8 +185,7 @@ private int cacheAttribute(Attribute attr) { @Override public boolean writeEsFieldCacheHeader(EsField field) throws IOException { - if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) - || getTransportVersion().isPatchFrom(TransportVersions.V_8_15_2)) { + if (getTransportVersion().onOrAfter(TransportVersions.V_8_15_2)) { Integer cacheId = esFieldIdFromCache(field); if (cacheId != null) { writeZLong(cacheId); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java index dff55f073897..891d03c571b2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/AggregateExec.java @@ -85,7 +85,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(child()); out.writeNamedWriteableCollection(groupings()); out.writeNamedWriteableCollection(aggregates()); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_AGGREGATE_EXEC_TRACKS_INTERMEDIATE_ATTRS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeEnum(getMode()); out.writeNamedWriteableCollection(intermediateAttributes()); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java index 308192704fe0..8d2e092cd414 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java @@ -61,7 +61,7 @@ final class ComputeResponse extends TransportResponse { } else { profiles = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.took = in.readOptionalTimeValue(); this.totalShards = in.readVInt(); this.successfulShards = in.readVInt(); @@ -86,7 +86,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(profiles); } } - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalTimeValue(took); out.writeVInt(totalShards); out.writeVInt(successfulShards); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java index 8f890e63bf54..4c01d326ed7b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java @@ -81,7 +81,7 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest.R this.shardIds = in.readCollectionAsList(ShardId::new); this.aliasFilters = in.readMap(Index::new, AliasFilter::readFrom); this.plan = new PlanStreamInput(in, in.namedWriteableRegistry(), configuration).readNamedWriteable(PhysicalPlan.class); - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ORIGINAL_INDICES)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.indices = in.readStringArray(); this.indicesOptions = IndicesOptions.readIndicesOptions(in); } else { @@ -101,7 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(shardIds); out.writeMap(aliasFilters); new PlanStreamOutput(out, configuration).writeNamedWriteable(plan); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ORIGINAL_INDICES)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/RemoteClusterPlan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/RemoteClusterPlan.java index 031bfd7139a8..aed196f963e9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/RemoteClusterPlan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/RemoteClusterPlan.java @@ -23,7 +23,7 @@ static RemoteClusterPlan from(PlanStreamInput planIn) throws IOException { var plan = planIn.readNamedWriteable(PhysicalPlan.class); var targetIndices = planIn.readStringArray(); final OriginalIndices originalIndices; - if (planIn.getTransportVersion().onOrAfter(TransportVersions.ESQL_ORIGINAL_INDICES)) { + if (planIn.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { originalIndices = OriginalIndices.readOriginalIndices(planIn); } else { // fallback to the previous behavior @@ -35,7 +35,7 @@ static RemoteClusterPlan from(PlanStreamInput planIn) throws IOException { public void writeTo(PlanStreamOutput out) throws IOException { out.writeNamedWriteable(plan); out.writeStringArray(targetIndices); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ORIGINAL_INDICES)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { OriginalIndices.writeOriginalIndices(originalIndices, out); } else { out.writeStringArray(originalIndices.indices()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java index 8d33e9b48059..bc11d246904d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java @@ -107,7 +107,7 @@ public static class Builder extends AbstractQueryBuilder { super(in); this.next = in.readNamedWriteable(QueryBuilder.class); this.field = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_SINGLE_VALUE_QUERY_SOURCE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { if (in instanceof PlanStreamInput psi) { this.source = Source.readFrom(psi); } else { @@ -128,7 +128,7 @@ public static class Builder extends AbstractQueryBuilder { 
protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(next); out.writeString(field); - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_SINGLE_VALUE_QUERY_SOURCE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { source.writeTo(out); } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { writeOldSource(out, source); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java index 4ec2746b24ee..997f3265803f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java @@ -101,7 +101,7 @@ public Configuration(BlockStreamInput in) throws IOException { } else { this.tables = Map.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.queryStartTimeNanos = in.readLong(); } else { this.queryStartTimeNanos = -1; @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0)) { out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, StreamOutput::writeWriteable)); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CCS_EXECUTION_INFO)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeLong(queryStartTimeNanos); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java index 07ca112e8c52..3dfc0f611eb2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/ClusterRequestTests.java @@ -156,11 +156,7 @@ protected ClusterComputeRequest mutateInstance(ClusterComputeRequest in) throws public void testFallbackIndicesOptions() throws Exception { ClusterComputeRequest request = createTestInstance(); - var version = TransportVersionUtils.randomVersionBetween( - random(), - TransportVersions.V_8_14_0, - TransportVersions.ESQL_ORIGINAL_INDICES - ); + var version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_14_0, TransportVersions.V_8_16_0); ClusterComputeRequest cloned = copyInstance(request, version); assertThat(cloned.clusterAlias(), equalTo(request.clusterAlias())); assertThat(cloned.sessionId(), equalTo(request.sessionId())); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java index def52e97666f..9d6f5bb89218 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java @@ -49,7 +49,7 @@ public SentenceBoundaryChunkingSettings(Integer maxChunkSize, @Nullable Integer public SentenceBoundaryChunkingSettings(StreamInput in) throws IOException { maxChunkSize = in.readInt(); - if 
(in.getTransportVersion().onOrAfter(TransportVersions.CHUNK_SENTENCE_OVERLAP_SETTING_ADDED)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { sentenceOverlap = in.readVInt(); } } @@ -113,13 +113,13 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_CHUNKING_SETTINGS; + return TransportVersions.V_8_16_0; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeInt(maxChunkSize); - if (out.getTransportVersion().onOrAfter(TransportVersions.CHUNK_SENTENCE_OVERLAP_SETTING_ADDED)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(sentenceOverlap); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java index 7fb0fdc91bf7..7e0378d5b0cd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java @@ -104,7 +104,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_CHUNKING_SETTINGS; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java index fdb5503e491e..15d41301d0a3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/random/RandomRankBuilder.java @@ -85,7 +85,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.RANDOM_RERANKER_RETRIEVER; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankDoc.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankDoc.java index d208623e5332..7ad3e8eea053 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankDoc.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankDoc.java @@ -98,6 +98,6 @@ protected void doToXContent(XContentBuilder builder, Params params) throws IOExc @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.TEXT_SIMILARITY_RERANKER_QUERY_REWRITE; + return TransportVersions.V_8_16_0; } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java index 3e20e4e5d275..2637d9755bd5 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java +++ 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchService.java @@ -359,7 +359,7 @@ public Model updateModelWithEmbeddingDetails(Model model, int embeddingSize) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } public static class Configuration { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceSettings.java index 3500bdf814e1..f6ddac34a2b2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/AlibabaCloudSearchServiceSettings.java @@ -163,7 +163,7 @@ public ToXContentObject getFilteredXContentObject() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionServiceSettings.java index 631ec8a8648e..a299cf5b655c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionServiceSettings.java @@ -74,7 +74,7 @@ public ToXContentObject getFilteredXContentObject() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionTaskSettings.java index 05b5873a81d8..7883e7b1d90d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/completion/AlibabaCloudSearchCompletionTaskSettings.java @@ -115,7 +115,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsServiceSettings.java index 
8896e983d3e7..8f40ce2a8b8b 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsServiceSettings.java @@ -135,7 +135,7 @@ public ToXContentObject getFilteredXContentObject() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsTaskSettings.java index 9a431717d9fb..a08ca6cce66d 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/embeddings/AlibabaCloudSearchEmbeddingsTaskSettings.java @@ -151,7 +151,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankServiceSettings.java index 42c7238aefa7..40e645074f61 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankServiceSettings.java @@ -74,7 +74,7 @@ public ToXContentObject getFilteredXContentObject() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankTaskSettings.java index 40c3dee00d6c..2a7806f4beab 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/rerank/AlibabaCloudSearchRerankTaskSettings.java @@ -85,7 +85,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseServiceSettings.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseServiceSettings.java index fe44c936c4e6..0a55d2aba6ce 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseServiceSettings.java @@ -74,7 +74,7 @@ public ToXContentObject getFilteredXContentObject() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseTaskSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseTaskSettings.java index 0f4ebce92016..17c5b178c2a1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/alibabacloudsearch/sparse/AlibabaCloudSearchSparseTaskSettings.java @@ -164,7 +164,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_ALIBABACLOUD_SEARCH_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java index a3d2483a068e..78178466f9f3 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/cohere/rerank/CohereRerankServiceSettings.java @@ -92,7 +92,7 @@ public CohereRerankServiceSettings(@Nullable String url, @Nullable String modelI public CohereRerankServiceSettings(StreamInput in) throws IOException { this.uri = createOptionalUri(in.readOptionalString()); - if (in.getTransportVersion().before(TransportVersions.ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED)) { + if (in.getTransportVersion().before(TransportVersions.V_8_16_0)) { // An older node sends these fields, so we need to skip them to progress through the serialized data in.readOptionalEnum(SimilarityMeasure.class); in.readOptionalVInt(); @@ -162,7 +162,7 @@ public void writeTo(StreamOutput out) throws IOException { var uriToWrite = uri != null ? 
uri.toString() : null; out.writeOptionalString(uriToWrite); - if (out.getTransportVersion().before(TransportVersions.ML_INFERENCE_COHERE_UNUSED_RERANK_SETTINGS_REMOVED)) { + if (out.getTransportVersion().before(TransportVersions.V_8_16_0)) { // An old node expects this data to be present, so we need to send at least the booleans // indicating that the fields are not set out.writeOptionalEnum(null); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java index 8397943402d5..8acef4084063 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceService.java @@ -221,7 +221,7 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_EIS_INTEGRATION_ADDED; + return TransportVersions.V_8_16_0; } private ElasticInferenceServiceModel createModelFromPersistent( diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsServiceSettings.java index bbda1bb71679..3af404aeef36 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elastic/ElasticInferenceServiceSparseEmbeddingsServiceSettings.java @@ -113,7 +113,7 @@ public RateLimitSettings rateLimitSettings() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_EIS_INTEGRATION_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java index 962c939146ef..244108edc3dd 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java @@ -157,19 +157,17 @@ public ElasticsearchInternalServiceSettings(ElasticsearchInternalServiceSettings } public ElasticsearchInternalServiceSettings(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.numAllocations = in.readOptionalVInt(); } else { this.numAllocations = in.readVInt(); } this.numThreads = in.readVInt(); this.modelId = in.readString(); - this.adaptiveAllocationsSettings = in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS) + this.adaptiveAllocationsSettings = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? 
in.readOptionalWriteable(AdaptiveAllocationsSettings::new) : null; - this.deploymentId = in.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT) - ? in.readOptionalString() - : null; + this.deploymentId = in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readOptionalString() : null; } public void setNumAllocations(Integer numAllocations) { @@ -178,17 +176,15 @@ public void setNumAllocations(Integer numAllocations) { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalVInt(getNumAllocations()); } else { out.writeVInt(getNumAllocations()); } out.writeVInt(getNumThreads()); out.writeString(modelId()); - if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeOptionalWriteable(getAdaptiveAllocationsSettings()); - } - if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT)) { out.writeOptionalString(deploymentId); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java index ea263fb77a2d..981a3e95808e 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/IbmWatsonxService.java @@ -223,7 +223,7 @@ public Model parsePersistedConfig(String inferenceEntityId, TaskType taskType, M @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_IBM_WATSONX_EMBEDDINGS_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsServiceSettings.java index 53d5c6c8bb5e..3a9625aef31c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ibmwatsonx/embeddings/IbmWatsonxEmbeddingsServiceSettings.java @@ -207,7 +207,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.ML_INFERENCE_IBM_WATSONX_EMBEDDINGS_ADDED; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java index 46edcf1f63c0..b59ef0c40e4f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java @@ -304,7 +304,7 @@ public String getWriteableName() { @Override public TransportVersion 
getMinimalSupportedVersion() { - return TransportVersions.LTR_SERVERLESS_RELEASE; + return TransportVersions.V_8_16_0; } @Override diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java index 4cd10801b298..84961f844216 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankDoc.java @@ -62,7 +62,7 @@ public RRFRankDoc(StreamInput in) throws IOException { rank = in.readVInt(); positions = in.readIntArray(); scores = in.readFloatArray(); - if (in.getTransportVersion().onOrAfter(TransportVersions.RRF_QUERY_REWRITE)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { this.rankConstant = in.readVInt(); } else { this.rankConstant = DEFAULT_RANK_CONSTANT; @@ -119,7 +119,7 @@ public void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(rank); out.writeIntArray(positions); out.writeFloatArray(scores); - if (out.getTransportVersion().onOrAfter(TransportVersions.RRF_QUERY_REWRITE)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) { out.writeVInt(rankConstant); } } @@ -173,6 +173,6 @@ protected void doToXContent(XContentBuilder builder, Params params) throws IOExc @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.RRF_QUERY_REWRITE; + return TransportVersions.V_8_16_0; } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index c4cf3127b897..3c63ec71e74e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -14,6 +14,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; @@ -138,7 +139,6 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static org.elasticsearch.TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY; @@ -430,17 +430,17 @@ private boolean validateRoleDescriptorsForMixedCluster( listener.onFailure( new IllegalArgumentException( "all nodes must have version [" - + ROLE_REMOTE_CLUSTER_PRIVS + + ROLE_REMOTE_CLUSTER_PRIVS.toReleaseVersion() + "] or higher to support remote cluster privileges for API keys" ) ); return false; } - if (transportVersion.before(ADD_MANAGE_ROLES_PRIVILEGE) && hasGlobalManageRolesPrivilege(roleDescriptors)) { + if (transportVersion.before(TransportVersions.V_8_16_0) && hasGlobalManageRolesPrivilege(roleDescriptors)) { listener.onFailure( new IllegalArgumentException( "all nodes must have version [" - + ADD_MANAGE_ROLES_PRIVILEGE + + TransportVersions.V_8_16_0.toReleaseVersion() + "] or higher to support the manage roles privilege for API keys" ) ); diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index 7c242fb07b68..3e4763156de1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -481,10 +481,10 @@ private Exception validateRoleDescriptor(RoleDescriptor role) { ); } else if (Arrays.stream(role.getConditionalClusterPrivileges()) .anyMatch(privilege -> privilege instanceof ConfigurableClusterPrivileges.ManageRolesPrivilege) - && clusterService.state().getMinTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) { + && clusterService.state().getMinTransportVersion().before(TransportVersions.V_8_16_0)) { return new IllegalStateException( "all nodes must have version [" - + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + TransportVersions.V_8_16_0.toReleaseVersion() + "] or higher to support the manage roles privilege" ); } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java index ea1b2cdac5a1..54b7ff6fa484 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java @@ -158,8 +158,8 @@ public void testRolesWithDescription() throws Exception { public void testRolesWithManageRoles() throws Exception { assumeTrue( - "The manage roles privilege is supported after transport version: " + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE, - minimumTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE) + "The manage roles privilege is supported after transport version: " + TransportVersions.V_8_16_0, + minimumTransportVersion().before(TransportVersions.V_8_16_0) ); switch (CLUSTER_TYPE) { case OLD -> { @@ -190,7 +190,7 @@ public void testRolesWithManageRoles() throws Exception { } case MIXED -> { try { - this.createClientsByVersion(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE); + this.createClientsByVersion(TransportVersions.V_8_16_0); // succeed when role manage roles is not provided final String initialRole = randomRoleDescriptorSerialized(); createRole(client(), "my-valid-mixed-role", initialRole); @@ -232,7 +232,7 @@ public void testRolesWithManageRoles() throws Exception { e.getMessage(), containsString( "all nodes must have version [" - + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + TransportVersions.V_8_16_0.toReleaseVersion() + "] or higher to support the manage roles privilege" ) ); @@ -246,7 +246,7 @@ public void testRolesWithManageRoles() throws Exception { e.getMessage(), containsString( "all nodes must have version [" - + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + TransportVersions.V_8_16_0.toReleaseVersion() + "] or higher to support the manage roles privilege" ) ); From aecb48c1835cc90a2b7a5b0e6584af36eb531a94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Thu, 5 Dec 2024 11:36:21 +0100 Subject: [PATCH 32/45] [Entitlements] Integrate PluginsLoader with PolicyManager (#117239) (#118055) This PR expands `PolicyManager` to actually use `Policy` and 
`Entitlement` classes for checks, instead of hardcoding them. It also introduces a separate `PluginsResolver`, with a dedicated function to map a Class to a Plugin (name). `PluginsResolver` is initialized with data from `PluginsLoader`, and then its resolve function is used internally in `PolicyManager` to find a plugin policy (and then test against the entitlements declared in the policy). --- .../src/main/java/module-info.java | 1 + .../EntitlementInitialization.java | 8 +- .../api/ElasticsearchEntitlementChecker.java | 13 +- .../policy/CreateClassLoaderEntitlement.java | 1 - ...lementType.java => ExitVMEntitlement.java} | 8 +- .../runtime/policy/FileEntitlement.java | 7 +- .../runtime/policy/PolicyManager.java | 135 +++++++++- .../runtime/policy/PolicyManagerTests.java | 247 +++++++++++++++++ .../bootstrap/Elasticsearch.java | 22 +- .../bootstrap/PluginsResolver.java | 47 ++++ .../elasticsearch/plugins/PluginsLoader.java | 47 +++- .../bootstrap/PluginsResolverTests.java | 254 ++++++++++++++++++ .../plugins/MockPluginsService.java | 2 +- 13 files changed, 738 insertions(+), 54 deletions(-) rename libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/{FlagEntitlementType.java => ExitVMEntitlement.java} (79%) create mode 100644 libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java create mode 100644 server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java create mode 100644 server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java diff --git a/libs/entitlement/src/main/java/module-info.java b/libs/entitlement/src/main/java/module-info.java index 54075ba60bbe..b8a125b98e64 100644 --- a/libs/entitlement/src/main/java/module-info.java +++ b/libs/entitlement/src/main/java/module-info.java @@ -17,6 +17,7 @@ requires static org.elasticsearch.entitlement.bridge; // At runtime, this will be in java.base exports org.elasticsearch.entitlement.runtime.api; + exports org.elasticsearch.entitlement.runtime.policy; exports org.elasticsearch.entitlement.instrumentation; exports org.elasticsearch.entitlement.bootstrap to org.elasticsearch.server; exports org.elasticsearch.entitlement.initialization to java.base; diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java index 0ffab5f93969..fb694308466c 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/initialization/EntitlementInitialization.java @@ -18,6 +18,8 @@ import org.elasticsearch.entitlement.instrumentation.MethodKey; import org.elasticsearch.entitlement.instrumentation.Transformer; import org.elasticsearch.entitlement.runtime.api.ElasticsearchEntitlementChecker; +import org.elasticsearch.entitlement.runtime.policy.CreateClassLoaderEntitlement; +import org.elasticsearch.entitlement.runtime.policy.ExitVMEntitlement; import org.elasticsearch.entitlement.runtime.policy.Policy; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; import org.elasticsearch.entitlement.runtime.policy.PolicyParser; @@ -86,9 +88,11 @@ private static Class internalNameToClass(String internalName) { private static PolicyManager createPolicyManager() throws IOException { Map pluginPolicies = createPluginPolicies(EntitlementBootstrap.bootstrapArgs().pluginData()); - 
// TODO: What should the name be? // TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it - var serverPolicy = new Policy("server", List.of()); + var serverPolicy = new Policy( + "server", + List.of(new Scope("org.elasticsearch.server", List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement()))) + ); return new PolicyManager(serverPolicy, pluginPolicies, EntitlementBootstrap.bootstrapArgs().pluginResolver()); } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java index 28a080470c04..aa63b630ed7c 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/api/ElasticsearchEntitlementChecker.java @@ -10,7 +10,6 @@ package org.elasticsearch.entitlement.runtime.api; import org.elasticsearch.entitlement.bridge.EntitlementChecker; -import org.elasticsearch.entitlement.runtime.policy.FlagEntitlementType; import org.elasticsearch.entitlement.runtime.policy.PolicyManager; import java.net.URL; @@ -30,27 +29,27 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { @Override public void check$java_lang_System$exit(Class callerClass, int status) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.SYSTEM_EXIT); + policyManager.checkExitVM(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, URL[] urls, ClassLoader parent, URLStreamHandlerFactory factory) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override public void check$java_net_URLClassLoader$(Class callerClass, String name, URL[] urls, ClassLoader parent) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } @Override @@ -61,6 +60,6 @@ public ElasticsearchEntitlementChecker(PolicyManager policyManager) { ClassLoader parent, URLStreamHandlerFactory factory ) { - policyManager.checkFlagEntitlement(callerClass, FlagEntitlementType.CREATE_CLASSLOADER); + policyManager.checkCreateClassLoader(callerClass); } } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java index 708e0b87711f..138515be9ffc 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/CreateClassLoaderEntitlement.java @@ -12,5 +12,4 @@ public class CreateClassLoaderEntitlement implements Entitlement { @ExternalEntitlement public CreateClassLoaderEntitlement() {} - } 
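Note on the pattern above: with the flag enum replaced by one class per entitlement, adding a further internal check needs only a marker class plus a one-line delegation to the shared type-based lookup. A minimal sketch, assuming a hypothetical SetDefaultLocaleEntitlement (the class and method names below are illustrative, not part of this change):

// Hypothetical internal entitlement, following the ExitVMEntitlement pattern:
// no @ExternalEntitlement constructor, so it cannot be declared in plugin policies.
public class SetDefaultLocaleEntitlement implements Entitlement {}

// Matching check method in PolicyManager, next to checkExitVM and checkCreateClassLoader:
public void checkSetDefaultLocale(Class<?> callerClass) {
    checkEntitlementPresent(callerClass, SetDefaultLocaleEntitlement.class);
}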
diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java similarity index 79% rename from libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java rename to libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java index d40235ee1216..c4a8fc683358 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FlagEntitlementType.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/ExitVMEntitlement.java @@ -9,7 +9,7 @@ package org.elasticsearch.entitlement.runtime.policy; -public enum FlagEntitlementType { - SYSTEM_EXIT, - CREATE_CLASSLOADER; -} +/** + * Internal policy type (not-parseable -- not available to plugins). + */ +public class ExitVMEntitlement implements Entitlement {} diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java index 8df199591d3e..d0837bc09618 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/FileEntitlement.java @@ -20,6 +20,9 @@ public class FileEntitlement implements Entitlement { public static final int READ_ACTION = 0x1; public static final int WRITE_ACTION = 0x2; + public static final String READ = "read"; + public static final String WRITE = "write"; + private final String path; private final int actions; @@ -29,12 +32,12 @@ public FileEntitlement(String path, List actionsList) { int actionsInt = 0; for (String actionString : actionsList) { - if ("read".equals(actionString)) { + if (READ.equals(actionString)) { if ((actionsInt & READ_ACTION) == READ_ACTION) { throw new IllegalArgumentException("file action [read] specified multiple times"); } actionsInt |= READ_ACTION; - } else if ("write".equals(actionString)) { + } else if (WRITE.equals(actionString)) { if ((actionsInt & WRITE_ACTION) == WRITE_ACTION) { throw new IllegalArgumentException("file action [write] specified multiple times"); } diff --git a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java index b3fb5b75a1d5..a77c86d5ffd0 100644 --- a/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java +++ b/libs/entitlement/src/main/java/org/elasticsearch/entitlement/runtime/policy/PolicyManager.java @@ -17,17 +17,45 @@ import java.lang.module.ModuleFinder; import java.lang.module.ModuleReference; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; public class PolicyManager { private static final Logger logger = LogManager.getLogger(ElasticsearchEntitlementChecker.class); + static class ModuleEntitlements { + public static final ModuleEntitlements NONE = new ModuleEntitlements(List.of()); + private final IdentityHashMap, List> entitlementsByType; + + 
ModuleEntitlements(List entitlements) { + this.entitlementsByType = entitlements.stream() + .collect(Collectors.toMap(Entitlement::getClass, e -> new ArrayList<>(List.of(e)), (a, b) -> { + a.addAll(b); + return a; + }, IdentityHashMap::new)); + } + + public boolean hasEntitlement(Class entitlementClass) { + return entitlementsByType.containsKey(entitlementClass); + } + + public Stream getEntitlements(Class entitlementClass) { + return entitlementsByType.get(entitlementClass).stream().map(entitlementClass::cast); + } + } + + final Map moduleEntitlementsMap = new HashMap<>(); + protected final Policy serverPolicy; protected final Map pluginPolicies; private final Function, String> pluginResolver; @@ -56,27 +84,110 @@ public PolicyManager(Policy defaultPolicy, Map pluginPolicies, F this.pluginResolver = pluginResolver; } - public void checkFlagEntitlement(Class callerClass, FlagEntitlementType type) { + private static List lookupEntitlementsForModule(Policy policy, String moduleName) { + for (int i = 0; i < policy.scopes.size(); ++i) { + var scope = policy.scopes.get(i); + if (scope.name.equals(moduleName)) { + return scope.entitlements; + } + } + return null; + } + + public void checkExitVM(Class callerClass) { + checkEntitlementPresent(callerClass, ExitVMEntitlement.class); + } + + public void checkCreateClassLoader(Class callerClass) { + checkEntitlementPresent(callerClass, CreateClassLoaderEntitlement.class); + } + + private void checkEntitlementPresent(Class callerClass, Class entitlementClass) { var requestingModule = requestingModule(callerClass); if (isTriviallyAllowed(requestingModule)) { return; } - // TODO: real policy check. For now, we only allow our hardcoded System.exit policy for server. - // TODO: this will be checked using policies - if (requestingModule.isNamed() - && requestingModule.getName().equals("org.elasticsearch.server") - && (type == FlagEntitlementType.SYSTEM_EXIT || type == FlagEntitlementType.CREATE_CLASSLOADER)) { - logger.debug("Allowed: caller [{}] in module [{}] has entitlement [{}]", callerClass, requestingModule.getName(), type); + ModuleEntitlements entitlements = getEntitlementsOrThrow(callerClass, requestingModule); + if (entitlements.hasEntitlement(entitlementClass)) { + logger.debug( + () -> Strings.format( + "Entitled: caller [%s], module [%s], type [%s]", + callerClass, + requestingModule.getName(), + entitlementClass.getSimpleName() + ) + ); return; } - - // TODO: plugins policy check using pluginResolver and pluginPolicies throw new NotEntitledException( - Strings.format("Missing entitlement [%s] for caller [%s] in module [%s]", type, callerClass, requestingModule.getName()) + Strings.format( + "Missing entitlement: caller [%s], module [%s], type [%s]", + callerClass, + requestingModule.getName(), + entitlementClass.getSimpleName() + ) ); } + ModuleEntitlements getEntitlementsOrThrow(Class callerClass, Module requestingModule) { + ModuleEntitlements cachedEntitlement = moduleEntitlementsMap.get(requestingModule); + if (cachedEntitlement != null) { + if (cachedEntitlement == ModuleEntitlements.NONE) { + throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule) + "[CACHED]"); + } + return cachedEntitlement; + } + + if (isServerModule(requestingModule)) { + var scopeName = requestingModule.getName(); + return getModuleEntitlementsOrThrow(callerClass, requestingModule, serverPolicy, scopeName); + } + + // plugins + var pluginName = pluginResolver.apply(callerClass); + if (pluginName != null) { + var pluginPolicy = 
pluginPolicies.get(pluginName); + if (pluginPolicy != null) { + final String scopeName; + if (requestingModule.isNamed() == false) { + scopeName = ALL_UNNAMED; + } else { + scopeName = requestingModule.getName(); + } + return getModuleEntitlementsOrThrow(callerClass, requestingModule, pluginPolicy, scopeName); + } + } + + moduleEntitlementsMap.put(requestingModule, ModuleEntitlements.NONE); + throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, requestingModule)); + } + + private static String buildModuleNoPolicyMessage(Class callerClass, Module requestingModule) { + return Strings.format("Missing entitlement policy: caller [%s], module [%s]", callerClass, requestingModule.getName()); + } + + private ModuleEntitlements getModuleEntitlementsOrThrow(Class callerClass, Module module, Policy policy, String moduleName) { + var entitlements = lookupEntitlementsForModule(policy, moduleName); + if (entitlements == null) { + // Module without entitlements - remember we don't have any + moduleEntitlementsMap.put(module, ModuleEntitlements.NONE); + throw new NotEntitledException(buildModuleNoPolicyMessage(callerClass, module)); + } + // We have a policy for this module + var classEntitlements = createClassEntitlements(entitlements); + moduleEntitlementsMap.put(module, classEntitlements); + return classEntitlements; + } + + private static boolean isServerModule(Module requestingModule) { + return requestingModule.isNamed() && requestingModule.getLayer() == ModuleLayer.boot(); + } + + private ModuleEntitlements createClassEntitlements(List entitlements) { + return new ModuleEntitlements(entitlements); + } + private static Module requestingModule(Class callerClass) { if (callerClass != null) { Module callerModule = callerClass.getModule(); @@ -102,10 +213,10 @@ private static Module requestingModule(Class callerClass) { private static boolean isTriviallyAllowed(Module requestingModule) { if (requestingModule == null) { - logger.debug("Trivially allowed: entire call stack is in composed of classes in system modules"); + logger.debug("Entitlement trivially allowed: entire call stack is in composed of classes in system modules"); return true; } - logger.trace("Not trivially allowed"); + logger.trace("Entitlement not trivially allowed"); return false; } diff --git a/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java new file mode 100644 index 000000000000..45bdf2e45782 --- /dev/null +++ b/libs/entitlement/src/test/java/org/elasticsearch/entitlement/runtime/policy/PolicyManagerTests.java @@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.entitlement.runtime.policy; + +import org.elasticsearch.entitlement.runtime.api.NotEntitledException; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; + +import java.io.IOException; +import java.lang.module.Configuration; +import java.lang.module.ModuleFinder; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Map.entry; +import static org.elasticsearch.entitlement.runtime.policy.PolicyManager.ALL_UNNAMED; +import static org.elasticsearch.test.LambdaMatchers.transformedMatch; +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.sameInstance; + +@ESTestCase.WithoutSecurityManager +public class PolicyManagerTests extends ESTestCase { + + public void testGetEntitlementsThrowsOnMissingPluginUnnamedModule() { + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.of("plugin1", createPluginPolicy("plugin.module")), + c -> "plugin1" + ); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var ex = assertThrows( + "No policy for the unnamed module", + NotEntitledException.class, + () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) + ); + + assertEquals( + "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", + ex.getMessage() + ); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + } + + public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() { + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var ex = assertThrows( + "No policy for this plugin", + NotEntitledException.class, + () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule) + ); + + assertEquals( + "Missing entitlement policy: caller [class org.elasticsearch.entitlement.runtime.policy.PolicyManagerTests], module [null]", + ex.getMessage() + ); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + } + + public void testGetEntitlementsFailureIsCached() { + var policyManager = new PolicyManager(createEmptyTestServerPolicy(), Map.of(), c -> "plugin1"); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + + // A second time + var ex = assertThrows(NotEntitledException.class, () -> policyManager.getEntitlementsOrThrow(callerClass, requestingModule)); + + assertThat(ex.getMessage(), endsWith("[CACHED]")); + // Nothing new in the map + 
assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + } + + public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() { + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), + c -> "plugin2" + ); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + } + + public void testGetEntitlementsThrowsOnMissingPolicyForServer() throws ClassNotFoundException { + var policyManager = new PolicyManager(createTestServerPolicy("example"), Map.of(), c -> null); + + // Tests do not run modular, so we cannot use a server class. + // But we know that in production code the server module and its classes are in the boot layer. + // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is + // loaded too early) to mimic a class that would be in the server module. + var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); + var requestingModule = mockServerClass.getModule(); + + var ex = assertThrows( + "No policy for this module in server", + NotEntitledException.class, + () -> policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule) + ); + + assertEquals( + "Missing entitlement policy: caller [class com.sun.net.httpserver.HttpServer], module [jdk.httpserver]", + ex.getMessage() + ); + assertThat(policyManager.moduleEntitlementsMap, hasEntry(requestingModule, PolicyManager.ModuleEntitlements.NONE)); + } + + public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException { + var policyManager = new PolicyManager(createTestServerPolicy("jdk.httpserver"), Map.of(), c -> null); + + // Tests do not run modular, so we cannot use a server class. + // But we know that in production code the server module and its classes are in the boot layer. + // So we use a random module in the boot layer, and a random class from that module (not java.base -- it is + // loaded too early) to mimic a class that would be in the server module. 
+ var mockServerClass = ModuleLayer.boot().findLoader("jdk.httpserver").loadClass("com.sun.net.httpserver.HttpServer"); + var requestingModule = mockServerClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(mockServerClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + assertThat(entitlements.hasEntitlement(ExitVMEntitlement.class), is(true)); + } + + public void testGetEntitlementsReturnsEntitlementsForPluginModule() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + + Path jar = creteMockPluginJar(home); + + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.of("mock-plugin", createPluginPolicy("org.example.plugin")), + c -> "mock-plugin" + ); + + var layer = createLayerForJar(jar, "org.example.plugin"); + var mockPluginClass = layer.findLoader("org.example.plugin").loadClass("q.B"); + var requestingModule = mockPluginClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(mockPluginClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + assertThat( + entitlements.getEntitlements(FileEntitlement.class).toList(), + contains(transformedMatch(FileEntitlement::toString, containsString("/test/path"))) + ); + } + + public void testGetEntitlementsResultIsCached() { + var policyManager = new PolicyManager( + createEmptyTestServerPolicy(), + Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))), + c -> "plugin2" + ); + + // Any class from the current module (unnamed) will do + var callerClass = this.getClass(); + var requestingModule = callerClass.getModule(); + + var entitlements = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + assertThat(entitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true)); + assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + var cachedResult = policyManager.moduleEntitlementsMap.values().stream().findFirst().get(); + var entitlementsAgain = policyManager.getEntitlementsOrThrow(callerClass, requestingModule); + + // Nothing new in the map + assertThat(policyManager.moduleEntitlementsMap, aMapWithSize(1)); + assertThat(entitlementsAgain, sameInstance(cachedResult)); + } + + private static Policy createEmptyTestServerPolicy() { + return new Policy("server", List.of()); + } + + private static Policy createTestServerPolicy(String scopeName) { + return new Policy("server", List.of(new Scope(scopeName, List.of(new ExitVMEntitlement(), new CreateClassLoaderEntitlement())))); + } + + private static Policy createPluginPolicy(String... 
pluginModules) { + return new Policy( + "plugin", + Arrays.stream(pluginModules) + .map( + name -> new Scope( + name, + List.of(new FileEntitlement("/test/path", List.of(FileEntitlement.READ)), new CreateClassLoaderEntitlement()) + ) + ) + .toList() + ); + } + + private static Path creteMockPluginJar(Path home) throws IOException { + Path jar = home.resolve("mock-plugin.jar"); + + Map sources = Map.ofEntries( + entry("module-info", "module org.example.plugin { exports q; }"), + entry("q.B", "package q; public class B { }") + ); + + var classToBytes = InMemoryJavaCompiler.compile(sources); + JarUtils.createJarWithEntries( + jar, + Map.ofEntries(entry("module-info.class", classToBytes.get("module-info")), entry("q/B.class", classToBytes.get("q.B"))) + ); + return jar; + } + + private static ModuleLayer createLayerForJar(Path jar, String moduleName) { + Configuration cf = ModuleLayer.boot().configuration().resolve(ModuleFinder.of(jar), ModuleFinder.of(), Set.of(moduleName)); + var moduleController = ModuleLayer.defineModulesWithOneLoader( + cf, + List.of(ModuleLayer.boot()), + ClassLoader.getPlatformClassLoader() + ); + return moduleController.layer(); + } +} diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index c06ea9305aef..27cbb39c05d3 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -42,9 +42,7 @@ import org.elasticsearch.nativeaccess.NativeAccess; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeValidationException; -import org.elasticsearch.plugins.PluginBundle; import org.elasticsearch.plugins.PluginsLoader; -import org.elasticsearch.plugins.PluginsUtils; import java.io.IOException; import java.io.InputStream; @@ -54,10 +52,8 @@ import java.nio.file.Path; import java.security.Permission; import java.security.Security; -import java.util.ArrayList; import java.util.List; import java.util.Objects; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -208,21 +204,17 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException { // load the plugin Java modules and layers now for use in entitlements var pluginsLoader = PluginsLoader.createPluginsLoader(nodeEnv.modulesFile(), nodeEnv.pluginsFile()); bootstrap.setPluginsLoader(pluginsLoader); + var pluginsResolver = PluginsResolver.create(pluginsLoader); if (Boolean.parseBoolean(System.getProperty("es.entitlements.enabled"))) { LogManager.getLogger(Elasticsearch.class).info("Bootstrapping Entitlements"); - List> pluginData = new ArrayList<>(); - Set moduleBundles = PluginsUtils.getModuleBundles(nodeEnv.modulesFile()); - for (PluginBundle moduleBundle : moduleBundles) { - pluginData.add(Tuple.tuple(moduleBundle.getDir(), moduleBundle.pluginDescriptor().isModular())); - } - Set pluginBundles = PluginsUtils.getPluginBundles(nodeEnv.pluginsFile()); - for (PluginBundle pluginBundle : pluginBundles) { - pluginData.add(Tuple.tuple(pluginBundle.getDir(), pluginBundle.pluginDescriptor().isModular())); - } - // TODO: add a functor to map module to plugin name - EntitlementBootstrap.bootstrap(pluginData, callerClass -> null); + List> pluginData = pluginsLoader.allBundles() + .stream() + .map(bundle -> Tuple.tuple(bundle.getDir(), bundle.pluginDescriptor().isModular())) + .toList(); + + EntitlementBootstrap.bootstrap(pluginData, 
pluginsResolver::resolveClassToPluginName); } else { // install SM after natives, shutdown hooks, etc. LogManager.getLogger(Elasticsearch.class).info("Bootstrapping java SecurityManager"); diff --git a/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java b/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java new file mode 100644 index 000000000000..256e91cbee16 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/bootstrap/PluginsResolver.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.plugins.PluginsLoader; + +import java.util.HashMap; +import java.util.Map; + +class PluginsResolver { + private final Map pluginNameByModule; + + private PluginsResolver(Map pluginNameByModule) { + this.pluginNameByModule = pluginNameByModule; + } + + public static PluginsResolver create(PluginsLoader pluginsLoader) { + Map pluginNameByModule = new HashMap<>(); + + pluginsLoader.pluginLayers().forEach(pluginLayer -> { + var pluginName = pluginLayer.pluginBundle().pluginDescriptor().getName(); + if (pluginLayer.pluginModuleLayer() != null && pluginLayer.pluginModuleLayer() != ModuleLayer.boot()) { + // This plugin is a Java Module + for (var module : pluginLayer.pluginModuleLayer().modules()) { + pluginNameByModule.put(module, pluginName); + } + } else { + // This plugin is not modularized + pluginNameByModule.put(pluginLayer.pluginClassLoader().getUnnamedModule(), pluginName); + } + }); + + return new PluginsResolver(pluginNameByModule); + } + + public String resolveClassToPluginName(Class clazz) { + var module = clazz.getModule(); + return pluginNameByModule.get(module); + } +} diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java index aa21e5c64d90..aadda93f977b 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsLoader.java @@ -50,7 +50,6 @@ * to have all the plugin information they need prior to starting. 
*/ public class PluginsLoader { - /** * Contains information about the {@link ClassLoader} required to load a plugin */ @@ -64,18 +63,26 @@ public interface PluginLayer { * @return The {@link ClassLoader} used to instantiate the main class for the plugin */ ClassLoader pluginClassLoader(); + + /** + * @return The {@link ModuleLayer} for the plugin modules + */ + ModuleLayer pluginModuleLayer(); } /** * Contains information about the {@link ClassLoader}s and {@link ModuleLayer} required for loading a plugin - * @param pluginBundle Information about the bundle of jars used in this plugin + * + * @param pluginBundle Information about the bundle of jars used in this plugin * @param pluginClassLoader The {@link ClassLoader} used to instantiate the main class for the plugin - * @param spiClassLoader The exported {@link ClassLoader} visible to other Java modules - * @param spiModuleLayer The exported {@link ModuleLayer} visible to other Java modules + * @param pluginModuleLayer The {@link ModuleLayer} containing the Java modules of the plugin + * @param spiClassLoader The exported {@link ClassLoader} visible to other Java modules + * @param spiModuleLayer The exported {@link ModuleLayer} visible to other Java modules */ private record LoadedPluginLayer( PluginBundle pluginBundle, ClassLoader pluginClassLoader, + ModuleLayer pluginModuleLayer, ClassLoader spiClassLoader, ModuleLayer spiModuleLayer ) implements PluginLayer { @@ -103,6 +110,10 @@ public record LayerAndLoader(ModuleLayer layer, ClassLoader loader) { public static LayerAndLoader ofLoader(ClassLoader loader) { return new LayerAndLoader(ModuleLayer.boot(), loader); } + + public static LayerAndLoader ofUberModuleLoader(UberModuleClassLoader loader) { + return new LayerAndLoader(loader.getLayer(), loader); + } } private static final Logger logger = LogManager.getLogger(PluginsLoader.class); @@ -111,6 +122,7 @@ public static LayerAndLoader ofLoader(ClassLoader loader) { private final List moduleDescriptors; private final List pluginDescriptors; private final Map loadedPluginLayers; + private final Set allBundles; /** * Constructs a new PluginsLoader @@ -185,17 +197,19 @@ public static PluginsLoader createPluginsLoader(Path modulesDirectory, Path plug } } - return new PluginsLoader(moduleDescriptors, pluginDescriptors, loadedPluginLayers); + return new PluginsLoader(moduleDescriptors, pluginDescriptors, loadedPluginLayers, Set.copyOf(seenBundles)); } PluginsLoader( List moduleDescriptors, List pluginDescriptors, - Map loadedPluginLayers + Map loadedPluginLayers, + Set allBundles ) { this.moduleDescriptors = moduleDescriptors; this.pluginDescriptors = pluginDescriptors; this.loadedPluginLayers = loadedPluginLayers; + this.allBundles = allBundles; } public List moduleDescriptors() { @@ -210,6 +224,10 @@ public Stream pluginLayers() { return loadedPluginLayers.values().stream().map(Function.identity()); } + public Set allBundles() { + return allBundles; + } + private static void loadPluginLayer( PluginBundle bundle, Map loaded, @@ -239,7 +257,7 @@ private static void loadPluginLayer( } final ClassLoader pluginParentLoader = spiLayerAndLoader == null ? 
parentLoader : spiLayerAndLoader.loader(); - final LayerAndLoader pluginLayerAndLoader = createPlugin( + final LayerAndLoader pluginLayerAndLoader = createPluginLayerAndLoader( bundle, pluginParentLoader, extendedPlugins, @@ -253,7 +271,16 @@ private static void loadPluginLayer( spiLayerAndLoader = pluginLayerAndLoader; } - loaded.put(name, new LoadedPluginLayer(bundle, pluginClassLoader, spiLayerAndLoader.loader, spiLayerAndLoader.layer)); + loaded.put( + name, + new LoadedPluginLayer( + bundle, + pluginClassLoader, + pluginLayerAndLoader.layer(), + spiLayerAndLoader.loader, + spiLayerAndLoader.layer + ) + ); } static LayerAndLoader createSPI( @@ -277,7 +304,7 @@ static LayerAndLoader createSPI( } } - static LayerAndLoader createPlugin( + private static LayerAndLoader createPluginLayerAndLoader( PluginBundle bundle, ClassLoader pluginParentLoader, List extendedPlugins, @@ -294,7 +321,7 @@ static LayerAndLoader createPlugin( return createPluginModuleLayer(bundle, pluginParentLoader, parentLayers, qualifiedExports); } else if (plugin.isStable()) { logger.debug(() -> "Loading bundle: " + plugin.getName() + ", non-modular as synthetic module"); - return LayerAndLoader.ofLoader( + return LayerAndLoader.ofUberModuleLoader( UberModuleClassLoader.getInstance( pluginParentLoader, ModuleLayer.boot(), diff --git a/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java new file mode 100644 index 000000000000..331f0f7ad13e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java @@ -0,0 +1,254 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.plugins.PluginBundle; +import org.elasticsearch.plugins.PluginDescriptor; +import org.elasticsearch.plugins.PluginsLoader; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.compiler.InMemoryJavaCompiler; +import org.elasticsearch.test.jar.JarUtils; + +import java.io.IOException; +import java.lang.module.Configuration; +import java.lang.module.ModuleFinder; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Stream; + +import static java.util.Map.entry; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +@ESTestCase.WithoutSecurityManager +public class PluginsResolverTests extends ESTestCase { + + private record TestPluginLayer(PluginBundle pluginBundle, ClassLoader pluginClassLoader, ModuleLayer pluginModuleLayer) + implements + PluginsLoader.PluginLayer {} + + public void testResolveModularPlugin() throws IOException, ClassNotFoundException { + String moduleName = "modular.plugin"; + String pluginName = "modular-plugin"; + + final Path home = createTempDir(); + + Path jar = createModularPluginJar(home, pluginName, moduleName, "p", "A"); + + var layer = createModuleLayer(moduleName, jar); + var loader = layer.findLoader(moduleName); + + PluginBundle bundle = createMockBundle(pluginName, moduleName, "p.A"); + PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); + + when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, layer))); + PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); + + var testClass = loader.loadClass("p.A"); + var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass); + var unresolvedPluginName1 = pluginsResolver.resolveClassToPluginName(PluginsResolver.class); + var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class); + + assertEquals(pluginName, resolvedPluginName); + assertNull(unresolvedPluginName1); + assertNull(unresolvedPluginName2); + } + + public void testResolveMultipleModularPlugins() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + + Path jar1 = createModularPluginJar(home, "plugin1", "module.one", "p", "A"); + Path jar2 = createModularPluginJar(home, "plugin2", "module.two", "q", "B"); + + var layer1 = createModuleLayer("module.one", jar1); + var loader1 = layer1.findLoader("module.one"); + var layer2 = createModuleLayer("module.two", jar2); + var loader2 = layer2.findLoader("module.two"); + + PluginBundle bundle1 = createMockBundle("plugin1", "module.one", "p.A"); + PluginBundle bundle2 = createMockBundle("plugin2", "module.two", "q.B"); + PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); + + when(mockPluginsLoader.pluginLayers()).thenReturn( + Stream.of(new TestPluginLayer(bundle1, loader1, layer1), new TestPluginLayer(bundle2, loader2, layer2)) + ); + PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); + + var testClass1 = loader1.loadClass("p.A"); + var testClass2 = loader2.loadClass("q.B"); + var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1); + var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2); + + assertEquals("plugin1", resolvedPluginName1); + assertEquals("plugin2", resolvedPluginName2); + } + + public void 
testResolveReferencedModulesInModularPlugins() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + + Path dependencyJar = createModularPluginJar(home, "plugin1", "module.one", "p", "A"); + Path pluginJar = home.resolve("plugin2.jar"); + + Map sources = Map.ofEntries( + entry("module-info", "module module.two { exports q; requires module.one; }"), + entry("q.B", "package q; public class B { public p.A a = null; }") + ); + + var classToBytes = InMemoryJavaCompiler.compile(sources, "--add-modules", "module.one", "-p", home.toString()); + JarUtils.createJarWithEntries( + pluginJar, + Map.ofEntries(entry("module-info.class", classToBytes.get("module-info")), entry("q/B.class", classToBytes.get("q.B"))) + ); + + var layer = createModuleLayer("module.two", pluginJar, dependencyJar); + var loader = layer.findLoader("module.two"); + + PluginBundle bundle = createMockBundle("plugin2", "module.two", "q.B"); + PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); + + when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, layer))); + PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); + + var testClass1 = loader.loadClass("p.A"); + var testClass2 = loader.loadClass("q.B"); + var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1); + var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2); + + assertEquals("plugin2", resolvedPluginName1); + assertEquals("plugin2", resolvedPluginName2); + } + + public void testResolveMultipleNonModularPlugins() throws IOException, ClassNotFoundException { + final Path home = createTempDir(); + + Path jar1 = createNonModularPluginJar(home, "plugin1", "p", "A"); + Path jar2 = createNonModularPluginJar(home, "plugin2", "q", "B"); + + var loader1 = createClassLoader(jar1); + var loader2 = createClassLoader(jar2); + + PluginBundle bundle1 = createMockBundle("plugin1", null, "p.A"); + PluginBundle bundle2 = createMockBundle("plugin2", null, "q.B"); + PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); + + when(mockPluginsLoader.pluginLayers()).thenReturn( + Stream.of(new TestPluginLayer(bundle1, loader1, ModuleLayer.boot()), new TestPluginLayer(bundle2, loader2, ModuleLayer.boot())) + ); + PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); + + var testClass1 = loader1.loadClass("p.A"); + var testClass2 = loader2.loadClass("q.B"); + var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1); + var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2); + + assertEquals("plugin1", resolvedPluginName1); + assertEquals("plugin2", resolvedPluginName2); + } + + public void testResolveNonModularPlugin() throws IOException, ClassNotFoundException { + String pluginName = "non-modular-plugin"; + + final Path home = createTempDir(); + + Path jar = createNonModularPluginJar(home, pluginName, "p", "A"); + + var loader = createClassLoader(jar); + + PluginBundle bundle = createMockBundle(pluginName, null, "p.A"); + PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); + + when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, ModuleLayer.boot()))); + PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); + + var testClass = loader.loadClass("p.A"); + var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass); + var unresolvedPluginName1 = 
pluginsResolver.resolveClassToPluginName(PluginsResolver.class); + var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class); + + assertEquals(pluginName, resolvedPluginName); + assertNull(unresolvedPluginName1); + assertNull(unresolvedPluginName2); + } + + private static URLClassLoader createClassLoader(Path jar) throws MalformedURLException { + return new URLClassLoader(new URL[] { jar.toUri().toURL() }); + } + + private static ModuleLayer createModuleLayer(String moduleName, Path... jars) { + var finder = ModuleFinder.of(jars); + Configuration cf = ModuleLayer.boot().configuration().resolve(finder, ModuleFinder.of(), Set.of(moduleName)); + var moduleController = ModuleLayer.defineModulesWithOneLoader( + cf, + List.of(ModuleLayer.boot()), + ClassLoader.getPlatformClassLoader() + ); + return moduleController.layer(); + } + + private static PluginBundle createMockBundle(String pluginName, String moduleName, String fqClassName) { + PluginDescriptor pd = new PluginDescriptor( + pluginName, + null, + null, + null, + null, + fqClassName, + moduleName, + List.of(), + false, + false, + true, + false + ); + + PluginBundle bundle = mock(PluginBundle.class); + when(bundle.pluginDescriptor()).thenReturn(pd); + return bundle; + } + + private static Path createModularPluginJar(Path home, String pluginName, String moduleName, String packageName, String className) + throws IOException { + Path jar = home.resolve(pluginName + ".jar"); + String fqClassName = packageName + "." + className; + + Map sources = Map.ofEntries( + entry("module-info", "module " + moduleName + " { exports " + packageName + "; }"), + entry(fqClassName, "package " + packageName + "; public class " + className + " {}") + ); + + var classToBytes = InMemoryJavaCompiler.compile(sources); + JarUtils.createJarWithEntries( + jar, + Map.ofEntries( + entry("module-info.class", classToBytes.get("module-info")), + entry(packageName + "/" + className + ".class", classToBytes.get(fqClassName)) + ) + ); + return jar; + } + + private static Path createNonModularPluginJar(Path home, String pluginName, String packageName, String className) throws IOException { + Path jar = home.resolve(pluginName + ".jar"); + String fqClassName = packageName + "." 
+ className; + + Map sources = Map.ofEntries( + entry(fqClassName, "package " + packageName + "; public class " + className + " {}") + ); + + var classToBytes = InMemoryJavaCompiler.compile(sources); + JarUtils.createJarWithEntries(jar, Map.ofEntries(entry(packageName + "/" + className + ".class", classToBytes.get(fqClassName)))); + return jar; + } +}
diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java index a9a825af3b86..91875600ec00 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java @@ -45,7 +45,7 @@ public MockPluginsService(Settings settings, Environment environment, Collection super( settings, environment.configFile(), - new PluginsLoader(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap()) + new PluginsLoader(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap(), Collections.emptySet()) ); List pluginsLoaded = new ArrayList<>();
From b8afe64eb637535691a66d0696296e90202b2350 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 5 Dec 2024 12:02:16 +0100 Subject: [PATCH 33/45] [8.x] Address mapping and compute engine runtime field issues (#117792) (#118049)

* Address mapping and compute engine runtime field issues (#117792)

This change addresses the following issues:

Fields mapped as runtime fields were not stored when source mode is synthetic.
A java.io.EOFException could occur when an ES|QL query uses multiple runtime fields that fall back to _source while source mode is synthetic. (1)
A concurrency issue when runtime fields get pushed down to Lucene. (2)

1: ValueSourceOperator can read values in row-striding or columnar fashion. When values are read in columnar fashion and multiple runtime fields synthesize source, the same SourceProvider can end up evaluating the same range of doc ids multiple times. This can result in unexpected I/O errors at the codec level, because the same doc value instances are reused by the SourceProvider. Re-evaluating the same doc ids violates the contract of the DocIdSetIterator#advance(...) / DocIdSetIterator#advanceExact(...) methods, which document that unexpected behaviour can occur if the target doc id is lower than the current position. Note that this is only an issue for the synthetic source loader, not the stored source loader, and it does not occur when executing in row-stride fashion, which sometimes happens in the compute engine and always happens in the _search API.

2: A concurrency issue arises with the source provider when the source operator executes in parallel with data partitioning set to DOC. The same SourceProvider instance is then accessed by multiple threads concurrently, and SourceProvider implementations are not designed to handle concurrent access.
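To make the contract violation in (1) concrete, here is a minimal sketch, assuming a field backed by numeric doc values (the reader and field name are illustrative): Lucene's doc-values iterators are forward-only, so revisiting an earlier doc id, as happens when several synthetic-source runtime fields share one SourceProvider during columnar reads, is undefined behaviour and can surface as an EOFException at the codec level.

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;

import java.io.IOException;

class ForwardOnlySketch {
    // Doc-values iterators may only advance; asking for a doc id behind the
    // current position violates DocIdSetIterator#advance/advanceExact.
    static void readTwice(LeafReader reader) throws IOException {
        NumericDocValues dv = reader.getNumericDocValues("some_field"); // assumes the field exists with doc values
        dv.advanceExact(10); // ok: forward
        dv.advanceExact(42); // ok: still forward
        dv.advanceExact(10); // undefined: target is behind the current position
    }
}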
Closes #117644 --- docs/changelog/117792.yaml | 6 + .../index/mapper/DocumentParser.java | 4 +- .../index/query/SearchExecutionContext.java | 10 +- .../search/lookup/SearchLookup.java | 12 ++ .../xpack/esql/action/EsqlActionIT.java | 39 ++++++ .../planner/EsPhysicalOperationProviders.java | 12 +- .../xpack/esql/plugin/ComputeService.java | 17 ++- .../plugin/ReinitializingSourceProvider.java | 43 +++++++ .../xpack/logsdb/LogsdbRestIT.java | 117 ++++++++++++++++++ 9 files changed, 250 insertions(+), 10 deletions(-) create mode 100644 docs/changelog/117792.yaml create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ReinitializingSourceProvider.java diff --git a/docs/changelog/117792.yaml b/docs/changelog/117792.yaml new file mode 100644 index 000000000000..2d7ddda1ace4 --- /dev/null +++ b/docs/changelog/117792.yaml @@ -0,0 +1,6 @@ +pr: 117792 +summary: Address mapping and compute engine runtime field issues +area: Mapping +type: bug +issues: + - 117644 diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index c9e035f9d3f2..fe2c4dc7f2c5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -946,7 +946,9 @@ public Query termQuery(Object value, SearchExecutionContext context) { protected void parseCreateField(DocumentParserContext context) { // Run-time fields are mapped to this mapper, so it needs to handle storing values for use in synthetic source. // #parseValue calls this method once the run-time field is created. - if (context.dynamic() == ObjectMapper.Dynamic.RUNTIME && context.canAddIgnoredField()) { + var fieldType = context.mappingLookup().getFieldType(path); + boolean isRuntimeField = fieldType instanceof AbstractScriptFieldType; + if ((context.dynamic() == ObjectMapper.Dynamic.RUNTIME || isRuntimeField) && context.canAddIgnoredField()) { try { context.addIgnoredField( IgnoredSourceFieldMapper.NameValue.fromContext(context, path, context.encodeFlattenedToken()) diff --git a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java index b07112440d3c..d5e48a6a54da 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/SearchExecutionContext.java @@ -493,14 +493,18 @@ public boolean containsBrokenAnalysis(String field) { */ public SearchLookup lookup() { if (this.lookup == null) { - SourceProvider sourceProvider = isSourceSynthetic() - ? SourceProvider.fromSyntheticSource(mappingLookup.getMapping(), mapperMetrics.sourceFieldMetrics()) - : SourceProvider.fromStoredFields(); + var sourceProvider = createSourceProvider(); setLookupProviders(sourceProvider, LeafFieldLookupProvider.fromStoredFields()); } return this.lookup; } + public SourceProvider createSourceProvider() { + return isSourceSynthetic() + ? 
SourceProvider.fromSyntheticSource(mappingLookup.getMapping(), mapperMetrics.sourceFieldMetrics()) + : SourceProvider.fromStoredFields(); + } + /** * Replace the standard source provider and field lookup provider on the SearchLookup * diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java index f7f8cee30ee1..9eb0170af5ef 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/SearchLookup.java @@ -102,6 +102,14 @@ private SearchLookup(SearchLookup searchLookup, Set fieldChain) { this.fieldLookupProvider = searchLookup.fieldLookupProvider; } + private SearchLookup(SearchLookup searchLookup, SourceProvider sourceProvider, Set fieldChain) { + this.fieldChain = Collections.unmodifiableSet(fieldChain); + this.sourceProvider = sourceProvider; + this.fieldTypeLookup = searchLookup.fieldTypeLookup; + this.fieldDataLookup = searchLookup.fieldDataLookup; + this.fieldLookupProvider = searchLookup.fieldLookupProvider; + } + /** * Creates a copy of the current {@link SearchLookup} that looks fields up in the same way, but also tracks field references * in order to detect cycles and prevent resolving fields that depend on more than {@link #MAX_FIELD_CHAIN_DEPTH} other fields. @@ -144,4 +152,8 @@ public IndexFieldData getForField(MappedFieldType fieldType, MappedFieldType. public Source getSource(LeafReaderContext ctx, int doc) throws IOException { return sourceProvider.getSource(ctx, doc); } + + public SearchLookup swapSourceProvider(SourceProvider sourceProvider) { + return new SearchLookup(this, sourceProvider, fieldChain); + } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 147b13b36c44..00f53d31165b 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.client.internal.ClusterAdminClient; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -1648,6 +1649,44 @@ public void testMaxTruncationSizeSetting() { } } + public void testScriptField() throws Exception { + XContentBuilder mapping = JsonXContent.contentBuilder(); + mapping.startObject(); + { + mapping.startObject("runtime"); + { + mapping.startObject("k1"); + mapping.field("type", "long"); + mapping.endObject(); + mapping.startObject("k2"); + mapping.field("type", "long"); + mapping.endObject(); + } + mapping.endObject(); + { + mapping.startObject("properties"); + mapping.startObject("meter").field("type", "double").endObject(); + mapping.endObject(); + } + } + mapping.endObject(); + String sourceMode = randomBoolean() ? 
"stored" : "synthetic"; + Settings.Builder settings = indexSettings(1, 0).put(indexSettings()).put("index.mapping.source.mode", sourceMode); + client().admin().indices().prepareCreate("test-script").setMapping(mapping).setSettings(settings).get(); + for (int i = 0; i < 10; i++) { + index("test-script", Integer.toString(i), Map.of("k1", i, "k2", "b-" + i, "meter", 10000 * i)); + } + refresh("test-script"); + try (EsqlQueryResponse resp = run("FROM test-script | SORT k1 | LIMIT 10")) { + List k1Column = Iterators.toList(resp.column(0)); + assertThat(k1Column, contains(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L)); + List k2Column = Iterators.toList(resp.column(1)); + assertThat(k2Column, contains(null, null, null, null, null, null, null, null, null, null)); + List meterColumn = Iterators.toList(resp.column(2)); + assertThat(meterColumn, contains(0.0, 10000.0, 20000.0, 30000.0, 40000.0, 50000.0, 60000.0, 70000.0, 80000.0, 90000.0)); + } + } + private void clearPersistentSettings(Setting... settings) { Settings.Builder clearedSettings = Settings.builder(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 7bf7d0e2d08e..39e2a3bc1d5a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedLookup; +import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.SourceLoader; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -348,7 +349,16 @@ public MappedFieldType.FieldExtractPreference fieldExtractPreference() { @Override public SearchLookup lookup() { - return ctx.lookup(); + boolean syntheticSource = SourceFieldMapper.isSynthetic(indexSettings()); + var searchLookup = ctx.lookup(); + if (syntheticSource) { + // in the context of scripts and when synthetic source is used the search lookup can't always be reused between + // users of SearchLookup. This is only an issue when scripts fallback to _source, but since we can't always + // accurately determine whether a script uses _source, we should do this for all script usages. + // This lookup() method is only invoked for scripts / runtime fields, so it is ok to do here. 
+ searchLookup = searchLookup.swapSourceProvider(ctx.createSourceProvider()); + } + return searchLookup; } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 9aea1577a413..aa3123fc2f63 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -45,6 +45,7 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.lookup.SourceProvider; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancelledException; @@ -82,6 +83,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; import static org.elasticsearch.xpack.esql.plugin.EsqlPlugin.ESQL_WORKER_THREAD_POOL_NAME; @@ -428,12 +430,17 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, List contexts = new ArrayList<>(context.searchContexts.size()); for (int i = 0; i < context.searchContexts.size(); i++) { SearchContext searchContext = context.searchContexts.get(i); + var searchExecutionContext = new SearchExecutionContext(searchContext.getSearchExecutionContext()) { + + @Override + public SourceProvider createSourceProvider() { + final Supplier supplier = () -> super.createSourceProvider(); + return new ReinitializingSourceProvider(supplier); + + } + }; contexts.add( - new EsPhysicalOperationProviders.DefaultShardContext( - i, - searchContext.getSearchExecutionContext(), - searchContext.request().getAliasFilter() - ) + new EsPhysicalOperationProviders.DefaultShardContext(i, searchExecutionContext, searchContext.request().getAliasFilter()) ); } final List drivers; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ReinitializingSourceProvider.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ReinitializingSourceProvider.java new file mode 100644 index 000000000000..b6b2c6dfec75 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ReinitializingSourceProvider.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.search.lookup.SourceProvider; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * This is a workaround for when compute engine executes concurrently with data partitioning by docid. 
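+ * A new underlying SourceProvider is created whenever a different thread starts reading, so a single provider instance is never shared between threads.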
+ */ +final class ReinitializingSourceProvider implements SourceProvider { + + private PerThreadSourceProvider perThreadProvider; + private final Supplier<SourceProvider> sourceProviderFactory; + + ReinitializingSourceProvider(Supplier<SourceProvider> sourceProviderFactory) { + this.sourceProviderFactory = sourceProviderFactory; + } + + @Override + public Source getSource(LeafReaderContext ctx, int doc) throws IOException { + var currentThread = Thread.currentThread(); + PerThreadSourceProvider provider = perThreadProvider; + if (provider == null || provider.creatingThread != currentThread) { + provider = new PerThreadSourceProvider(sourceProviderFactory.get(), currentThread); + this.perThreadProvider = provider; + } + return provider.source.getSource(ctx, doc); + } + + private record PerThreadSourceProvider(SourceProvider source, Thread creatingThread) { + + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java index 2bf8b00cf551..d42c1aa240a6 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java @@ -10,6 +10,8 @@ import org.elasticsearch.client.Request; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; @@ -17,6 +19,7 @@ import org.junit.ClassRule; import java.io.IOException; +import java.time.Instant; import java.util.List; import java.util.Map; @@ -108,4 +111,118 @@ public void testLogsdbSourceModeForLogsIndex() throws IOException { assertNull(settings.get("index.mapping.source.mode")); } + public void testEsqlRuntimeFields() throws IOException { + String mappings = """ + { + "runtime": { + "message_length": { + "type": "long" + }, + "log.offset": { + "type": "long" + } + }, + "dynamic": false, + "properties": { + "@timestamp": { + "type": "date" + }, + "log" : { + "properties": { + "level": { + "type": "keyword" + }, + "file": { + "type": "keyword" + } + } + } + } + } + """; + String indexName = "test-foo"; + createIndex(indexName, Settings.builder().put("index.mode", "logsdb").build(), mappings); + + int numDocs = 500; + var sb = new StringBuilder(); + var now = Instant.now(); + + var expectedMinTimestamp = now; + for (int i = 0; i < numDocs; i++) { + String level = randomBoolean() ? "info" : randomBoolean() ? "warning" : randomBoolean() ?
"error" : "fatal"; + String msg = randomAlphaOfLength(20); + String path = randomAlphaOfLength(8); + String messageLength = Integer.toString(msg.length()); + String offset = Integer.toString(randomNonNegativeInt()); + sb.append("{ \"create\": {} }").append('\n'); + if (randomBoolean()) { + sb.append( + """ + {"@timestamp":"$now","message":"$msg","message_length":$l,"file":{"level":"$level","offset":5,"file":"$path"}} + """.replace("$now", formatInstant(now)) + .replace("$level", level) + .replace("$msg", msg) + .replace("$path", path) + .replace("$l", messageLength) + .replace("$o", offset) + ); + } else { + sb.append(""" + {"@timestamp": "$now", "message": "$msg", "message_length": $l} + """.replace("$now", formatInstant(now)).replace("$msg", msg).replace("$l", messageLength)); + } + sb.append('\n'); + if (i != numDocs - 1) { + now = now.plusSeconds(1); + } + } + var expectedMaxTimestamp = now; + + var bulkRequest = new Request("POST", "/" + indexName + "/_bulk"); + bulkRequest.setJsonEntity(sb.toString()); + bulkRequest.addParameter("refresh", "true"); + var bulkResponse = client().performRequest(bulkRequest); + var bulkResponseBody = responseAsMap(bulkResponse); + assertThat(bulkResponseBody, Matchers.hasEntry("errors", false)); + + var forceMergeRequest = new Request("POST", "/" + indexName + "/_forcemerge"); + forceMergeRequest.addParameter("max_num_segments", "1"); + var forceMergeResponse = client().performRequest(forceMergeRequest); + assertOK(forceMergeResponse); + + String query = "FROM test-foo | STATS count(*), min(@timestamp), max(@timestamp), min(message_length), max(message_length)" + + " ,sum(message_length), avg(message_length), min(log.offset), max(log.offset) | LIMIT 1"; + final Request esqlRequest = new Request("POST", "/_query"); + esqlRequest.setJsonEntity(""" + { + "query": "$query" + } + """.replace("$query", query)); + var esqlResponse = client().performRequest(esqlRequest); + assertOK(esqlResponse); + Map esqlResponseBody = responseAsMap(esqlResponse); + + List values = (List) esqlResponseBody.get("values"); + assertThat(values, Matchers.not(Matchers.empty())); + var count = ((List) values.get(0)).get(0); + assertThat(count, equalTo(numDocs)); + logger.warn("VALUES: {}", values); + + var minTimestamp = ((List) values.get(0)).get(1); + assertThat(minTimestamp, equalTo(formatInstant(expectedMinTimestamp))); + var maxTimestamp = ((List) values.get(0)).get(2); + assertThat(maxTimestamp, equalTo(formatInstant(expectedMaxTimestamp))); + + var minLength = ((List) values.get(0)).get(3); + assertThat(minLength, equalTo(20)); + var maxLength = ((List) values.get(0)).get(4); + assertThat(maxLength, equalTo(20)); + var sumLength = ((List) values.get(0)).get(5); + assertThat(sumLength, equalTo(20 * numDocs)); + } + + static String formatInstant(Instant instant) { + return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); + } + } From 5afbfda21349a0dda515c5bd0e2ee6beacb2f61d Mon Sep 17 00:00:00 2001 From: Craig Taverner Date: Thu, 5 Dec 2024 12:29:16 +0100 Subject: [PATCH 34/45] [8.x] Backport two PRs (#117246) (#117843) (#117967) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * LOOKUP JOIN using field-caps for field mapping (#117246) * LOOKUP JOIN using field-caps for field mapping Removes the hard-coded hack for languages_lookup, and instead does a field-caps check for the real join index. 
* Update docs/changelog/117246.yaml * Some code review comments * Enhance LOOKUP JOIN csv-spec tests to cover more cases and fix several bugs found (#117843) Adds several more tests to lookup-join.csv-spec, and fixes the following bugs: * FieldCaps on right hand side should ignore fieldNames method and just use "*" because currently the fieldNames search cannot handle lookup fields with aliases (should be fixed in a followup PR). * Stop using the lookup index in the ComputeService (so we don’t get both indices data coming in from the left, and other weird behaviour). * Ignore failing SearchStats checks on fields from the right hand side in the logical planner (so it does not plan EVAL field = null for all right hand fields). This should be fixed properly with the correct updates to TransportSearchShardsAction (or rather to making multiple use of that for each branch of the execution model). * Don't load indices with mode:lookup due to cluster state errors in mixed clusters * Disable all lookup-join tests on 8.x, due to issues with cluster state * Spotless apply --- docs/changelog/117246.yaml | 5 + .../xpack/esql/ccq/MultiClusterSpecIT.java | 4 +- .../xpack/esql/CsvTestsDataLoader.java | 24 +- .../resources/clientips_lookup-settings.json | 5 + .../src/main/resources/languages.csv | 2 +- .../resources/languages_lookup-settings.json | 5 + .../src/main/resources/lookup-join.csv-spec | 224 +++++++++++++++++- .../src/main/resources/mapping-clientips.json | 16 +- .../src/main/resources/mapping-languages.json | 2 +- .../main/resources/mapping-message_types.json | 10 + .../src/main/resources/message_types.csv | 6 + .../message_types_lookup-settings.json | 5 + .../xpack/esql/action/EsqlCapabilities.java | 2 +- .../xpack/esql/analysis/Analyzer.java | 37 +-- .../xpack/esql/analysis/AnalyzerContext.java | 14 +- .../esql/enrich/LookupFromIndexService.java | 11 + .../local/ReplaceMissingFieldWithNull.java | 13 +- .../physical/local/InsertFieldExtraction.java | 15 +- .../esql/plan/physical/LookupJoinExec.java | 2 +- .../esql/planner/LocalExecutionPlanner.java | 1 + .../xpack/esql/planner/PlannerUtils.java | 11 +- .../xpack/esql/plugin/ComputeService.java | 54 ++++- .../xpack/esql/session/EsqlSession.java | 146 ++++++++---- .../elasticsearch/xpack/esql/CsvTests.java | 2 +- 24 files changed, 494 insertions(+), 122 deletions(-) create mode 100644 docs/changelog/117246.yaml create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips_lookup-settings.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_lookup-settings.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-message_types.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types_lookup-settings.json diff --git a/docs/changelog/117246.yaml b/docs/changelog/117246.yaml new file mode 100644 index 000000000000..29c446485596 --- /dev/null +++ b/docs/changelog/117246.yaml @@ -0,0 +1,5 @@ +pr: 117246 +summary: LOOKUP JOIN using field-caps for field mapping +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java index 6c7b700af5b1..e658d169cbce 100644 --- 
a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -48,7 +48,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.classpathResources; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.INLINESTATS_V2; -import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V3; +import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_LOOKUP_V4; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.JOIN_PLANNING_V1; import static org.elasticsearch.xpack.esql.action.EsqlCapabilities.Cap.METADATA_FIELDS_REMOTE_TEST; import static org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase.Mode.SYNC; @@ -124,7 +124,7 @@ protected void shouldSkipTest(String testName) throws IOException { assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(INLINESTATS_V2.capabilityName())); assumeFalse("INLINESTATS not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_PLANNING_V1.capabilityName())); - assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V3.capabilityName())); + assumeFalse("LOOKUP JOIN not yet supported in CCS", testCase.requiredCapabilities.contains(JOIN_LOOKUP_V4.capabilityName())); } private TestFeatureService remoteFeaturesService() throws IOException { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 32e244f4b729..2cb89a9c0aca 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -56,6 +56,8 @@ public class CsvTestsDataLoader { private static final TestsDataset APPS = new TestsDataset("apps"); private static final TestsDataset APPS_SHORT = APPS.withIndex("apps_short").withTypeMapping(Map.of("id", "short")); private static final TestsDataset LANGUAGES = new TestsDataset("languages"); + // private static final TestsDataset LANGUAGES_LOOKUP = LANGUAGES.withIndex("languages_lookup") + // .withSetting("languages_lookup-settings.json"); private static final TestsDataset ALERTS = new TestsDataset("alerts"); private static final TestsDataset UL_LOGS = new TestsDataset("ul_logs"); private static final TestsDataset SAMPLE_DATA = new TestsDataset("sample_data"); @@ -70,6 +72,11 @@ public class CsvTestsDataLoader { .withTypeMapping(Map.of("@timestamp", "date_nanos")); private static final TestsDataset MISSING_IP_SAMPLE_DATA = new TestsDataset("missing_ip_sample_data"); private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips"); + // private static final TestsDataset CLIENT_IPS_LOOKUP = CLIENT_IPS.withIndex("clientips_lookup") + // .withSetting("clientips_lookup-settings.json"); + private static final TestsDataset MESSAGE_TYPES = new TestsDataset("message_types"); + // private static final TestsDataset MESSAGE_TYPES_LOOKUP = 
MESSAGE_TYPES.withIndex("message_types_lookup") + // .withSetting("message_types_lookup-settings.json"); private static final TestsDataset CLIENT_CIDR = new TestsDataset("client_cidr"); private static final TestsDataset AGES = new TestsDataset("ages"); private static final TestsDataset HEIGHTS = new TestsDataset("heights"); @@ -94,14 +101,13 @@ public class CsvTestsDataLoader { private static final TestsDataset BOOKS = new TestsDataset("books"); private static final TestsDataset SEMANTIC_TEXT = new TestsDataset("semantic_text").withInferenceEndpoint(true); - private static final String LOOKUP_INDEX_SUFFIX = "_lookup"; - public static final Map CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), Map.entry(HOSTS.indexName, HOSTS), Map.entry(APPS.indexName, APPS), Map.entry(APPS_SHORT.indexName, APPS_SHORT), Map.entry(LANGUAGES.indexName, LANGUAGES), + // Map.entry(LANGUAGES_LOOKUP.indexName, LANGUAGES_LOOKUP), Map.entry(UL_LOGS.indexName, UL_LOGS), Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA), Map.entry(MV_SAMPLE_DATA.indexName, MV_SAMPLE_DATA), @@ -111,6 +117,9 @@ public class CsvTestsDataLoader { Map.entry(SAMPLE_DATA_TS_NANOS.indexName, SAMPLE_DATA_TS_NANOS), Map.entry(MISSING_IP_SAMPLE_DATA.indexName, MISSING_IP_SAMPLE_DATA), Map.entry(CLIENT_IPS.indexName, CLIENT_IPS), + // Map.entry(CLIENT_IPS_LOOKUP.indexName, CLIENT_IPS_LOOKUP), + Map.entry(MESSAGE_TYPES.indexName, MESSAGE_TYPES), + // Map.entry(MESSAGE_TYPES_LOOKUP.indexName, MESSAGE_TYPES_LOOKUP), Map.entry(CLIENT_CIDR.indexName, CLIENT_CIDR), Map.entry(AGES.indexName, AGES), Map.entry(HEIGHTS.indexName, HEIGHTS), @@ -132,9 +141,7 @@ public class CsvTestsDataLoader { Map.entry(DISTANCES.indexName, DISTANCES), Map.entry(ADDRESSES.indexName, ADDRESSES), Map.entry(BOOKS.indexName, BOOKS), - Map.entry(SEMANTIC_TEXT.indexName, SEMANTIC_TEXT), - // JOIN LOOKUP alias - Map.entry(LANGUAGES.indexName + LOOKUP_INDEX_SUFFIX, LANGUAGES.withIndex(LANGUAGES.indexName + LOOKUP_INDEX_SUFFIX)) + Map.entry(SEMANTIC_TEXT.indexName, SEMANTIC_TEXT) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); @@ -174,13 +181,14 @@ public class CsvTestsDataLoader { *
</p> * <p> * Accepts an URL as first argument, eg. http://localhost:9200 or http://user:pass@localhost:9200 - * <p> + * </p> * <p> * If no arguments are specified, the default URL is http://localhost:9200 without authentication * </p> * <p> * It also supports HTTPS * </p>
+ * * @param args the URL to connect * @throws IOException */ @@ -270,7 +278,9 @@ private static void loadDataSetIntoEs(RestClient client, IndexCreator indexCreat } } - /** The semantic_text mapping type require an inference endpoint that needs to be setup before creating the index. */ + /** + * The semantic_text mapping type require an inference endpoint that needs to be setup before creating the index. + */ public static void createInferenceEndpoint(RestClient client) throws IOException { Request request = new Request("PUT", "_inference/sparse_embedding/test_sparse_inference"); request.setJsonEntity(""" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips_lookup-settings.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips_lookup-settings.json new file mode 100644 index 000000000000..b73d1f9accf9 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/clientips_lookup-settings.json @@ -0,0 +1,5 @@ +{ + "index": { + "mode": "lookup" + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv index 3ee60b79970b..1c1a9776df6c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages.csv @@ -1,4 +1,4 @@ -language_code:keyword,language_name:keyword +language_code:integer,language_name:keyword 1,English 2,French 3,Spanish diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_lookup-settings.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_lookup-settings.json new file mode 100644 index 000000000000..b73d1f9accf9 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/languages_lookup-settings.json @@ -0,0 +1,5 @@ +{ + "index": { + "mode": "lookup" + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec index 5de353978b30..f2800456ceb3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/lookup-join.csv-spec @@ -4,8 +4,8 @@ // //TODO: this sometimes returns null instead of the looked up value (likely related to the execution order) -basicOnTheDataNode-Ignore -required_capability: join_lookup_v3 +basicOnTheDataNode +required_capability: join_lookup_v4 FROM employees | EVAL language_code = languages @@ -21,19 +21,19 @@ emp_no:integer | language_code:integer | language_name:keyword 10093 | 3 | Spanish ; -basicRow-Ignore -required_capability: join_lookup_v3 +basicRow +required_capability: join_lookup_v4 ROW language_code = 1 | LOOKUP JOIN languages_lookup ON language_code ; -language_code:keyword | language_name:keyword +language_code:integer | language_name:keyword 1 | English ; basicOnTheCoordinator -required_capability: join_lookup_v3 +required_capability: join_lookup_v4 FROM employees | SORT emp_no @@ -49,9 +49,8 @@ emp_no:integer | language_code:integer | language_name:keyword 10003 | 4 | German ; -//TODO: this sometimes returns null instead of the looked up value (likely related to the execution order) -subsequentEvalOnTheDataNode-Ignore -required_capability: join_lookup_v3 +subsequentEvalOnTheDataNode +required_capability: join_lookup_v4 FROM employees | EVAL language_code = languages @@ -69,7 +68,7 @@ emp_no:integer | language_code:integer | language_name:keyword | 
language_code_x ; subsequentEvalOnTheCoordinator -required_capability: join_lookup_v3 +required_capability: join_lookup_v4 FROM employees | SORT emp_no @@ -85,3 +84,208 @@ emp_no:integer | language_code:integer | language_name:keyword | language_code_x 10002 | 5 | null | 10 10003 | 4 | german | 8 ; + +lookupIPFromRow +required_capability: join_lookup_v4 + +ROW left = "left", client_ip = "172.21.0.5", right = "right" +| LOOKUP JOIN clientips_lookup ON client_ip +; + +left:keyword | client_ip:keyword | right:keyword | env:keyword +left | 172.21.0.5 | right | Development +; + +lookupIPFromRowWithShadowing +required_capability: join_lookup_v4 + +ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" +| LOOKUP JOIN clientips_lookup ON client_ip +; + +left:keyword | client_ip:keyword | right:keyword | env:keyword +left | 172.21.0.5 | right | Development +; + +lookupIPFromRowWithShadowingKeep +required_capability: join_lookup_v4 + +ROW left = "left", client_ip = "172.21.0.5", env = "env", right = "right" +| EVAL client_ip = client_ip::keyword +| LOOKUP JOIN clientips_lookup ON client_ip +| KEEP left, client_ip, right, env +; + +left:keyword | client_ip:keyword | right:keyword | env:keyword +left | 172.21.0.5 | right | Development +; + +lookupIPFromIndex +required_capability: join_lookup_v4 + +FROM sample_data +| EVAL client_ip = client_ip::keyword +| LOOKUP JOIN clientips_lookup ON client_ip +; + +@timestamp:date | event_duration:long | message:keyword | client_ip:keyword | env:keyword +2023-10-23T13:55:01.543Z | 1756467 | Connected to 10.1.0.1 | 172.21.3.15 | Production +2023-10-23T13:53:55.832Z | 5033755 | Connection error | 172.21.3.15 | Production +2023-10-23T13:52:55.015Z | 8268153 | Connection error | 172.21.3.15 | Production +2023-10-23T13:51:54.732Z | 725448 | Connection error | 172.21.3.15 | Production +2023-10-23T13:33:34.937Z | 1232382 | Disconnected | 172.21.0.5 | Development +2023-10-23T12:27:28.948Z | 2764889 | Connected to 10.1.0.2 | 172.21.2.113 | QA +2023-10-23T12:15:03.360Z | 3450233 | Connected to 10.1.0.3 | 172.21.2.162 | QA +; + +lookupIPFromIndexKeep +required_capability: join_lookup_v4 + +FROM sample_data +| EVAL client_ip = client_ip::keyword +| LOOKUP JOIN clientips_lookup ON client_ip +| KEEP @timestamp, client_ip, event_duration, message, env +; + +@timestamp:date | client_ip:keyword | event_duration:long | message:keyword | env:keyword +2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 | Production +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error | Production +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error | Production +2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error | Production +2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected | Development +2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | QA +2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 | QA +; + +lookupIPFromIndexStats +required_capability: join_lookup_v4 + +FROM sample_data +| EVAL client_ip = client_ip::keyword +| LOOKUP JOIN clientips_lookup ON client_ip +| STATS count = count(client_ip) BY env +| SORT count DESC, env ASC +; + +count:long | env:keyword +4 | Production +2 | QA +1 | Development +; + +lookupIPFromIndexStatsKeep +required_capability: join_lookup_v4 + +FROM sample_data +| EVAL client_ip = client_ip::keyword +| LOOKUP JOIN clientips_lookup ON client_ip +| KEEP client_ip, env +| STATS count = count(client_ip) BY env +| SORT count 
DESC, env ASC +; + +count:long | env:keyword +4 | Production +2 | QA +1 | Development +; + +lookupMessageFromRow +required_capability: join_lookup_v4 + +ROW left = "left", message = "Connected to 10.1.0.1", right = "right" +| LOOKUP JOIN message_types_lookup ON message +; + +left:keyword | message:keyword | right:keyword | type:keyword +left | Connected to 10.1.0.1 | right | Success +; + +lookupMessageFromRowWithShadowing +required_capability: join_lookup_v4 + +ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" +| LOOKUP JOIN message_types_lookup ON message +; + +left:keyword | message:keyword | right:keyword | type:keyword +left | Connected to 10.1.0.1 | right | Success +; + +lookupMessageFromRowWithShadowingKeep +required_capability: join_lookup_v4 + +ROW left = "left", message = "Connected to 10.1.0.1", type = "unknown", right = "right" +| LOOKUP JOIN message_types_lookup ON message +| KEEP left, message, right, type +; + +left:keyword | message:keyword | right:keyword | type:keyword +left | Connected to 10.1.0.1 | right | Success +; + +lookupMessageFromIndex +required_capability: join_lookup_v4 + +FROM sample_data +| LOOKUP JOIN message_types_lookup ON message +; + +@timestamp:date | client_ip:ip | event_duration:long | message:keyword | type:keyword +2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 | Success +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error | Error +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error | Error +2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error | Error +2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected | Disconnected +2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | Success +2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 | Success +; + +lookupMessageFromIndexKeep +required_capability: join_lookup_v4 + +FROM sample_data +| LOOKUP JOIN message_types_lookup ON message +| KEEP @timestamp, client_ip, event_duration, message, type +; + +@timestamp:date | client_ip:ip | event_duration:long | message:keyword | type:keyword +2023-10-23T13:55:01.543Z | 172.21.3.15 | 1756467 | Connected to 10.1.0.1 | Success +2023-10-23T13:53:55.832Z | 172.21.3.15 | 5033755 | Connection error | Error +2023-10-23T13:52:55.015Z | 172.21.3.15 | 8268153 | Connection error | Error +2023-10-23T13:51:54.732Z | 172.21.3.15 | 725448 | Connection error | Error +2023-10-23T13:33:34.937Z | 172.21.0.5 | 1232382 | Disconnected | Disconnected +2023-10-23T12:27:28.948Z | 172.21.2.113 | 2764889 | Connected to 10.1.0.2 | Success +2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233 | Connected to 10.1.0.3 | Success +; + +lookupMessageFromIndexStats +required_capability: join_lookup_v4 + +FROM sample_data +| LOOKUP JOIN message_types_lookup ON message +| STATS count = count(message) BY type +| SORT count DESC, type ASC +; + +count:long | type:keyword +3 | Error +3 | Success +1 | Disconnected +; + +lookupMessageFromIndexStatsKeep +required_capability: join_lookup_v4 + +FROM sample_data +| LOOKUP JOIN message_types_lookup ON message +| KEEP message, type +| STATS count = count(message) BY type +| SORT count DESC, type ASC +; + +count:long | type:keyword +3 | Error +3 | Success +1 | Disconnected +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-clientips.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-clientips.json index 39bd37ce26c7..d491810f9134 100644 --- 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-clientips.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-clientips.json @@ -1,10 +1,10 @@ { - "properties": { - "client_ip": { - "type": "keyword" - }, - "env": { - "type": "keyword" - } + "properties": { + "client_ip": { + "type": "keyword" + }, + "env": { + "type": "keyword" } - } \ No newline at end of file + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages.json index 0cec0caf1730..327b69236924 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-languages.json @@ -1,7 +1,7 @@ { "properties" : { "language_code" : { - "type" : "keyword" + "type" : "integer" }, "language_name" : { "type" : "keyword" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-message_types.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-message_types.json new file mode 100644 index 000000000000..af545b48da3d --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-message_types.json @@ -0,0 +1,10 @@ +{ + "properties": { + "message": { + "type": "keyword" + }, + "type": { + "type": "keyword" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv new file mode 100644 index 000000000000..8e0048577144 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types.csv @@ -0,0 +1,6 @@ +message:keyword,type:keyword +Connection error,Error +Disconnected,Disconnected +Connected to 10.1.0.1,Success +Connected to 10.1.0.2,Success +Connected to 10.1.0.3,Success diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types_lookup-settings.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types_lookup-settings.json new file mode 100644 index 000000000000..b73d1f9accf9 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/message_types_lookup-settings.json @@ -0,0 +1,5 @@ +{ + "index": { + "mode": "lookup" + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 9fad9123944f..a8f51bd4dc24 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -518,7 +518,7 @@ public enum Cap { /** * LOOKUP JOIN */ - JOIN_LOOKUP_V3(Build.current().isSnapshot()), + JOIN_LOOKUP_V4(false && Build.current().isSnapshot()), /** * Fix for https://github.com/elastic/elasticsearch/issues/117054 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index d127c26298a2..b847508d2b16 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -61,6 +61,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation; import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.index.IndexResolution; import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plan.TableIdentifier; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; @@ -105,7 +106,6 @@ import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.function.Function; @@ -198,11 +198,12 @@ private static class ResolveTable extends ParameterizedAnalyzerRule"), enrichResolution); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java index 849e8e890e24..4f429c46b912 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/LookupFromIndexService.java @@ -24,6 +24,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; @@ -78,9 +79,19 @@ protected TransportRequest transportRequest(LookupFromIndexService.Request reque @Override protected QueryList queryList(TransportRequest request, SearchExecutionContext context, Block inputBlock, DataType inputDataType) { MappedFieldType fieldType = context.getFieldType(request.matchField); + validateTypes(request.inputDataType, fieldType); return termQueryList(fieldType, context, inputBlock, inputDataType); } + private static void validateTypes(DataType inputDataType, MappedFieldType fieldType) { + // TODO: consider supporting implicit type conversion as done in ENRICH for some types + if (fieldType.typeName().equals(inputDataType.typeName()) == false) { + throw new EsqlIllegalArgumentException( + "LOOKUP JOIN match and input types are incompatible: match[" + fieldType.typeName() + "], input[" + inputDataType + "]" + ); + } + } + public static class Request extends AbstractLookupService.Request { private final String matchField; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java index 0fa6d61a0ca9..096f72f7694e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/local/ReplaceMissingFieldWithNull.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.xpack.esql.core.expression.Alias; +import org.elasticsearch.xpack.esql.core.expression.AttributeSet; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; @@ -23,6 +24,7 @@ import 
org.elasticsearch.xpack.esql.plan.logical.Project; import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.rule.ParameterizedRule; import org.elasticsearch.xpack.esql.stats.SearchStats; @@ -56,10 +58,13 @@ else if (plan instanceof Project project) { var projections = project.projections(); List newProjections = new ArrayList<>(projections.size()); Map nullLiteral = Maps.newLinkedHashMapWithExpectedSize(DataType.types().size()); + AttributeSet joinAttributes = joinAttributes(project); for (NamedExpression projection : projections) { // Do not use the attribute name, this can deviate from the field name for union types. - if (projection instanceof FieldAttribute f && stats.exists(f.fieldName()) == false) { + if (projection instanceof FieldAttribute f && stats.exists(f.fieldName()) == false && joinAttributes.contains(f) == false) { + // TODO: Should do a searchStats lookup for join attributes instead of just ignoring them here + // See TransportSearchShardsAction DataType dt = f.dataType(); Alias nullAlias = nullLiteral.get(f.dataType()); // save the first field as null (per datatype) @@ -96,4 +101,10 @@ else if (plan instanceof Project project) { return plan; } + + private AttributeSet joinAttributes(Project project) { + var attributes = new AttributeSet(); + project.forEachDown(Join.class, j -> j.right().forEachDown(EsRelation.class, p -> attributes.addAll(p.output()))); + return attributes; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java index cafe3726f92a..dc32a4ad3c28 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/physical/local/InsertFieldExtraction.java @@ -23,14 +23,12 @@ import org.elasticsearch.xpack.esql.rule.Rule; import java.util.ArrayList; -import java.util.Collections; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.Set; /** - * * Materialize the concrete fields that need to be extracted from the storage until the last possible moment. * Expects the local plan to already have a projection containing the fields needed upstream. *
<p>
@@ -102,15 +100,18 @@ public PhysicalPlan apply(PhysicalPlan plan) { private static Set missingAttributes(PhysicalPlan p) { var missing = new LinkedHashSet(); - var inputSet = p.inputSet(); + var input = p.inputSet(); - // TODO: We need to extract whatever fields are missing from the left hand side. - // skip the lookup join since the right side is always materialized and a projection + // For LOOKUP JOIN we only need field-extraction on left fields used to match, since the right side is always materialized if (p instanceof LookupJoinExec join) { - return Collections.emptySet(); + join.leftFields().forEach(f -> { + if (input.contains(f) == false) { + missing.add(f); + } + }); + return missing; } - var input = inputSet; // collect field attributes used inside expressions // TODO: Rather than going over all expressions manually, this should just call .references() p.forEachExpression(TypedAttribute.class, f -> { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java index 2d3caa27da4c..8b1cc047309e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LookupJoinExec.java @@ -102,7 +102,7 @@ public List output() { @Override public PhysicalPlan estimateRowSize(State state) { - state.add(false, output()); + state.add(false, addedFields); return this; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 0bdd3476320a..00afe64c76ee 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -565,6 +565,7 @@ private PhysicalOperation planHashJoin(HashJoinExec join, LocalExecutionPlannerC private PhysicalOperation planLookupJoin(LookupJoinExec join, LocalExecutionPlannerContext context) { PhysicalOperation source = plan(join.left(), context); + // TODO: The source builder includes incoming fields including the ones we're going to drop Layout.Builder layoutBuilder = source.layout.builder(); for (Attribute f : join.addedFields()) { layoutBuilder.append(f); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 5e13825d91bd..22f4c4d46e6a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -14,6 +14,7 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; @@ -25,6 +26,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.util.Holder; import org.elasticsearch.xpack.esql.core.util.Queries; +import org.elasticsearch.xpack.esql.index.EsIndex; import 
org.elasticsearch.xpack.esql.optimizer.LocalLogicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer; import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; @@ -117,12 +119,17 @@ public static String[] planOriginalIndices(PhysicalPlan plan) { var indices = new LinkedHashSet(); plan.forEachUp( FragmentExec.class, - f -> f.fragment() - .forEachUp(EsRelation.class, r -> indices.addAll(asList(Strings.commaDelimitedListToStringArray(r.index().name())))) + f -> f.fragment().forEachUp(EsRelation.class, r -> addOriginalIndexIfNotLookup(indices, r.index())) ); return indices.toArray(String[]::new); } + private static void addOriginalIndexIfNotLookup(Set indices, EsIndex index) { + if (index.indexNameWithModes().get(index.name()) != IndexMode.LOOKUP) { + indices.addAll(asList(Strings.commaDelimitedListToStringArray(index.name()))); + } + } + public static PhysicalPlan localPlan(List searchContexts, Configuration configuration, PhysicalPlan plan) { return localPlan(configuration, plan, SearchContextStats.from(searchContexts)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index aa3123fc2f63..ed037d24139f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -63,8 +63,12 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService; +import org.elasticsearch.xpack.esql.plan.logical.EsRelation; +import org.elasticsearch.xpack.esql.plan.logical.join.Join; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; +import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; +import org.elasticsearch.xpack.esql.plan.physical.LookupJoinExec; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; @@ -77,6 +81,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -162,9 +167,11 @@ public void execute( Map clusterToConcreteIndices = transportService.getRemoteClusterService() .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); + Set lookupIndexNames = findLookupIndexNames(physicalPlan); + Set concreteIndexNames = selectConcreteIndices(clusterToConcreteIndices, lookupIndexNames); if (dataNodePlan == null) { - if (clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0) == false) { - String error = "expected no concrete indices without data node plan; got " + clusterToConcreteIndices; + if (concreteIndexNames.isEmpty() == false) { + String error = "expected no concrete indices without data node plan; got " + concreteIndexNames; assert false : error; listener.onFailure(new IllegalStateException(error)); return; @@ -187,7 +194,7 @@ public void execute( return; } } else { - if (clusterToConcreteIndices.values().stream().allMatch(v -> 
v.indices().length == 0)) { + if (concreteIndexNames.isEmpty()) { var error = "expected concrete indices with data node plan but got empty; data node plan " + dataNodePlan; assert false : error; listener.onFailure(new IllegalStateException(error)); @@ -261,6 +268,42 @@ public void execute( } } + private Set selectConcreteIndices(Map clusterToConcreteIndices, Set indexesToIgnore) { + Set concreteIndexNames = new HashSet<>(); + clusterToConcreteIndices.forEach((clusterAlias, concreteIndices) -> { + for (String index : concreteIndices.indices()) { + if (indexesToIgnore.contains(index) == false) { + concreteIndexNames.add(index); + } + } + }); + return concreteIndexNames; + } + + private Set findLookupIndexNames(PhysicalPlan physicalPlan) { + Set lookupIndexNames = new HashSet<>(); + // When planning JOIN on the coordinator node: "LookupJoinExec.lookup()->FragmentExec.fragment()->EsRelation.index()" + physicalPlan.forEachDown( + LookupJoinExec.class, + lookupJoinExec -> lookupJoinExec.lookup() + .forEachDown( + FragmentExec.class, + frag -> frag.fragment().forEachDown(EsRelation.class, esRelation -> lookupIndexNames.add(esRelation.index().name())) + ) + ); + // When planning JOIN on the data node: "FragmentExec.fragment()->Join.right()->EsRelation.index()" + // TODO this only works for LEFT join, so we still need to support RIGHT join + physicalPlan.forEachDown( + FragmentExec.class, + fragmentExec -> fragmentExec.fragment() + .forEachDown( + Join.class, + join -> join.right().forEachDown(EsRelation.class, esRelation -> lookupIndexNames.add(esRelation.index().name())) + ) + ); + return lookupIndexNames; + } + // For queries like: FROM logs* | LIMIT 0 (including cross-cluster LIMIT 0 queries) private static void updateShardCountForCoordinatorOnlyQuery(EsqlExecutionInfo execInfo) { if (execInfo.isCrossClusterSearch()) { @@ -569,8 +612,9 @@ record DataNode(Transport.Connection connection, List shardIds, Map dataNodes, int totalShards, int skippedShards) {} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 99d6f3d276d2..3d1ed8f70eae 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.compute.data.Block; @@ -62,6 +63,8 @@ import org.elasticsearch.xpack.esql.plan.logical.RegexExtract; import org.elasticsearch.xpack.esql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.esql.plan.logical.join.InlineJoin; +import org.elasticsearch.xpack.esql.plan.logical.join.JoinTypes; +import org.elasticsearch.xpack.esql.plan.logical.join.LookupJoin; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; @@ -76,7 +79,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.BiFunction; import java.util.stream.Collectors; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ 
-176,7 +178,7 @@ public void executeOptimizedPlan( executeSubPlans(physicalPlan, planRunner, executionInfo, request, listener); } - private record PlanTuple(PhysicalPlan physical, LogicalPlan logical) {}; + private record PlanTuple(PhysicalPlan physical, LogicalPlan logical) {} private void executeSubPlans( PhysicalPlan physicalPlan, @@ -272,9 +274,12 @@ public void analyzedPlan(LogicalPlan parsed, EsqlExecutionInfo executionInfo, Ac return; } - preAnalyze(parsed, executionInfo, (indices, policies) -> { + preAnalyze(parsed, executionInfo, (indices, lookupIndices, policies) -> { planningMetrics.gatherPreAnalysisMetrics(parsed); - Analyzer analyzer = new Analyzer(new AnalyzerContext(configuration, functionRegistry, indices, policies), verifier); + Analyzer analyzer = new Analyzer( + new AnalyzerContext(configuration, functionRegistry, indices, lookupIndices, policies), + verifier + ); var plan = analyzer.analyze(parsed); plan.setAnalyzed(); LOGGER.debug("Analyzed plan:\n{}", plan); @@ -285,7 +290,7 @@ public void analyzedPlan(LogicalPlan parsed, EsqlExecutionInfo executionInfo, Ac private void preAnalyze( LogicalPlan parsed, EsqlExecutionInfo executionInfo, - BiFunction action, + TriFunction action, ActionListener listener ) { PreAnalyzer.PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed); @@ -299,63 +304,81 @@ private void preAnalyze( ).keySet(); enrichPolicyResolver.resolvePolicies(targetClusters, unresolvedPolicies, listener.delegateFailureAndWrap((l, enrichResolution) -> { // first we need the match_fields names from enrich policies and THEN, with an updated list of fields, we call field_caps API - var matchFields = enrichResolution.resolvedEnrichPolicies() + var enrichMatchFields = enrichResolution.resolvedEnrichPolicies() .stream() .map(ResolvedEnrichPolicy::matchField) .collect(Collectors.toSet()); - Map unavailableClusters = enrichResolution.getUnavailableClusters(); - preAnalyzeIndices(parsed, executionInfo, unavailableClusters, l.delegateFailureAndWrap((ll, indexResolution) -> { - // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid index - // resolution to updateExecutionInfo - if (indexResolution.isValid()) { - EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); - EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.unavailableClusters()); - if (executionInfo.isCrossClusterSearch() - && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { - // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel - // Exception to let the LogicalPlanActionListener decide how to proceed - ll.onFailure(new NoClustersToSearchException()); - return; - } - - Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( - indexResolution.get().concreteIndices().toArray(String[]::new) - ).keySet(); - // If new clusters appear when resolving the main indices, we need to resolve the enrich policies again - // or exclude main concrete indices. Since this is rare, it's simpler to resolve the enrich policies again. 
- // TODO: add a test for this - if (targetClusters.containsAll(newClusters) == false - // do not bother with a re-resolution if only remotes were requested and all were offline - && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) > 0) { - enrichPolicyResolver.resolvePolicies( - newClusters, - unresolvedPolicies, - ll.map(newEnrichResolution -> action.apply(indexResolution, newEnrichResolution)) - ); - return; - } - } - ll.onResponse(action.apply(indexResolution, enrichResolution)); - }), matchFields); + // get the field names from the parsed plan combined with the ENRICH match fields from the ENRICH policy + var fieldNames = fieldNames(parsed, enrichMatchFields); + // First resolve the lookup indices, then the main indices + preAnalyzeLookupIndices( + preAnalysis.lookupIndices, + Set.of("*"), // Current LOOKUP JOIN syntax does not allow for field selection + l.delegateFailureAndWrap( + (lx, lookupIndexResolution) -> preAnalyzeIndices( + indices, + executionInfo, + enrichResolution.getUnavailableClusters(), + fieldNames, + lx.delegateFailureAndWrap((ll, indexResolution) -> { + // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid + // index resolution to updateExecutionInfo + if (indexResolution.isValid()) { + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters( + executionInfo, + indexResolution.unavailableClusters() + ); + if (executionInfo.isCrossClusterSearch() + && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { + // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel + // Exception to let the LogicalPlanActionListener decide how to proceed + ll.onFailure(new NoClustersToSearchException()); + return; + } + + Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( + indexResolution.get().concreteIndices().toArray(String[]::new) + ).keySet(); + // If new clusters appear when resolving the main indices, we need to resolve the enrich policies again + // or exclude main concrete indices. Since this is rare, it's simpler to resolve the enrich policies + // again. 
+ // TODO: add a test for this + if (targetClusters.containsAll(newClusters) == false + // do not bother with a re-resolution if only remotes were requested and all were offline + && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) > 0) { + enrichPolicyResolver.resolvePolicies( + newClusters, + unresolvedPolicies, + ll.map( + newEnrichResolution -> action.apply(indexResolution, lookupIndexResolution, newEnrichResolution) + ) + ); + return; + } + } + ll.onResponse(action.apply(indexResolution, lookupIndexResolution, enrichResolution)); + }) + ) + ) + ); })); } private void preAnalyzeIndices( - LogicalPlan parsed, + List indices, EsqlExecutionInfo executionInfo, Map unavailableClusters, // known to be unavailable from the enrich policy API call - ActionListener listener, - Set enrichPolicyMatchFields + Set fieldNames, + ActionListener listener ) { - PreAnalyzer.PreAnalysis preAnalysis = new PreAnalyzer().preAnalyze(parsed); // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one - if (preAnalysis.indices.size() > 1) { + if (indices.size() > 1) { // Note: JOINs are not supported but we detect them when listener.onFailure(new MappingException("Queries with multiple indices are not supported")); - } else if (preAnalysis.indices.size() == 1) { - TableInfo tableInfo = preAnalysis.indices.get(0); + } else if (indices.size() == 1) { + TableInfo tableInfo = indices.get(0); TableIdentifier table = tableInfo.id(); - var fieldNames = fieldNames(parsed, enrichPolicyMatchFields); Map clusterIndices = indicesExpressionGrouper.groupIndices(IndicesOptions.DEFAULT, table.index()); for (Map.Entry entry : clusterIndices.entrySet()) { @@ -401,6 +424,25 @@ private void preAnalyzeIndices( } } + private void preAnalyzeLookupIndices(List indices, Set fieldNames, ActionListener listener) { + if (indices.size() > 1) { + // Note: JOINs on more than one index are not yet supported + listener.onFailure(new MappingException("More than one LOOKUP JOIN is not supported")); + } else if (indices.size() == 1) { + TableInfo tableInfo = indices.get(0); + TableIdentifier table = tableInfo.id(); + // call the EsqlResolveFieldsAction (field-caps) to resolve indices and get field types + indexResolver.resolveAsMergedMapping(table.index(), fieldNames, listener); + } else { + try { + // No lookup indices specified + listener.onResponse(IndexResolution.invalid("[none specified]")); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + } + static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchFields) { if (false == parsed.anyMatch(plan -> plan instanceof Aggregate || plan instanceof Project)) { // no explicit columns selection, for example "from employees" @@ -422,6 +464,7 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF // "keep" attributes are special whenever a wildcard is used in their name // ie "from test | eval lang = languages + 1 | keep *l" should consider both "languages" and "*l" as valid fields to ask for AttributeSet keepCommandReferences = new AttributeSet(); + AttributeSet keepJoinReferences = new AttributeSet(); parsed.forEachDown(p -> {// go over each plan top-down if (p instanceof RegexExtract re) { // for Grok and Dissect @@ -438,6 +481,11 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF // The exact name of the field will be added later as part of enrichPolicyMatchFields Set enrichRefs.removeIf(attr -> attr instanceof EmptyAttribute); references.addAll(enrichRefs); + } else if (p 
instanceof LookupJoin join) { + keepJoinReferences.addAll(join.config().matchFields()); // TODO: why is this empty + if (join.config().type() instanceof JoinTypes.UsingJoinType usingJoinType) { + keepJoinReferences.addAll(usingJoinType.columns()); + } } else { references.addAll(p.references()); if (p instanceof UnresolvedRelation ur && ur.indexMode() == IndexMode.TIME_SERIES) { @@ -470,6 +518,8 @@ static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchF references.removeIf(attr -> matchByName(attr, alias.name(), keepCommandReferences.contains(attr))); }); }); + // Add JOIN ON column references afterward to avoid Alias removal + references.addAll(keepJoinReferences); // remove valid metadata attributes because they will be filtered out by the IndexResolver anyway // otherwise, in some edge cases, we will fail to ask for "*" (all fields) instead diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 857b7623043b..be4803160353 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -260,7 +260,7 @@ public final void test() throws Throwable { ); assumeFalse( "lookup join disabled for csv tests", - testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V3.capabilityName()) + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.JOIN_LOOKUP_V4.capabilityName()) ); if (Build.current().isSnapshot()) { assertThat( From dfbd526babfc024ee04e6d7fc4d728404590976d Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Thu, 5 Dec 2024 15:28:45 +0200 Subject: [PATCH 35/45] Smarter field caps with subscribable listener (#116755) (#118063) * Smarter field caps with subscribable listener (#116755) (cherry picked from commit 22f4a799377ea8710076ff10b74fbb48724a0c09) * Create the mapping explicitly, otherwise for 0 documents indices (#118015) the mapping will not contain the "value" field (cherry picked from commit 774c6ea174bdd866ad91c86ba779e1f2b0f8a27a) --- docs/changelog/116755.yaml | 5 + .../multi_node/RequestIndexFilteringIT.java | 27 ++ .../single_node/RequestIndexFilteringIT.java | 27 ++ .../rest/RequestIndexFilteringTestCase.java | 287 ++++++++++++++++ .../esql/qa/rest/RestEnrichTestCase.java | 176 +++++++++- .../esql/enrich/EnrichPolicyResolver.java | 2 +- .../xpack/esql/session/EsqlSession.java | 315 ++++++++++++------ .../xpack/esql/session/IndexResolver.java | 13 +- 8 files changed, 744 insertions(+), 108 deletions(-) create mode 100644 docs/changelog/116755.yaml create mode 100644 x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RequestIndexFilteringIT.java create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RequestIndexFilteringIT.java create mode 100644 x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java diff --git a/docs/changelog/116755.yaml b/docs/changelog/116755.yaml new file mode 100644 index 000000000000..3aa5ec8580b5 --- /dev/null +++ b/docs/changelog/116755.yaml @@ -0,0 +1,5 @@ +pr: 116755 +summary: Smarter field caps with subscribable listener +area: ES|QL +type: enhancement +issues: [] diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RequestIndexFilteringIT.java 
b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RequestIndexFilteringIT.java new file mode 100644 index 000000000000..c2ba502b9255 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/RequestIndexFilteringIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.RequestIndexFilteringTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class RequestIndexFilteringIT extends RequestIndexFilteringTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(ignored -> {}); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RequestIndexFilteringIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RequestIndexFilteringIT.java new file mode 100644 index 000000000000..f13bcd618f0a --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RequestIndexFilteringIT.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.RequestIndexFilteringTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class RequestIndexFilteringIT extends RequestIndexFilteringTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java new file mode 100644 index 000000000000..406997b66dbf --- /dev/null +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RequestIndexFilteringTestCase.java @@ -0,0 +1,287 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.rest; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.esql.AssertWarnings; +import org.junit.After; +import org.junit.Assert; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.test.ListMatcher.matchesList; +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.entityToMap; +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.requestObjectBuilder; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; + +public abstract class RequestIndexFilteringTestCase extends ESRestTestCase { + + @After + public void wipeTestData() throws IOException { + try { + var response = client().performRequest(new Request("DELETE", "/test*")); + assertEquals(200, response.getStatusLine().getStatusCode()); + } catch (ResponseException re) { + assertEquals(404, re.getResponse().getStatusLine().getStatusCode()); + } + } + + public void testTimestampFilterFromQuery() throws IOException { + int docsTest1 = 50; + int docsTest2 = 30; + indexTimestampData(docsTest1, "test1", "2024-11-26", "id1"); + indexTimestampData(docsTest2, "test2", "2023-11-26", "id2"); + + // filter includes both indices in the result (all columns, all rows) + RestEsqlTestCase.RequestObjectBuilder builder = timestampFilter("gte", "2023-01-01").query("FROM test*"); + Map result = runEsql(builder); + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "@timestamp").entry("type", "date")) + .item(matchesMap().entry("name", "id1").entry("type", "integer")) + .item(matchesMap().entry("name", "id2").entry("type", "integer")) + .item(matchesMap().entry("name", "value").entry("type", "long")) + ).entry("values", allOf(instanceOf(List.class), hasSize(docsTest1 + docsTest2))).entry("took", greaterThanOrEqualTo(0)) + ); + + // filter includes only test1. Columns from test2 are filtered out, as well (not only rows)! + builder = timestampFilter("gte", "2024-01-01").query("FROM test*"); + assertMap( + runEsql(builder), + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "@timestamp").entry("type", "date")) + .item(matchesMap().entry("name", "id1").entry("type", "integer")) + .item(matchesMap().entry("name", "value").entry("type", "long")) + ).entry("values", allOf(instanceOf(List.class), hasSize(docsTest1))).entry("took", greaterThanOrEqualTo(0)) + ); + + // filter excludes both indices (no rows); the first analysis step fails because there are no columns, a second attempt succeeds + // after eliminating the index filter. All columns are returned. 
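+            // (the retry without the index filter described here is implemented by EsqlSession#analyzeAndMaybeRetry,
+            // added in this patch's EsqlSession.java changes)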
+ builder = timestampFilter("gte", "2025-01-01").query("FROM test*"); + assertMap( + runEsql(builder), + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "@timestamp").entry("type", "date")) + .item(matchesMap().entry("name", "id1").entry("type", "integer")) + .item(matchesMap().entry("name", "id2").entry("type", "integer")) + .item(matchesMap().entry("name", "value").entry("type", "long")) + ).entry("values", allOf(instanceOf(List.class), hasSize(0))).entry("took", greaterThanOrEqualTo(0)) + ); + } + + public void testFieldExistsFilter_KeepWildcard() throws IOException { + int docsTest1 = randomIntBetween(0, 10); + int docsTest2 = randomIntBetween(0, 10); + indexTimestampData(docsTest1, "test1", "2024-11-26", "id1"); + indexTimestampData(docsTest2, "test2", "2023-11-26", "id2"); + + // filter includes only test1. Columns and rows of test2 are filtered out + RestEsqlTestCase.RequestObjectBuilder builder = existsFilter("id1").query("FROM test*"); + Map result = runEsql(builder); + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "@timestamp").entry("type", "date")) + .item(matchesMap().entry("name", "id1").entry("type", "integer")) + .item(matchesMap().entry("name", "value").entry("type", "long")) + ).entry("values", allOf(instanceOf(List.class), hasSize(docsTest1))).entry("took", greaterThanOrEqualTo(0)) + ); + + // filter includes only test1. Columns from test2 are filtered out, as well (not only rows)! + builder = existsFilter("id1").query("FROM test* METADATA _index | KEEP _index, id*"); + result = runEsql(builder); + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "_index").entry("type", "keyword")) + .item(matchesMap().entry("name", "id1").entry("type", "integer")) + ).entry("values", allOf(instanceOf(List.class), hasSize(docsTest1))).entry("took", greaterThanOrEqualTo(0)) + ); + @SuppressWarnings("unchecked") + var values = (List>) result.get("values"); + for (List row : values) { + assertThat(row.get(0), equalTo("test1")); + assertThat(row.get(1), instanceOf(Integer.class)); + } + } + + public void testFieldExistsFilter_With_ExplicitUseOfDiscardedIndexFields() throws IOException { + int docsTest1 = randomIntBetween(1, 5); + int docsTest2 = randomIntBetween(0, 5); + indexTimestampData(docsTest1, "test1", "2024-11-26", "id1"); + indexTimestampData(docsTest2, "test2", "2023-11-26", "id2"); + + // test2 is explicitly used in a query with "SORT id2" even if the index filter should discard test2 + RestEsqlTestCase.RequestObjectBuilder builder = existsFilter("id1").query( + "FROM test* METADATA _index | SORT id2 | KEEP _index, id*" + ); + Map result = runEsql(builder); + assertMap( + result, + matchesMap().entry( + "columns", + matchesList().item(matchesMap().entry("name", "_index").entry("type", "keyword")) + .item(matchesMap().entry("name", "id1").entry("type", "integer")) + .item(matchesMap().entry("name", "id2").entry("type", "integer")) + ).entry("values", allOf(instanceOf(List.class), hasSize(docsTest1))).entry("took", greaterThanOrEqualTo(0)) + ); + @SuppressWarnings("unchecked") + var values = (List>) result.get("values"); + for (List row : values) { + assertThat(row.get(0), equalTo("test1")); + assertThat(row.get(1), instanceOf(Integer.class)); + assertThat(row.get(2), nullValue()); + } + } + + public void testFieldNameTypo() throws IOException { + int docsTest1 = randomIntBetween(0, 5); + int docsTest2 = randomIntBetween(0, 5); + 
indexTimestampData(docsTest1, "test1", "2024-11-26", "id1"); + indexTimestampData(docsTest2, "test2", "2023-11-26", "id2"); + + // idx field name is explicitly used, though it doesn't exist in any of the indices. First test - without filter + ResponseException e = expectThrows( + ResponseException.class, + () -> runEsql(requestObjectBuilder().query("FROM test* | WHERE idx == 123")) + ); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("verification_exception")); + assertThat(e.getMessage(), containsString("Found 1 problem")); + assertThat(e.getMessage(), containsString("line 1:20: Unknown column [idx]")); + + e = expectThrows(ResponseException.class, () -> runEsql(requestObjectBuilder().query("FROM test1 | WHERE idx == 123"))); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("verification_exception")); + assertThat(e.getMessage(), containsString("Found 1 problem")); + assertThat(e.getMessage(), containsString("line 1:20: Unknown column [idx]")); + + e = expectThrows( + ResponseException.class, + () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM test* | WHERE idx == 123")) + ); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("Found 1 problem")); + assertThat(e.getMessage(), containsString("line 1:20: Unknown column [idx]")); + + e = expectThrows( + ResponseException.class, + () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM test2 | WHERE idx == 123")) + ); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("Found 1 problem")); + assertThat(e.getMessage(), containsString("line 1:20: Unknown column [idx]")); + } + + public void testIndicesDontExist() throws IOException { + int docsTest1 = 0; // we are interested only in the created index, not necessarily that it has data + indexTimestampData(docsTest1, "test1", "2024-11-26", "id1"); + + ResponseException e = expectThrows(ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM foo"))); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("verification_exception")); + assertThat(e.getMessage(), containsString("Unknown index [foo]")); + + e = expectThrows(ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM foo*"))); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("verification_exception")); + assertThat(e.getMessage(), containsString("Unknown index [foo*]")); + + e = expectThrows(ResponseException.class, () -> runEsql(timestampFilter("gte", "2020-01-01").query("FROM foo,test1"))); + assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("index_not_found_exception")); + assertThat(e.getMessage(), containsString("no such index [foo]")); + } + + private static RestEsqlTestCase.RequestObjectBuilder timestampFilter(String op, String date) throws IOException { + return requestObjectBuilder().filter(b -> { + b.startObject("range"); + { + b.startObject("@timestamp").field(op, date).endObject(); + } + b.endObject(); + }); + } + + private static RestEsqlTestCase.RequestObjectBuilder existsFilter(String field) throws IOException { + return requestObjectBuilder().filter(b -> b.startObject("exists").field("field", 
field).endObject()); + } + + public Map runEsql(RestEsqlTestCase.RequestObjectBuilder requestObject) throws IOException { + return RestEsqlTestCase.runEsql(requestObject, new AssertWarnings.NoWarnings(), RestEsqlTestCase.Mode.SYNC); + } + + protected void indexTimestampData(int docs, String indexName, String date, String differentiatorFieldName) throws IOException { + Request createIndex = new Request("PUT", indexName); + createIndex.setJsonEntity(""" + { + "settings": { + "index": { + "number_of_shards": 3 + } + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date" + }, + "value": { + "type": "long" + }, + "%differentiator_field_name%": { + "type": "integer" + } + } + } + }""".replace("%differentiator_field_name%", differentiatorFieldName)); + Response response = client().performRequest(createIndex); + assertThat( + entityToMap(response.getEntity(), XContentType.JSON), + matchesMap().entry("shards_acknowledged", true).entry("index", indexName).entry("acknowledged", true) + ); + + if (docs > 0) { + StringBuilder b = new StringBuilder(); + for (int i = 0; i < docs; i++) { + b.append(String.format(Locale.ROOT, """ + {"create":{"_index":"%s"}} + {"@timestamp":"%s","value":%d,"%s":%d} + """, indexName, date, i, differentiatorFieldName, i)); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.addParameter("filter_path", "errors"); + bulk.setJsonEntity(b.toString()); + response = client().performRequest(bulk); + Assert.assertEquals("{\"errors\":false}", EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8)); + } + } +} diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java index def6491fb920..bf4a4400e13c 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEnrichTestCase.java @@ -12,7 +12,9 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentBuilder; import org.junit.After; import org.junit.Before; @@ -29,7 +31,6 @@ public abstract class RestEnrichTestCase extends ESRestTestCase { private static final String sourceIndexName = "countries"; - private static final String testIndexName = "test"; private static final String policyName = "countries"; public enum Mode { @@ -56,7 +57,7 @@ public void assertRequestBreakerEmpty() throws Exception { @Before public void loadTestData() throws IOException { - Request request = new Request("PUT", "/" + testIndexName); + Request request = new Request("PUT", "/test1"); request.setJsonEntity(""" { "mappings": { @@ -72,7 +73,7 @@ public void loadTestData() throws IOException { }"""); assertEquals(200, client().performRequest(request).getStatusLine().getStatusCode()); - request = new Request("POST", "/" + testIndexName + "/_bulk"); + request = new Request("POST", "/test1/_bulk"); request.addParameter("refresh", "true"); request.setJsonEntity(""" { "index": {"_id": 1} } @@ -84,6 +85,34 @@ public void loadTestData() throws IOException { """); assertEquals(200, client().performRequest(request).getStatusLine().getStatusCode()); + request = new 
Request("PUT", "/test2"); + request.setJsonEntity(""" + { + "mappings": { + "properties": { + "geo.dest": { + "type": "keyword" + }, + "country_number": { + "type": "long" + } + } + } + }"""); + assertEquals(200, client().performRequest(request).getStatusLine().getStatusCode()); + + request = new Request("POST", "/test2/_bulk"); + request.addParameter("refresh", "true"); + request.setJsonEntity(""" + { "index": {"_id": 1} } + { "geo.dest": "IN", "country_number": 2 } + { "index": {"_id": 2} } + { "geo.dest": "IN", "country_number": 2 } + { "index": {"_id": 3} } + { "geo.dest": "US", "country_number": 3 } + """); + assertEquals(200, client().performRequest(request).getStatusLine().getStatusCode()); + request = new Request("PUT", "/" + sourceIndexName); request.setJsonEntity(""" { @@ -131,7 +160,7 @@ public void loadTestData() throws IOException { @After public void wipeTestData() throws IOException { try { - var response = client().performRequest(new Request("DELETE", "/" + testIndexName)); + var response = client().performRequest(new Request("DELETE", "/test1,test2")); assertEquals(200, response.getStatusLine().getStatusCode()); response = client().performRequest(new Request("DELETE", "/" + sourceIndexName)); assertEquals(200, response.getStatusLine().getStatusCode()); @@ -143,7 +172,7 @@ public void wipeTestData() throws IOException { } public void testNonExistentEnrichPolicy() throws IOException { - ResponseException re = expectThrows(ResponseException.class, () -> runEsql("from test | enrich countris", Mode.SYNC)); + ResponseException re = expectThrows(ResponseException.class, () -> runEsql("from test1 | enrich countris", null, Mode.SYNC)); assertThat( EntityUtils.toString(re.getResponse().getEntity()), containsString("cannot find enrich policy [countris], did you mean [countries]?") @@ -151,7 +180,10 @@ public void testNonExistentEnrichPolicy() throws IOException { } public void testNonExistentEnrichPolicy_KeepField() throws IOException { - ResponseException re = expectThrows(ResponseException.class, () -> runEsql("from test | enrich countris | keep number", Mode.SYNC)); + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql("from test1 | enrich countris | keep number", null, Mode.SYNC) + ); assertThat( EntityUtils.toString(re.getResponse().getEntity()), containsString("cannot find enrich policy [countris], did you mean [countries]?") @@ -159,25 +191,147 @@ public void testNonExistentEnrichPolicy_KeepField() throws IOException { } public void testMatchField_ImplicitFieldsList() throws IOException { - Map result = runEsql("from test | enrich countries | keep number | sort number"); + Map result = runEsql("from test1 | enrich countries | keep number | sort number"); var columns = List.of(Map.of("name", "number", "type", "long")); var values = List.of(List.of(1000), List.of(1000), List.of(5000)); assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0))); } public void testMatchField_ImplicitFieldsList_WithStats() throws IOException { - Map result = runEsql("from test | enrich countries | stats s = sum(number) by country_name"); + Map result = runEsql("from test1 | enrich countries | stats s = sum(number) by country_name"); var columns = List.of(Map.of("name", "s", "type", "long"), Map.of("name", "country_name", "type", "keyword")); var values = List.of(List.of(2000, "United States of America"), List.of(5000, "China")); assertMap(result, matchesMap().entry("columns", columns).entry("values", 
values).entry("took", greaterThanOrEqualTo(0))); } + public void testSimpleIndexFilteringWithEnrich() throws IOException { + // no filter + Map result = runEsql(""" + from test* metadata _index + | enrich countries + | keep *number, geo.dest, _index + | sort geo.dest, _index + """); + var columns = List.of( + Map.of("name", "country_number", "type", "long"), + Map.of("name", "number", "type", "long"), + Map.of("name", "geo.dest", "type", "keyword"), + Map.of("name", "_index", "type", "keyword") + ); + var values = List.of( + Arrays.asList(null, 5000, "CN", "test1"), + Arrays.asList(2, null, "IN", "test2"), + Arrays.asList(2, null, "IN", "test2"), + Arrays.asList(null, 1000, "US", "test1"), + Arrays.asList(null, 1000, "US", "test1"), + Arrays.asList(3, null, "US", "test2") + ); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0))); + + // filter something that won't affect the columns + result = runEsql(""" + from test* metadata _index + | enrich countries + | keep *number, geo.dest, _index + | sort geo.dest, _index + """, b -> b.startObject("exists").field("field", "foobar").endObject()); + assertMap(result, matchesMap().entry("columns", columns).entry("values", List.of()).entry("took", greaterThanOrEqualTo(0))); + } + + public void testIndexFilteringWithEnrich_RemoveOneIndex() throws IOException { + // filter out test2 but specifically use one of its fields in the query (country_number) + Map result = runEsql(""" + from test* metadata _index + | enrich countries + | keep country_number, number, geo.dest, _index + | sort geo.dest, _index + """, b -> b.startObject("exists").field("field", "number").endObject()); + + var columns = List.of( + Map.of("name", "country_number", "type", "long"), + Map.of("name", "number", "type", "long"), + Map.of("name", "geo.dest", "type", "keyword"), + Map.of("name", "_index", "type", "keyword") + ); + var values = List.of( + Arrays.asList(null, 5000, "CN", "test1"), + Arrays.asList(null, 1000, "US", "test1"), + Arrays.asList(null, 1000, "US", "test1") + ); + + assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0))); + + // filter out test2 and use a wildcarded field name in the "keep" command + result = runEsql(""" + from test* metadata _index + | enrich countries + | keep *number, geo.dest, _index + | sort geo.dest, _index + """, b -> b.startObject("exists").field("field", "number").endObject()); + + columns = List.of( + Map.of("name", "number", "type", "long"), + Map.of("name", "geo.dest", "type", "keyword"), + Map.of("name", "_index", "type", "keyword") + ); + values = List.of(Arrays.asList(5000, "CN", "test1"), Arrays.asList(1000, "US", "test1"), Arrays.asList(1000, "US", "test1")); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0))); + } + + public void testIndexFilteringWithEnrich_ExpectException() throws IOException { + // no filter, just a simple query with "enrich" that should throw a valid VerificationException + ResponseException e = expectThrows(ResponseException.class, () -> runEsql(""" + from test* metadata _index + | enrich countries + | where foobar == 123 + """)); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("Found 1 problem")); + assertThat(e.getMessage(), containsString("line 3:13: Unknown column [foobar]")); + + // same query, but with a filter this time + e = 
expectThrows(ResponseException.class, () -> runEsql(""" + from test* metadata _index + | enrich countries + | where foobar == 123 + """, b -> b.startObject("exists").field("field", "number").endObject())); + assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("Found 1 problem")); + assertThat(e.getMessage(), containsString("line 3:13: Unknown column [foobar]")); + } + + public void testIndexFilteringWithEnrich_FilterUnusedIndexFields() throws IOException { + // filter out "test1". The field that is specific to "test1" ("number") is not actually used in the query + Map result = runEsql(""" + from test* metadata _index + | enrich countries + | keep country_number, geo.dest, _index + | sort geo.dest, _index + """, b -> b.startObject("exists").field("field", "country_number").endObject()); + + var columns = List.of( + Map.of("name", "country_number", "type", "long"), + Map.of("name", "geo.dest", "type", "keyword"), + Map.of("name", "_index", "type", "keyword") + ); + var values = List.of(Arrays.asList(2, "IN", "test2"), Arrays.asList(2, "IN", "test2"), Arrays.asList(3, "US", "test2")); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values).entry("took", greaterThanOrEqualTo(0))); + } + private Map runEsql(String query) throws IOException { - return runEsql(query, mode); + return runEsql(query, null, mode); } - private Map runEsql(String query, Mode mode) throws IOException { - var requestObject = new RestEsqlTestCase.RequestObjectBuilder().query(query); + private Map runEsql(String query, CheckedConsumer filter) throws IOException { + return runEsql(query, filter, mode); + } + + private Map runEsql(String query, CheckedConsumer filter, Mode mode) throws IOException { + var requestObject = new RestEsqlTestCase.RequestObjectBuilder(); + if (filter != null) { + requestObject.filter(filter); + } + requestObject.query(query); if (mode == Mode.ASYNC) { return RestEsqlTestCase.runEsqlAsync(requestObject); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index c8a7a6bcc4e9..c8e993b7dbf0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -411,7 +411,7 @@ public void messageReceived(LookupRequest request, TransportChannel channel, Tas } try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { String indexName = EnrichPolicy.getBaseName(policyName); - indexResolver.resolveAsMergedMapping(indexName, IndexResolver.ALL_FIELDS, refs.acquire(indexResult -> { + indexResolver.resolveAsMergedMapping(indexName, IndexResolver.ALL_FIELDS, null, refs.acquire(indexResult -> { if (indexResult.isValid() && indexResult.get().concreteIndices().size() == 1) { EsIndex esIndex = indexResult.get(); var concreteIndices = Map.of(request.clusterAlias, Iterables.get(esIndex.concreteIndices(), 0)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 3d1ed8f70eae..71fba5683644 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.collect.Iterators; @@ -25,6 +26,7 @@ import org.elasticsearch.indices.IndicesExpressionGrouper; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.xpack.esql.VerificationException; import org.elasticsearch.xpack.esql.action.EsqlExecutionInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; @@ -151,6 +153,7 @@ public void execute(EsqlQueryRequest request, EsqlExecutionInfo executionInfo, P analyzedPlan( parse(request.query(), request.params()), executionInfo, + request.filter(), new EsqlSessionCCSUtils.CssPartialErrorsActionListener(executionInfo, listener) { @Override public void onResponse(LogicalPlan analyzedPlan) { @@ -268,31 +271,28 @@ private LogicalPlan parse(String query, QueryParams params) { return parsed; } - public void analyzedPlan(LogicalPlan parsed, EsqlExecutionInfo executionInfo, ActionListener listener) { + public void analyzedPlan( + LogicalPlan parsed, + EsqlExecutionInfo executionInfo, + QueryBuilder requestFilter, + ActionListener logicalPlanListener + ) { if (parsed.analyzed()) { - listener.onResponse(parsed); + logicalPlanListener.onResponse(parsed); return; } - preAnalyze(parsed, executionInfo, (indices, lookupIndices, policies) -> { + TriFunction analyzeAction = (indices, lookupIndices, policies) -> { planningMetrics.gatherPreAnalysisMetrics(parsed); Analyzer analyzer = new Analyzer( new AnalyzerContext(configuration, functionRegistry, indices, lookupIndices, policies), verifier ); - var plan = analyzer.analyze(parsed); + LogicalPlan plan = analyzer.analyze(parsed); plan.setAnalyzed(); - LOGGER.debug("Analyzed plan:\n{}", plan); return plan; - }, listener); - } + }; - private void preAnalyze( - LogicalPlan parsed, - EsqlExecutionInfo executionInfo, - TriFunction action, - ActionListener listener - ) { PreAnalyzer.PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed); var unresolvedPolicies = preAnalysis.enriches.stream() .map(e -> new EnrichPolicyResolver.UnresolvedPolicy((String) e.policyName().fold(), e.mode())) @@ -302,81 +302,113 @@ private void preAnalyze( final Set targetClusters = enrichPolicyResolver.groupIndicesPerCluster( indices.stream().flatMap(t -> Arrays.stream(Strings.commaDelimitedListToStringArray(t.id().index()))).toArray(String[]::new) ).keySet(); - enrichPolicyResolver.resolvePolicies(targetClusters, unresolvedPolicies, listener.delegateFailureAndWrap((l, enrichResolution) -> { - // first we need the match_fields names from enrich policies and THEN, with an updated list of fields, we call field_caps API - var enrichMatchFields = enrichResolution.resolvedEnrichPolicies() - .stream() - .map(ResolvedEnrichPolicy::matchField) - .collect(Collectors.toSet()); - // get the field names from the parsed plan combined with the ENRICH match fields from the ENRICH policy - var fieldNames = fieldNames(parsed, enrichMatchFields); - // First resolve the lookup indices, then the main indices - preAnalyzeLookupIndices( - preAnalysis.lookupIndices, + + SubscribableListener.newForked(l -> 
enrichPolicyResolver.resolvePolicies(targetClusters, unresolvedPolicies, l)) + .andThen((l, enrichResolution) -> { + // we need the match_fields names from enrich policies and THEN, with an updated list of fields, we call field_caps API + var enrichMatchFields = enrichResolution.resolvedEnrichPolicies() + .stream() + .map(ResolvedEnrichPolicy::matchField) + .collect(Collectors.toSet()); + // get the field names from the parsed plan combined with the ENRICH match fields from the ENRICH policy + var fieldNames = fieldNames(parsed, enrichMatchFields); + ListenerResult listenerResult = new ListenerResult(null, null, enrichResolution, fieldNames); + + // first resolve the lookup indices, then the main indices + preAnalyzeLookupIndices(preAnalysis.lookupIndices, listenerResult, l); + }) + .andThen((l, listenerResult) -> { + // resolve the main indices + preAnalyzeIndices(preAnalysis.indices, executionInfo, listenerResult, requestFilter, l); + }) + .andThen((l, listenerResult) -> { + // TODO in follow-PR (for skip_unavailable handling of missing concrete indexes) add some tests for + // invalid index resolution to updateExecutionInfo + if (listenerResult.indices.isValid()) { + // CCS indices and skip_unavailable cluster values can stop the analysis right here + if (analyzeCCSIndices(executionInfo, targetClusters, unresolvedPolicies, listenerResult, logicalPlanListener, l)) + return; + } + // whatever tuple we have here (from CCS-special handling or from the original pre-analysis), pass it on to the next step + l.onResponse(listenerResult); + }) + .andThen((l, listenerResult) -> { + // first attempt (maybe the only one) at analyzing the plan + analyzeAndMaybeRetry(analyzeAction, requestFilter, listenerResult, logicalPlanListener, l); + }) + .andThen((l, listenerResult) -> { + assert requestFilter != null : "The second pre-analysis shouldn't take place when there is no index filter in the request"; + + // "reset" execution information for all ccs or non-ccs (local) clusters, since we are performing the indices + // resolving one more time (the first attempt failed and the query had a filter) + for (String clusterAlias : executionInfo.clusterAliases()) { + executionInfo.swapCluster(clusterAlias, (k, v) -> null); + } + + // here the requestFilter is set to null, performing the pre-analysis after the first step failed + preAnalyzeIndices(preAnalysis.indices, executionInfo, listenerResult, null, l); + }) + .andThen((l, listenerResult) -> { + assert requestFilter != null : "The second analysis shouldn't take place when there is no index filter in the request"; + LOGGER.debug("Analyzing the plan (second attempt, without filter)"); + LogicalPlan plan; + try { + plan = analyzeAction.apply(listenerResult.indices, listenerResult.lookupIndices, listenerResult.enrichResolution); + } catch (Exception e) { + l.onFailure(e); + return; + } + LOGGER.debug("Analyzed plan (second attempt, without filter):\n{}", plan); + l.onResponse(plan); + }) + .addListener(logicalPlanListener); + } + + private void preAnalyzeLookupIndices(List indices, ListenerResult listenerResult, ActionListener listener) { + if (indices.size() > 1) { + // Note: JOINs on more than one index are not yet supported + listener.onFailure(new MappingException("More than one LOOKUP JOIN is not supported")); + } else if (indices.size() == 1) { + TableInfo tableInfo = indices.get(0); + TableIdentifier table = tableInfo.id(); + // call the EsqlResolveFieldsAction (field-caps) to resolve indices and get field types + indexResolver.resolveAsMergedMapping( + 
table.index(), Set.of("*"), // Current LOOKUP JOIN syntax does not allow for field selection - l.delegateFailureAndWrap( - (lx, lookupIndexResolution) -> preAnalyzeIndices( - indices, - executionInfo, - enrichResolution.getUnavailableClusters(), - fieldNames, - lx.delegateFailureAndWrap((ll, indexResolution) -> { - // TODO in follow-PR (for skip_unavailble handling of missing concrete indexes) add some tests for invalid - // index resolution to updateExecutionInfo - if (indexResolution.isValid()) { - EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); - EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters( - executionInfo, - indexResolution.unavailableClusters() - ); - if (executionInfo.isCrossClusterSearch() - && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { - // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel - // Exception to let the LogicalPlanActionListener decide how to proceed - ll.onFailure(new NoClustersToSearchException()); - return; - } - - Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( - indexResolution.get().concreteIndices().toArray(String[]::new) - ).keySet(); - // If new clusters appear when resolving the main indices, we need to resolve the enrich policies again - // or exclude main concrete indices. Since this is rare, it's simpler to resolve the enrich policies - // again. - // TODO: add a test for this - if (targetClusters.containsAll(newClusters) == false - // do not bother with a re-resolution if only remotes were requested and all were offline - && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) > 0) { - enrichPolicyResolver.resolvePolicies( - newClusters, - unresolvedPolicies, - ll.map( - newEnrichResolution -> action.apply(indexResolution, lookupIndexResolution, newEnrichResolution) - ) - ); - return; - } - } - ll.onResponse(action.apply(indexResolution, lookupIndexResolution, enrichResolution)); - }) - ) - ) + null, + listener.map(indexResolution -> listenerResult.withLookupIndexResolution(indexResolution)) ); - })); + } else { + try { + // No lookup indices specified + listener.onResponse( + new ListenerResult( + listenerResult.indices, + IndexResolution.invalid("[none specified]"), + listenerResult.enrichResolution, + listenerResult.fieldNames + ) + ); + } catch (Exception ex) { + listener.onFailure(ex); + } + } } private void preAnalyzeIndices( List indices, EsqlExecutionInfo executionInfo, - Map unavailableClusters, // known to be unavailable from the enrich policy API call - Set fieldNames, - ActionListener listener + ListenerResult listenerResult, + QueryBuilder requestFilter, + ActionListener listener ) { // TODO we plan to support joins in the future when possible, but for now we'll just fail early if we see one if (indices.size() > 1) { // Note: JOINs are not supported but we detect them when listener.onFailure(new MappingException("Queries with multiple indices are not supported")); } else if (indices.size() == 1) { + // known to be unavailable from the enrich policy API call + Map unavailableClusters = listenerResult.enrichResolution.getUnavailableClusters(); TableInfo tableInfo = indices.get(0); TableIdentifier table = tableInfo.id(); @@ -409,38 +441,116 @@ private void preAnalyzeIndices( String indexExpressionToResolve = EsqlSessionCCSUtils.createIndexExpressionFromAvailableClusters(executionInfo); if (indexExpressionToResolve.isEmpty()) { // if this was a pure 
remote CCS request (no local indices) and all remotes are offline, return an empty IndexResolution - listener.onResponse(IndexResolution.valid(new EsIndex(table.index(), Map.of(), Map.of()))); + listener.onResponse( + new ListenerResult( + IndexResolution.valid(new EsIndex(table.index(), Map.of(), Map.of())), + listenerResult.lookupIndices, + listenerResult.enrichResolution, + listenerResult.fieldNames + ) + ); } else { // call the EsqlResolveFieldsAction (field-caps) to resolve indices and get field types - indexResolver.resolveAsMergedMapping(indexExpressionToResolve, fieldNames, listener); + indexResolver.resolveAsMergedMapping( + indexExpressionToResolve, + listenerResult.fieldNames, + requestFilter, + listener.map(indexResolution -> listenerResult.withIndexResolution(indexResolution)) + ); } } else { try { // occurs when dealing with local relations (row a = 1) - listener.onResponse(IndexResolution.invalid("[none specified]")); + listener.onResponse( + new ListenerResult( + IndexResolution.invalid("[none specified]"), + listenerResult.lookupIndices, + listenerResult.enrichResolution, + listenerResult.fieldNames + ) + ); } catch (Exception ex) { listener.onFailure(ex); } } } - private void preAnalyzeLookupIndices(List indices, Set fieldNames, ActionListener listener) { - if (indices.size() > 1) { - // Note: JOINs on more than one index are not yet supported - listener.onFailure(new MappingException("More than one LOOKUP JOIN is not supported")); - } else if (indices.size() == 1) { - TableInfo tableInfo = indices.get(0); - TableIdentifier table = tableInfo.id(); - // call the EsqlResolveFieldsAction (field-caps) to resolve indices and get field types - indexResolver.resolveAsMergedMapping(table.index(), fieldNames, listener); - } else { - try { - // No lookup indices specified - listener.onResponse(IndexResolution.invalid("[none specified]")); - } catch (Exception ex) { - listener.onFailure(ex); + private boolean analyzeCCSIndices( + EsqlExecutionInfo executionInfo, + Set targetClusters, + Set unresolvedPolicies, + ListenerResult listenerResult, + ActionListener logicalPlanListener, + ActionListener l + ) { + IndexResolution indexResolution = listenerResult.indices; + EsqlSessionCCSUtils.updateExecutionInfoWithClustersWithNoMatchingIndices(executionInfo, indexResolution); + EsqlSessionCCSUtils.updateExecutionInfoWithUnavailableClusters(executionInfo, indexResolution.unavailableClusters()); + if (executionInfo.isCrossClusterSearch() && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) == 0) { + // for a CCS, if all clusters have been marked as SKIPPED, nothing to search so send a sentinel Exception + // to let the LogicalPlanActionListener decide how to proceed + logicalPlanListener.onFailure(new NoClustersToSearchException()); + return true; + } + + Set newClusters = enrichPolicyResolver.groupIndicesPerCluster( + indexResolution.get().concreteIndices().toArray(String[]::new) + ).keySet(); + // If new clusters appear when resolving the main indices, we need to resolve the enrich policies again + // or exclude main concrete indices. Since this is rare, it's simpler to resolve the enrich policies again. 
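+        // When a re-resolution is needed, this method returns true and the resolvePolicies callback below
+        // resumes the listener chain with the updated enrich resolution (via ListenerResult#withEnrichResolution).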
+ // TODO: add a test for this + if (targetClusters.containsAll(newClusters) == false + // do not bother with a re-resolution if only remotes were requested and all were offline + && executionInfo.getClusterStateCount(EsqlExecutionInfo.Cluster.Status.RUNNING) > 0) { + enrichPolicyResolver.resolvePolicies( + newClusters, + unresolvedPolicies, + l.map(enrichResolution -> listenerResult.withEnrichResolution(enrichResolution)) + ); + return true; + } + return false; + } + + private static void analyzeAndMaybeRetry( + TriFunction analyzeAction, + QueryBuilder requestFilter, + ListenerResult listenerResult, + ActionListener logicalPlanListener, + ActionListener l + ) { + LogicalPlan plan = null; + var filterPresentMessage = requestFilter == null ? "without" : "with"; + var attemptMessage = requestFilter == null ? "the only" : "first"; + LOGGER.debug("Analyzing the plan ({} attempt, {} filter)", attemptMessage, filterPresentMessage); + + try { + plan = analyzeAction.apply(listenerResult.indices, listenerResult.lookupIndices, listenerResult.enrichResolution); + } catch (Exception e) { + if (e instanceof VerificationException ve) { + LOGGER.debug( + "Analyzing the plan ({} attempt, {} filter) failed with {}", + attemptMessage, + filterPresentMessage, + ve.getDetailedMessage() + ); + if (requestFilter == null) { + // if the initial request didn't have a filter, then just pass the exception back to the user + logicalPlanListener.onFailure(ve); + } else { + // interested only in a VerificationException, but this time we are taking out the index filter + // to try and make the index resolution work without any index filtering. In the next step... to be continued + l.onResponse(listenerResult); + } + } else { + // if the query failed with any other type of exception, then just pass the exception back to the user + logicalPlanListener.onFailure(e); } + return; } + LOGGER.debug("Analyzed plan ({} attempt, {} filter):\n{}", attemptMessage, filterPresentMessage, plan); + // the analysis succeeded from the first attempt, irrespective if it had a filter or not, just continue with the planning + logicalPlanListener.onResponse(plan); } static Set fieldNames(LogicalPlan parsed, Set enrichPolicyMatchFields) { @@ -591,4 +701,23 @@ public PhysicalPlan optimizedPhysicalPlan(LogicalPlan optimizedPlan) { LOGGER.debug("Optimized physical plan:\n{}", plan); return plan; } + + private record ListenerResult( + IndexResolution indices, + IndexResolution lookupIndices, + EnrichResolution enrichResolution, + Set fieldNames + ) { + ListenerResult withEnrichResolution(EnrichResolution newEnrichResolution) { + return new ListenerResult(indices(), lookupIndices(), newEnrichResolution, fieldNames()); + } + + ListenerResult withIndexResolution(IndexResolution newIndexResolution) { + return new ListenerResult(newIndexResolution, lookupIndices(), enrichResolution(), fieldNames()); + } + + ListenerResult withLookupIndexResolution(IndexResolution newIndexResolution) { + return new ListenerResult(indices(), newIndexResolution, enrichResolution(), fieldNames()); + } + }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java index f61be4b59830..d000b2765e2b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/IndexResolver.java @@ -18,6 +18,7 @@ import 
org.elasticsearch.common.util.Maps; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.TimeSeriesParams; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.esql.action.EsqlResolveFieldsAction; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -76,10 +77,15 @@ public IndexResolver(Client client) { /** * Resolves a pattern to one (potentially compound meaning that spawns multiple indices) mapping. */ - public void resolveAsMergedMapping(String indexWildcard, Set fieldNames, ActionListener listener) { + public void resolveAsMergedMapping( + String indexWildcard, + Set fieldNames, + QueryBuilder requestFilter, + ActionListener listener + ) { client.execute( EsqlResolveFieldsAction.TYPE, - createFieldCapsRequest(indexWildcard, fieldNames), + createFieldCapsRequest(indexWildcard, fieldNames, requestFilter), listener.delegateFailureAndWrap((l, response) -> l.onResponse(mergedMappings(indexWildcard, response))) ); } @@ -252,10 +258,11 @@ private EsField conflictingMetricTypes(String name, String fullName, FieldCapabi return new InvalidMappedField(name, "mapped as different metric types in indices: " + indices); } - private static FieldCapabilitiesRequest createFieldCapsRequest(String index, Set fieldNames) { + private static FieldCapabilitiesRequest createFieldCapsRequest(String index, Set fieldNames, QueryBuilder requestFilter) { FieldCapabilitiesRequest req = new FieldCapabilitiesRequest().indices(Strings.commaDelimitedListToStringArray(index)); req.fields(fieldNames.toArray(String[]::new)); req.includeUnmapped(true); + req.indexFilter(requestFilter); // lenient because we throw our own errors looking at the response e.g. if something was not resolved // also because this way security doesn't throw authorization exceptions but rather honors ignore_unavailable req.indicesOptions(FIELD_CAPS_INDICES_OPTIONS); From ceeafcc0937c4c8b0e2ea4ac48fa93fed872df8d Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Thu, 5 Dec 2024 16:33:11 +0100 Subject: [PATCH 36/45] Remove bucketOrd field from InternalTerms and friends (#118044) (#118074) The field bucketOrd is only used for building the aggregation but has no use after that. 
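To make the shape of the refactor concrete, here is a minimal, self-contained sketch of the pattern the
aggregators move to. It uses plain JDK types only: the Bucket record, the BucketAndOrdSketch class name and
java.util.PriorityQueue are illustrative stand-ins for InternalTerms.Bucket, the aggregators and
ObjectArrayPriorityQueue, not the actual implementation. The ordinal rides next to the bucket in a short-lived
BucketAndOrd holder while the top buckets are selected, is copied into a parallel array that only the
sub-aggregation build step reads, and never becomes a field of the bucket itself.

    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    public class BucketAndOrdSketch {
        // Hypothetical stand-in for InternalTerms.Bucket; note that it carries no ordinal field.
        record Bucket(String term, long docCount) {}

        // Short-lived pairing of a bucket with its ordinal, alive only while results are built.
        static final class BucketAndOrd<B> {
            final B bucket;
            final long ord;

            BucketAndOrd(B bucket, long ord) {
                this.bucket = bucket;
                this.ord = ord;
            }
        }

        public static void main(String[] args) {
            String[] terms = { "a", "b", "c", "d" };
            long[] docCounts = { 7, 42, 3, 19 }; // indexed by bucket ordinal
            int topN = 2;

            // Min-heap on doc count: the head is always the weakest current candidate.
            Comparator<BucketAndOrd<Bucket>> byDocCount = Comparator.comparingLong(b -> b.bucket.docCount());
            PriorityQueue<BucketAndOrd<Bucket>> queue = new PriorityQueue<>(byDocCount);
            for (int ord = 0; ord < terms.length; ord++) {
                queue.add(new BucketAndOrd<>(new Bucket(terms[ord], docCounts[ord]), ord));
                if (queue.size() > topN) {
                    queue.poll(); // evict the smallest of the topN + 1 candidates
                }
            }

            // Drain the heap: buckets go to the result array, ordinals go to a parallel
            // array that only the sub-aggregation build step consumes.
            Bucket[] topBuckets = new Bucket[queue.size()];
            long[] ordsForSubAggs = new long[topBuckets.length];
            for (int i = topBuckets.length - 1; i >= 0; i--) {
                BucketAndOrd<Bucket> top = queue.poll();
                topBuckets[i] = top.bucket;
                ordsForSubAggs[i] = top.ord;
            }
            // Prints [Bucket[term=b, docCount=42], Bucket[term=d, docCount=19]] ords=[1, 3]
            System.out.println(List.of(topBuckets) + " ords=" + Arrays.toString(ordsForSubAggs));
        }
    }

The upshot is that buckets kept after the build phase are a field smaller and there is no stale ordinal left
around to misuse later, which is what removing bucketOrd buys.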
--- .../search/aggregations/BucketOrder.java | 8 +- .../search/aggregations/InternalOrder.java | 21 +-- .../countedterms/CountedTermsAggregator.java | 89 ++++++----- .../bucket/terms/BucketPriorityQueue.java | 8 +- .../BucketSignificancePriorityQueue.java | 6 +- .../GlobalOrdinalsStringTermsAggregator.java | 144 +++++++++++------- .../terms/InternalSignificantTerms.java | 15 +- .../bucket/terms/InternalTerms.java | 10 -- .../terms/MapStringTermsAggregator.java | 103 +++++++------ .../bucket/terms/NumericTermsAggregator.java | 115 ++++++++------ .../bucket/terms/TermsAggregator.java | 6 +- .../bucket/terms/TermsAggregatorFactory.java | 6 +- .../multiterms/InternalMultiTerms.java | 3 - .../multiterms/MultiTermsAggregator.java | 103 +++++++------ 14 files changed, 355 insertions(+), 282 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java index 2d360705f75b..c412ecb5d636 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/BucketOrder.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.xcontent.ToXContentObject; @@ -20,13 +21,12 @@ import java.util.Comparator; import java.util.List; import java.util.function.BiFunction; -import java.util.function.ToLongFunction; /** * {@link Bucket} ordering strategy. Buckets can be order either as * "complete" buckets using {@link #comparator()} or against a combination * of the buckets internals with its ordinal with - * {@link #partiallyBuiltBucketComparator(ToLongFunction, Aggregator)}. + * {@link #partiallyBuiltBucketComparator(Aggregator)}. */ public abstract class BucketOrder implements ToXContentObject, Writeable { /** @@ -102,7 +102,7 @@ public final void validate(Aggregator aggregator) throws AggregationExecutionExc * to validate this order because doing so checks all of the appropriate * paths. */ - partiallyBuiltBucketComparator(null, aggregator); + partiallyBuiltBucketComparator(aggregator); } /** @@ -121,7 +121,7 @@ public final void validate(Aggregator aggregator) throws AggregationExecutionExc * with it all the time. *
</p>
*/ - public abstract Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator); + public abstract Comparator> partiallyBuiltBucketComparator(Aggregator aggregator); /** * Build a comparator for fully built buckets. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java index 043fab6f4f12..74534c275d11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -16,6 +16,7 @@ import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.search.aggregations.Aggregator.BucketComparator; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd; import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.sort.SortValue; @@ -31,7 +32,6 @@ import java.util.List; import java.util.Objects; import java.util.function.BiFunction; -import java.util.function.ToLongFunction; /** * Implementations for {@link Bucket} ordering strategies. @@ -64,10 +64,10 @@ public AggregationPath path() { } @Override - public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) { + public Comparator> partiallyBuiltBucketComparator(Aggregator aggregator) { try { BucketComparator bucketComparator = path.bucketComparator(aggregator, order); - return (lhs, rhs) -> bucketComparator.compare(ordinalReader.applyAsLong(lhs), ordinalReader.applyAsLong(rhs)); + return (lhs, rhs) -> bucketComparator.compare(lhs.ord, rhs.ord); } catch (IllegalArgumentException e) { throw new AggregationExecutionException.InvalidPath("Invalid aggregation order path [" + path + "]. 
" + e.getMessage(), e); } @@ -189,12 +189,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } @Override - public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) { - List> comparators = orderElements.stream() - .map(oe -> oe.partiallyBuiltBucketComparator(ordinalReader, aggregator)) - .toList(); + public Comparator> partiallyBuiltBucketComparator(Aggregator aggregator) { + List>> comparators = new ArrayList<>(orderElements.size()); + for (BucketOrder order : orderElements) { + comparators.add(order.partiallyBuiltBucketComparator(aggregator)); + } return (lhs, rhs) -> { - for (Comparator c : comparators) { + for (Comparator> c : comparators) { int result = c.compare(lhs, rhs); if (result != 0) { return result; @@ -300,9 +301,9 @@ byte id() { } @Override - public Comparator partiallyBuiltBucketComparator(ToLongFunction ordinalReader, Aggregator aggregator) { + public Comparator> partiallyBuiltBucketComparator(Aggregator aggregator) { Comparator comparator = comparator(); - return comparator::compare; + return (lhs, rhs) -> comparator.compare(lhs.bucket, rhs.bucket); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java index 192b0b3d7323..310fcd4fb611 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/countedterms/CountedTermsAggregator.java @@ -13,6 +13,7 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.IntArray; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.core.Releasables; @@ -26,6 +27,7 @@ import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd; import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds; import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms; @@ -38,7 +40,6 @@ import java.util.Arrays; import java.util.Map; import java.util.function.BiConsumer; -import java.util.function.Supplier; import static java.util.Collections.emptyList; import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS; @@ -115,51 +116,57 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size()); ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size()) ) { - for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { - int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); - - // as users can't control sort order, in practice we'll always sort by doc count descending - try ( - BucketPriorityQueue ordered = new BucketPriorityQueue<>( - size, - bigArrays(), - partiallyBuiltBucketComparator - ) - ) { - StringTerms.Bucket spare = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = 
bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx)); - Supplier emptyBucketBuilder = () -> new StringTerms.Bucket( - new BytesRef(), - 0, - null, - false, - 0, - format - ); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts.increment(ordIdx, docCount); - if (spare == null) { - checkRealMemoryCBForInternalBucket(); - spare = emptyBucketBuilder.get(); + try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) { + // find how many buckets we are going to collect + long ordsToCollect = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)), bucketCountThresholds.getShardSize()); + bucketsToCollect.set(ordIdx, size); + ordsToCollect += size; + } + try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) { + long ordsCollected = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + // as users can't control sort order, in practice we'll always sort by doc count descending + try ( + BucketPriorityQueue ordered = new BucketPriorityQueue<>( + bucketsToCollect.get(ordIdx), + bigArrays(), + order.partiallyBuiltBucketComparator(this) + ) + ) { + BucketAndOrd spare = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds.get(ordIdx)); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = new BucketAndOrd<>(new StringTerms.Bucket(new BytesRef(), 0, null, false, 0, format)); + } + ordsEnum.readValue(spare.bucket.getTermBytes()); + spare.bucket.setDocCount(docCount); + spare.ord = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } + final int orderedSize = (int) ordered.size(); + final StringTerms.Bucket[] buckets = new StringTerms.Bucket[orderedSize]; + for (int i = orderedSize - 1; i >= 0; --i) { + BucketAndOrd bucketAndOrd = ordered.pop(); + buckets[i] = bucketAndOrd.bucket; + ordsArray.set(ordsCollected + i, bucketAndOrd.ord); + otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount()); + bucketAndOrd.bucket.setTermBytes(BytesRef.deepCopyOf(bucketAndOrd.bucket.getTermBytes())); + } + topBucketsPerOrd.set(ordIdx, buckets); + ordsCollected += orderedSize; } - ordsEnum.readValue(spare.getTermBytes()); - spare.setDocCount(docCount); - spare.setBucketOrd(ordsEnum.ord()); - spare = ordered.insertWithOverflow(spare); - } - - topBucketsPerOrd.set(ordIdx, new StringTerms.Bucket[(int) ordered.size()]); - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd.get(ordIdx)[i] = ordered.pop(); - otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount()); - topBucketsPerOrd.get(ordIdx)[i].setTermBytes(BytesRef.deepCopyOf(topBucketsPerOrd.get(ordIdx)[i].getTermBytes())); } + assert ordsCollected == ordsArray.size(); + buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, InternalTerms.Bucket::setAggregations); } } - buildSubAggsForAllBuckets(topBucketsPerOrd, InternalTerms.Bucket::getBucketOrd, InternalTerms.Bucket::setAggregations); - return buildAggregations(Math.toIntExact(owningBucketOrds.size()), ordIdx -> { final BucketOrder reduceOrder; if (isKeyOrder(order) == false) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
index 7f8e5c8c885f..9550003a5bd1 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketPriorityQueue.java
@@ -13,17 +13,17 @@
 
 import java.util.Comparator;
 
-public class BucketPriorityQueue<B> extends ObjectArrayPriorityQueue<B> {
+public class BucketPriorityQueue<B> extends ObjectArrayPriorityQueue<BucketAndOrd<B>> {
 
-    private final Comparator<? super B> comparator;
+    private final Comparator<BucketAndOrd<B>> comparator;
 
-    public BucketPriorityQueue(int size, BigArrays bigArrays, Comparator<? super B> comparator) {
+    public BucketPriorityQueue(int size, BigArrays bigArrays, Comparator<BucketAndOrd<B>> comparator) {
         super(size, bigArrays);
         this.comparator = comparator;
     }
 
     @Override
-    protected boolean lessThan(B a, B b) {
+    protected boolean lessThan(BucketAndOrd<B> a, BucketAndOrd<B> b) {
         return comparator.compare(a, b) > 0; // reverse, since we reverse again when adding to a list
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
index fe751c9e7918..4736f52d9362 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BucketSignificancePriorityQueue.java
@@ -12,14 +12,14 @@
 
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.ObjectArrayPriorityQueue;
 
-public class BucketSignificancePriorityQueue<B extends SignificantTerms.Bucket> extends ObjectArrayPriorityQueue<B> {
+public class BucketSignificancePriorityQueue<B extends SignificantTerms.Bucket> extends ObjectArrayPriorityQueue<BucketAndOrd<B>> {
 
     public BucketSignificancePriorityQueue(int size, BigArrays bigArrays) {
         super(size, bigArrays);
     }
 
     @Override
-    protected boolean lessThan(SignificantTerms.Bucket o1, SignificantTerms.Bucket o2) {
-        return o1.getSignificanceScore() < o2.getSignificanceScore();
+    protected boolean lessThan(BucketAndOrd<B> o1, BucketAndOrd<B> o2) {
+        return o1.bucket.getSignificanceScore() < o2.bucket.getSignificanceScore();
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
index 037870016a5f..ee472bb2050a 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
@@ -20,6 +20,7 @@
 import org.apache.lucene.util.PriorityQueue;
 import org.elasticsearch.common.CheckedSupplier;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.util.IntArray;
 import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.common.util.LongHash;
 import org.elasticsearch.common.util.ObjectArray;
@@ -558,10 +559,10 @@ InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOExc
         ) {
             GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd;
             final int size = (int) Math.min(valueCount, bucketCountThresholds.getShardSize());
-            try (ObjectArrayPriorityQueue<TB> ordered = collectionStrategy.buildPriorityQueue(size)) {
+            try (ObjectArrayPriorityQueue<BucketAndOrd<TB>> ordered =
collectionStrategy.buildPriorityQueue(size)) { BucketUpdater updater = collectionStrategy.bucketUpdater(0, lookupGlobalOrd); collect(new BucketInfoConsumer() { - TB spare = null; + BucketAndOrd spare = null; @Override public void accept(long globalOrd, long bucketOrd, long docCount) throws IOException { @@ -569,24 +570,31 @@ public void accept(long globalOrd, long bucketOrd, long docCount) throws IOExcep if (docCount >= bucketCountThresholds.getShardMinDocCount()) { if (spare == null) { checkRealMemoryCBForInternalBucket(); - spare = collectionStrategy.buildEmptyTemporaryBucket(); + spare = new BucketAndOrd<>(collectionStrategy.buildEmptyTemporaryBucket()); } - updater.updateBucket(spare, globalOrd, bucketOrd, docCount); + spare.ord = bucketOrd; + updater.updateBucket(spare.bucket, globalOrd, docCount); spare = ordered.insertWithOverflow(spare); } } }); // Get the top buckets - topBucketsPreOrd.set(0, collectionStrategy.buildBuckets((int) ordered.size())); - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - checkRealMemoryCBForInternalBucket(); - B bucket = collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); - topBucketsPreOrd.get(0)[i] = bucket; - otherDocCount.increment(0, -bucket.getDocCount()); + int orderedSize = (int) ordered.size(); + try (LongArray ordsArray = bigArrays().newLongArray(orderedSize)) { + B[] buckets = collectionStrategy.buildBuckets(orderedSize); + for (int i = orderedSize - 1; i >= 0; --i) { + checkRealMemoryCBForInternalBucket(); + BucketAndOrd bucketAndOrd = ordered.pop(); + B bucket = collectionStrategy.convertTempBucketToRealBucket(bucketAndOrd.bucket, lookupGlobalOrd); + ordsArray.set(i, bucketAndOrd.ord); + buckets[i] = bucket; + otherDocCount.increment(0, -bucket.getDocCount()); + } + topBucketsPreOrd.set(0, buckets); + collectionStrategy.buildSubAggs(topBucketsPreOrd, ordsArray); } } - collectionStrategy.buildSubAggs(topBucketsPreOrd); return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( Math.toIntExact(owningBucketOrds.size()), ordIdx -> collectionStrategy.buildResult( @@ -706,39 +714,61 @@ InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throws IOExc LongArray otherDocCount = bigArrays().newLongArray(owningBucketOrds.size(), true); ObjectArray topBucketsPreOrd = collectionStrategy.buildTopBucketsPerOrd(owningBucketOrds.size()) ) { - GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd; - for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) { - long owningBucketOrd = owningBucketOrds.get(ordIdx); - collectZeroDocEntriesIfNeeded(owningBucketOrds.get(ordIdx)); - int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize()); - try (ObjectArrayPriorityQueue ordered = collectionStrategy.buildPriorityQueue(size)) { - BucketUpdater updater = collectionStrategy.bucketUpdater(owningBucketOrd, lookupGlobalOrd); - LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); - TB spare = null; - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCount.increment(ordIdx, docCount); - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - checkRealMemoryCBForInternalBucket(); - spare = collectionStrategy.buildEmptyTemporaryBucket(); + try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) { + long ordsToCollect = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + final 
long owningBucketOrd = owningBucketOrds.get(ordIdx); + collectZeroDocEntriesIfNeeded(owningBucketOrd); + final int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize()); + ordsToCollect += size; + bucketsToCollect.set(ordIdx, size); + } + try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) { + long ordsCollected = 0; + GlobalOrdLookupFunction lookupGlobalOrd = valuesSupplier.get()::lookupOrd; + for (long ordIdx = 0; ordIdx < topBucketsPreOrd.size(); ordIdx++) { + long owningBucketOrd = owningBucketOrds.get(ordIdx); + try ( + ObjectArrayPriorityQueue> ordered = collectionStrategy.buildPriorityQueue( + bucketsToCollect.get(ordIdx) + ) + ) { + BucketUpdater updater = collectionStrategy.bucketUpdater(owningBucketOrd, lookupGlobalOrd); + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); + BucketAndOrd spare = null; + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCount.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = new BucketAndOrd<>(collectionStrategy.buildEmptyTemporaryBucket()); + } + updater.updateBucket(spare.bucket, ordsEnum.value(), docCount); + spare.ord = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } + // Get the top buckets + int orderedSize = (int) ordered.size(); + B[] buckets = collectionStrategy.buildBuckets(orderedSize); + for (int i = orderedSize - 1; i >= 0; --i) { + checkRealMemoryCBForInternalBucket(); + BucketAndOrd bucketAndOrd = ordered.pop(); + B bucket = collectionStrategy.convertTempBucketToRealBucket(bucketAndOrd.bucket, lookupGlobalOrd); + ordsArray.set(ordsCollected + i, bucketAndOrd.ord); + buckets[i] = bucket; + otherDocCount.increment(ordIdx, -bucket.getDocCount()); + } + topBucketsPreOrd.set(ordIdx, buckets); + ordsCollected += orderedSize; } - updater.updateBucket(spare, ordsEnum.value(), ordsEnum.ord(), docCount); - spare = ordered.insertWithOverflow(spare); - } - // Get the top buckets - topBucketsPreOrd.set(ordIdx, collectionStrategy.buildBuckets((int) ordered.size())); - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - checkRealMemoryCBForInternalBucket(); - B bucket = collectionStrategy.convertTempBucketToRealBucket(ordered.pop(), lookupGlobalOrd); - topBucketsPreOrd.get(ordIdx)[i] = bucket; - otherDocCount.increment(ordIdx, -bucket.getDocCount()); } + assert ordsCollected == ordsArray.size(); + collectionStrategy.buildSubAggs(topBucketsPreOrd, ordsArray); } } - collectionStrategy.buildSubAggs(topBucketsPreOrd); return GlobalOrdinalsStringTermsAggregator.this.buildAggregations( Math.toIntExact(owningBucketOrds.size()), ordIdx -> collectionStrategy.buildResult( @@ -787,7 +817,7 @@ abstract class ResultStrategy< * Build a {@link PriorityQueue} to sort the buckets. After we've * collected all of the buckets we'll collect all entries in the queue. */ - abstract ObjectArrayPriorityQueue buildPriorityQueue(int size); + abstract ObjectArrayPriorityQueue> buildPriorityQueue(int size); /** * Build an array to hold the "top" buckets for each ordinal. @@ -809,7 +839,7 @@ abstract class ResultStrategy< * Build the sub-aggregations into the buckets. This will usually * delegate to {@link #buildSubAggsForAllBuckets}. 
*/ - abstract void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException; + abstract void buildSubAggs(ObjectArray topBucketsPreOrd, LongArray ordsArray) throws IOException; /** * Turn the buckets into an aggregation result. @@ -830,7 +860,7 @@ abstract class ResultStrategy< } interface BucketUpdater { - void updateBucket(TB spare, long globalOrd, long bucketOrd, long docCount) throws IOException; + void updateBucket(TB spare, long globalOrd, long docCount) throws IOException; } /** @@ -864,29 +894,30 @@ OrdBucket buildEmptyTemporaryBucket() { @Override BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) { - return (spare, globalOrd, bucketOrd, docCount) -> { + return (spare, globalOrd, docCount) -> { spare.globalOrd = globalOrd; - spare.bucketOrd = bucketOrd; spare.docCount = docCount; }; } @Override - ObjectArrayPriorityQueue buildPriorityQueue(int size) { - return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator); + ObjectArrayPriorityQueue> buildPriorityQueue(int size) { + return new BucketPriorityQueue<>( + size, + bigArrays(), + order.partiallyBuiltBucketComparator(GlobalOrdinalsStringTermsAggregator.this) + ); } @Override StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp, GlobalOrdLookupFunction lookupGlobalOrd) throws IOException { BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd)); - StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format); - result.bucketOrd = temp.bucketOrd; - return result; + return new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format); } @Override - void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException { - buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); + void buildSubAggs(ObjectArray topBucketsPreOrd, LongArray ordsArray) throws IOException { + buildSubAggsForAllBuckets(topBucketsPreOrd, ordsArray, (b, aggs) -> b.aggregations = aggs); } @Override @@ -1001,8 +1032,7 @@ private long subsetSize(long owningBucketOrd) { @Override BucketUpdater bucketUpdater(long owningBucketOrd, GlobalOrdLookupFunction lookupGlobalOrd) { long subsetSize = subsetSize(owningBucketOrd); - return (spare, globalOrd, bucketOrd, docCount) -> { - spare.bucketOrd = bucketOrd; + return (spare, globalOrd, docCount) -> { oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes); spare.subsetDf = docCount; spare.supersetDf = backgroundFrequencies.freq(spare.termBytes); @@ -1016,7 +1046,7 @@ BucketUpdater bucketUpdater(long owningBucketOrd, } @Override - ObjectArrayPriorityQueue buildPriorityQueue(int size) { + ObjectArrayPriorityQueue> buildPriorityQueue(int size) { return new BucketSignificancePriorityQueue<>(size, bigArrays()); } @@ -1029,8 +1059,8 @@ SignificantStringTerms.Bucket convertTempBucketToRealBucket( } @Override - void buildSubAggs(ObjectArray topBucketsPreOrd) throws IOException { - buildSubAggsForAllBuckets(topBucketsPreOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); + void buildSubAggs(ObjectArray topBucketsPreOrd, LongArray ordsArray) throws IOException { + buildSubAggsForAllBuckets(topBucketsPreOrd, ordsArray, (b, aggs) -> b.aggregations = aggs); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java index 
78ae2481f5d9..5108793b8a80 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalSignificantTerms.java @@ -10,12 +10,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.ObjectArrayPriorityQueue; import org.elasticsearch.common.util.ObjectObjectPagedHashMap; import org.elasticsearch.core.Releasables; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationErrors; import org.elasticsearch.search.aggregations.AggregationReduceContext; -import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorReducer; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -58,12 +58,6 @@ public interface Reader> { long subsetDf; long supersetDf; - /** - * Ordinal of the bucket while it is being built. Not used after it is - * returned from {@link Aggregator#buildAggregations(org.elasticsearch.common.util.LongArray)} and not - * serialized. - */ - transient long bucketOrd; double score; protected InternalAggregations aggregations; final transient DocValueFormat format; @@ -235,7 +229,12 @@ canLeadReduction here is essentially checking if this shard returned data. Unma public InternalAggregation get() { final SignificanceHeuristic heuristic = getSignificanceHeuristic().rewrite(reduceContext); final int size = (int) (reduceContext.isFinalReduce() == false ? buckets.size() : Math.min(requiredSize, buckets.size())); - try (BucketSignificancePriorityQueue ordered = new BucketSignificancePriorityQueue<>(size, reduceContext.bigArrays())) { + try (ObjectArrayPriorityQueue ordered = new ObjectArrayPriorityQueue(size, reduceContext.bigArrays()) { + @Override + protected boolean lessThan(B a, B b) { + return a.getSignificanceScore() < b.getSignificanceScore(); + } + }) { buckets.forEach(entry -> { final B b = createBucket( entry.value.subsetDf[0], diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 739f0b923eaa..de35046691b3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -38,8 +38,6 @@ public interface Reader> { B read(StreamInput in, DocValueFormat format, boolean showDocCountError) throws IOException; } - long bucketOrd; - protected long docCount; private long docCountError; protected InternalAggregations aggregations; @@ -88,14 +86,6 @@ public void setDocCount(long docCount) { this.docCount = docCount; } - public long getBucketOrd() { - return bucketOrd; - } - - public void setBucketOrd(long bucketOrd) { - this.bucketOrd = bucketOrd; - } - @Override public long getDocCountError() { return docCountError; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index b96c495d3748..026912a583ef 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -17,6 +17,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.util.IntArray; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; @@ -43,6 +44,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Comparator; import java.util.Map; import java.util.function.BiConsumer; import java.util.function.Function; @@ -287,40 +289,55 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true); ObjectArray topBucketsPerOrd = buildTopBucketsPerOrd(Math.toIntExact(owningBucketOrds.size())) ) { - for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { - long owningOrd = owningBucketOrds.get(ordIdx); - collectZeroDocEntriesIfNeeded(owningOrd, excludeDeletedDocs); - int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize()); - - try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { - B spare = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningOrd); - BucketUpdater bucketUpdater = bucketUpdater(owningOrd); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts.increment(ordIdx, docCount); - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - checkRealMemoryCBForInternalBucket(); - spare = buildEmptyBucket(); + try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) { + long ordsToCollect = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs); + final int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize()); + ordsToCollect += size; + bucketsToCollect.set(ordIdx, size); + } + try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) { + long ordsCollected = 0; + for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { + long owningOrd = owningBucketOrds.get(ordIdx); + try (ObjectArrayPriorityQueue> ordered = buildPriorityQueue(bucketsToCollect.get(ordIdx))) { + BucketAndOrd spare = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningOrd); + BucketUpdater bucketUpdater = bucketUpdater(owningOrd); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = new BucketAndOrd<>(buildEmptyBucket()); + } + bucketUpdater.updateBucket(spare.bucket, ordsEnum, docCount); + spare.ord = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } + + final int orderedSize = (int) ordered.size(); + final B[] buckets = buildBuckets(orderedSize); + for (int i = orderedSize - 1; i >= 0; --i) { + BucketAndOrd bucketAndOrd = ordered.pop(); + finalizeBucket(bucketAndOrd.bucket); + buckets[i] = bucketAndOrd.bucket; + ordsArray.set(ordsCollected + i, bucketAndOrd.ord); + otherDocCounts.increment(ordIdx, 
-bucketAndOrd.bucket.getDocCount()); + } + topBucketsPerOrd.set(ordIdx, buckets); + ordsCollected += orderedSize; } - bucketUpdater.updateBucket(spare, ordsEnum, docCount); - spare = ordered.insertWithOverflow(spare); - } - - topBucketsPerOrd.set(ordIdx, buildBuckets((int) ordered.size())); - for (int i = (int) ordered.size() - 1; i >= 0; --i) { - topBucketsPerOrd.get(ordIdx)[i] = ordered.pop(); - otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[i].getDocCount()); - finalizeBucket(topBucketsPerOrd.get(ordIdx)[i]); } + assert ordsCollected == ordsArray.size(); + buildSubAggs(topBucketsPerOrd, ordsArray); } } - - buildSubAggs(topBucketsPerOrd); - return MapStringTermsAggregator.this.buildAggregations( Math.toIntExact(owningBucketOrds.size()), ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx)) @@ -355,7 +372,7 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro * Build a {@link PriorityQueue} to sort the buckets. After we've * collected all of the buckets we'll collect all entries in the queue. */ - abstract ObjectArrayPriorityQueue buildPriorityQueue(int size); + abstract ObjectArrayPriorityQueue> buildPriorityQueue(int size); /** * Update fields in {@code spare} to reflect information collected for @@ -382,9 +399,9 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro /** * Build the sub-aggregations into the buckets. This will usually - * delegate to {@link #buildSubAggsForAllBuckets}. + * delegate to {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)}. */ - abstract void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException; + abstract void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException; /** * Turn the buckets into an aggregation result. 
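
The BucketAndOrd holder that these hunks keep referring to is added by this same change set, outside the hunks shown here. Reconstructed from its usage above (new BucketAndOrd<>(bucket), spare.bucket, spare.ord), it is essentially the following sketch; the exact field modifiers are an assumption:

    public class BucketAndOrd<B> {

        public final B bucket; // the partially built bucket being ranked in the queue
        public long ord;       // bucket ordinal, used later to fetch sub-aggregation results

        public BucketAndOrd(B bucket) {
            this.bucket = bucket;
        }
    }

Carrying the ordinal next to the bucket rather than inside it is what lets InternalTerms.Bucket and InternalSignificantTerms.Bucket drop their transient bucketOrd fields further down in this patch, and it lets each aggregator gather the surviving ordinals into one contiguous LongArray (ordsArray) so buildSubAggsForAllBuckets can resolve sub-aggregations for every owning bucket in a single pass instead of reading a per-bucket getter.
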
@@ -407,9 +424,11 @@ interface BucketUpdater */ class StandardTermsResults extends ResultStrategy { private final ValuesSource valuesSource; + private final Comparator> comparator; - StandardTermsResults(ValuesSource valuesSource) { + StandardTermsResults(ValuesSource valuesSource, Aggregator aggregator) { this.valuesSource = valuesSource; + this.comparator = order.partiallyBuiltBucketComparator(aggregator); } @Override @@ -498,8 +517,8 @@ StringTerms.Bucket buildEmptyBucket() { } @Override - ObjectArrayPriorityQueue buildPriorityQueue(int size) { - return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator); + ObjectArrayPriorityQueue> buildPriorityQueue(int size) { + return new BucketPriorityQueue<>(size, bigArrays(), comparator); } @Override @@ -507,7 +526,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) { return (spare, ordsEnum, docCount) -> { ordsEnum.readValue(spare.termBytes); spare.docCount = docCount; - spare.bucketOrd = ordsEnum.ord(); }; } @@ -532,8 +550,8 @@ void finalizeBucket(StringTerms.Bucket bucket) { } @Override - void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { - buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); + void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordArray) throws IOException { + buildSubAggsForAllBuckets(topBucketsPerOrd, ordArray, (b, a) -> b.aggregations = a); } @Override @@ -625,7 +643,7 @@ SignificantStringTerms.Bucket buildEmptyBucket() { } @Override - ObjectArrayPriorityQueue buildPriorityQueue(int size) { + ObjectArrayPriorityQueue> buildPriorityQueue(int size) { return new BucketSignificancePriorityQueue<>(size, bigArrays()); } @@ -634,7 +652,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) long subsetSize = subsetSizes.get(owningBucketOrd); return (spare, ordsEnum, docCount) -> { ordsEnum.readValue(spare.termBytes); - spare.bucketOrd = ordsEnum.ord(); spare.subsetDf = docCount; spare.supersetDf = backgroundFrequencies.freq(spare.termBytes); /* @@ -667,8 +684,8 @@ void finalizeBucket(SignificantStringTerms.Bucket bucket) { } @Override - void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { - buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); + void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException { + buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, a) -> b.aggregations = a); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java index 5d4c15d8a3b8..a54053f712f8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/NumericTermsAggregator.java @@ -14,6 +14,7 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.util.IntArray; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; @@ -40,6 +41,7 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Comparator; import java.util.Map; import java.util.function.BiConsumer; import java.util.function.Function; @@ -167,42 +169,56 @@ private 
InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true); ObjectArray topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.size()) ) { - for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { - final long owningBucketOrd = owningBucketOrds.get(ordIdx); - collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs); - long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd); - - int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); - try (ObjectArrayPriorityQueue ordered = buildPriorityQueue(size)) { - B spare = null; - BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); - BucketUpdater bucketUpdater = bucketUpdater(owningBucketOrd); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts.increment(ordIdx, docCount); - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - checkRealMemoryCBForInternalBucket(); - spare = buildEmptyBucket(); - } - bucketUpdater.updateBucket(spare, ordsEnum, docCount); - spare = ordered.insertWithOverflow(spare); - } + try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) { + long ordsToCollect = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + collectZeroDocEntriesIfNeeded(owningBucketOrd, excludeDeletedDocs); + int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrd), bucketCountThresholds.getShardSize()); + bucketsToCollect.set(ordIdx, size); + ordsToCollect += size; + } + try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) { + long ordsCollected = 0; + for (long ordIdx = 0; ordIdx < topBucketsPerOrd.size(); ordIdx++) { + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + try (ObjectArrayPriorityQueue> ordered = buildPriorityQueue(bucketsToCollect.get(ordIdx))) { + BucketAndOrd spare = null; + BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); + BucketUpdater bucketUpdater = bucketUpdater(owningBucketOrd); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = new BucketAndOrd<>(buildEmptyBucket()); + } + bucketUpdater.updateBucket(spare.bucket, ordsEnum, docCount); + spare.ord = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } + + // Get the top buckets + final int orderedSize = (int) ordered.size(); + final B[] bucketsForOrd = buildBuckets(orderedSize); + for (int b = orderedSize - 1; b >= 0; --b) { + BucketAndOrd bucketAndOrd = ordered.pop(); + bucketsForOrd[b] = bucketAndOrd.bucket; + ordsArray.set(ordsCollected + b, bucketAndOrd.ord); + otherDocCounts.increment(ordIdx, -bucketAndOrd.bucket.getDocCount()); + } + topBucketsPerOrd.set(ordIdx, bucketsForOrd); + ordsCollected += orderedSize; - // Get the top buckets - B[] bucketsForOrd = buildBuckets((int) ordered.size()); - topBucketsPerOrd.set(ordIdx, bucketsForOrd); - for (int b = (int) ordered.size() - 1; b >= 0; --b) { - topBucketsPerOrd.get(ordIdx)[b] = ordered.pop(); - otherDocCounts.increment(ordIdx, -topBucketsPerOrd.get(ordIdx)[b].getDocCount()); + } } + assert ordsCollected == ordsArray.size(); + buildSubAggs(topBucketsPerOrd, ordsArray); } } - - 
buildSubAggs(topBucketsPerOrd); - return NumericTermsAggregator.this.buildAggregations( Math.toIntExact(owningBucketOrds.size()), ordIdx -> buildResult(owningBucketOrds.get(ordIdx), otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx)) @@ -254,13 +270,13 @@ private InternalAggregation[] buildAggregations(LongArray owningBucketOrds) thro * Build a {@link ObjectArrayPriorityQueue} to sort the buckets. After we've * collected all of the buckets we'll collect all entries in the queue. */ - abstract ObjectArrayPriorityQueue buildPriorityQueue(int size); + abstract ObjectArrayPriorityQueue> buildPriorityQueue(int size); /** * Build the sub-aggregations into the buckets. This will usually - * delegate to {@link #buildSubAggsForAllBuckets}. + * delegate to {@link #buildSubAggsForAllBuckets(ObjectArray, LongArray, BiConsumer)}. */ - abstract void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException; + abstract void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException; /** * Collect extra entries for "zero" hit documents if they were requested @@ -287,9 +303,11 @@ interface BucketUpdater abstract class StandardTermsResultStrategy, B extends InternalTerms.Bucket> extends ResultStrategy { protected final boolean showTermDocCountError; + private final Comparator> comparator; - StandardTermsResultStrategy(boolean showTermDocCountError) { + StandardTermsResultStrategy(boolean showTermDocCountError, Aggregator aggregator) { this.showTermDocCountError = showTermDocCountError; + this.comparator = order.partiallyBuiltBucketComparator(aggregator); } @Override @@ -298,13 +316,13 @@ final LeafBucketCollector wrapCollector(LeafBucketCollector primary) { } @Override - final ObjectArrayPriorityQueue buildPriorityQueue(int size) { - return new BucketPriorityQueue<>(size, bigArrays(), partiallyBuiltBucketComparator); + final ObjectArrayPriorityQueue> buildPriorityQueue(int size) { + return new BucketPriorityQueue<>(size, bigArrays(), comparator); } @Override - final void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { - buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); + final void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException { + buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, aggs) -> b.aggregations = aggs); } @Override @@ -340,8 +358,8 @@ public final void close() {} } class LongTermsResults extends StandardTermsResultStrategy { - LongTermsResults(boolean showTermDocCountError) { - super(showTermDocCountError); + LongTermsResults(boolean showTermDocCountError, Aggregator aggregator) { + super(showTermDocCountError, aggregator); } @Override @@ -374,7 +392,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) { return (LongTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) -> { spare.term = ordsEnum.value(); spare.docCount = docCount; - spare.bucketOrd = ordsEnum.ord(); }; } @@ -424,8 +441,8 @@ LongTerms buildEmptyResult() { class DoubleTermsResults extends StandardTermsResultStrategy { - DoubleTermsResults(boolean showTermDocCountError) { - super(showTermDocCountError); + DoubleTermsResults(boolean showTermDocCountError, Aggregator aggregator) { + super(showTermDocCountError, aggregator); } @Override @@ -458,7 +475,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) { return (DoubleTerms.Bucket spare, BucketOrdsEnum ordsEnum, long docCount) -> { spare.term = NumericUtils.sortableLongToDouble(ordsEnum.value()); spare.docCount = docCount; - 
spare.bucketOrd = ordsEnum.ord(); }; } @@ -575,7 +591,6 @@ BucketUpdater bucketUpdater(long owningBucketOrd) { spare.term = ordsEnum.value(); spare.subsetDf = docCount; spare.supersetDf = backgroundFrequencies.freq(spare.term); - spare.bucketOrd = ordsEnum.ord(); // During shard-local down-selection we use subset/superset stats that are for this shard only // Back at the central reducer these properties will be updated with global stats spare.updateScore(significanceHeuristic, subsetSize, supersetSize); @@ -583,13 +598,13 @@ BucketUpdater bucketUpdater(long owningBucketOrd) { } @Override - ObjectArrayPriorityQueue buildPriorityQueue(int size) { + ObjectArrayPriorityQueue> buildPriorityQueue(int size) { return new BucketSignificancePriorityQueue<>(size, bigArrays()); } @Override - void buildSubAggs(ObjectArray topBucketsPerOrd) throws IOException { - buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, aggs) -> b.aggregations = aggs); + void buildSubAggs(ObjectArray topBucketsPerOrd, LongArray ordsArray) throws IOException { + buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, aggs) -> b.aggregations = aggs); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java index 4922be7cec1b..c07c0726a4ae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -27,7 +27,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Comparator; import java.util.HashSet; import java.util.Map; import java.util.Objects; @@ -190,7 +189,6 @@ public boolean equals(Object obj) { protected final DocValueFormat format; protected final BucketCountThresholds bucketCountThresholds; protected final BucketOrder order; - protected final Comparator> partiallyBuiltBucketComparator; protected final Set aggsUsedForSorting; protected final SubAggCollectionMode collectMode; @@ -209,7 +207,9 @@ public TermsAggregator( super(name, factories, context, parent, metadata); this.bucketCountThresholds = bucketCountThresholds; this.order = order; - partiallyBuiltBucketComparator = order == null ? 
null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); + if (order != null) { + order.validate(this); + } this.format = format; if ((subAggsNeedScore() && descendsFromNestedAggregator(parent)) || context.isInSortOrderExecutionRequired()) { /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 2c7b768fcdbb..da5ae37b0822 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -195,12 +195,12 @@ private static TermsAggregatorSupplier numericSupplier() { if (includeExclude != null) { longFilter = includeExclude.convertToDoubleFilter(); } - resultStrategy = agg -> agg.new DoubleTermsResults(showTermDocCountError); + resultStrategy = agg -> agg.new DoubleTermsResults(showTermDocCountError, agg); } else { if (includeExclude != null) { longFilter = includeExclude.convertToLongFilter(valuesSourceConfig.format()); } - resultStrategy = agg -> agg.new LongTermsResults(showTermDocCountError); + resultStrategy = agg -> agg.new LongTermsResults(showTermDocCountError, agg); } return new NumericTermsAggregator( name, @@ -403,7 +403,7 @@ Aggregator create( name, factories, new MapStringTermsAggregator.ValuesSourceCollectorSource(valuesSourceConfig), - a -> a.new StandardTermsResults(valuesSourceConfig.getValuesSource()), + a -> a.new StandardTermsResults(valuesSourceConfig.getValuesSource(), a), order, valuesSourceConfig.format(), bucketCountThresholds, diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java index 0d42a2856a10..85510c8a989c 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java @@ -37,9 +37,6 @@ public class InternalMultiTerms extends AbstractInternalTerms { - - long bucketOrd; - protected long docCount; protected InternalAggregations aggregations; private long docCountError; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java index 1691aedf543f..5c10e2c8feeb 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.IntArray; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.common.util.ObjectArrayPriorityQueue; @@ -40,6 +41,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator; +import 
org.elasticsearch.search.aggregations.bucket.terms.BucketAndOrd; import org.elasticsearch.search.aggregations.bucket.terms.BucketPriorityQueue; import org.elasticsearch.search.aggregations.bucket.terms.BytesKeyedBucketOrds; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; @@ -72,7 +74,7 @@ class MultiTermsAggregator extends DeferableBucketAggregator { protected final List formats; protected final TermsAggregator.BucketCountThresholds bucketCountThresholds; protected final BucketOrder order; - protected final Comparator partiallyBuiltBucketComparator; + protected final Comparator> partiallyBuiltBucketComparator; protected final Set aggsUsedForSorting; protected final SubAggCollectionMode collectMode; private final List values; @@ -99,7 +101,7 @@ protected MultiTermsAggregator( super(name, factories, context, parent, metadata); this.bucketCountThresholds = bucketCountThresholds; this.order = order; - partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); + partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(this); this.formats = formats; this.showTermDocCountError = showTermDocCountError; if (subAggsNeedScore() && descendsFromNestedAggregator(parent) || context.isInSortOrderExecutionRequired()) { @@ -242,52 +244,67 @@ public InternalAggregation[] buildAggregations(LongArray owningBucketOrds) throw LongArray otherDocCounts = bigArrays().newLongArray(owningBucketOrds.size(), true); ObjectArray topBucketsPerOrd = bigArrays().newObjectArray(owningBucketOrds.size()) ) { - for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { - final long owningBucketOrd = owningBucketOrds.get(ordIdx); - long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd); - - int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); - try ( - ObjectArrayPriorityQueue ordered = new BucketPriorityQueue<>( - size, - bigArrays(), - partiallyBuiltBucketComparator - ) - ) { - InternalMultiTerms.Bucket spare = null; - BytesRef spareKey = null; - BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); - while (ordsEnum.next()) { - long docCount = bucketDocCount(ordsEnum.ord()); - otherDocCounts.increment(ordIdx, docCount); - if (docCount < bucketCountThresholds.getShardMinDocCount()) { - continue; - } - if (spare == null) { - checkRealMemoryCBForInternalBucket(); - spare = new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters); - spareKey = new BytesRef(); - } - ordsEnum.readValue(spareKey); - spare.terms = unpackTerms(spareKey); - spare.docCount = docCount; - spare.bucketOrd = ordsEnum.ord(); - spare = ordered.insertWithOverflow(spare); - } + try (IntArray bucketsToCollect = bigArrays().newIntArray(owningBucketOrds.size())) { + long ordsToCollect = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds.get(ordIdx)), bucketCountThresholds.getShardSize()); + ordsToCollect += size; + bucketsToCollect.set(ordIdx, size); + } + try (LongArray ordsArray = bigArrays().newLongArray(ordsToCollect)) { + long ordsCollected = 0; + for (long ordIdx = 0; ordIdx < owningBucketOrds.size(); ordIdx++) { + final long owningBucketOrd = owningBucketOrds.get(ordIdx); + long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrd); + + int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize()); + try ( + 
ObjectArrayPriorityQueue> ordered = new BucketPriorityQueue<>( + size, + bigArrays(), + partiallyBuiltBucketComparator + ) + ) { + BucketAndOrd spare = null; + BytesRef spareKey = null; + BytesKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd); + while (ordsEnum.next()) { + long docCount = bucketDocCount(ordsEnum.ord()); + otherDocCounts.increment(ordIdx, docCount); + if (docCount < bucketCountThresholds.getShardMinDocCount()) { + continue; + } + if (spare == null) { + checkRealMemoryCBForInternalBucket(); + spare = new BucketAndOrd<>( + new InternalMultiTerms.Bucket(null, 0, null, showTermDocCountError, 0, formats, keyConverters) + ); + spareKey = new BytesRef(); + } + ordsEnum.readValue(spareKey); + spare.bucket.terms = unpackTerms(spareKey); + spare.bucket.docCount = docCount; + spare.ord = ordsEnum.ord(); + spare = ordered.insertWithOverflow(spare); + } - // Get the top buckets - InternalMultiTerms.Bucket[] bucketsForOrd = new InternalMultiTerms.Bucket[(int) ordered.size()]; - topBucketsPerOrd.set(ordIdx, bucketsForOrd); - for (int b = (int) ordered.size() - 1; b >= 0; --b) { - InternalMultiTerms.Bucket[] buckets = topBucketsPerOrd.get(ordIdx); - buckets[b] = ordered.pop(); - otherDocCounts.increment(ordIdx, -buckets[b].getDocCount()); + // Get the top buckets + int orderedSize = (int) ordered.size(); + InternalMultiTerms.Bucket[] buckets = new InternalMultiTerms.Bucket[orderedSize]; + for (int i = orderedSize - 1; i >= 0; --i) { + BucketAndOrd bucketAndOrd = ordered.pop(); + buckets[i] = bucketAndOrd.bucket; + ordsArray.set(ordsCollected + i, bucketAndOrd.ord); + otherDocCounts.increment(ordIdx, -buckets[i].getDocCount()); + } + topBucketsPerOrd.set(ordIdx, buckets); + ordsCollected += orderedSize; + } } + buildSubAggsForAllBuckets(topBucketsPerOrd, ordsArray, (b, a) -> b.aggregations = a); } } - buildSubAggsForAllBuckets(topBucketsPerOrd, b -> b.bucketOrd, (b, a) -> b.aggregations = a); - return buildAggregations( Math.toIntExact(owningBucketOrds.size()), ordIdx -> buildResult(otherDocCounts.get(ordIdx), topBucketsPerOrd.get(ordIdx)) From b449c8e0ec0cbc45a79aea0ce443e4f60b77103c Mon Sep 17 00:00:00 2001 From: kosabogi <105062005+kosabogi@users.noreply.github.com> Date: Thu, 5 Dec 2024 17:00:19 +0100 Subject: [PATCH 37/45] Adds warning to Create inference API page (#118073) (#118091) --- docs/reference/inference/put-inference.asciidoc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index e7e25ec98b49..2986f16916f3 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -10,7 +10,6 @@ Creates an {infer} endpoint to perform an {infer} task. * For built-in models and models uploaded through Eland, the {infer} APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the {infer} APIs to use these models or if you want to use non-NLP models, use the <>. ==== - [discrete] [[put-inference-api-request]] ==== {api-request-title} @@ -47,6 +46,14 @@ Refer to the service list in the <> API. In the response, look for `"state": "fully_allocated"` and ensure the `"allocation_count"` matches the `"target_allocation_count"`. +* Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources. +==== + + The following services are available through the {infer} API. 
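
For the deployment-state check described in the note above, the relevant fields appear in the trained model statistics response. The sketch below shows roughly where they sit; the three field names are the ones quoted above, while the surrounding nesting is an assumption for illustration:

    {
      "deployment_stats": {
        "allocation_status": {
          "allocation_count": 2,
          "target_allocation_count": 2,
          "state": "fully_allocated"
        }
      }
    }

The deployment is ready once `state` is `fully_allocated` and `allocation_count` equals `target_allocation_count`.
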
You can find the available task types next to the service name. Click the links to review the configuration details of the services: From 90b116f2f190ab206d221b6a1f09d15e1f4ea7a7 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Thu, 5 Dec 2024 11:15:10 -0500 Subject: [PATCH 38/45] Esql refactor date tests (#117923) (#118085) This refactors a bit of the type logic in the parametrized testing to pass the input values as java Instants for millisecond and nanosecond date. Mainly, this impacts verifier functions. The goal here is to ensure that the values are correctly converted based on the type they were generated as, rather than relying on the verifier function to know how to convert from a long with no additional information. This will make tests that have mixed millisecond and nanosecond inputs easier to write correctly. --- .../xpack/esql/core/util/DateUtils.java | 4 + .../expression/function/TestCaseSupplier.java | 78 +++---------------- .../scalar/convert/ToDateNanosTests.java | 17 ++-- .../scalar/convert/ToDatetimeTests.java | 14 +++- .../scalar/convert/ToDoubleTests.java | 6 +- .../scalar/convert/ToIntegerTests.java | 7 +- .../function/scalar/convert/ToLongTests.java | 11 ++- .../scalar/convert/ToStringTests.java | 11 ++- .../scalar/convert/ToUnsignedLongTests.java | 6 +- .../operator/arithmetic/AddTests.java | 22 +++--- .../operator/arithmetic/SubTests.java | 16 ++-- .../comparison/GreaterThanOrEqualTests.java | 39 ++++------ .../operator/comparison/GreaterThanTests.java | 5 +- .../comparison/LessThanOrEqualTests.java | 39 ++++------ .../operator/comparison/LessThanTests.java | 5 +- 15 files changed, 117 insertions(+), 163 deletions(-) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/DateUtils.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/DateUtils.java index 280cf172a8a5..20f7b400e936 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/DateUtils.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/DateUtils.java @@ -174,6 +174,10 @@ public static ZonedDateTime asDateTime(long millis) { return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC); } + public static ZonedDateTime asDateTime(Instant instant) { + return ZonedDateTime.ofInstant(instant, UTC); + } + public static long asMillis(ZonedDateTime zonedDateTime) { return zonedDateTime.toInstant().toEpochMilli(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java index 816c9ef6f352..377027b70fb5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java @@ -620,70 +620,6 @@ public static void forUnaryBoolean( unary(suppliers, expectedEvaluatorToString, booleanCases(), expectedType, v -> expectedValue.apply((Boolean) v), warnings); } - /** - * Generate positive test cases for a unary function operating on an {@link DataType#DATETIME}. 
- * This variant defaults to maximum range of possible values - */ - public static void forUnaryDatetime( - List suppliers, - String expectedEvaluatorToString, - DataType expectedType, - Function expectedValue, - List warnings - ) { - unaryNumeric( - suppliers, - expectedEvaluatorToString, - dateCases(), - expectedType, - n -> expectedValue.apply(Instant.ofEpochMilli(n.longValue())), - warnings - ); - } - - /** - * Generate positive test cases for a unary function operating on an {@link DataType#DATETIME}. - * This variant accepts a range of values - */ - public static void forUnaryDatetime( - List suppliers, - String expectedEvaluatorToString, - DataType expectedType, - long min, - long max, - Function expectedValue, - List warnings - ) { - unaryNumeric( - suppliers, - expectedEvaluatorToString, - dateCases(min, max), - expectedType, - n -> expectedValue.apply(Instant.ofEpochMilli(n.longValue())), - warnings - ); - } - - /** - * Generate positive test cases for a unary function operating on an {@link DataType#DATE_NANOS}. - */ - public static void forUnaryDateNanos( - List suppliers, - String expectedEvaluatorToString, - DataType expectedType, - Function expectedValue, - List warnings - ) { - unaryNumeric( - suppliers, - expectedEvaluatorToString, - dateNanosCases(), - expectedType, - n -> expectedValue.apply(DateUtils.toInstant((long) n)), - warnings - ); - } - /** * Generate positive test cases for a unary function operating on an {@link DataType#GEO_POINT}. */ @@ -1912,11 +1848,19 @@ public List multiRowData() { } /** - * @return the data value being supplied, casting unsigned longs into BigIntegers correctly + * @return the data value being supplied, casting to java objects when appropriate */ public Object getValue() { - if (type == DataType.UNSIGNED_LONG && data instanceof Long l) { - return NumericUtils.unsignedLongAsBigInteger(l); + if (data instanceof Long l) { + if (type == DataType.UNSIGNED_LONG) { + return NumericUtils.unsignedLongAsBigInteger(l); + } + if (type == DataType.DATETIME) { + return Instant.ofEpochMilli(l); + } + if (type == DataType.DATE_NANOS) { + return DateUtils.toInstant(l); + } } return data; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java index e91a5cc1ebca..8d1c2443c1bf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -32,14 +33,20 @@ public static Iterable parameters() { final String read = "Attribute[channel=0]"; final List suppliers = new ArrayList<>(); - TestCaseSupplier.forUnaryDateNanos(suppliers, read, DataType.DATE_NANOS, DateUtils::toLong, List.of()); - TestCaseSupplier.forUnaryDatetime( + TestCaseSupplier.unary( + suppliers, + read, + TestCaseSupplier.dateNanosCases(), + DataType.DATE_NANOS, + v -> DateUtils.toLong((Instant) v), + List.of() + ); + TestCaseSupplier.unary( suppliers, "ToDateNanosFromDatetimeEvaluator[field=" + read + "]", + TestCaseSupplier.dateCases(0, 
DateUtils.MAX_NANOSECOND_INSTANT.toEpochMilli()), DataType.DATE_NANOS, - 0, - DateUtils.MAX_NANOSECOND_INSTANT.toEpochMilli(), - i -> DateUtils.toNanoSeconds(i.toEpochMilli()), + i -> DateUtils.toNanoSeconds(((Instant) i).toEpochMilli()), List.of() ); TestCaseSupplier.forUnaryLong( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 2852b92ba156..43b889baf530 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -37,12 +37,20 @@ public static Iterable parameters() { final String read = "Attribute[channel=0]"; final List suppliers = new ArrayList<>(); - TestCaseSupplier.forUnaryDatetime(suppliers, read, DataType.DATETIME, Instant::toEpochMilli, emptyList()); - TestCaseSupplier.forUnaryDateNanos( + TestCaseSupplier.unary( + suppliers, + read, + TestCaseSupplier.dateCases(), + DataType.DATETIME, + v -> ((Instant) v).toEpochMilli(), + emptyList() + ); + TestCaseSupplier.unary( suppliers, "ToDatetimeFromDateNanosEvaluator[field=" + read + "]", + TestCaseSupplier.dateNanosCases(), DataType.DATETIME, - i -> DateUtils.toMilliSeconds(DateUtils.toLong(i)), + i -> DateUtils.toMilliSeconds(DateUtils.toLong((Instant) i)), emptyList() ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java index d5153019c1e4..b68306d6cac8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -49,11 +50,12 @@ public static Iterable parameters() { ); TestCaseSupplier.forUnaryBoolean(suppliers, evaluatorName.apply("Boolean"), DataType.DOUBLE, b -> b ? 
1d : 0d, List.of()); - TestCaseSupplier.forUnaryDatetime( + TestCaseSupplier.unary( suppliers, evaluatorName.apply("Long"), + TestCaseSupplier.dateCases(), DataType.DOUBLE, - i -> (double) i.toEpochMilli(), + i -> (double) ((Instant) i).toEpochMilli(), List.of() ); // random strings that don't look like a double diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java index eb81d48e0c5b..6a3f7022c9d3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -48,7 +49,7 @@ public static Iterable parameters() { evaluatorName.apply("Long"), dateCases(0, Integer.MAX_VALUE), DataType.INTEGER, - l -> ((Long) l).intValue(), + l -> Long.valueOf(((Instant) l).toEpochMilli()).intValue(), List.of() ); // datetimes that fall outside Integer's range @@ -60,7 +61,9 @@ public static Iterable parameters() { l -> null, l -> List.of( "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", - "Line -1:-1: org.elasticsearch.xpack.esql.core.InvalidArgumentException: [" + l + "] out of [integer] range" + "Line -1:-1: org.elasticsearch.xpack.esql.core.InvalidArgumentException: [" + + ((Instant) l).toEpochMilli() + + "] out of [integer] range" ) ); // random strings that don't look like an Integer diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java index 4c2cf14af41e..c7101ab730ab 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java @@ -43,8 +43,15 @@ public static Iterable parameters() { TestCaseSupplier.forUnaryBoolean(suppliers, evaluatorName.apply("Boolean"), DataType.LONG, b -> b ? 
1L : 0L, List.of()); // datetimes - TestCaseSupplier.forUnaryDatetime(suppliers, read, DataType.LONG, Instant::toEpochMilli, List.of()); - TestCaseSupplier.forUnaryDateNanos(suppliers, read, DataType.LONG, DateUtils::toLong, List.of()); + TestCaseSupplier.unary(suppliers, read, TestCaseSupplier.dateCases(), DataType.LONG, v -> ((Instant) v).toEpochMilli(), List.of()); + TestCaseSupplier.unary( + suppliers, + read, + TestCaseSupplier.dateNanosCases(), + DataType.LONG, + v -> DateUtils.toLong((Instant) v), + List.of() + ); // random strings that don't look like a long TestCaseSupplier.forUnaryStrings( suppliers, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 0b101efa073d..3b30e4b353ae 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -81,18 +82,20 @@ public static Iterable parameters() { b -> new BytesRef(b.toString()), List.of() ); - TestCaseSupplier.forUnaryDatetime( + TestCaseSupplier.unary( suppliers, "ToStringFromDatetimeEvaluator[field=" + read + "]", + TestCaseSupplier.dateCases(), DataType.KEYWORD, - i -> new BytesRef(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(i.toEpochMilli())), + i -> new BytesRef(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(((Instant) i).toEpochMilli())), List.of() ); - TestCaseSupplier.forUnaryDateNanos( + TestCaseSupplier.unary( suppliers, "ToStringFromDateNanosEvaluator[field=" + read + "]", + TestCaseSupplier.dateNanosCases(), DataType.KEYWORD, - i -> new BytesRef(DateFieldMapper.DEFAULT_DATE_TIME_NANOS_FORMATTER.formatNanos(DateUtils.toLong(i))), + i -> new BytesRef(DateFieldMapper.DEFAULT_DATE_TIME_NANOS_FORMATTER.formatNanos(DateUtils.toLong((Instant) i))), List.of() ); TestCaseSupplier.forUnaryGeoPoint( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java index d8122aa73f81..ca48bb029f22 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java @@ -19,6 +19,7 @@ import java.math.BigDecimal; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -58,11 +59,12 @@ public static Iterable parameters() { ); // datetimes - TestCaseSupplier.forUnaryDatetime( + TestCaseSupplier.unary( suppliers, evaluatorName.apply("Long"), + TestCaseSupplier.dateCases(), DataType.UNSIGNED_LONG, - instant -> BigInteger.valueOf(instant.toEpochMilli()), + instant -> BigInteger.valueOf(((Instant) instant).toEpochMilli()), List.of() ); // random strings that don't look like an unsigned_long diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java index abfb634d5f30..aa4c037e5e96 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java @@ -159,7 +159,7 @@ public static Iterable parameters() { }; BiFunction> warnings = (lhs, rhs) -> { try { - addDatesAndTemporalAmount(lhs.data(), rhs.data(), AddTests::addMillis); + addDatesAndTemporalAmount(lhs.getValue(), rhs.getValue(), AddTests::addMillis); return List.of(); } catch (ArithmeticException e) { return List.of( @@ -193,6 +193,7 @@ public static Iterable parameters() { BinaryOperator nanosResult = (lhs, rhs) -> { try { + assert (lhs instanceof Instant) || (rhs instanceof Instant); return addDatesAndTemporalAmount(lhs, rhs, AddTests::addNanos); } catch (ArithmeticException e) { return null; @@ -327,29 +328,28 @@ private static String addErrorMessageString(boolean includeOrdinal, List adder) { + private static Object addDatesAndTemporalAmount(Object lhs, Object rhs, ToLongBiFunction adder) { // this weird casting dance makes the expected value lambda symmetric - Long date; + Instant date; TemporalAmount period; - if (lhs instanceof Long) { - date = (Long) lhs; + assert (lhs instanceof Instant) || (rhs instanceof Instant); + if (lhs instanceof Instant) { + date = (Instant) lhs; period = (TemporalAmount) rhs; } else { - date = (Long) rhs; + date = (Instant) rhs; period = (TemporalAmount) lhs; } return adder.applyAsLong(date, period); } - private static long addMillis(Long date, TemporalAmount period) { + private static long addMillis(Instant date, TemporalAmount period) { return asMillis(asDateTime(date).plus(period)); } - private static long addNanos(Long date, TemporalAmount period) { + private static long addNanos(Instant date, TemporalAmount period) { return DateUtils.toLong( - Instant.from( - ZonedDateTime.ofInstant(DateUtils.toInstant(date), org.elasticsearch.xpack.esql.core.util.DateUtils.UTC).plus(period) - ) + Instant.from(ZonedDateTime.ofInstant(date, org.elasticsearch.xpack.esql.core.util.DateUtils.UTC).plus(period)) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java index 1338299b3a12..bce5dea30f84 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java @@ -277,25 +277,23 @@ protected Expression build(Source source, List args) { return new Sub(source, args.get(0), args.get(1)); } - private static Object subtractDatesAndTemporalAmount(Object lhs, Object rhs, ToLongBiFunction subtract) { + private static Object subtractDatesAndTemporalAmount(Object lhs, Object rhs, ToLongBiFunction subtract) { // this weird casting dance makes the expected value lambda symmetric - Long date; + Instant date; TemporalAmount period; - if (lhs instanceof Long) { - date = (Long) lhs; + if (lhs instanceof Instant) { + date = (Instant) lhs; period = (TemporalAmount) rhs; } else { - 
date = (Long) rhs; + date = (Instant) rhs; period = (TemporalAmount) lhs; } return subtract.applyAsLong(date, period); } - private static long subtractNanos(Long date, TemporalAmount period) { + private static long subtractNanos(Instant date, TemporalAmount period) { return DateUtils.toLong( - Instant.from( - ZonedDateTime.ofInstant(DateUtils.toInstant(date), org.elasticsearch.xpack.esql.core.util.DateUtils.UTC).minus(period) - ) + Instant.from(ZonedDateTime.ofInstant(date, org.elasticsearch.xpack.esql.core.util.DateUtils.UTC).minus(period)) ); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java index a4f1a19e135e..395a574028f6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -106,33 +107,19 @@ public static Iterable parameters() { ) ); // Datetime - suppliers.addAll( - TestCaseSupplier.forBinaryNotCasting( - "GreaterThanOrEqualLongsEvaluator", - "lhs", - "rhs", - (l, r) -> ((Number) l).longValue() >= ((Number) r).longValue(), - DataType.BOOLEAN, - TestCaseSupplier.dateCases(), - TestCaseSupplier.dateCases(), - List.of(), - false - ) - ); + suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("GreaterThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> { + if (lhs instanceof Instant l && rhs instanceof Instant r) { + return l.isAfter(r) || l.equals(r); + } + throw new UnsupportedOperationException("Got some weird types"); + }, DataType.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), List.of(), false)); - suppliers.addAll( - TestCaseSupplier.forBinaryNotCasting( - "GreaterThanOrEqualLongsEvaluator", - "lhs", - "rhs", - (l, r) -> ((Number) l).longValue() >= ((Number) r).longValue(), - DataType.BOOLEAN, - TestCaseSupplier.dateNanosCases(), - TestCaseSupplier.dateNanosCases(), - List.of(), - false - ) - ); + suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("GreaterThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> { + if (lhs instanceof Instant l && rhs instanceof Instant r) { + return l.isAfter(r) || l.equals(r); + } + throw new UnsupportedOperationException("Got some weird types"); + }, DataType.BOOLEAN, TestCaseSupplier.dateNanosCases(), TestCaseSupplier.dateNanosCases(), List.of(), false)); suppliers.addAll( TestCaseSupplier.stringCases( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java index 86a4676e3500..b56ecd7392ba 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java @@ -19,6 +19,7 @@ import 
org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -111,7 +112,7 @@ public static Iterable parameters() { "GreaterThanLongsEvaluator", "lhs", "rhs", - (l, r) -> ((Number) l).longValue() > ((Number) r).longValue(), + (l, r) -> ((Instant) l).isAfter((Instant) r), DataType.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), @@ -125,7 +126,7 @@ public static Iterable parameters() { "GreaterThanLongsEvaluator", "lhs", "rhs", - (l, r) -> ((Number) l).longValue() > ((Number) r).longValue(), + (l, r) -> ((Instant) l).isAfter((Instant) r), DataType.BOOLEAN, TestCaseSupplier.dateNanosCases(), TestCaseSupplier.dateNanosCases(), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java index 5793f26ecd44..60062f071c18 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -106,33 +107,19 @@ public static Iterable parameters() { ) ); // Datetime - suppliers.addAll( - TestCaseSupplier.forBinaryNotCasting( - "LessThanOrEqualLongsEvaluator", - "lhs", - "rhs", - (l, r) -> ((Number) l).longValue() <= ((Number) r).longValue(), - DataType.BOOLEAN, - TestCaseSupplier.dateCases(), - TestCaseSupplier.dateCases(), - List.of(), - false - ) - ); + suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("LessThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> { + if (lhs instanceof Instant l && rhs instanceof Instant r) { + return l.isBefore(r) || l.equals(r); + } + throw new UnsupportedOperationException("Got some weird types"); + }, DataType.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), List.of(), false)); - suppliers.addAll( - TestCaseSupplier.forBinaryNotCasting( - "LessThanOrEqualLongsEvaluator", - "lhs", - "rhs", - (l, r) -> ((Number) l).longValue() <= ((Number) r).longValue(), - DataType.BOOLEAN, - TestCaseSupplier.dateNanosCases(), - TestCaseSupplier.dateNanosCases(), - List.of(), - false - ) - ); + suppliers.addAll(TestCaseSupplier.forBinaryNotCasting("LessThanOrEqualLongsEvaluator", "lhs", "rhs", (lhs, rhs) -> { + if (lhs instanceof Instant l && rhs instanceof Instant r) { + return l.isBefore(r) || l.equals(r); + } + throw new UnsupportedOperationException("Got some weird types"); + }, DataType.BOOLEAN, TestCaseSupplier.dateNanosCases(), TestCaseSupplier.dateNanosCases(), List.of(), false)); suppliers.addAll( TestCaseSupplier.stringCases( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java index 0d114b496492..30812cf8e538 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import java.math.BigInteger; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; @@ -111,7 +112,7 @@ public static Iterable parameters() { "LessThanLongsEvaluator", "lhs", "rhs", - (l, r) -> ((Number) l).longValue() < ((Number) r).longValue(), + (l, r) -> ((Instant) l).isBefore((Instant) r), DataType.BOOLEAN, TestCaseSupplier.dateNanosCases(), TestCaseSupplier.dateNanosCases(), @@ -125,7 +126,7 @@ public static Iterable parameters() { "LessThanLongsEvaluator", "lhs", "rhs", - (l, r) -> ((Number) l).longValue() < ((Number) r).longValue(), + (l, r) -> ((Instant) l).isBefore((Instant) r), DataType.BOOLEAN, TestCaseSupplier.dateCases(), TestCaseSupplier.dateCases(), From d56e0a904c00a490a231b416589755d271984f2f Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 5 Dec 2024 17:48:37 +0100 Subject: [PATCH 39/45] Update synthetic source cutoff date (#118069) (#118087) Updating the default cutoff date from 2025-02-01T00:00:00Z to 2025-02-04T00:00:00Z. --- .../xpack/logsdb/SyntheticSourceLicenseService.java | 2 +- .../xpack/logsdb/LegacyLicenceIntegrationTests.java | 3 ++- ...SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java index 26a672fb1c90..e629f9b3998b 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceLicenseService.java @@ -29,7 +29,7 @@ final class SyntheticSourceLicenseService { // You can only override this property if you received explicit approval from Elastic. static final String CUTOFF_DATE_SYS_PROP_NAME = "es.mapping.synthetic_source_fallback_to_stored_source.cutoff_date_restricted_override"; private static final Logger LOGGER = LogManager.getLogger(SyntheticSourceLicenseService.class); - static final long DEFAULT_CUTOFF_DATE = LocalDateTime.of(2025, 2, 1, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + static final long DEFAULT_CUTOFF_DATE = LocalDateTime.of(2025, 2, 4, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); /** * A setting that determines whether source mode should always be stored source. Regardless of licence.
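The cutoff constants in this patch are plain JDK time arithmetic. As a point of reference, the standalone sketch below (the class name is invented for illustration) reproduces the values encoded by the hunk above and the test hunks that follow:

```java
import java.time.LocalDateTime;
import java.time.ZoneOffset;

public class CutoffDateSketch {
    public static void main(String[] args) {
        // New default cutoff from the hunk above: 2025-02-04T00:00:00Z as epoch milliseconds.
        long cutoff = LocalDateTime.of(2025, 2, 4, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
        // One day past the cutoff, as used by the updated "past cutoff" license tests below.
        long dayAfter = LocalDateTime.of(2025, 2, 5, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli();
        System.out.println(cutoff);   // 1738627200000
        System.out.println(dayAfter); // 1738713600000
    }
}
```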
diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java index 890bc464a257..f8f307b572f3 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/LegacyLicenceIntegrationTests.java @@ -69,7 +69,8 @@ public void testSyntheticSourceUsageWithLegacyLicense() { } public void testSyntheticSourceUsageWithLegacyLicensePastCutoff() throws Exception { - long startPastCutoff = LocalDateTime.of(2025, 11, 12, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + // One day after default cutoff date + long startPastCutoff = LocalDateTime.of(2025, 2, 5, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); putLicense(createGoldOrPlatinumLicense(startPastCutoff)); ensureGreen(); diff --git a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java index eda0d8786874..c871a7d0216e 100644 --- a/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java +++ b/x-pack/plugin/logsdb/src/test/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProviderLegacyLicenseTests.java @@ -98,7 +98,7 @@ public void testGetAdditionalIndexSettingsTsdb() throws IOException { } public void testGetAdditionalIndexSettingsTsdbAfterCutoffDate() throws Exception { - long start = LocalDateTime.of(2025, 2, 2, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); + long start = LocalDateTime.of(2025, 2, 5, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); License license = createGoldOrPlatinumLicense(start); long time = LocalDateTime.of(2024, 12, 31, 0, 0).toInstant(ZoneOffset.UTC).toEpochMilli(); var licenseState = new XPackLicenseState(() -> time, new XPackLicenseStatus(license.operationMode(), true, null)); From bc4d351ade259c2fac5454ded1013c78e2988658 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 5 Dec 2024 16:55:22 +0000 Subject: [PATCH 40/45] Speed up `testRespondAfterClose` (#117969) (#118066) Since #106511 this test takes 30s because it waits for the client to time out and close the connection before allowing the transport to fully shut down. This commit reinstates the previous behaviour of closing connections quickly, and tests both client-triggered and server-triggered connection closure. 
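The gist of the fix is a settings toggle, shown concretely in the diff below. As a hedged summary — the helper class and method names here are invented, the builder call and the `SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD` constant are taken from the diff, and the import paths are assumed from the surrounding codebase — the two configurations the reworked test switches between look like this:

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;

import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD;

final class ShutdownModeSettings {
    // Empty settings: the server keeps lingering connections open on close, so only
    // the client can end the in-flight request (the "client cancel" case).
    static Settings clientCancel() {
        return Settings.EMPTY;
    }

    // A 1ms grace period: the server force-closes lingering connections almost
    // immediately during shutdown (the "server cancel" case).
    static Settings serverCancel() {
        return Settings.builder()
            .put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), TimeValue.timeValueMillis(1))
            .build();
    }
}
```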
--- .../Netty4HttpServerTransportTests.java | 35 ++++++++++++++++--- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 3fd5cc44a340..1d39b993cef9 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -40,6 +40,7 @@ import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; +import org.apache.http.ConnectionClosedException; import org.apache.http.HttpHost; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; @@ -48,6 +49,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.IncrementalBulkService; import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.client.Request; import org.elasticsearch.client.RestClient; @@ -100,6 +102,7 @@ import java.util.Collections; import java.util.List; import java.util.Set; +import java.util.concurrent.CancellationException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -110,6 +113,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED; @@ -1039,8 +1043,16 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th } } - public void testRespondAfterClose() throws Exception { - final String url = "/thing"; + public void testRespondAfterServiceCloseWithClientCancel() throws Exception { + runRespondAfterServiceCloseTest(true); + } + + public void testRespondAfterServiceCloseWithServerCancel() throws Exception { + runRespondAfterServiceCloseTest(false); + } + + private void runRespondAfterServiceCloseTest(boolean clientCancel) throws Exception { + final String url = "/" + randomIdentifier(); final CountDownLatch responseReleasedLatch = new CountDownLatch(1); final SubscribableListener transportClosedFuture = new SubscribableListener<>(); final CountDownLatch handlingRequestLatch = new CountDownLatch(1); @@ -1066,7 +1078,9 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th try ( Netty4HttpServerTransport transport = new Netty4HttpServerTransport( - Settings.EMPTY, + clientCancel + ? 
Settings.EMPTY + : Settings.builder().put(SETTING_HTTP_SERVER_SHUTDOWN_GRACE_PERIOD.getKey(), TimeValue.timeValueMillis(1)).build(), networkService, threadPool, xContentRegistry(), @@ -1082,11 +1096,24 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th transport.start(); final var address = randomFrom(transport.boundAddress().boundAddresses()).address(); try (var client = RestClient.builder(new HttpHost(address.getAddress(), address.getPort())).build()) { - client.performRequestAsync(new Request("GET", url), ActionTestUtils.wrapAsRestResponseListener(ActionListener.noop())); + final var responseExceptionFuture = new PlainActionFuture(); + final var cancellable = client.performRequestAsync( + new Request("GET", url), + ActionTestUtils.wrapAsRestResponseListener(ActionTestUtils.assertNoSuccessListener(responseExceptionFuture::onResponse)) + ); safeAwait(handlingRequestLatch); + if (clientCancel) { + threadPool.generic().execute(cancellable::cancel); + } transport.close(); transportClosedFuture.onResponse(null); safeAwait(responseReleasedLatch); + final var responseException = safeGet(responseExceptionFuture); + if (clientCancel) { + assertThat(responseException, instanceOf(CancellationException.class)); + } else { + assertThat(responseException, instanceOf(ConnectionClosedException.class)); + } } } } From 379a3a80972cd49e85323a1a3c290f88e8a3024f Mon Sep 17 00:00:00 2001 From: elasticsearchmachine <58790826+elasticsearchmachine@users.noreply.github.com> Date: Thu, 21 Nov 2024 09:30:15 +1100 Subject: [PATCH 41/45] Add 8.17 to branches.json --- branches.json | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/branches.json b/branches.json index 1d860501cbc8..0e23a795664d 100644 --- a/branches.json +++ b/branches.json @@ -4,6 +4,15 @@ { "branch": "main" }, + { + "branch": "8.16" + }, + { + "branch": "8.17" + }, + { + "branch": "8.x" + }, { "branch": "8.15" }, From f0b2a1f53688f26ba9191e6c3b84caf6ac601a50 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Thu, 5 Dec 2024 17:42:51 +0000 Subject: [PATCH 42/45] Add a new `offset_source` field to store offsets referencing substrings of another field. (#118017) (#118088) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This field is primarily designed for use with the `semantic_text` field, where it enables storing offsets that point to substrings of the field used to generate its underlying chunks. To prevent external usage, the field is intentionally undocumented, with detailed javadocs explaining its specific purpose and limitations. I couldn’t find a way to fully block external usage, but skipping the docs should keep it mostly out of sight for now. 
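The read/write contract of the new field is easiest to see end to end. The sketch below is hypothetical — the field names `chunks.offsets` and `body` are invented — but it mirrors the usage exercised by `OffsetSourceFieldTests` later in this patch: it indexes a single offset entry and reads it back through the postings-based loader.

```java
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.elasticsearch.xpack.inference.mapper.OffsetSourceField;
import org.elasticsearch.xpack.inference.mapper.OffsetSourceFieldMapper;

public class OffsetSourceSketch {
    public static void main(String[] args) throws Exception {
        try (var dir = new ByteBuffersDirectory(); var writer = new IndexWriter(dir, new IndexWriterConfig())) {
            // One document, one offset entry: UTF-16 units 0..128 of the source field "body".
            Document doc = new Document();
            doc.add(new OffsetSourceField("chunks.offsets", "body", 0, 128));
            writer.addDocument(doc);
            writer.commit();

            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                var leaf = reader.leaves().get(0).reader();
                // The loader walks the field's terms and exposes per-document offsets.
                var loader = OffsetSourceField.loader(leaf.terms("chunks.offsets"));
                OffsetSourceFieldMapper.OffsetSource offset = loader.advanceTo(0);
                System.out.println(offset); // OffsetSource[field=body, start=0, end=128]
            }
        }
    }
}
```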
--- .../xpack/inference/InferencePlugin.java | 8 +- .../inference/mapper/OffsetSourceField.java | 145 ++++++++++ .../mapper/OffsetSourceFieldMapper.java | 253 ++++++++++++++++++ .../mapper/OffsetSourceFieldMapperTests.java | 216 +++++++++++++++ .../mapper/OffsetSourceFieldTests.java | 72 +++++ .../mapper/OffsetSourceFieldTypeTests.java | 44 +++ 6 files changed, 737 insertions(+), 1 deletion(-) create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java create mode 100644 x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapperTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTests.java create mode 100644 x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTypeTests.java diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index b3ab421e71e9..88a46a3277b4 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -66,6 +66,7 @@ import org.elasticsearch.xpack.inference.external.http.sender.HttpRequestSender; import org.elasticsearch.xpack.inference.external.http.sender.RequestExecutorServiceSettings; import org.elasticsearch.xpack.inference.logging.ThrottlerManager; +import org.elasticsearch.xpack.inference.mapper.OffsetSourceFieldMapper; import org.elasticsearch.xpack.inference.mapper.SemanticTextFieldMapper; import org.elasticsearch.xpack.inference.queries.SemanticQueryBuilder; import org.elasticsearch.xpack.inference.rank.random.RandomRankBuilder; @@ -365,7 +366,12 @@ public void close() { @Override public Map getMappers() { - return Map.of(SemanticTextFieldMapper.CONTENT_TYPE, SemanticTextFieldMapper.PARSER); + return Map.of( + SemanticTextFieldMapper.CONTENT_TYPE, + SemanticTextFieldMapper.PARSER, + OffsetSourceFieldMapper.CONTENT_TYPE, + OffsetSourceFieldMapper.PARSER + ); } @Override diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java new file mode 100644 index 000000000000..d8339f1004da --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceField.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mapper; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; +import org.apache.lucene.search.DocIdSetIterator; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Represents a {@link Field} that stores a {@link Term} along with its start and end offsets. + * Note: The {@link Charset} used to calculate these offsets is not associated with this field. + * It is the responsibility of the consumer to handle the appropriate {@link Charset}. + */ +public final class OffsetSourceField extends Field { + private static final FieldType FIELD_TYPE = new FieldType(); + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + } + + private int startOffset; + private int endOffset; + + public OffsetSourceField(String fieldName, String sourceFieldName, int startOffset, int endOffset) { + super(fieldName, sourceFieldName, FIELD_TYPE); + this.startOffset = startOffset; + this.endOffset = endOffset; + } + + public void setValues(String fieldName, int startOffset, int endOffset) { + this.fieldsData = fieldName; + this.startOffset = startOffset; + this.endOffset = endOffset; + } + + @Override + public TokenStream tokenStream(Analyzer analyzer, TokenStream reuse) { + OffsetTokenStream stream; + if (reuse instanceof OffsetTokenStream) { + stream = (OffsetTokenStream) reuse; + } else { + stream = new OffsetTokenStream(); + } + + stream.setValues((String) fieldsData, startOffset, endOffset); + return stream; + } + + public static OffsetSourceLoader loader(Terms terms) throws IOException { + return new OffsetSourceLoader(terms); + } + + private static final class OffsetTokenStream extends TokenStream { + private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class); + private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class); + private boolean used = true; + private String term = null; + private int startOffset = 0; + private int endOffset = 0; + + private OffsetTokenStream() {} + + /** Sets the values */ + void setValues(String term, int startOffset, int endOffset) { + this.term = term; + this.startOffset = startOffset; + this.endOffset = endOffset; + } + + @Override + public boolean incrementToken() { + if (used) { + return false; + } + clearAttributes(); + termAttribute.append(term); + offsetAttribute.setOffset(startOffset, endOffset); + used = true; + return true; + } + + @Override + public void reset() { + used = false; + } + + @Override + public void close() { + term = null; + } + } + + public static class OffsetSourceLoader { + private final Map postingsEnums = new LinkedHashMap<>(); + + private OffsetSourceLoader(Terms terms) throws IOException { + var termsEnum = terms.iterator(); + while (termsEnum.next() != null) { + var postings = termsEnum.postings(null, PostingsEnum.OFFSETS); + if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { + postingsEnums.put(termsEnum.term().utf8ToString(), 
postings); + } + } + } + + public OffsetSourceFieldMapper.OffsetSource advanceTo(int doc) throws IOException { + for (var it = postingsEnums.entrySet().iterator(); it.hasNext();) { + var entry = it.next(); + var postings = entry.getValue(); + if (postings.docID() < doc) { + if (postings.advance(doc) == DocIdSetIterator.NO_MORE_DOCS) { + it.remove(); + continue; + } + } + if (postings.docID() == doc) { + assert postings.freq() == 1; + postings.nextPosition(); + return new OffsetSourceFieldMapper.OffsetSource(entry.getKey(), postings.startOffset(), postings.endOffset()); + } + } + return null; + } + } +} diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java new file mode 100644 index 000000000000..e612076f1aaf --- /dev/null +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapper.java @@ -0,0 +1,253 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.inference.mapper; + +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Query; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.mapper.DocumentParserContext; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.TextSearchInfo; +import org.elasticsearch.index.mapper.ValueFetcher; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.fetch.StoredFieldsSpec; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * A {@link FieldMapper} that maps a field name to its start and end offsets. + * The {@link CharsetFormat} used to compute the offsets is specified via the charset parameter. + * Currently, only {@link CharsetFormat#UTF_16} is supported, aligning with Java's {@code String} charset + * for simpler internal usage and integration. + * + * Each document can store at most one value in this field. + * + * Note: This mapper is not yet documented and is intended exclusively for internal use by + * {@link SemanticTextFieldMapper}. If exposing this mapper directly to users becomes necessary, + * extending charset compatibility should be considered, as the current default (and sole supported charset) + * was chosen for ease of Java integration. 
+ */ +public class OffsetSourceFieldMapper extends FieldMapper { + public static final String CONTENT_TYPE = "offset_source"; + + private static final String SOURCE_NAME_FIELD = "field"; + private static final String START_OFFSET_FIELD = "start"; + private static final String END_OFFSET_FIELD = "end"; + + public record OffsetSource(String field, int start, int end) implements ToXContentObject { + public OffsetSource { + if (start < 0 || end < 0) { + throw new IllegalArgumentException("Illegal offsets, expected positive numbers, got: " + start + ":" + end); + } + if (start > end) { + throw new IllegalArgumentException("Illegal offsets, expected start < end, got: " + start + " > " + end); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SOURCE_NAME_FIELD, field); + builder.field(START_OFFSET_FIELD, start); + builder.field(END_OFFSET_FIELD, end); + return builder.endObject(); + } + } + + private static final ConstructingObjectParser OFFSET_SOURCE_PARSER = new ConstructingObjectParser<>( + CONTENT_TYPE, + true, + args -> new OffsetSource((String) args[0], (int) args[1], (int) args[2]) + ); + + static { + OFFSET_SOURCE_PARSER.declareString(constructorArg(), new ParseField(SOURCE_NAME_FIELD)); + OFFSET_SOURCE_PARSER.declareInt(constructorArg(), new ParseField(START_OFFSET_FIELD)); + OFFSET_SOURCE_PARSER.declareInt(constructorArg(), new ParseField(END_OFFSET_FIELD)); + } + + public enum CharsetFormat { + UTF_16(StandardCharsets.UTF_16); + + private Charset charSet; + + CharsetFormat(Charset charSet) { + this.charSet = charSet; + } + } + + public static class Builder extends FieldMapper.Builder { + private final Parameter charset = Parameter.enumParam( + "charset", + false, + i -> CharsetFormat.UTF_16, + CharsetFormat.UTF_16, + CharsetFormat.class + ); + private final Parameter> meta = Parameter.metaParam(); + + public Builder(String name) { + super(name); + } + + @Override + protected Parameter[] getParameters() { + return new Parameter[] { meta, charset }; + } + + @Override + public OffsetSourceFieldMapper build(MapperBuilderContext context) { + return new OffsetSourceFieldMapper( + leafName(), + new OffsetSourceFieldType(context.buildFullName(leafName()), charset.get(), meta.getValue()), + builderParams(this, context) + ); + } + } + + public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n)); + + public static final class OffsetSourceFieldType extends MappedFieldType { + private final CharsetFormat charset; + + public OffsetSourceFieldType(String name, CharsetFormat charset, Map meta) { + super(name, true, false, false, TextSearchInfo.NONE, meta); + this.charset = charset; + } + + public Charset getCharset() { + return charset.charSet; + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public boolean fieldHasValue(FieldInfos fieldInfos) { + return fieldInfos.fieldInfo(name()) != null; + } + + @Override + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + throw new IllegalArgumentException("[offset_source] fields do not support sorting, scripting or aggregating"); + } + + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new ValueFetcher() { + OffsetSourceField.OffsetSourceLoader offsetLoader; + + @Override + public void setNextReader(LeafReaderContext context) { + try { + var terms = context.reader().terms(name()); + offsetLoader = terms 
!= null ? OffsetSourceField.loader(terms) : null; + } catch (IOException exc) { + throw new UncheckedIOException(exc); + } + } + + @Override + public List fetchValues(Source source, int doc, List ignoredValues) throws IOException { + var offsetSource = offsetLoader != null ? offsetLoader.advanceTo(doc) : null; + return offsetSource != null ? List.of(offsetSource) : null; + } + + @Override + public StoredFieldsSpec storedFieldsSpec() { + return StoredFieldsSpec.NO_REQUIREMENTS; + } + }; + } + + @Override + public Query termQuery(Object value, SearchExecutionContext context) { + throw new IllegalArgumentException("Queries on [offset_source] fields are not supported"); + } + + @Override + public boolean isSearchable() { + return false; + } + } + + /** + * @param simpleName the leaf name of the mapper + * @param mappedFieldType + * @param params initialization params for this field mapper + */ + protected OffsetSourceFieldMapper(String simpleName, MappedFieldType mappedFieldType, BuilderParams params) { + super(simpleName, mappedFieldType, params); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected boolean supportsParsingObject() { + return true; + } + + @Override + protected void parseCreateField(DocumentParserContext context) throws IOException { + var parser = context.parser(); + if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + // skip + return; + } + + if (context.doc().getByKey(fullPath()) != null) { + throw new IllegalArgumentException( + "[offset_source] fields do not support indexing multiple values for the same field [" + + fullPath() + + "] in the same document" + ); + } + + // make sure that we don't expand dots in field names while parsing + boolean isWithinLeafObject = context.path().isWithinLeafObject(); + context.path().setWithinLeafObject(true); + try { + var offsetSource = OFFSET_SOURCE_PARSER.parse(parser, null); + context.doc() + .addWithKey( + fieldType().name(), + new OffsetSourceField(fullPath(), offsetSource.field, offsetSource.start, offsetSource.end) + ); + context.addToFieldNames(fieldType().name()); + } finally { + context.path().setWithinLeafObject(isWithinLeafObject); + } + } + + @Override + public FieldMapper.Builder getMergeBuilder() { + return new Builder(leafName()).init(this); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapperTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapperTests.java new file mode 100644 index 000000000000..40140d6da5eb --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldMapperTests.java @@ -0,0 +1,216 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentParsingException; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.ValueFetcher; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.search.lookup.SourceProvider; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.inference.InferencePlugin; +import org.junit.AssumptionViolatedException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class OffsetSourceFieldMapperTests extends MapperTestCase { + @Override + protected Collection getPlugins() { + return List.of(new InferencePlugin(Settings.EMPTY)); + } + + @Override + protected void minimalMapping(XContentBuilder b) throws IOException { + b.field("type", "offset_source"); + } + + @Override + protected Object getSampleValueForDocument() { + return getSampleObjectForDocument(); + } + + @Override + protected Object getSampleObjectForDocument() { + return Map.of("field", "foo", "start", 100, "end", 300); + } + + @Override + protected Object generateRandomInputValue(MappedFieldType ft) { + return new OffsetSourceFieldMapper.OffsetSource("field", randomIntBetween(0, 100), randomIntBetween(101, 1000)); + } + + @Override + protected IngestScriptSupport ingestScriptSupport() { + throw new AssumptionViolatedException("not supported"); + } + + @Override + protected void registerParameters(ParameterChecker checker) throws IOException {} + + @Override + protected void assertSearchable(MappedFieldType fieldType) { + assertFalse(fieldType.isSearchable()); + } + + @Override + protected boolean supportsStoredFields() { + return false; + } + + @Override + protected boolean supportsEmptyInputArray() { + return false; + } + + @Override + protected boolean supportsCopyTo() { + return false; + } + + @Override + protected boolean supportsIgnoreMalformed() { + return false; + } + + @Override + protected SyntheticSourceSupport syntheticSourceSupport(boolean ignoreMalformed) { + return new SyntheticSourceSupport() { + @Override + public SyntheticSourceExample example(int maxValues) { + return new SyntheticSourceExample(getSampleValueForDocument(), getSampleValueForDocument(), null, b -> minimalMapping(b)); + } + + @Override + public List invalidExample() { + return List.of(); + } + }; + } + + @Override + public void testSyntheticSourceKeepArrays() { + // This mapper doesn't support multiple values (array of objects). 
+ } + + public void testDefaults() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse( + source(b -> b.startObject("field").field("field", "foo").field("start", 0).field("end", 128).endObject()) + ); + List fields = doc1.rootDoc().getFields("field"); + assertEquals(1, fields.size()); + assertThat(fields.get(0), instanceOf(OffsetSourceField.class)); + OffsetSourceField offsetField1 = (OffsetSourceField) fields.get(0); + + ParsedDocument doc2 = mapper.parse( + source(b -> b.startObject("field").field("field", "bar").field("start", 128).field("end", 512).endObject()) + ); + OffsetSourceField offsetField2 = (OffsetSourceField) doc2.rootDoc().getFields("field").get(0); + + assertTokenStream(offsetField1.tokenStream(null, null), "foo", 0, 128); + assertTokenStream(offsetField2.tokenStream(null, null), "bar", 128, 512); + } + + private void assertTokenStream(TokenStream tk, String expectedTerm, int expectedStartOffset, int expectedEndOffset) throws IOException { + CharTermAttribute termAttribute = tk.addAttribute(CharTermAttribute.class); + OffsetAttribute offsetAttribute = tk.addAttribute(OffsetAttribute.class); + tk.reset(); + assertTrue(tk.incrementToken()); + assertThat(new String(termAttribute.buffer(), 0, termAttribute.length()), equalTo(expectedTerm)); + assertThat(offsetAttribute.startOffset(), equalTo(expectedStartOffset)); + assertThat(offsetAttribute.endOffset(), equalTo(expectedEndOffset)); + assertFalse(tk.incrementToken()); + } + + @Override + protected void assertFetch(MapperService mapperService, String field, Object value, String format) throws IOException { + MappedFieldType ft = mapperService.fieldType(field); + MappedFieldType.FielddataOperation fdt = MappedFieldType.FielddataOperation.SEARCH; + SourceToParse source = source(b -> b.field(ft.name(), value)); + SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); + when(searchExecutionContext.isSourceEnabled()).thenReturn(true); + when(searchExecutionContext.sourcePath(field)).thenReturn(Set.of(field)); + when(searchExecutionContext.getForField(ft, fdt)).thenAnswer(inv -> fieldDataLookup(mapperService).apply(ft, () -> { + throw new UnsupportedOperationException(); + }, fdt)); + ValueFetcher nativeFetcher = ft.valueFetcher(searchExecutionContext, format); + ParsedDocument doc = mapperService.documentMapper().parse(source); + withLuceneIndex(mapperService, iw -> iw.addDocuments(doc.docs()), ir -> { + Source s = SourceProvider.fromStoredFields().getSource(ir.leaves().get(0), 0); + nativeFetcher.setNextReader(ir.leaves().get(0)); + List fromNative = nativeFetcher.fetchValues(s, 0, new ArrayList<>()); + assertThat(fromNative.size(), equalTo(1)); + assertThat("fetching " + value, fromNative.get(0), equalTo(value)); + }); + } + + @Override + protected void assertFetchMany(MapperService mapperService, String field, Object value, String format, int count) throws IOException { + assumeFalse("[offset_source] currently don't support multiple values in the same field", false); + } + + public void testInvalidCharset() { + var exc = expectThrows(Exception.class, () -> createDocumentMapper(mapping(b -> { + b.startObject("field").field("type", "offset_source").field("charset", "utf_8").endObject(); + }))); + assertThat(exc.getCause().getMessage(), containsString("Unknown value [utf_8] for field [charset]")); + } + + public 
void testRejectMultiValuedFields() throws IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("field").field("type", "offset_source").endObject(); })); + + DocumentParsingException exc = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> { + b.startArray("field"); + { + b.startObject().field("field", "bar1").field("start", 128).field("end", 512).endObject(); + b.startObject().field("field", "bar2").field("start", 128).field("end", 512).endObject(); + } + b.endArray(); + }))); + assertThat(exc.getCause().getMessage(), containsString("[offset_source] fields do not support indexing multiple values")); + } + + public void testInvalidOffsets() throws IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { b.startObject("field").field("type", "offset_source").endObject(); })); + + DocumentParsingException exc = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> { + b.startArray("field"); + { + b.startObject().field("field", "bar1").field("start", -1).field("end", 512).endObject(); + } + b.endArray(); + }))); + assertThat(exc.getCause().getCause().getCause().getMessage(), containsString("Illegal offsets")); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTests.java new file mode 100644 index 000000000000..4d86263e446f --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mapper; + +import org.apache.lucene.document.Document; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.test.ESTestCase; + +public class OffsetSourceFieldTests extends ESTestCase { + public void testBasics() throws Exception { + Directory dir = newDirectory(); + RandomIndexWriter writer = new RandomIndexWriter( + random(), + dir, + newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean())) + ); + Document doc = new Document(); + OffsetSourceField field1 = new OffsetSourceField("field1", "foo", 1, 10); + doc.add(field1); + writer.addDocument(doc); + + field1.setValues("bar", 10, 128); + writer.addDocument(doc); + + writer.addDocument(new Document()); // gap + + field1.setValues("foo", 50, 256); + writer.addDocument(doc); + + writer.addDocument(new Document()); // double gap + writer.addDocument(new Document()); + + field1.setValues("baz", 32, 512); + writer.addDocument(doc); + + writer.forceMerge(1); + var reader = writer.getReader(); + writer.close(); + + var searcher = newSearcher(reader); + var context = searcher.getIndexReader().leaves().get(0); + + var terms = context.reader().terms("field1"); + assertNotNull(terms); + OffsetSourceField.OffsetSourceLoader loader = OffsetSourceField.loader(terms); + + var offset = loader.advanceTo(0); + assertEquals(new OffsetSourceFieldMapper.OffsetSource("foo", 1, 10), offset); + + offset = loader.advanceTo(1); + assertEquals(new OffsetSourceFieldMapper.OffsetSource("bar", 10, 128), offset); + + assertNull(loader.advanceTo(2)); + + offset = loader.advanceTo(3); + assertEquals(new OffsetSourceFieldMapper.OffsetSource("foo", 50, 256), offset); + + offset = loader.advanceTo(6); + assertEquals(new OffsetSourceFieldMapper.OffsetSource("baz", 32, 512), offset); + + assertNull(loader.advanceTo(189)); + + IOUtils.close(reader, dir); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTypeTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTypeTests.java new file mode 100644 index 000000000000..ccb696515a06 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/mapper/OffsetSourceFieldTypeTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.mapper; + +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.elasticsearch.index.mapper.FieldTypeTestCase; +import org.elasticsearch.index.mapper.MappedFieldType; + +import java.util.Collections; + +public class OffsetSourceFieldTypeTests extends FieldTypeTestCase { + public void testIsNotAggregatable() { + MappedFieldType fieldType = getMappedFieldType(); + assertFalse(fieldType.isAggregatable()); + } + + @Override + public void testFieldHasValue() { + MappedFieldType fieldType = getMappedFieldType(); + FieldInfos fieldInfos = new FieldInfos(new FieldInfo[] { getFieldInfoWithName(fieldType.name()) }); + assertTrue(fieldType.fieldHasValue(fieldInfos)); + } + + @Override + public void testFieldHasValueWithEmptyFieldInfos() { + MappedFieldType fieldType = getMappedFieldType(); + assertFalse(fieldType.fieldHasValue(FieldInfos.EMPTY)); + } + + @Override + public MappedFieldType getMappedFieldType() { + return new OffsetSourceFieldMapper.OffsetSourceFieldType( + "field", + OffsetSourceFieldMapper.CharsetFormat.UTF_16, + Collections.emptyMap() + ); + } +} From d86e6a0a6e207b677f21e552019ce7b2c3f947d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Thu, 5 Dec 2024 19:05:31 +0100 Subject: [PATCH 43/45] Close URLClassLoaders to make Windows happy deleting the temp test jar files (#118083) (#118097) --- .../bootstrap/PluginsResolverTests.java | 67 ++++++++++--------- 1 file changed, 35 insertions(+), 32 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java index 331f0f7ad13e..798b576500d7 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/PluginsResolverTests.java @@ -136,25 +136,28 @@ public void testResolveMultipleNonModularPlugins() throws IOException, ClassNotF Path jar1 = createNonModularPluginJar(home, "plugin1", "p", "A"); Path jar2 = createNonModularPluginJar(home, "plugin2", "q", "B"); - var loader1 = createClassLoader(jar1); - var loader2 = createClassLoader(jar2); - - PluginBundle bundle1 = createMockBundle("plugin1", null, "p.A"); - PluginBundle bundle2 = createMockBundle("plugin2", null, "q.B"); - PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); - - when(mockPluginsLoader.pluginLayers()).thenReturn( - Stream.of(new TestPluginLayer(bundle1, loader1, ModuleLayer.boot()), new TestPluginLayer(bundle2, loader2, ModuleLayer.boot())) - ); - PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); - - var testClass1 = loader1.loadClass("p.A"); - var testClass2 = loader2.loadClass("q.B"); - var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1); - var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2); - - assertEquals("plugin1", resolvedPluginName1); - assertEquals("plugin2", resolvedPluginName2); + try (var loader1 = createClassLoader(jar1); var loader2 = createClassLoader(jar2)) { + + PluginBundle bundle1 = createMockBundle("plugin1", null, "p.A"); + PluginBundle bundle2 = createMockBundle("plugin2", null, "q.B"); + PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); + + when(mockPluginsLoader.pluginLayers()).thenReturn( + Stream.of( + new TestPluginLayer(bundle1, loader1, ModuleLayer.boot()), + new TestPluginLayer(bundle2, loader2, ModuleLayer.boot()) + ) + ); + 
PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); + + var testClass1 = loader1.loadClass("p.A"); + var testClass2 = loader2.loadClass("q.B"); + var resolvedPluginName1 = pluginsResolver.resolveClassToPluginName(testClass1); + var resolvedPluginName2 = pluginsResolver.resolveClassToPluginName(testClass2); + + assertEquals("plugin1", resolvedPluginName1); + assertEquals("plugin2", resolvedPluginName2); + } } public void testResolveNonModularPlugin() throws IOException, ClassNotFoundException { @@ -164,22 +167,22 @@ public void testResolveNonModularPlugin() throws IOException, ClassNotFoundExcep Path jar = createNonModularPluginJar(home, pluginName, "p", "A"); - var loader = createClassLoader(jar); - - PluginBundle bundle = createMockBundle(pluginName, null, "p.A"); - PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); + try (var loader = createClassLoader(jar)) { + PluginBundle bundle = createMockBundle(pluginName, null, "p.A"); + PluginsLoader mockPluginsLoader = mock(PluginsLoader.class); - when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, ModuleLayer.boot()))); - PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); + when(mockPluginsLoader.pluginLayers()).thenReturn(Stream.of(new TestPluginLayer(bundle, loader, ModuleLayer.boot()))); + PluginsResolver pluginsResolver = PluginsResolver.create(mockPluginsLoader); - var testClass = loader.loadClass("p.A"); - var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass); - var unresolvedPluginName1 = pluginsResolver.resolveClassToPluginName(PluginsResolver.class); - var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class); + var testClass = loader.loadClass("p.A"); + var resolvedPluginName = pluginsResolver.resolveClassToPluginName(testClass); + var unresolvedPluginName1 = pluginsResolver.resolveClassToPluginName(PluginsResolver.class); + var unresolvedPluginName2 = pluginsResolver.resolveClassToPluginName(String.class); - assertEquals(pluginName, resolvedPluginName); - assertNull(unresolvedPluginName1); - assertNull(unresolvedPluginName2); + assertEquals(pluginName, resolvedPluginName); + assertNull(unresolvedPluginName1); + assertNull(unresolvedPluginName2); + } } private static URLClassLoader createClassLoader(Path jar) throws MalformedURLException { From 2dd919323ed9578eca6d4970d9bb4727cacf061b Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Thu, 5 Dec 2024 13:10:55 -0500 Subject: [PATCH 44/45] ESQL Javadoc for creating new data types (#117520) (#118096) This adds some java doc to the DataType enum, listing out the steps I followed for adding DateNanos. Hopefully it's helpful to future folks adding data types. 
--------- Co-authored-by: Bogdan Pintea --- .../xpack/esql/core/type/DataType.java | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java index a63571093ba5..d86cdb0de038 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DataType.java @@ -32,6 +32,113 @@ import static org.elasticsearch.xpack.esql.core.util.PlanStreamInput.readCachedStringWithVersionCheck; import static org.elasticsearch.xpack.esql.core.util.PlanStreamOutput.writeCachedStringWithVersionCheck; +/** + * This enum represents data types the ES|QL query processing layer is able to + * interact with in some way. This includes fully representable types (e.g. + * {@link DataType#LONG}, numeric types which we promote (e.g. {@link DataType#SHORT}) + * or fold into other types (e.g. {@link DataType#DATE_PERIOD}) early in the + * processing pipeline, types for internal use + * cases (e.g. {@link DataType#PARTIAL_AGG}), and types which the language + * doesn't support, but require special handling anyway (e.g. + * {@link DataType#OBJECT}) + * + *
+ * <h2>Process for adding a new data type</h2>
+ * Note: it is not expected that all the following steps be done in a single PR.
+ * Use capabilities to gate tests as you go, and use as many PRs as you think
+ * appropriate. New data types are complex, and smaller PRs will make reviews
+ * easier.
+ * <ul>
+ *     <li>
+ *         Create a new feature flag for the type in {@link EsqlCorePlugin}. We
+ *         recommend developing the data type over a series of smaller PRs behind
+ *         a feature flag; even for relatively simple data types.</li>
+ *     <li>
+ *         Add a capability to EsqlCapabilities related to the new type, and
+ *         gated by the feature flag you just created. Again, using the feature
+ *         flag is preferred over snapshot-only. As development progresses, you may
+ *         need to add more capabilities related to the new type, e.g. for
+ *         supporting specific functions. This is fine, and expected.</li>
+ *     <li>
+ *         Create a new CSV test file for the new type. You'll either need to
+ *         create a new data file as well, or add values of the new type to
+ *         an existing data file. See CsvTestDataLoader for creating a new data
+ *         set.</li>
+ *     <li>
+ *         In the new CSV test file, start adding basic functionality tests.
+ *         These should include reading and returning values, both from indexed data
+ *         and from the ROW command. It should also include functions that support
+ *         "every" type, such as Case or MvFirst.</li>
+ *     <li>
+ *         Add the new type to the CsvTestUtils#Type enum, if it isn't already
+ *         there. You also need to modify CsvAssert to support reading values
+ *         of the new type.</li>
+ *     <li>
+ *         At this point, the CSV tests should fail with a sensible ES|QL error
+ *         message. Make sure they're failing in ES|QL, not in the test
+ *         framework.</li>
+ *     <li>
+ *         Add the new data type to this enum. This will cause a bunch of
+ *         compile errors for switch statements throughout the code. Resolve those
+ *         as appropriate. That is the main way in which the new type will be tied
+ *         into the framework.</li>
+ *     <li>
+ *         Add the new type to the {@link DataType#UNDER_CONSTRUCTION}
+ *         collection. This is used by the test framework to disable some checks
+ *         around how functions report their supported types, which would otherwise
+ *         generate a lot of noise while the type is still in development.</li>
+ *     <li>
+ *         Add typed data generators to TestCaseSupplier, and make sure all
+ *         functions that support the new type have tests for it.</li>
+ *     <li>
+ *         Work to support things all types should do. Equality and the
+ *         "typeless" MV functions (MvFirst, MvLast, and MvCount) should work for
+ *         most types. Case and Coalesce should also support all types.
+ *         If the type has a natural ordering, make sure to test
+ *         sorting and the other binary comparisons. Make sure these functions all
+ *         have CSV tests that run against indexed data.</li>
+ *     <li>
+ *         Add conversion functions as appropriate. Almost all types should
+ *         support ToString, and should have a "ToType" function that accepts a
+ *         string. There may be other logical conversions depending on the nature
+ *         of the type. Make sure to add the conversion function to the
+ *         TYPE_TO_CONVERSION_FUNCTION map in EsqlDataTypeConverter. Make sure the
+ *         conversion functions have CSV tests that run against indexed data.</li>
+ *     <li>
+ *         Support the new type in aggregations that are type independent.
+ *         This includes Values, Count, and Count Distinct. Make sure there are
+ *         CSV tests against indexed data for these.</li>
+ *     <li>
+ *         Support other functions and aggregations as appropriate, making sure
+ *         to include CSV tests.</li>
+ *     <li>
+ *         Consider how the type will interact with other types. For example,
+ *         if the new type is numeric, it may be good for it to be comparable with
+ *         other numbers. Supporting this may require new logic in
+ *         EsqlDataTypeConverter#commonType, individual function type checking, the
+ *         verifier rules, or other places. We suggest starting with CSV tests and
+ *         seeing where they fail.</li>
+ * </ul>
+ * There are some additional steps that should be taken when removing the
+ * feature flag and getting ready for a release:
+ * <ul>
+ *     <li>
+ *         Ensure the capabilities for this type are always enabled
+ *     </li>
+ *     <li>
+ *         Remove the type from the {@link DataType#UNDER_CONSTRUCTION}
+ *         collection</li>
+ *     <li>
+ *         Fix new test failures related to declared function types
+ *     </li>
+ *     <li>
+ *         Make sure to run the full test suite locally via gradle to generate
+ *         the function type tables and helper files with the new type. Ensure all
+ *         the functions that support the type have appropriate docs for it.</li>
+ *     <li>
+ *         If appropriate, remove the type from the ESQL limitations list of
+ *         unsupported types.</li>
+ * </ul>
+ */ public enum DataType { /** * Fields of this type are unsupported by any functions and are always From 51f1bc0dc299539b03f95b67036ea72c3a255af9 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 5 Dec 2024 10:27:38 -0800 Subject: [PATCH 45/45] Wrap jackson exception on malformed json string (#114445) (#118098) This commit hides the underlying Jackson parse exception when encountered while parsing string tokens. Co-authored-by: Henrique Paes --- docs/changelog/114445.yaml | 6 ++++++ .../xcontent/provider/json/JsonXContentParser.java | 6 +++++- .../java/org/elasticsearch/http/BulkRestIT.java | 3 +-- .../common/xcontent/json/JsonXContentTests.java | 13 +++++++++++++ 4 files changed, 25 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/114445.yaml diff --git a/docs/changelog/114445.yaml b/docs/changelog/114445.yaml new file mode 100644 index 000000000000..afbc080d1e0b --- /dev/null +++ b/docs/changelog/114445.yaml @@ -0,0 +1,6 @@ +pr: 114445 +summary: Wrap jackson exception on malformed json string +area: Infra/Core +type: bug +issues: + - 114142 diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java index d42c56845d03..38ef8bc2e4ef 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentParser.java @@ -108,7 +108,11 @@ public String text() throws IOException { if (currentToken().isValue() == false) { throwOnNoText(); } - return parser.getText(); + try { + return parser.getText(); + } catch (JsonParseException e) { + throw newXContentParseException(e); + } } private void throwOnNoText() { diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java index 369d0824bdb2..3faa88339f0a 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/BulkRestIT.java @@ -74,8 +74,7 @@ public void testBulkInvalidIndexNameString() throws IOException { ResponseException responseException = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(BAD_REQUEST.getStatus())); - assertThat(responseException.getMessage(), containsString("could not parse bulk request body")); - assertThat(responseException.getMessage(), containsString("json_parse_exception")); + assertThat(responseException.getMessage(), containsString("x_content_parse_exception")); assertThat(responseException.getMessage(), containsString("Invalid UTF-8")); } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java index 55f6cc5498d8..4135ead545e0 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/json/JsonXContentTests.java @@ -11,6 +11,9 @@ import org.elasticsearch.common.xcontent.BaseXContentTestCase; import org.elasticsearch.xcontent.XContentGenerator; +import org.elasticsearch.xcontent.XContentParseException; +import org.elasticsearch.xcontent.XContentParser; +import 
org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -28,4 +31,14 @@ public void testBigInteger() throws Exception { XContentGenerator generator = JsonXContent.jsonXContent.createGenerator(os); doTestBigInteger(generator, os); } + + public void testMalformedJsonFieldThrowsXContentException() throws Exception { + String json = "{\"test\":\"/*/}"; + try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) { + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + assertThrows(XContentParseException.class, () -> parser.text()); + } + } }
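
A minimal sketch of the caller-visible behavior after PATCH 45/45, assuming
only APIs that already appear in the diff above (JsonXContent,
XContentParserConfiguration, XContentParseException); the class name and the
printout are illustrative, not part of the patch. After the change, a
malformed string token surfaces as an XContentParseException rather than a
raw Jackson JsonParseException:

    import org.elasticsearch.xcontent.XContentParseException;
    import org.elasticsearch.xcontent.XContentParser;
    import org.elasticsearch.xcontent.XContentParserConfiguration;
    import org.elasticsearch.xcontent.json.JsonXContent;

    public class MalformedStringExample {
        public static void main(String[] args) throws Exception {
            // Unterminated string value, same input as the new unit test above.
            String json = "{\"test\":\"/*/}";
            try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) {
                parser.nextToken(); // START_OBJECT
                parser.nextToken(); // FIELD_NAME "test"
                parser.nextToken(); // VALUE_STRING (malformed)
                parser.text();      // now wraps the underlying JsonParseException
            } catch (XContentParseException e) {
                // Callers handle a single x-content exception type instead of a Jackson one.
                System.out.println("caught: " + e.getMessage());
            }
        }
    }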