Merge branch 'main' into rerank-cohere-inference
szabosteve authored Apr 15, 2024
2 parents 9e0dd94 + b9322da commit faf0e8e
Showing 171 changed files with 7,279 additions and 2,208 deletions.
@@ -535,7 +535,7 @@ GET /_analyze
]
}
],
"text": "My license plate is ٢٥٠١٥"
"text": "My license plate is empty"
}
----
"""
@@ -557,7 +557,7 @@ GET /_analyze
]
}
],
"text": "My license plate is ٢٥٠١٥"
"text": "My license plate is empty"
}"""
}

16 changes: 16 additions & 0 deletions docs/changelog/106068.yaml
@@ -3,3 +3,19 @@ summary: Add `modelId` and `modelText` to `KnnVectorQueryBuilder`
area: Search
type: enhancement
issues: []
+highlight:
+  title: Query phase KNN now supports query_vector_builder
+  body: |-
+    It is now possible to pass `model_text` and `model_id` within a `knn` query
+    in the [query DSL](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-knn-query.html) to convert a text query into a dense vector and run the
+    nearest neighbor query on it, instead of requiring the dense vector to be
+    passed directly (within the `query_vector` parameter). Similar to the
+    [top-level knn query](https://www.elastic.co/guide/en/elasticsearch/reference/current/knn-search.html) (executed in the DFS phase), it is possible to supply
+    a `query_vector_builder` object containing a `text_embedding` object with
+    `model_text` (the text query to be converted into a dense vector) and
+    `model_id` (the identifier of a deployed model responsible for transforming
+    the text query into a dense vector). Note that an embedding model with the
+    referenced `model_id` needs to be [deployed on an ML node](https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html)
+    in the cluster.
+  notable: true
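
For context, a sketch of what such a request could look like (the index name, field name, and model ID below are illustrative placeholders, not values from this commit):

----
POST /my-index/_search
{
  "query": {
    "knn": {
      "field": "my_embedding",
      "query_vector_builder": {
        "text_embedding": {
          "model_id": "my-text-embedding-model",
          "model_text": "How do I reset my password?"
        }
      },
      "num_candidates": 10
    }
  }
}
----

The `text_embedding` object takes the place of the `query_vector` parameter; the referenced model generates the dense vector server-side before the nearest-neighbor search runs.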

5 changes: 5 additions & 0 deletions docs/changelog/106796.yaml
@@ -0,0 +1,5 @@
pr: 106796
summary: Bulk loading enrich fields in ESQL
area: ES|QL
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/107196.yaml
@@ -0,0 +1,5 @@
pr: 107196
summary: Add metric for calculating index flush time excluding waiting on locks
area: Engine
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/107370.yaml
@@ -0,0 +1,5 @@
pr: 107370
summary: Fork when handling remote field-caps responses
area: Search
type: bug
issues: []
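
For background on #107370: merging a large remote field-caps response directly on the transport thread can stall the network layer, so the fix forks that work onto another thread pool. A generic, self-contained sketch of the fork-off-the-I/O-thread pattern (plain java.util.concurrent, not the actual Elasticsearch handler):

----
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustrative only: never do expensive work on the I/O thread; hand the
// payload to a worker pool and return immediately.
public class ForkingResponseHandler {

    private final ExecutorService workers = Executors.newFixedThreadPool(4);

    // Invoked on the network thread when a remote response arrives.
    public void onResponse(byte[] serializedResponse) {
        workers.execute(() -> merge(serializedResponse)); // fork, don't block
    }

    private void merge(byte[] serializedResponse) {
        // Deserializing and merging a potentially large response happens
        // here, safely off the network thread.
    }
}
----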
6 changes: 6 additions & 0 deletions docs/changelog/107432.yaml
@@ -0,0 +1,6 @@
pr: 107432
summary: "Percolator named queries: rewrite for matched info"
area: Percolator
type: bug
issues:
- 107176
1 change: 1 addition & 0 deletions docs/reference/cluster/nodes-stats.asciidoc
@@ -626,6 +626,7 @@ Total time spent performing flush operations.
(integer)
Total time in milliseconds
spent performing flush operations.

=======
`warmer`::
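For reference, these flush timings are exposed per node through the nodes stats API; a trimmed request/response sketch (node ID and values are placeholders, and only long-standing fields are shown; the new lock-excluding metric from #107196 is reported alongside them):

----
GET /_nodes/stats/indices/flush

{
  "nodes": {
    "node-id-placeholder": {
      "indices": {
        "flush": {
          "total": 12,
          "total_time_in_millis": 340
        }
      }
    }
  }
}
----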
1 change: 1 addition & 0 deletions docs/reference/inference/post-inference.asciidoc
@@ -54,6 +54,7 @@ The unique identifier of the {infer} endpoint.
(Optional, string)
The type of {infer} task that the model performs.


[discrete]
[[post-inference-api-query-params]]
==== {api-query-parms-title}
@@ -1778,22 +1778,9 @@ public void testRemoveGhostReference() throws Exception {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
DataStream original = currentState.getMetadata().dataStreams().get(dataStreamName);
-                DataStream broken = new DataStream(
-                    original.getName(),
-                    List.of(new Index(original.getIndices().get(0).getName(), "broken"), original.getIndices().get(1)),
-                    original.getGeneration(),
-                    original.getMetadata(),
-                    original.isHidden(),
-                    original.isReplicated(),
-                    original.isSystem(),
-                    original.isAllowCustomRouting(),
-                    original.getIndexMode(),
-                    original.getLifecycle(),
-                    original.isFailureStore(),
-                    original.getFailureIndices(),
-                    original.rolloverOnWrite(),
-                    original.getAutoShardingEvent()
-                );
+                DataStream broken = original.copy()
+                    .setIndices(List.of(new Index(original.getIndices().get(0).getName(), "broken"), original.getIndices().get(1)))
+                    .build();
brokenDataStreamHolder.set(broken);
return ClusterState.builder(currentState)
.metadata(Metadata.builder(currentState.getMetadata()).put(broken).build())
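This hunk sets the pattern for most of the test changes in this commit: the long positional `DataStream` constructor is replaced by a copy builder that seeds itself from an existing instance and overrides only the fields under test. A minimal sketch of the idiom on a hypothetical immutable class (not the actual DataStream source):

----
// Hypothetical illustration of the copy-builder idiom used throughout this commit.
record Snapshot(String name, int generation, boolean hidden) {

    // Seed a builder with every field of this instance.
    Builder copy() {
        return new Builder(this);
    }

    static final class Builder {
        private String name;
        private int generation;
        private boolean hidden;

        Builder(Snapshot source) {
            this.name = source.name();
            this.generation = source.generation();
            this.hidden = source.hidden();
        }

        Builder setGeneration(int generation) {
            this.generation = generation;
            return this;
        }

        // Fields never set keep the values copied from the source instance.
        Snapshot build() {
            return new Snapshot(name, generation, hidden);
        }
    }
}
----

Call sites then read as `original.copy().setGeneration(n).build()`, which stays correct as fields are added to the class instead of breaking every long positional constructor call.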
@@ -139,7 +139,7 @@ static GetDataStreamAction.Response innerOperation(
Map<Index, IndexProperties> backingIndicesSettingsValues = new HashMap<>();
Metadata metadata = state.getMetadata();
collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getIndices());
-        if (DataStream.isFailureStoreEnabled() && dataStream.getFailureIndices().isEmpty() == false) {
+        if (DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.getFailureIndices().isEmpty() == false) {
collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices());
}

@@ -301,24 +301,7 @@ public void testGetAdditionalIndexSettingsDataStreamAlreadyCreatedTimeSettingsMi
).getMetadata()
);
DataStream ds = mb.dataStream(dataStreamName);
-        mb.put(
-            new DataStream(
-                ds.getName(),
-                ds.getIndices(),
-                ds.getGeneration(),
-                ds.getMetadata(),
-                ds.isHidden(),
-                ds.isReplicated(),
-                ds.isSystem(),
-                ds.isAllowCustomRouting(),
-                IndexMode.TIME_SERIES,
-                ds.getLifecycle(),
-                ds.isFailureStore(),
-                ds.getFailureIndices(),
-                ds.rolloverOnWrite(),
-                ds.getAutoShardingEvent()
-            )
-        );
+        mb.put(ds.copy().setIndexMode(IndexMode.TIME_SERIES).build());
Metadata metadata = mb.build();

Instant now = twoHoursAgo.plus(2, ChronoUnit.HOURS);
@@ -60,17 +60,10 @@ public class MetadataDataStreamRolloverServiceTests extends ESTestCase {
public void testRolloverClusterStateForDataStream() throws Exception {
Instant now = Instant.now();
String dataStreamName = "logs-my-app";
-        final DataStream dataStream = new DataStream(
+        final DataStream dataStream = DataStream.builder(
            dataStreamName,
-            List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid")),
-            1,
-            null,
-            false,
-            false,
-            false,
-            false,
-            IndexMode.TIME_SERIES
-        );
+            List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid"))
+        ).setIndexMode(IndexMode.TIME_SERIES).build();
ComposableIndexTemplate template = ComposableIndexTemplate.builder()
.indexPatterns(List.of(dataStream.getName() + "*"))
.template(
@@ -168,17 +161,10 @@ public void testRolloverAndMigrateDataStream() throws Exception {
Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS);
String dataStreamName = "logs-my-app";
IndexMode dsIndexMode = randomBoolean() ? null : IndexMode.STANDARD;
-        final DataStream dataStream = new DataStream(
+        final DataStream dataStream = DataStream.builder(
            dataStreamName,
-            List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid")),
-            1,
-            null,
-            false,
-            false,
-            false,
-            false,
-            dsIndexMode
-        );
+            List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid"))
+        ).setIndexMode(dsIndexMode).build();
ComposableIndexTemplate template = ComposableIndexTemplate.builder()
.indexPatterns(List.of(dataStream.getName() + "*"))
.template(
@@ -257,17 +243,10 @@ public void testRolloverAndMigrateDataStream() throws Exception {
public void testChangingIndexModeFromTimeSeriesToSomethingElseNoEffectOnExistingDataStreams() throws Exception {
Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS);
String dataStreamName = "logs-my-app";
-        final DataStream dataStream = new DataStream(
+        final DataStream dataStream = DataStream.builder(
            dataStreamName,
-            List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid")),
-            1,
-            null,
-            false,
-            false,
-            false,
-            false,
-            IndexMode.TIME_SERIES
-        );
+            List.of(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, 1, now.toEpochMilli()), "uuid"))
+        ).setIndexMode(IndexMode.TIME_SERIES).build();
ComposableIndexTemplate template = ComposableIndexTemplate.builder()
.indexPatterns(List.of(dataStream.getName() + "*"))
.template(
@@ -479,17 +458,7 @@ private static ClusterState createClusterState(String dataStreamName, int number
for (int i = 1; i <= numberOfBackingIndices; i++) {
backingIndices.add(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, i, now.toEpochMilli()), "uuid" + i));
}
-        final DataStream dataStream = new DataStream(
-            dataStreamName,
-            backingIndices,
-            numberOfBackingIndices,
-            null,
-            false,
-            false,
-            false,
-            false,
-            null
-        );
+        final DataStream dataStream = DataStream.builder(dataStreamName, backingIndices).setGeneration(numberOfBackingIndices).build();
ComposableIndexTemplate template = ComposableIndexTemplate.builder()
.indexPatterns(List.of(dataStream.getName() + "*"))
.template(
@@ -139,26 +139,7 @@ public void testUpdateTimeSeriesTemporalRange_NoUpdateBecauseReplicated() {
List.of(new Tuple<>(start.minus(4, ChronoUnit.HOURS), start), new Tuple<>(start, end))
).getMetadata();
DataStream d = metadata.dataStreams().get(dataStreamName);
-        metadata = Metadata.builder(metadata)
-            .put(
-                new DataStream(
-                    d.getName(),
-                    d.getIndices(),
-                    d.getGeneration(),
-                    d.getMetadata(),
-                    d.isHidden(),
-                    true,
-                    d.isSystem(),
-                    d.isAllowCustomRouting(),
-                    d.getIndexMode(),
-                    d.getLifecycle(),
-                    d.isFailureStore(),
-                    d.getFailureIndices(),
-                    false,
-                    d.getAutoShardingEvent()
-                )
-            )
-            .build();
+        metadata = Metadata.builder(metadata).put(d.copy().setReplicated(true).setRolloverOnWrite(false).build()).build();

now = now.plus(1, ChronoUnit.HOURS);
ClusterState in = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build();
@@ -57,7 +57,7 @@ public void testDeleteDataStream() {
}

public void testDeleteDataStreamWithFailureStore() {
-        Assume.assumeTrue(DataStream.isFailureStoreEnabled());
+        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());

final String dataStreamName = "my-data-stream";
final List<String> otherIndices = randomSubsetOf(List.of("foo", "bar", "baz"));
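As an aside on the pattern itself: JUnit's `Assume` marks a test as skipped rather than failed when its condition does not hold, which is how these feature-flag-gated tests stay green in builds without the flag. A minimal standalone sketch (hypothetical test class and flag check, not from this commit):

----
import org.junit.Assume;
import org.junit.Test;

public class FeatureFlagGatedTests {

    @Test
    public void testRunsOnlyWhenFlagIsEnabled() {
        // Skips (rather than fails) the test when the flag is off.
        Assume.assumeTrue(isFeatureFlagEnabled());
        // feature-dependent assertions would go here
    }

    // Hypothetical flag check; real builds would consult their own mechanism.
    private static boolean isFeatureFlagEnabled() {
        return Boolean.parseBoolean(System.getProperty("tests.feature_flag.enabled", "false"));
    }
}
----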
@@ -76,22 +76,14 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti
List<Index> failureStores = List.of(failureStoreIndex);
{
// data stream has an enabled lifecycle
-            DataStream logs = new DataStream(
-                "logs",
-                indices,
-                3,
-                null,
-                false,
-                false,
-                false,
-                true,
-                IndexMode.STANDARD,
-                new DataStreamLifecycle(),
-                true,
-                failureStores,
-                false,
-                null
-            );
+            DataStream logs = DataStream.builder("logs", indices)
+                .setGeneration(3)
+                .setAllowCustomRouting(true)
+                .setIndexMode(IndexMode.STANDARD)
+                .setLifecycle(new DataStreamLifecycle())
+                .setFailureStoreEnabled(true)
+                .setFailureIndices(failureStores)
+                .build();

String ilmPolicyName = "rollover-30days";
Map<Index, Response.IndexProperties> indexSettingsValues = Map.of(
@@ -166,7 +158,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti
is(ManagedBy.LIFECYCLE.displayValue)
);

-            if (DataStream.isFailureStoreEnabled()) {
+            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
List<Object> failureStoresRepresentation = (List<Object>) dataStreamMap.get(
DataStream.FAILURE_INDICES_FIELD.getPreferredName()
);
@@ -187,22 +179,14 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti

{
// data stream has a lifecycle that's not enabled
-            DataStream logs = new DataStream(
-                "logs",
-                indices,
-                3,
-                null,
-                false,
-                false,
-                false,
-                true,
-                IndexMode.STANDARD,
-                new DataStreamLifecycle(null, null, false),
-                true,
-                failureStores,
-                false,
-                null
-            );
+            DataStream logs = DataStream.builder("logs", indices)
+                .setGeneration(3)
+                .setAllowCustomRouting(true)
+                .setIndexMode(IndexMode.STANDARD)
+                .setLifecycle(new DataStreamLifecycle(null, null, false))
+                .setFailureStoreEnabled(true)
+                .setFailureIndices(failureStores)
+                .build();

String ilmPolicyName = "rollover-30days";
Map<Index, Response.IndexProperties> indexSettingsValues = Map.of(
@@ -266,7 +250,7 @@ public void testResponseIlmAndDataStreamLifecycleRepresentation() throws Excepti
is(ManagedBy.UNMANAGED.displayValue)
);

-            if (DataStream.isFailureStoreEnabled()) {
+            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
List<Object> failureStoresRepresentation = (List<Object>) dataStreamMap.get(
DataStream.FAILURE_INDICES_FIELD.getPreferredName()
);
@@ -283,22 +283,11 @@ public void testRetentionNotExecutedForTSIndicesWithinTimeBounds() {
Metadata.Builder builder = Metadata.builder(clusterState.metadata());
DataStream dataStream = builder.dataStream(dataStreamName);
builder.put(
-            new DataStream(
-                dataStreamName,
-                dataStream.getIndices(),
-                dataStream.getGeneration() + 1,
-                dataStream.getMetadata(),
-                dataStream.isHidden(),
-                dataStream.isReplicated(),
-                dataStream.isSystem(),
-                dataStream.isAllowCustomRouting(),
-                dataStream.getIndexMode(),
-                DataStreamLifecycle.newBuilder().dataRetention(0L).build(),
-                dataStream.isFailureStore(),
-                dataStream.getFailureIndices(),
-                dataStream.rolloverOnWrite(),
-                dataStream.getAutoShardingEvent()
-            )
+            dataStream.copy()
+                .setName(dataStreamName)
+                .setGeneration(dataStream.getGeneration() + 1)
+                .setLifecycle(DataStreamLifecycle.newBuilder().dataRetention(0L).build())
+                .build()
);
clusterState = ClusterState.builder(clusterState).metadata(builder).build();

@@ -85,8 +85,9 @@ public void process(HitContext hitContext) throws IOException {
// This is not a document with a percolator field.
continue;
}
-                query = pc.filterNestedDocs(query, fetchContext.getSearchExecutionContext().indexVersionCreated());
                IndexSearcher percolatorIndexSearcher = pc.percolateQuery.getPercolatorIndexSearcher();
+                query = pc.filterNestedDocs(query, fetchContext.getSearchExecutionContext().indexVersionCreated());
+                query = percolatorIndexSearcher.rewrite(query);
int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC));
if (topDocs.totalHits.value == 0) {
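This hunk is the fix behind the #107432 changelog entry above: the candidate query is now rewritten against the in-memory percolator searcher before it is executed, so constructs that only resolve at rewrite time, such as named queries, report their matched info correctly. A sketch of the kind of request affected (index name and document fields are placeholders):

----
GET /my-percolator-index/_search
{
  "query": {
    "percolate": {
      "field": "query",
      "document": {
        "message": "A new bonsai tree in the office"
      }
    }
  }
}
----

Stored queries that rely on `_name` are the case the changelog summary points at: without the explicit rewrite, their matched-query information could be lost.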
