diff --git a/.unreleased/pr_7645 b/.unreleased/pr_7645
new file mode 100644
index 00000000000..c7dae561dd8
--- /dev/null
+++ b/.unreleased/pr_7645
@@ -0,0 +1 @@
+Fixes: #7645 Fix DELETE on compressed chunk with non-btree operators
diff --git a/tsl/src/compression/compression_dml.c b/tsl/src/compression/compression_dml.c
index 573039b3ca5..f599c13967e 100644
--- a/tsl/src/compression/compression_dml.c
+++ b/tsl/src/compression/compression_dml.c
@@ -268,7 +268,8 @@ decompress_batches_for_update_delete(HypertableModifyState *ht_state, Chunk *chu
 		scankeys = build_update_delete_scankeys(comp_chunk_rel,
 												heap_filters,
 												&num_scankeys,
-												&null_columns);
+												&null_columns,
+												&delete_only);
 	}
 
 	if (matching_index_rel)
@@ -957,7 +958,16 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
 					break;
 				}
 				default:
-					/* Do nothing for unknown operator strategies. */
+					*heap_filters = lappend(*heap_filters,
+											make_batchfilter(column_name,
+															 op_strategy,
+															 collation,
+															 opcode,
+															 arg_value,
+															 false, /* is_null_check */
+															 false, /* is_null */
+															 false /* is_array_op */
+															 ));
 					break;
 			}
 			continue;
@@ -1109,7 +1119,16 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
 					break;
 				}
 				default:
-					/* Do nothing on unknown operator strategies. */
+					*heap_filters = lappend(*heap_filters,
+											make_batchfilter(column_name,
+															 op_strategy,
+															 collation,
+															 opcode,
+															 arg_value,
+															 false, /* is_null_check */
+															 false, /* is_null */
+															 true /* is_array_op */
+															 ));
 					break;
 			}
 			continue;
diff --git a/tsl/src/compression/compression_dml.h b/tsl/src/compression/compression_dml.h
index 90b77ce73f0..d30ed3f6610 100644
--- a/tsl/src/compression/compression_dml.h
+++ b/tsl/src/compression/compression_dml.h
@@ -45,4 +45,4 @@ ScanKeyData *build_heap_scankeys(Oid hypertable_relid, Relation in_rel, Relation
 								 CompressionSettings *settings, Bitmapset *key_columns,
 								 Bitmapset **null_columns, TupleTableSlot *slot, int *num_scankeys);
 ScanKeyData *build_update_delete_scankeys(Relation in_rel, List *heap_filters, int *num_scankeys,
-										  Bitmapset **null_columns);
+										  Bitmapset **null_columns, bool *delete_only);
diff --git a/tsl/src/compression/compression_scankey.c b/tsl/src/compression/compression_scankey.c
index be5e6ef9652..9fc6c320354 100644
--- a/tsl/src/compression/compression_scankey.c
+++ b/tsl/src/compression/compression_scankey.c
@@ -16,11 +16,11 @@
 #include "ts_catalog/array_utils.h"
 
 static Oid deduce_filter_subtype(BatchFilter *filter, Oid att_typoid);
-static int create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
-										  StrategyNumber strategy, Oid subtype,
-										  ScanKeyData *scankeys, int num_scankeys,
-										  Bitmapset **null_columns, Datum value, bool is_null_check,
-										  bool is_array_op);
+static bool create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
+										   StrategyNumber strategy, Oid subtype,
+										   ScanKeyData *scankeys, int *num_scankeys,
+										   Bitmapset **null_columns, Datum value, bool is_null_check,
+										   bool is_array_op);
 
 /*
  * Test ScanKey against a slot.
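
The process_predicates() hunks in compression_dml.c above change how predicates with
unknown operator strategies are handled: instead of being silently dropped, they are
now appended to heap_filters so they get applied to the decompressed rows, and (via
the delete_only logic below) they disable direct batch deletion. A minimal sketch of
the failure mode this guards against, using a hypothetical table that is not part of
this patch:

-- hypothetical repro for issue #7644 (assumes a TimescaleDB instance with compression available)
CREATE TABLE readings(time timestamptz NOT NULL, device text, reading text, value float);
SELECT create_hypertable('readings', 'time');
ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'device, reading');
INSERT INTO readings VALUES ('2021-01-01', 'd1', 'r1', 1.0), ('2021-01-01', 'd1', 'r2', 1.0);
SELECT count(compress_chunk(c)) FROM show_chunks('readings') c;
-- '<>' does not map to a btree scankey strategy; with the predicate silently
-- dropped, a direct DELETE on the compressed chunk also removed the
-- non-matching 'r2' batch
DELETE FROM readings WHERE reading <> 'r2';
SELECT count(*) FROM readings; -- with the fix: 1, the 'r2' row survives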
@@ -187,16 +187,16 @@ build_heap_scankeys(Oid hypertable_relid, Relation in_rel, Relation out_rel,
 		 */
 		if (ts_array_is_member(settings->fd.segmentby, attname))
 		{
-			key_index = create_segment_filter_scankey(in_rel,
-													   attname,
-													   BTEqualStrategyNumber,
-													   InvalidOid,
-													   scankeys,
-													   key_index,
-													   null_columns,
-													   value,
-													   isnull,
-													   false);
+			create_segment_filter_scankey(in_rel,
+										  attname,
+										  BTEqualStrategyNumber,
+										  InvalidOid,
+										  scankeys,
+										  &key_index,
+										  null_columns,
+										  value,
+										  isnull,
+										  false);
 		}
 		if (ts_array_is_member(settings->fd.orderby, attname))
 		{
@@ -208,26 +208,28 @@ build_heap_scankeys(Oid hypertable_relid, Relation in_rel, Relation out_rel,
 
 			int16 index = ts_array_position(settings->fd.orderby, attname);
 
-			key_index = create_segment_filter_scankey(in_rel,
-													   column_segment_min_name(index),
-													   BTLessEqualStrategyNumber,
-													   InvalidOid,
-													   scankeys,
-													   key_index,
-													   null_columns,
-													   value,
-													   false,
-													   false); /* is_null_check */
-			key_index = create_segment_filter_scankey(in_rel,
-													   column_segment_max_name(index),
-													   BTGreaterEqualStrategyNumber,
-													   InvalidOid,
-													   scankeys,
-													   key_index,
-													   null_columns,
-													   value,
-													   false,
-													   false); /* is_null_check */
+			create_segment_filter_scankey(in_rel,
+										  column_segment_min_name(index),
+										  BTLessEqualStrategyNumber,
+										  InvalidOid,
+										  scankeys,
+										  &key_index,
+										  null_columns,
+										  value,
+										  false,
+										  false /* is_null_check */
+			);
+			create_segment_filter_scankey(in_rel,
+										  column_segment_max_name(index),
+										  BTGreaterEqualStrategyNumber,
+										  InvalidOid,
+										  scankeys,
+										  &key_index,
+										  null_columns,
+										  value,
+										  false,
+										  false /* is_null_check */
+			);
 		}
 	}
 }
@@ -435,7 +437,7 @@ build_index_scankeys_using_slot(Oid hypertable_relid, Relation in_rel, Relation
  */
 ScanKeyData *
 build_update_delete_scankeys(Relation in_rel, List *heap_filters, int *num_scankeys,
-							 Bitmapset **null_columns)
+							 Bitmapset **null_columns, bool *delete_only)
 {
 	ListCell *lc;
 	BatchFilter *filter;
@@ -455,25 +457,33 @@ build_update_delete_scankeys(Relation in_rel, List *heap_filters, int *num_scank
 							NameStr(filter->column_name),
 							RelationGetRelationName(in_rel))));
 
-		key_index = create_segment_filter_scankey(in_rel,
-												   NameStr(filter->column_name),
-												   filter->strategy,
-												   deduce_filter_subtype(filter, typoid),
-												   scankeys,
-												   key_index,
-												   null_columns,
-												   filter->value ? filter->value->constvalue : 0,
-												   filter->is_null_check,
-												   filter->is_array_op);
+		bool added = create_segment_filter_scankey(in_rel,
+												   NameStr(filter->column_name),
+												   filter->strategy,
+												   deduce_filter_subtype(filter, typoid),
+												   scankeys,
+												   &key_index,
+												   null_columns,
+												   filter->value ? filter->value->constvalue : 0,
+												   filter->is_null_check,
+												   filter->is_array_op);
+		/*
+		 * When we plan to DELETE directly on compressed chunks, we need to
+		 * ensure that all query constraints can be applied to the compressed
+		 * scan, and must disable direct DELETE whenever we skip a filter.
+		 */
+		if (*delete_only && !added)
+			*delete_only = false;
 	}
 	*num_scankeys = key_index;
 
 	return scankeys;
 }
 
-static int
+static bool
 create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
 							  StrategyNumber strategy, Oid subtype, ScanKeyData *scankeys,
-							  int num_scankeys, Bitmapset **null_columns, Datum value,
+							  int *num_scankeys, Bitmapset **null_columns, Datum value,
 							  bool is_null_check, bool is_array_op)
 {
 	AttrNumber cmp_attno = get_attnum(in_rel->rd_id, segment_filter_col_name);
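
The delete_only flag threaded through build_update_delete_scankeys() above is the
safety valve for direct DELETE: deleting whole compressed batches is only correct
when every filter of the statement was turned into a scankey on the compressed
relation, so create_segment_filter_scankey() now reports whether it added one, and
any skipped filter downgrades the plan to decompress-then-delete. A hedged sketch
of the two resulting plan shapes against the direct_delete test table used below:

-- every filter becomes a scankey: batches are deleted directly
-- (the plan reports "Batches deleted", nothing is decompressed)
BEGIN;
:ANALYZE DELETE FROM direct_delete WHERE reading IN ('r1','r2');
ROLLBACK;
-- '<>' cannot become a scankey: delete_only is cleared and the plan reports
-- "Batches decompressed" plus a row-level Filter on the decompressed tuples
BEGIN;
:ANALYZE DELETE FROM direct_delete WHERE reading <> 'r2';
ROLLBACK;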
@@ -481,7 +491,7 @@ create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
 	/* This should never happen but if it does happen, we can't generate a scan key for
 	 * the filter column so just skip it */
 	if (cmp_attno == InvalidAttrNumber)
-		return num_scankeys;
+		return false;
 
 	int flags = is_array_op ? SK_SEARCHARRAY : 0;
 
@@ -497,7 +507,7 @@ create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
 	if (is_null_check)
 	{
 		*null_columns = bms_add_member(*null_columns, cmp_attno);
-		return num_scankeys;
+		return false;
 	}
 
 	Oid atttypid = in_rel->rd_att->attrs[AttrNumberGetAttrOffset(cmp_attno)].atttypid;
@@ -520,15 +530,15 @@ create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
 
 	/* No operator could be found so we can't create the scankey. */
 	if (!OidIsValid(opr))
-		return num_scankeys;
+		return false;
 
 	opr = get_opcode(opr);
 	Assert(OidIsValid(opr));
 
 	/* We should never end up here but: no opcode, no optimization */
 	if (!OidIsValid(opr))
-		return num_scankeys;
+		return false;
 
-	ScanKeyEntryInitialize(&scankeys[num_scankeys++],
+	ScanKeyEntryInitialize(&scankeys[(*num_scankeys)++],
 						   flags,
 						   cmp_attno,
 						   strategy,
@@ -537,7 +547,7 @@ create_segment_filter_scankey(Relation in_rel, char *segment_filter_col_name,
 						   opr,
 						   value);
 
-	return num_scankeys;
+	return true;
 }
 
 /*
diff --git a/tsl/test/shared/expected/compression_dml.out b/tsl/test/shared/expected/compression_dml.out
index 38cb54edd5a..7a3eda60d9d 100644
--- a/tsl/test/shared/expected/compression_dml.out
+++ b/tsl/test/shared/expected/compression_dml.out
@@ -435,9 +435,11 @@ INSERT INTO direct_delete VALUES
 ('2021-01-01', 'd1', 'r1', 1.0),
 ('2021-01-01', 'd1', 'r2', 1.0),
 ('2021-01-01', 'd1', 'r3', 1.0),
+('2021-01-01', 'd1', NULL, 1.0),
 ('2021-01-01', 'd2', 'r1', 1.0),
 ('2021-01-01', 'd2', 'r2', 1.0),
-('2021-01-01', 'd2', 'r3', 1.0);
+('2021-01-01', 'd2', 'r3', 1.0),
+('2021-01-01', 'd2', NULL, 1.0);
 SELECT count(compress_chunk(c)) FROM show_chunks('direct_delete') c;
  count
      1
@@ -448,7 +450,7 @@ BEGIN;
 :ANALYZE DELETE FROM direct_delete WHERE device='d1';
 QUERY PLAN
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
-   Batches deleted: 3
+   Batches deleted: 4
    ->  Delete on direct_delete (actual rows=0 loops=1)
          Delete on _hyper_X_X_chunk direct_delete_1
          ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=0 loops=1)
@@ -480,6 +482,108 @@ SELECT count(*) FROM direct_delete WHERE reading='r2';
     0
 (1 row)
 
+ROLLBACK;
+-- issue #7644
+-- make sure non-btree operators don't delete unrelated batches
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading <> 'r2';
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 8
+   Tuples decompressed: 8
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=4 loops=1)
+               Filter: (reading <> 'r2'::text)
+               Rows Removed by Filter: 4
+(8 rows)
+
+-- 4 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ count
+     4
+(1 row)
+
+ROLLBACK;
+-- test IS NULL
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading IS NULL;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 2
+   Tuples decompressed: 2
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=2 loops=1)
+               Filter: (reading IS NULL)
+(7 rows)
+
+-- 6 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ count
+     6
+(1 row)
+
+ROLLBACK;
+-- test IS NOT NULL
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading IS NOT NULL;
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 6
+   Tuples decompressed: 6
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=6 loops=1)
+               Filter: (reading IS NOT NULL)
+(7 rows)
+
+-- 2 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ count
+     2
+(1 row)
+
+ROLLBACK;
+-- test IN
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading IN ('r1','r2');
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches deleted: 4
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=0 loops=1)
+               Filter: (reading = ANY ('{r1,r2}'::text[]))
+(6 rows)
+
+-- 4 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ count
+     4
+(1 row)
+
+ROLLBACK;
+-- test NOT IN
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading NOT IN ('r1');
+QUERY PLAN
+ Custom Scan (HypertableModify) (actual rows=0 loops=1)
+   Batches decompressed: 8
+   Tuples decompressed: 8
+   ->  Delete on direct_delete (actual rows=0 loops=1)
+         Delete on _hyper_X_X_chunk direct_delete_1
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=4 loops=1)
+               Filter: (reading <> 'r1'::text)
+               Rows Removed by Filter: 4
+(8 rows)
+
+-- 4 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ count
+     4
+(1 row)
+
 ROLLBACK;
 -- combining constraints on segmentby columns should work
 BEGIN;
@@ -505,22 +609,22 @@ ROLLBACK;
 BEGIN; :ANALYZE DELETE FROM direct_delete WHERE value = '1.0'; ROLLBACK;
 QUERY PLAN
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
-   Batches decompressed: 6
-   Tuples decompressed: 6
+   Batches decompressed: 8
+   Tuples decompressed: 8
    ->  Delete on direct_delete (actual rows=0 loops=1)
          Delete on _hyper_X_X_chunk direct_delete_1
-         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=6 loops=1)
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=8 loops=1)
               Filter: (value = '1'::double precision)
 (7 rows)
 
 BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1' AND value = '1.0'; ROLLBACK;
 QUERY PLAN
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
-   Batches decompressed: 3
-   Tuples decompressed: 3
+   Batches decompressed: 4
+   Tuples decompressed: 4
    ->  Delete on direct_delete (actual rows=0 loops=1)
          Delete on _hyper_X_X_chunk direct_delete_1
-         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=3 loops=1)
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=4 loops=1)
               Filter: ((device = 'd1'::text) AND (value = '1'::double precision))
 (7 rows)
 
@@ -552,15 +656,16 @@ BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1'; ROLLBACK;
 WARNING: Trigger fired
 WARNING: Trigger fired
 WARNING: Trigger fired
+WARNING: Trigger fired
 QUERY PLAN
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
-   Batches decompressed: 3
-   Tuples decompressed: 3
+   Batches decompressed: 4
+   Tuples decompressed: 4
    ->  Delete on direct_delete (actual rows=0 loops=1)
          Delete on _hyper_X_X_chunk direct_delete_1
-         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=3 loops=1)
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=4 loops=1)
               Filter: (device = 'd1'::text)
-Trigger direct_delete_trigger on _hyper_X_X_chunk: calls=3
+Trigger direct_delete_trigger on _hyper_X_X_chunk: calls=4
 (8 rows)
 
 DROP TRIGGER direct_delete_trigger ON direct_delete;
@@ -569,15 +674,16 @@ BEGIN; :ANALYZE DELETE FROM direct_delete WHERE device = 'd1'; ROLLBACK;
 WARNING: Trigger fired
 WARNING: Trigger fired
 WARNING: Trigger fired
+WARNING: Trigger fired
 QUERY PLAN
  Custom Scan (HypertableModify) (actual rows=0 loops=1)
-   Batches decompressed: 3
-   Tuples decompressed: 3
+   Batches decompressed: 4
+   Tuples decompressed: 4
    ->  Delete on direct_delete (actual rows=0 loops=1)
          Delete on _hyper_X_X_chunk direct_delete_1
-         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=3 loops=1)
+         ->  Seq Scan on _hyper_X_X_chunk direct_delete_1 (actual rows=4 loops=1)
               Filter: (device = 'd1'::text)
-Trigger direct_delete_trigger on _hyper_X_X_chunk: calls=3
+Trigger direct_delete_trigger on _hyper_X_X_chunk: calls=4
 (8 rows)
 
 DROP TRIGGER direct_delete_trigger ON direct_delete;
diff --git a/tsl/test/shared/sql/compression_dml.sql b/tsl/test/shared/sql/compression_dml.sql
index 098e4400e7d..90ee67c1c2a 100644
--- a/tsl/test/shared/sql/compression_dml.sql
+++ b/tsl/test/shared/sql/compression_dml.sql
@@ -205,9 +205,11 @@ INSERT INTO direct_delete VALUES
 ('2021-01-01', 'd1', 'r1', 1.0),
 ('2021-01-01', 'd1', 'r2', 1.0),
 ('2021-01-01', 'd1', 'r3', 1.0),
+('2021-01-01', 'd1', NULL, 1.0),
 ('2021-01-01', 'd2', 'r1', 1.0),
 ('2021-01-01', 'd2', 'r2', 1.0),
-('2021-01-01', 'd2', 'r3', 1.0);
+('2021-01-01', 'd2', 'r3', 1.0),
+('2021-01-01', 'd2', NULL, 1.0);
 
 SELECT count(compress_chunk(c)) FROM show_chunks('direct_delete') c;
 
@@ -225,6 +227,42 @@ BEGIN;
 SELECT count(*) FROM direct_delete WHERE reading='r2';
 ROLLBACK;
 
+-- issue #7644
+-- make sure non-btree operators don't delete unrelated batches
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading <> 'r2';
+-- 4 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ROLLBACK;
+
+-- test IS NULL
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading IS NULL;
+-- 6 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ROLLBACK;
+
+-- test IS NOT NULL
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading IS NOT NULL;
+-- 2 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ROLLBACK;
+
+-- test IN
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading IN ('r1','r2');
+-- 4 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ROLLBACK;
+
+-- test NOT IN
+BEGIN;
+:ANALYZE DELETE FROM direct_delete WHERE reading NOT IN ('r1');
+-- 4 tuples should still be there
+SELECT count(*) FROM direct_delete;
+ROLLBACK;
+
 -- combining constraints on segmentby columns should work
 BEGIN;
 -- should be 1 batches directly deleted
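
A note on the expected counts in the tests above: the two NULL reading rows are only
removed by the IS NULL variant, because <> and NOT IN use SQL three-valued comparison
and never match NULL. A quick standalone check (not part of the patch):

-- NULL <> 'r2' evaluates to NULL, not true, so DELETE keeps the row
SELECT v AS reading, v <> 'r2' AS matched_by_delete
FROM (VALUES ('r1'), ('r2'), (NULL)) t(v);
-- r1 -> true (deleted), r2 -> false (kept), NULL -> NULL (kept)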