Commit 845a170e authored by craig[bot]

Merge #48620 #48789

48620: opt: elide FK checks when setting NULL values r=RaduBerinde a=RaduBerinde

When we set NULL values for the FK columns, we can elide the foreign key
checks. This is common when we are inserting or upserting on a subset of the
columns. It will also be the case for the mutations generated by `SET NULL`
cascade actions (and to some extent `SET DEFAULT`).

To determine whether we are inserting NULL values, we inspect the mutation's
input when it is a Project or a Values expression; these are the common cases
for mutations.
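
A minimal sketch of the effect (the parent schema here is assumed for
illustration; child_nullable mirrors the table used in the optimizer tests
below):

CREATE TABLE parent (p INT PRIMARY KEY);
CREATE TABLE child_nullable (c INT PRIMARY KEY, p INT REFERENCES parent(p));

-- Both statements produce only NULL values for the FK column p, so the
-- check against parent can be elided:
INSERT INTO child_nullable (c) VALUES (100), (200);
UPDATE child_nullable SET p = NULL;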

Release note: None

48789: roachtest: make roachtest aware of 20.1 and the 20.2 tag r=tbg a=knz

First commit from #48788
Co-authored-by: Radu Berinde <[email protected]>
Co-authored-by: Tobias Schottdorf <[email protected]>
Co-authored-by: Raphael 'kena' Poss <[email protected]>
@@ -168,9 +168,12 @@ var enabledDjangoTests = []string{
var djangoBlacklists = blacklistsForVersion{
{"v19.2", "djangoBlacklist19_2", djangoBlacklist19_2, "djangoIgnoreList19_2", djangoIgnoreList19_2},
{"v20.1", "djangoBlacklist20_1", djangoBlacklist20_1, "djangoIgnoreList20_1", djangoIgnoreList20_1},
{"v20.2", "djangoBlacklist20_2", djangoBlacklist20_2, "djangoIgnoreList20_2", djangoIgnoreList20_2},
}
// Maintain that this list is alphabetized.
var djangoBlacklist20_2 = blacklist{}
var djangoBlacklist20_1 = blacklist{}
var djangoBlacklist19_2 = blacklist{
@@ -215,6 +218,8 @@ var djangoBlacklist19_2 = blacklist{
//"postgres_tests.test_array.TestOtherTypesExactQuerying.test_exact_decimals": "23468",
}
var djangoIgnoreList20_2 = blacklist{}
var djangoIgnoreList20_1 = blacklist{}
var djangoIgnoreList19_2 = blacklist{}
@@ -13,6 +13,7 @@ package main
var gopgBlacklists = blacklistsForVersion{
{"v19.2", "gopgBlackList19_2", gopgBlackList19_2, "gopgIgnoreList19_2", gopgIgnoreList19_2},
{"v20.1", "gopgBlackList20_1", gopgBlackList20_1, "gopgIgnoreList20_1", gopgIgnoreList20_1},
{"v20.2", "gopgBlackList20_2", gopgBlackList20_2, "gopgIgnoreList20_2", gopgIgnoreList20_2},
}
// These are lists of known gopg test errors and failures.
@@ -24,6 +25,8 @@ var gopgBlacklists = blacklistsForVersion{
// After a failed run, an updated version of this blacklist should be available
// in the test log.
var gopgBlackList20_2 = gopgBlackList20_1
var gopgBlackList20_1 = blacklist{
"pg | CopyFrom/CopyTo | copies corrupted data to a table": "41608",
"pg | CopyFrom/CopyTo | copies data from a table and to a table": "41608",
@@ -90,6 +93,8 @@ var gopgBlackList19_2 = blacklist{
"v9.TestUnixSocket": "31113",
}
var gopgIgnoreList20_2 = gopgIgnoreList20_1
var gopgIgnoreList20_1 = gopgIgnoreList19_2
var gopgIgnoreList19_2 = blacklist{
......
@@ -17,11 +17,14 @@ var hibernateBlacklists = blacklistsForVersion{
{"v19.1", "hibernateBlackList19_1", hibernateBlackList19_1, "", nil},
{"v19.2", "hibernateBlackList19_2", hibernateBlackList19_2, "", nil},
{"v20.1", "hibernateBlackList20_1", hibernateBlackList20_1, "", nil},
{"v20.2", "hibernateBlackList20_2", hibernateBlackList20_2, "", nil},
}
// Please keep these lists alphabetized for easy diffing.
// After a failed run, an updated version of this blacklist should be available
// in the test log.
var hibernateBlackList20_2 = blacklist{}
var hibernateBlackList20_1 = blacklist{}
var hibernateBlackList19_2 = blacklist{}
......
@@ -13,8 +13,11 @@ package main
var libPQBlacklists = blacklistsForVersion{
{"v19.2", "libPQBlacklist19_2", libPQBlacklist19_2, "libPQIgnorelist19_2", libPQIgnorelist19_2},
{"v20.1", "libPQBlacklist20_1", libPQBlacklist20_1, "libPQIgnorelist20_1", libPQIgnorelist20_1},
{"v20.2", "libPQBlacklist20_2", libPQBlacklist20_2, "libPQIgnorelist20_2", libPQIgnorelist20_2},
}
var libPQBlacklist20_2 = libPQBlacklist20_1
var libPQBlacklist20_1 = blacklist{
"pq.TestBinaryByteSliceToInt": "41547",
"pq.TestBinaryByteSlicetoUUID": "41547",
@@ -98,6 +101,8 @@ var libPQBlacklist19_2 = blacklist{
"pq.TestTimestampWithTimeZone": "41565",
}
var libPQIgnorelist20_2 = libPQIgnorelist20_1
var libPQIgnorelist20_1 = libPQIgnorelist19_2
var libPQIgnorelist19_2 = blacklist{
......
@@ -17,11 +17,14 @@ var pgjdbcBlacklists = blacklistsForVersion{
{"v19.1", "pgjdbcBlackList19_1", pgjdbcBlackList19_1, "", pgjdbcIgnoreList19_1},
{"v19.2", "pgjdbcBlackList19_2", pgjdbcBlackList19_2, "pgjdbcIgnoreList19_2", pgjdbcIgnoreList19_2},
{"v20.1", "pgjdbcBlackList20_1", pgjdbcBlackList20_1, "pgjdbcIgnoreList20_1", pgjdbcIgnoreList20_1},
{"v20.2", "pgjdbcBlackList20_2", pgjdbcBlackList20_2, "pgjdbcIgnoreList20_2", pgjdbcIgnoreList20_2},
}
// Please keep these lists alphabetized for easy diffing.
// After a failed run, an updated version of this blacklist should be available
// in the test log.
var pgjdbcBlackList20_2 = pgjdbcBlackList20_1
var pgjdbcBlackList20_1 = blacklist{
"org.postgresql.jdbc.DeepBatchedInsertStatementTest.testDeepInternalsBatchedQueryDecorator": "26508",
"org.postgresql.jdbc.DeepBatchedInsertStatementTest.testUnspecifiedParameterType": "26508",
@@ -5617,6 +5620,8 @@ var pgjdbcBlackList2_0 = blacklist{
"org.postgresql.test.jdbc42.GetObject310InfinityTests.test[binary = REGULAR, expr = infinity, pgType = timestamp, klass = class java.time.OffsetDateTime]": "41786",
}
var pgjdbcIgnoreList20_2 = pgjdbcIgnoreList20_1
var pgjdbcIgnoreList20_1 = pgjdbcIgnoreList19_2
var pgjdbcIgnoreList19_2 = blacklist{
@@ -13,11 +13,14 @@ package main
var pgxBlacklists = blacklistsForVersion{
{"v19.2", "pgxBlacklist19_2", pgxBlacklist19_2, "pgxIgnorelist19_2", pgxIgnorelist19_2},
{"v20.1", "pgxBlacklist20_1", pgxBlacklist20_1, "pgxIgnorelist20_1", pgxIgnorelist20_1},
{"v20.2", "pgxBlacklist20_2", pgxBlacklist20_2, "pgxIgnorelist20_2", pgxIgnorelist20_2},
}
// Please keep these lists alphabetized for easy diffing.
// After a failed run, an updated version of this blacklist should be available
// in the test log.
var pgxBlacklist20_2 = pgxBlacklist20_1
var pgxBlacklist20_1 = blacklist{
"v4.Example_CustomType": "27796",
"v4.TestConnBeginBatchDeferredError": "31632",
@@ -60,6 +63,8 @@ var pgxBlacklist20_1 = blacklist{
"v4.TestUnregisteredTypeUsableAsStringArgumentAndBaseResult": "27796",
}
var pgxIgnorelist20_2 = pgxIgnorelist20_1
var pgxIgnorelist20_1 = blacklist{
"v4.TestBeginIsoLevels": "We don't support isolation levels",
"v4.TestQueryEncodeError": "This test checks the exact error message",
......
@@ -15,6 +15,7 @@ var psycopgBlacklists = blacklistsForVersion{
{"v19.1", "psycopgBlackList19_1", psycopgBlackList19_1, "psycopgIgnoreList19_1", psycopgIgnoreList19_1},
{"v19.2", "psycopgBlackList19_2", psycopgBlackList19_2, "psycopgIgnoreList19_2", psycopgIgnoreList19_2},
{"v20.1", "psycopgBlackList20_1", psycopgBlackList20_1, "psycopgIgnoreList20_1", psycopgIgnoreList20_1},
{"v20.2", "psycopgBlackList20_2", psycopgBlackList20_2, "psycopgIgnoreList20_2", psycopgIgnoreList20_2},
}
// These are lists of known psycopg test errors and failures.
@@ -28,6 +29,8 @@ var psycopgBlacklists = blacklistsForVersion{
// Please keep these lists alphabetized for easy diffing.
// After a failed run, an updated version of this blacklist should be available
// in the test log.
var psycopgBlackList20_2 = psycopgBlackList20_1
var psycopgBlackList20_1 = blacklist{
"tests.test_async.AsyncTests.test_async_callproc": "44701",
"tests.test_async.AsyncTests.test_error": "44706",
@@ -816,6 +819,8 @@ var psycopgBlackList19_1 = blacklist{
"tests.test_with.WithCursorTestCase.test_named_with_noop": "30352",
}
var psycopgIgnoreList20_2 = psycopgIgnoreList20_1
var psycopgIgnoreList20_1 = psycopgIgnoreList19_2
var psycopgIgnoreList19_2 = psycopgIgnoreList19_1
......
@@ -15,8 +15,11 @@ var sqlAlchemyBlacklists = blacklistsForVersion{
{"v19.1", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList},
{"v19.2", "sqlAlchemyBlacklist", sqlAlchemyBlacklist, "sqlAlchemyIgnoreList", sqlAlchemyIgnoreList},
{"v20.1", "sqlAlchemyBlacklist20_1", sqlAlchemyBlacklist20_1, "sqlAlchemyIgnoreList20_1", sqlAlchemyIgnoreList20_1},
{"v20.2", "sqlAlchemyBlacklist20_2", sqlAlchemyBlacklist20_2, "sqlAlchemyIgnoreList20_2", sqlAlchemyIgnoreList20_2},
}
var sqlAlchemyBlacklist20_2 = sqlAlchemyBlacklist20_1
var sqlAlchemyBlacklist20_1 = blacklist{
"test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_autoincrement_col": "5807",
"test/dialect/test_suite.py::ComponentReflectionTest_cockroachdb+psycopg2_9_5_0::test_deprecated_get_primary_keys": "5807",
@@ -157,6 +160,8 @@ var sqlAlchemyBlacklist = blacklist{
"test/dialect/test_suite.py::TableDDLTest_cockroachdb+psycopg2_9_5_0::test_create_table_schema": "26443",
}
var sqlAlchemyIgnoreList20_2 = sqlAlchemyIgnoreList
var sqlAlchemyIgnoreList20_1 = sqlAlchemyIgnoreList
var sqlAlchemyIgnoreList = blacklist{
......
@@ -1142,6 +1142,7 @@ func PredecessorVersion(buildVersion version.Version) (string, error) {
// (see runVersionUpgrade). The same is true for adding a new key to this
// map.
verMap := map[string]string{
"20.2": "20.1.0",
"20.1": "19.2.6",
"19.2": "19.1.5",
"19.1": "2.1.9",
......
@@ -38,6 +38,7 @@ type crdbVersion int
const (
tpchVecVersion19_2 crdbVersion = iota
tpchVecVersion20_1
tpchVecVersion20_2
)
func toCRDBVersion(v string) (crdbVersion, error) {
@@ -45,6 +46,8 @@ func toCRDBVersion(v string) (crdbVersion, error) {
		return tpchVecVersion19_2, nil
	} else if strings.HasPrefix(v, "v20.1") {
		return tpchVecVersion20_1, nil
	} else if strings.HasPrefix(v, "v20.2") {
		return tpchVecVersion20_2, nil
	} else {
		return 0, errors.Errorf("unrecognized version: %s", v)
	}
@@ -54,6 +57,7 @@ var (
vectorizeOnOptionByVersion = map[crdbVersion]string{
tpchVecVersion19_2: "experimental_on",
tpchVecVersion20_1: "on",
tpchVecVersion20_2: "on",
}
// queriesToSkipByVersion is a map keyed by version that contains query numbers
@@ -77,6 +81,7 @@ var (
slownessThresholdByVersion = map[crdbVersion]float64{
tpchVecVersion19_2: 1.5,
tpchVecVersion20_1: 1.2,
tpchVecVersion20_2: 1.2,
}
)
......
@@ -425,6 +425,7 @@ func stmtFeatureTest(
func makeVersionFixtureAndFatal(
	ctx context.Context, t *test, c *cluster, predecessorVersion string, makeFixtureVersion string,
) {
	c.l.Printf("making fixture for %s (starting at %s)", makeFixtureVersion, predecessorVersion)
	c.encryptDefault = false
	newVersionUpgradeTest(c,
		// Start the cluster from a fixture. That fixture's cluster version may
......
@@ -163,7 +163,7 @@ CREATE TABLE multi_ref_child (
)
query TTT
-EXPLAIN INSERT INTO multi_ref_child VALUES (1, NULL, NULL, NULL)
+EXPLAIN INSERT INTO multi_ref_child VALUES (1, NULL, NULL, NULL), (2, 3, 4, 5)
----
· distributed false
· vectorized false
@@ -175,7 +175,7 @@ root · ·
│ └── buffer node · ·
│ │ label buffer 1
│ └── values · ·
-│ size 4 columns, 1 row
+│ size 4 columns, 2 rows
├── fk-check · ·
│ └── error if rows · ·
│ └── lookup-join · ·
@@ -203,6 +203,20 @@ root · ·
└── scan buffer node · ·
· label buffer 1
# FK check can be omitted when we are inserting only NULLs.
query TTT
EXPLAIN INSERT INTO multi_ref_child VALUES (1, NULL, NULL, NULL)
----
· distributed false
· vectorized false
count · ·
└── insert · ·
│ into multi_ref_child(k, a, b, c)
│ strategy inserter
│ auto commit ·
└── values · ·
· size 4 columns, 1 row
# -- Tests with DELETE --
query TTT
......
@@ -681,6 +681,41 @@ func ExprIsNeverNull(e opt.ScalarExpr, notNullCols opt.ColSet) bool {
}
}
// OutputColumnIsAlwaysNull returns true if the expression produces only NULL
// values for the given column. Used to elide foreign key checks.
//
// This could be a logical property, but we only care about simple cases (NULLs
// in Projections and Values).
func OutputColumnIsAlwaysNull(e RelExpr, col opt.ColumnID) bool {
	switch e.Op() {
	case opt.ProjectOp:
		p := e.(*ProjectExpr)
		if p.Passthrough.Contains(col) {
			// The column is passed through unchanged; check the input instead.
			return OutputColumnIsAlwaysNull(p.Input, col)
		}
		for i := range p.Projections {
			if p.Projections[i].Col == col {
				// The column is synthesized; it is always NULL iff the
				// projected expression is the NULL literal.
				return p.Projections[i].Element.Op() == opt.NullOp
			}
		}

	case opt.ValuesOp:
		v := e.(*ValuesExpr)
		colOrdinal, ok := v.Cols.Find(col)
		if !ok {
			return false
		}
		// The column must be the NULL literal in every row.
		for i := range v.Rows {
			if v.Rows[i].(*TupleExpr).Elems[colOrdinal].Op() != opt.NullOp {
				return false
			}
		}
		return true
	}
	return false
}
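
Note that both cases above match only a bare NULL (NullOp): a value that
arrives as NULL::INT8 is recognized only after normalization folds the cast,
which is why the optimizer tests below use norm rather than build.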
// FKCascades stores metadata necessary for building cascading queries.
type FKCascades []FKCascade
......
@@ -81,8 +81,9 @@ func (mb *mutationBuilder) buildFKChecksForInsert() {
	h := &mb.fkCheckHelper
	for i, n := 0, mb.tab.OutboundForeignKeyCount(); i < n; i++ {
-		h.initWithOutboundFK(mb, i)
-		mb.checks = append(mb.checks, h.buildInsertionCheck())
+		if h.initWithOutboundFK(mb, i) {
+			mb.checks = append(mb.checks, h.buildInsertionCheck())
+		}
	}
	telemetry.Inc(sqltelemetry.ForeignKeyChecksUseCounter)
}
@@ -265,8 +266,9 @@ func (mb *mutationBuilder) buildFKChecksForUpdate() {
	for i, n := 0, mb.tab.OutboundForeignKeyCount(); i < n; i++ {
		// Verify that at least one FK column is actually updated.
		if mb.outboundFKColsUpdated(i) {
-			h.initWithOutboundFK(mb, i)
-			mb.checks = append(mb.checks, h.buildInsertionCheck())
+			if h.initWithOutboundFK(mb, i) {
+				mb.checks = append(mb.checks, h.buildInsertionCheck())
+			}
		}
	}
@@ -368,8 +370,9 @@ func (mb *mutationBuilder) buildFKChecksForUpsert() {
	h := &mb.fkCheckHelper
	for i := 0; i < numOutbound; i++ {
-		h.initWithOutboundFK(mb, i)
-		mb.checks = append(mb.checks, h.buildInsertionCheck())
+		if h.initWithOutboundFK(mb, i) {
+			mb.checks = append(mb.checks, h.buildInsertionCheck())
+		}
	}
	for i := 0; i < numInbound; i++ {
@@ -465,6 +468,9 @@ type fkCheckHelper struct {
}
// initWithOutboundFK initializes the helper with an outbound FK constraint.
//
// Returns false if the FK relation should be ignored (e.g. because the new
// values for the FK columns are known to be always NULL).
func (h *fkCheckHelper) initWithOutboundFK(mb *mutationBuilder, fkOrdinal int) bool {
	*h = fkCheckHelper{
		mb: mb,
@@ -492,6 +498,26 @@ func (h *fkCheckHelper) initWithOutboundFK(mb *mutationBuilder, fkOrdinal int) b
		h.tabOrdinals[i] = h.fk.OriginColumnOrdinal(mb.tab, i)
		h.otherTabOrdinals[i] = h.fk.ReferencedColumnOrdinal(h.otherTab, i)
	}

	// Check if we are setting NULL values for the FK columns, like when this
	// mutation is the result of a SET NULL cascade action.
	numNullCols := 0
	for _, tabOrd := range h.tabOrdinals {
		col := mb.scopeOrdToColID(mb.mapToReturnScopeOrd(tabOrd))
		if memo.OutputColumnIsAlwaysNull(mb.outScope.expr, col) {
			numNullCols++
		}
	}
	if numNullCols == numCols {
		// All FK columns are getting NULL values; FK check not needed.
		return false
	}
	if numNullCols > 0 && h.fk.MatchMethod() == tree.MatchSimple {
		// At least one FK column is getting a NULL value and we are using MATCH
		// SIMPLE; FK check not needed.
		return false
	}
	return true
}
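
The intended behavior, sketched with the multi-column tables defined in the
UPDATE tests further below (child_multicol_simple and child_multicol_full):

-- MATCH SIMPLE: one FK column that is always NULL elides the check.
UPDATE child_multicol_simple SET a = 1, b = NULL, c = 1 WHERE k = 1;

-- MATCH FULL: the check is elided only when all FK columns are NULL.
UPDATE child_multicol_full SET a = NULL, b = NULL, c = NULL WHERE k = 1;  -- elided
UPDATE child_multicol_full SET a = 1, b = NULL, c = 1 WHERE k = 1;        -- still checked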
......
@@ -171,6 +171,40 @@ insert child_nullable
└── filters
└── column2:5 = parent.p:6
# In this case, we know that we are inserting *only* NULL values, so we don't
# need to check any FKs. We need to use norm because internally the values
# become NULL::INT and the normalization rules are needed to fold the cast.
norm
INSERT INTO child_nullable VALUES (100, NULL), (200, NULL)
----
insert child_nullable
├── columns: <none>
├── insert-mapping:
│ ├── column1:3 => c:1
│ └── column2:4 => p:2
└── values
├── columns: column1:3!null column2:4
├── (100, NULL)
└── (200, NULL)
# Same as above.
norm
INSERT INTO child_nullable (c) VALUES (100), (200)
----
insert child_nullable
├── columns: <none>
├── insert-mapping:
│ ├── column1:3 => c:1
│ └── column4:4 => p:2
└── project
├── columns: column4:4 column1:3!null
├── values
│ ├── columns: column1:3!null
│ ├── (100,)
│ └── (200,)
└── projections
└── CAST(NULL AS INT8) [as=column4:4]
# Check planning of filter with FULL match (which should be the same on a
# single column).
exec-ddl
@@ -207,6 +241,24 @@ insert child_nullable
└── filters
└── column2:5 = parent.p:6
# No FK check needed.
norm
INSERT INTO child_nullable_full (c) VALUES (100), (200)
----
insert child_nullable_full
├── columns: <none>
├── insert-mapping:
│ ├── column1:3 => c:1
│ └── column4:4 => p:2
└── project
├── columns: column4:4 column1:3!null
├── values
│ ├── columns: column1:3!null
│ ├── (100,)
│ └── (200,)
└── projections
└── CAST(NULL AS INT8) [as=column4:4]
# Tests with multicolumn FKs.
exec-ddl
CREATE TABLE multi_col_parent (p INT, q INT, r INT, other INT, PRIMARY KEY (p, q, r))
@@ -222,7 +274,7 @@ CREATE TABLE multi_col_child (
# All columns are nullable and must be part of the filter.
build
-INSERT INTO multi_col_child VALUES (4, NULL, NULL, NULL)
+INSERT INTO multi_col_child VALUES (4, NULL, NULL, NULL), (5, 1, 2, 3)
----
insert multi_col_child
├── columns: <none>
@@ -234,7 +286,8 @@ insert multi_col_child
├── input binding: &1
├── values
│ ├── columns: column1:5!null column2:6 column3:7 column4:8
-│ └── (4, NULL::INT8, NULL::INT8, NULL::INT8)
+│ ├── (4, NULL::INT8, NULL::INT8, NULL::INT8)
+│ └── (5, 1, 2, 3)
└── f-k-checks
└── f-k-checks-item: multi_col_child(p,q,r) -> multi_col_parent(p,q,r)
└── anti-join (hash)
@@ -328,6 +381,21 @@ insert multi_col_child
├── column3:10 = multi_col_parent.q:13
└── column4:11 = multi_col_parent.r:14
# No FK check needed - we have only NULL values for a FK column.
norm
INSERT INTO multi_col_child VALUES (1, 10, NULL, 10)
----
insert multi_col_child
├── columns: <none>
├── insert-mapping:
│ ├── column1:5 => c:1
│ ├── column2:6 => p:2
│ ├── column3:7 => q:3
│ └── column4:8 => r:4
└── values
├── columns: column1:5!null column2:6!null column3:7 column4:8!null
└── (1, 10, NULL, 10)
exec-ddl
CREATE TABLE multi_col_child_full (
c INT PRIMARY KEY,
@@ -338,7 +406,7 @@ CREATE TABLE multi_col_child_full (
# All columns are nullable and must be part of the filter.
build
-INSERT INTO multi_col_child_full VALUES (4, NULL, NULL, NULL)
+INSERT INTO multi_col_child_full VALUES (4, NULL, NULL, NULL), (5, 1, 2, 3)
----
insert multi_col_child_full
├── columns: <none>
@@ -350,7 +418,8 @@ insert multi_col_child_full
├── input binding: &1
├── values
│ ├── columns: column1:5!null column2:6 column3:7 column4:8
-│ └── (4, NULL::INT8, NULL::INT8, NULL::INT8)
+│ ├── (4, NULL::INT8, NULL::INT8, NULL::INT8)
+│ └── (5, 1, 2, 3)
└── f-k-checks
└── f-k-checks-item: multi_col_child_full(p,q,r) -> multi_col_parent(p,q,r)
└── anti-join (hash)
@@ -437,6 +506,54 @@ insert multi_col_child_full
├── column3:10 = multi_col_parent.q:13
└── column4:11 = multi_col_parent.r:14
# No FK check needed when all FK columns only have NULL values.
norm
INSERT INTO multi_col_child_full VALUES (1, NULL, NULL, NULL)
----
insert multi_col_child_full
├── columns: <none>
├── insert-mapping:
│ ├── column1:5 => c:1
│ ├── column2:6 => p:2
│ ├── column3:7 => q:3
│ └── column4:8 => r:4
└── values
├── columns: column1:5!null column2:6 column3:7 column4:8
└── (1, NULL, NULL, NULL)
# But with MATCH FULL, the FK check is still needed when only a subset of the
# columns have NULL values.
norm
INSERT INTO multi_col_child_full VALUES (1, NULL, 2, NULL)
----
insert multi_col_child_full
├── columns: <none>
├── insert-mapping:
│ ├── column1:5 => c:1
│ ├── column2:6 => multi_col_child_full.p:2
│ ├── column3:7 => multi_col_child_full.q:3
│ └── column4:8 => multi_col_child_full.r:4
├── input binding: &1
├── values
│ ├── columns: column1:5!null column2:6 column3:7!null column4:8
│ └── (1, NULL, 2, NULL)
└── f-k-checks
└── f-k-checks-item: multi_col_child_full(p,q,r) -> multi_col_parent(p,q,r)
└── anti-join (hash)
├── columns: column2:9 column3:10!null column4:11
├── with-scan &1
│ ├── columns: column2:9 column3:10!null column4:11
│ └── mapping:
│ ├── column2:6 => column2:9
│ ├── column3:7 => column3:10
│ └── column4:8 => column4:11
├── scan multi_col_parent
│ └── columns: multi_col_parent.p:12!null multi_col_parent.q:13!null multi_col_parent.r:14!null
└── filters
├── column2:9 = multi_col_parent.p:12
├── column3:10 = multi_col_parent.q:13
└── column4:11 = multi_col_parent.r:14
exec-ddl
CREATE TABLE multi_ref_parent_a (a INT PRIMARY KEY, other INT)
----
......
@@ -212,6 +212,28 @@ update child
└── filters
└── c:11 = grandchild.c:14
exec-ddl
CREATE TABLE child_nullable (c INT PRIMARY KEY, p INT REFERENCES parent(p))
----
# We don't need the FK check in this case because we are only setting NULL
# values. We need to use norm because internally the value becomes NULL::INT
# and the normalization rules are needed to fold the cast.
norm
UPDATE child_nullable SET p = NULL
----
update child_nullable
├── columns: <none>
├── fetch columns: c:3 p:4
├── update-mapping:
│ └── column5:5 => p:2
└── project
├── columns: column5:5 c:3!null p:4
├── scan child_nullable
│ └── columns: c:3!null p:4
└── projections
└── CAST(NULL AS INT8) [as=column5:5]
# Multiple grandchild tables
exec-ddl
CREATE TABLE grandchild2 (g INT PRIMARY KEY, c INT NOT NULL REFERENCES child(c))
@@ -313,6 +335,110 @@ update self
└── filters
└── x:6 = y:9
exec-ddl
CREATE TABLE parent_multicol (a INT, b INT, c INT, PRIMARY KEY (a,b,c))
----
exec-ddl
CREATE TABLE child_multicol_simple (
k INT PRIMARY KEY,
a INT, b INT, c INT,
CONSTRAINT fk FOREIGN KEY(a,b,c) REFERENCES parent_multicol(a,b,c) MATCH SIMPLE
)
----
# With MATCH SIMPLE, we can elide the FK check if any FK column is NULL.
norm
UPDATE child_multicol_simple SET a = 1, b = NULL, c = 1 WHERE k = 1
----
update child_multicol_simple
├── columns: <none>
├── fetch columns: k:5 a:6 b:7 c:8
├── update-mapping:
│ ├── column9:9 => a:2
│ ├── column10:10 => b:3
│ └── column9:9 => c:4
└── project
├── columns: column9:9!null column10:10 k:5!null a:6 b:7 c:8
├── select
│ ├── columns: k:5!null a:6 b:7 c:8
│ ├── scan child_multicol_simple
│ │ └── columns: k:5!null a:6 b:7 c:8
│ └── filters
│ └── k:5 = 1
└── projections
├── 1 [as=column9:9]
└── CAST(NULL AS INT8) [as=column10:10]
exec-ddl
CREATE TABLE child_multicol_full (
k INT PRIMARY KEY,
a INT, b INT, c INT,
CONSTRAINT fk FOREIGN KEY(a,b,c) REFERENCES parent_multicol(a,b,c) MATCH FULL
)
----
# With MATCH FULL, we can elide the FK check only if all FK columns are NULL.
norm
UPDATE child_multicol_full SET a = 1, b = NULL, c = 1 WHERE k = 1
----
update child_multicol_full
├── columns: <none>
├── fetch columns: k:5 child_multicol_full.a:6 child_multicol_full.b:7 child_multicol_full.c:8
├── update-mapping:
│ ├── column9:9 => child_multicol_full.a:2
│ ├── column10:10 => child_multicol_full.b:3
│ └── column9:9 => child_multicol_full.c:4
├── input binding: &1
├── project
│ ├── columns: column9:9!null column10:10 k:5!null child_multicol_full.a:6 child_multicol_full.b:7 child_multicol_full.c:8
│ ├── select
│ │ ├── columns: k:5!null child_multicol_full.a:6 child_multicol_full.b:7 child_multicol_full.c:8
│ │ ├── scan child_multicol_full
│ │ │ └── columns: k:5!null child_multicol_full.a:6 child_multicol_full.b:7 child_multicol_full.c:8
│ │ └── filters
│ │ └── k:5 = 1
│ └── projections
│ ├── 1 [as=column9:9]
│ └── CAST(NULL AS INT8) [as=column10:10]
└── f-k-checks
└── f-k-checks-item: child_multicol_full(a,b,c) -> parent_multicol(a,b,c)
└── anti-join (hash)
├── columns: column9:11!null column10:12 column9:13!null
├── with-scan &1
│ ├── columns: column9:11!null column10:12 column9:13!null
│ └── mapping:
│ ├── column9:9 => column9:11
│ ├── column10:10 => column10:12
│ └── column9:9 => column9:13
├── scan parent_multicol
│ └── columns: parent_multicol.a:14!null parent_multicol.b:15!null parent_multicol.c:16!null
└── filters
├── column9:11 = parent_multicol.a:14
├── column10:12 = parent_multicol.b:15
└── column9:13 = parent_multicol.c:16
norm
UPDATE child_multicol_full SET a = NULL, b = NULL, c = NULL WHERE k = 1
----
update child_multicol_full
├── columns: <none>
├── fetch columns: k:5 a:6 b:7 c:8
├── update-mapping:
│ ├── column9:9 => a:2
│ ├── column9:9 => b:3
│ └── column9:9 => c:4
└── project
├── columns: column9:9 k:5!null a:6 b:7 c:8
├── select
│ ├── columns: k:5!null a:6 b:7 c:8
│ ├── scan child_multicol_full
│ │ └── columns: k:5!null a:6 b:7 c:8
│ └── filters
│ └── k:5 = 1
└── projections
└── CAST(NULL AS INT8) [as=column9:9]
exec-ddl
CREATE TABLE two (a int, b int, primary key (a, b))
----
......
@@ -135,7 +135,7 @@ func (opts Options) filename() string {
// Downloading binaries may take some time, so give ourselves
// some room before the timeout expires.
-var httpClient = httputil.NewClientWithTimeout(30 * time.Second)
+var httpClient = httputil.NewClientWithTimeout(300 * time.Second)
// Download downloads the binary for the given version, and skips the download
// if the archive is already present in `destDir`.
......