Commit b3697c03 authored by craig[bot]

Merge #50760 #50838

50760: backupccl: fix formatting of backup jobs in jobs table r=pbardea a=pbardea

When cluster backup/restore was added, the method that generates the backup
job's description was not updated to handle cluster backups. As a result, a
cluster backup job would appear in the jobs table as `BACKUP TABLE TO`
rather than `BACKUP TO`.
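
For illustration, here is a minimal, self-contained sketch of the formatting rule this fixes; `backupStmt` and its fields are hypothetical stand-ins, not the real `tree.Backup` API:

```go
package main

import (
	"fmt"
	"strings"
)

// backupStmt is a hypothetical, simplified stand-in for tree.Backup; the real
// fix lives in the tree.Backup formatter and backupJobDescription.
type backupStmt struct {
	Targets      string   // e.g. "TABLE foo"; unused for a cluster backup
	To           []string // destination URIs
	ClusterScope bool     // stands in for a DescriptorCoverage that covers the whole cluster
}

func (b backupStmt) String() string {
	var sb strings.Builder
	sb.WriteString("BACKUP ")
	// Only table/database backups carry an explicit target list; a cluster
	// backup goes straight to "TO", which is what the job description should show.
	if !b.ClusterScope {
		sb.WriteString(b.Targets)
		sb.WriteString(" ")
	}
	sb.WriteString("TO ")
	sb.WriteString(strings.Join(b.To, ", "))
	return sb.String()
}

func main() {
	fmt.Println(backupStmt{Targets: "TABLE foo", To: []string{"'bar'"}}) // BACKUP TABLE foo TO 'bar'
	fmt.Println(backupStmt{ClusterScope: true, To: []string{"'bar'"}})   // BACKUP TO 'bar'
}
```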

Release note (bug fix): Cluster backup jobs previously appeared as
`BACKUP TABLE TO` rather than `BACKUP TO` in the jobs table.

50838: kv: drop default for kv.bulk_sst.sync_size and kv.snapshot_sst.sync_size r=nvanbenschoten a=nvanbenschoten

These were both defaulting to 2 MB per fsync. In Peter's testing, 512 KB per sync is a better value.
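
As a rough, hypothetical sketch of the pattern these settings control (the `syncingWriter` type below is illustrative, not the real storage-layer code): writes accumulate, and an fsync is issued each time the bytes written since the last sync cross the threshold, so a smaller threshold trades more syncs for smaller, cheaper ones.

```go
package main

import (
	"fmt"
	"os"
)

// syncingWriter fsyncs the underlying file whenever syncSize bytes have
// accumulated since the last sync, bounding how much dirty data any single
// fsync must flush.
type syncingWriter struct {
	f              *os.File
	syncSize       int64 // e.g. 512 << 10 (new default) vs. 2 << 20 (old)
	bytesSinceSync int64
	syncs          int
}

func (w *syncingWriter) Write(p []byte) (int, error) {
	n, err := w.f.Write(p)
	if err != nil {
		return n, err
	}
	w.bytesSinceSync += int64(n)
	if w.bytesSinceSync >= w.syncSize {
		if err := w.f.Sync(); err != nil {
			return n, err
		}
		w.bytesSinceSync = 0
		w.syncs++
	}
	return n, nil
}

func main() {
	f, err := os.CreateTemp("", "sst-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	w := &syncingWriter{f: f, syncSize: 512 << 10}
	chunk := make([]byte, 64<<10)
	for i := 0; i < 32; i++ { // 2 MiB total, written in 64 KiB chunks
		if _, err := w.Write(chunk); err != nil {
			panic(err)
		}
	}
	// 4 fsyncs at the 512 KiB threshold; the old 2 MiB default would have
	// deferred everything to a single, larger fsync.
	fmt.Printf("wrote 2 MiB with %d fsyncs\n", w.syncs)
}
```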

@petermattis do you have experiments I can link to for this, or will I need to spin up a few IMPORT roachtests to test out the new defaults? I guess I'll want to do that regardless.

Co-authored-by: Paul Bardea <[email protected]>
Co-authored-by: Nathan VanBenschoten <[email protected]>
@@ -228,9 +228,10 @@ func backupJobDescription(
 	p sql.PlanHookState, backup *tree.Backup, to []string, incrementalFrom []string,
 ) (string, error) {
 	b := &tree.Backup{
-		AsOf:    backup.AsOf,
-		Options: backup.Options,
-		Targets: backup.Targets,
+		AsOf:               backup.AsOf,
+		Options:            backup.Options,
+		Targets:            backup.Targets,
+		DescriptorCoverage: backup.DescriptorCoverage,
 	}
 	for _, t := range to {
@@ -858,7 +858,7 @@ var recoverySnapshotRate = settings.RegisterPublicValidatedByteSizeSetting(
 var snapshotSSTWriteSyncRate = settings.RegisterByteSizeSetting(
 	"kv.snapshot_sst.sync_size",
 	"threshold after which snapshot SST writes must fsync",
-	2<<20, /* 2 MiB */
+	bulkIOWriteBurst,
 )
 func snapshotRateLimit(
@@ -25,8 +25,10 @@ import (
 	"golang.org/x/time/rate"
 )
-// bulkIOWriteBurst is the burst for the BulkIOWriteLimiter.
-const bulkIOWriteBurst = 2 * 1024 * 1024 // 2MB
+// bulkIOWriteBurst is the burst for the BulkIOWriteLimiter. It is also used as
+// the default value for the kv.bulk_sst.sync_size and kv.snapshot_sst.sync_size
+// cluster settings.
+const bulkIOWriteBurst = 512 << 10 // 512 KB
 const bulkIOWriteLimiterLongWait = 500 * time.Millisecond
@@ -2122,6 +2122,8 @@ $function$`,
 		`BACKUP TABLE foo TO 'bar' AS OF SYSTEM TIME '1' INCREMENTAL FROM 'baz'`},
 	{`BACKUP foo TO $1 INCREMENTAL FROM 'bar', $2, 'baz'`,
 		`BACKUP TABLE foo TO $1 INCREMENTAL FROM 'bar', $2, 'baz'`},
+	{`BACKUP TO 'bar'`,
+		`BACKUP TO 'bar'`},
 	// Tables named "role" are handled specially to support SHOW GRANTS ON ROLE,
 	// but that special handling should not impact BACKUP.
 	{`BACKUP role TO 'bar'`,
@@ -2132,6 +2134,8 @@ $function$`,
 		`RESTORE TABLE foo FROM $1`},
 	{`RESTORE foo FROM $1, $2, 'bar'`,
 		`RESTORE TABLE foo FROM $1, $2, 'bar'`},
+	{`RESTORE FROM $1, $2, 'bar'`,
+		`RESTORE FROM $1, $2, 'bar'`},
 	{`RESTORE foo, baz FROM 'bar'`,
 		`RESTORE TABLE foo, baz FROM 'bar'`},
 	{`RESTORE foo, baz FROM 'bar' AS OF SYSTEM TIME '1'`,
@@ -57,8 +57,9 @@ func (node *Backup) Format(ctx *FmtCtx) {
 	ctx.WriteString("BACKUP ")
 	if node.DescriptorCoverage == RequestedDescriptors {
 		ctx.FormatNode(&node.Targets)
+		ctx.WriteString(" ")
 	}
-	ctx.WriteString(" TO ")
+	ctx.WriteString("TO ")
 	ctx.FormatNode(&node.To)
 	if node.AsOf.Expr != nil {
 		ctx.WriteString(" ")
@@ -91,8 +92,9 @@ func (node *Restore) Format(ctx *FmtCtx) {
 	ctx.WriteString("RESTORE ")
 	if node.DescriptorCoverage == RequestedDescriptors {
 		ctx.FormatNode(&node.Targets)
+		ctx.WriteString(" ")
 	}
-	ctx.WriteString(" FROM ")
+	ctx.WriteString("FROM ")
 	for i := range node.From {
 		if i > 0 {
 			ctx.WriteString(", ")
@@ -1932,7 +1932,9 @@ func (node *Backup) doc(p *PrettyCfg) pretty.Doc {
 	items := make([]pretty.TableRow, 0, 6)
 	items = append(items, p.row("BACKUP", pretty.Nil))
-	items = append(items, node.Targets.docRow(p))
+	if node.DescriptorCoverage == RequestedDescriptors {
+		items = append(items, node.Targets.docRow(p))
+	}
 	items = append(items, p.row("TO", p.Doc(&node.To)))
 	if node.AsOf.Expr != nil {
@@ -1951,7 +1953,9 @@ func (node *Restore) doc(p *PrettyCfg) pretty.Doc {
 	items := make([]pretty.TableRow, 0, 5)
 	items = append(items, p.row("RESTORE", pretty.Nil))
-	items = append(items, node.Targets.docRow(p))
+	if node.DescriptorCoverage == RequestedDescriptors {
+		items = append(items, node.Targets.docRow(p))
+	}
 	from := make([]pretty.Doc, len(node.From))
 	for i := range node.From {
 		from[i] = p.Doc(&node.From[i])