Commit 1b8689c0 authored by craig[bot]

Merge #32990 #34448

32990: exec: deduplicate distinct inner loop r=jordanlewis a=jordanlewis

This commit deduplicates the inner loop of distinct by introducing a
template definition and calling it in the inner loop. The template
definition is written so that it also looks like a valid Go function, so
the template still compiles properly.
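
For illustration, a minimal self-contained sketch of the trick (the `src` string and the names in it are invented for this example and are not the real template or generator code):

    package main

    import (
        "os"
        "regexp"
        "text/template"
    )

    // src stands in for a *_tmpl.go file: _INNER_LOOP(...) parses as an ordinary
    // Go call, so the file still compiles as Go, while the {{define}} block hides
    // behind line comments.
    const src = `
    // {{define "innerLoop"}}
    // shared loop body goes here
    // {{end}}
    for i := range col {
        _INNER_LOOP(i, lastVal, col, outputCol)
    }
    `

    func main() {
        // The generator rewrites the Go-looking call into a template invocation
        // before executing the template.
        innerLoopRe := regexp.MustCompile(`_INNER_LOOP\(.*\)`)
        s := innerLoopRe.ReplaceAllString(src, `{{template "innerLoop" .}}`)

        tmpl := template.Must(template.New("distinct_op").Parse(s))
        _ = tmpl.Execute(os.Stdout, nil) // stamps the shared body into the call site
    }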

Release note: None

34448: cli: show node status for all nodes r=knz a=petermattis

Previously, `node status` would only show status for nodes which had
gossiped a node descriptor. This meant that a node that had been down
for long enough for the gossiped node descriptor TTL to be reached would
disappear from the `node status` output. Similarly, if a cluster was
restarted but one node failed to come up, that node would be omitted
from the `node status` output. The `crdb_internal.gossip_liveness` table
contains a full list of the nodes in the cluster. We simply need to
`LEFT JOIN` instead of `JOIN` with other tables.
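
For illustration, a simplified sketch of why the join type matters (the helper below mirrors the `joinUsingID` closure in this change; the column lists are abbreviated):

    package main

    import "fmt"

    // joinUsingID mirrors the query-builder helper touched by this change: it
    // chains per-source subqueries on the node id. With a plain JOIN, a node that
    // still has a gossip_liveness row but no gossip_nodes row (for example, one
    // that has been down past the descriptor TTL) drops out of the result; LEFT
    // JOIN keeps it, with NULLs for the missing columns.
    func joinUsingID(queries []string) (query string) {
        for _, q := range queries {
            if query == "" {
                query = q
                continue
            }
            query = "(" + query + ") LEFT JOIN (" + q + ") USING (id)"
        }
        return
    }

    func main() {
        // Abbreviated column lists, for illustration only.
        queries := []string{
            "SELECT node_id AS id, ifnull(is_live, false) AS is_live" +
                " FROM crdb_internal.gossip_liveness LEFT JOIN crdb_internal.gossip_nodes USING (node_id)",
            "SELECT node_id AS id, address FROM crdb_internal.gossip_nodes",
        }
        fmt.Println("SELECT * FROM (" + joinUsingID(queries) + ")")
    }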

Fixes #34435

Release note: None
Co-authored-by: Jordan Lewis <[email protected]>
Co-authored-by: Peter Mattis <[email protected]>
@@ -136,41 +136,33 @@ func runStatusNodeInner(showDecommissioned bool, args []string) ([]string, [][]s
 				query = q
 				continue
 			}
-			query = "(" + query + ") JOIN (" + q + ") USING (id)"
+			query = "(" + query + ") LEFT JOIN (" + q + ") USING (id)"
 		}
 		return
 	}
 	maybeAddActiveNodesFilter := func(query string) string {
-		activeNodesFilter := "decommissioning = false OR split_part(expiration,',',1)::decimal > now()::decimal"
 		if !showDecommissioned {
-			query += " WHERE " + activeNodesFilter
+			query += " WHERE decommissioning = false OR split_part(expiration,',',1)::decimal > now()::decimal"
 		}
 		return query
 	}
-	baseQuery := joinUsingID(
-		[]string{`
-SELECT node_id AS id,
-       address,
-       build_tag AS build,
-       started_at,
-       updated_at
-FROM crdb_internal.gossip_liveness JOIN crdb_internal.gossip_nodes USING (node_id)`,
-			maybeAddActiveNodesFilter(
-				`SELECT node_id AS id,
-        CASE WHEN split_part(expiration,',',1)::decimal > now()::decimal
-             THEN true
-             ELSE false
-             END AS is_available
-FROM crdb_internal.gossip_liveness`,
-			),
-			`SELECT node_id AS id, is_live
-FROM crdb_internal.gossip_nodes`,
-		},
+	baseQuery := maybeAddActiveNodesFilter(
+		`SELECT node_id AS id,
+            address,
+            build_tag AS build,
+            started_at,
+            updated_at,
+            CASE WHEN split_part(expiration,',',1)::decimal > now()::decimal
+                 THEN true
+                 ELSE false
+                 END AS is_available,
+            ifnull(is_live, false)
+     FROM crdb_internal.gossip_liveness LEFT JOIN crdb_internal.gossip_nodes USING (node_id)`,
 	)
-	rangesQuery := `
+	const rangesQuery = `
 SELECT node_id AS id,
        sum((metrics->>'replicas.leaders')::DECIMAL)::INT AS replicas_leaders,
        sum((metrics->>'replicas.leaseholders')::DECIMAL)::INT AS replicas_leaseholders,
@@ -180,7 +172,7 @@ SELECT node_id AS id,
 FROM crdb_internal.kv_store_status
 GROUP BY node_id`
-	statsQuery := `
+	const statsQuery = `
 SELECT node_id AS id,
        sum((metrics->>'livebytes')::DECIMAL)::INT AS live_bytes,
        sum((metrics->>'keybytes')::DECIMAL)::INT AS key_bytes,
@@ -190,12 +182,12 @@ SELECT node_id AS id,
 FROM crdb_internal.kv_store_status
 GROUP BY node_id`
-	decommissionQuery := `
+	const decommissionQuery = `
 SELECT node_id AS id,
        ranges AS gossiped_replicas,
        decommissioning AS is_decommissioning,
        draining AS is_draining
-FROM crdb_internal.gossip_liveness JOIN crdb_internal.gossip_nodes USING (node_id)`
+FROM crdb_internal.gossip_liveness LEFT JOIN crdb_internal.gossip_nodes USING (node_id)`
 	conn, err := getPasswordAndMakeSQLClient("cockroach node status")
 	if err != nil {
@@ -221,7 +213,7 @@ FROM crdb_internal.gossip_liveness JOIN crdb_internal.gossip_nodes USING (node_i
 		}
 	}
-	queryString := "SELECT * FROM " + joinUsingID(queriesToJoin)
+	queryString := "SELECT * FROM (" + joinUsingID(queriesToJoin) + ")"
 	switch len(args) {
 	case 0:
@@ -101,4 +101,19 @@ func runCLINodeStatus(ctx context.Context, t *test, c *cluster) {
 		"false false",
 		"false false",
 	})
+	// Stop the cluster and restart only 2 of the nodes. Verify that three nodes
+	// show up in the node status output.
+	c.Stop(ctx, c.Range(1, 3))
+	c.Start(ctx, t, c.Range(1, 2))
+	// Wait for the cluster to come back up.
+	waitForFullReplication(t, db)
+	waitUntil([]string{
+		"is_available is_live",
+		"true true",
+		"true true",
+		"false false",
+	})
 }
@@ -191,26 +191,14 @@ func (p *sortedDistinct_TYPEOp) Next() ColBatch {
 		// Bounds check elimination.
 		sel = sel[startIdx:n]
 		for _, i := range sel {
-			v := col[i]
-			// Note that not inlining this unique var actually makes a non-trivial
-			// performance difference.
-			var unique bool
-			_ASSIGN_NE("unique", "v", "lastVal")
-			outputCol[i] = outputCol[i] || unique
-			lastVal = v
+			_INNER_LOOP(int(i), lastVal, col, outputCol)
 		}
 	} else {
 		// Bounds check elimination.
 		col = col[startIdx:n]
 		outputCol = outputCol[startIdx:n]
 		for i := range col {
-			v := col[i]
-			// Note that not inlining this unique var actually makes a non-trivial
-			// performance difference.
-			var unique bool
-			_ASSIGN_NE("unique", "v", "lastVal")
-			outputCol[i] = outputCol[i] || unique
-			lastVal = v
+			_INNER_LOOP(i, lastVal, col, outputCol)
 		}
 	}
@@ -242,3 +230,19 @@ func (p partitioner_TYPE) partition(colVec ColVec, outputCol []bool, n uint64) {
 }
 // {{end}}
+// {{/*
+func _INNER_LOOP(i int, lastVal _GOTYPE, col []interface{}, outputCol []bool) { // */}}
+	// {{define "innerLoop"}}
+	v := col[i]
+	// Note that not inlining this unique var actually makes a non-trivial
+	// performance difference.
+	var unique bool
+	_ASSIGN_NE("unique", "v", "lastVal")
+	outputCol[i] = outputCol[i] || unique
+	lastVal = v
+	// {{end}}
+	// {{/*
+} // */}}
@@ -41,6 +41,9 @@ func genDistinctOps(wr io.Writer) error {
 	assignNeRe := regexp.MustCompile(`_ASSIGN_NE\((.*),(.*),(.*)\)`)
 	s = assignNeRe.ReplaceAllString(s, "{{.Assign $1 $2 $3}}")
+	innerLoopRe := regexp.MustCompile(`_INNER_LOOP\(.*\)`)
+	s = innerLoopRe.ReplaceAllString(s, `{{template "innerLoop" .}}`)
 	// Now, generate the op, from the template.
 	tmpl, err := template.New("distinct_op").Parse(s)
 	if err != nil {