kvserver/rangefeed: add metric for processor-level send timeout
Epic: none
Release note: None
stevendanna committed Oct 30, 2024
1 parent 75cda80 commit 3bf870a
Showing 3 changed files with 11 additions and 0 deletions.
1 change: 1 addition & 0 deletions docs/generated/metrics/metrics.html
@@ -250,6 +250,7 @@
<tr><td>STORAGE</td><td>kv.rangefeed.processors_goroutine</td><td>Number of active RangeFeed processors using goroutines</td><td>Processors</td><td>GAUGE</td><td>COUNT</td><td>AVG</td><td>NONE</td></tr>
<tr><td>STORAGE</td><td>kv.rangefeed.processors_scheduler</td><td>Number of active RangeFeed processors using scheduler</td><td>Processors</td><td>GAUGE</td><td>COUNT</td><td>AVG</td><td>NONE</td></tr>
<tr><td>STORAGE</td><td>kv.rangefeed.registrations</td><td>Number of active RangeFeed registrations</td><td>Registrations</td><td>GAUGE</td><td>COUNT</td><td>AVG</td><td>NONE</td></tr>
<tr><td>STORAGE</td><td>kv.rangefeed.scheduled_processor.queue_timeout</td><td>Number of times the RangeFeed processor shut down because of a queue send timeout</td><td>Failure Count</td><td>COUNTER</td><td>COUNT</td><td>AVG</td><td>NON_NEGATIVE_DERIVATIVE</td></tr>
<tr><td>STORAGE</td><td>kv.rangefeed.scheduler.normal.latency</td><td>KV RangeFeed normal scheduler latency</td><td>Latency</td><td>HISTOGRAM</td><td>NANOSECONDS</td><td>AVG</td><td>NONE</td></tr>
<tr><td>STORAGE</td><td>kv.rangefeed.scheduler.normal.queue_size</td><td>Number of entries in the KV RangeFeed normal scheduler queue</td><td>Pending Ranges</td><td>GAUGE</td><td>COUNT</td><td>AVG</td><td>NONE</td></tr>
<tr><td>STORAGE</td><td>kv.rangefeed.scheduler.system.latency</td><td>KV RangeFeed system scheduler latency</td><td>Latency</td><td>HISTOGRAM</td><td>NANOSECONDS</td><td>AVG</td><td>NONE</td></tr>
8 changes: 8 additions & 0 deletions pkg/kv/kvserver/rangefeed/metrics.go
@@ -76,12 +76,19 @@ var (
Measurement: "Pending Ranges",
Unit: metric.Unit_COUNT,
}
metaQueueTimeout = metric.Metadata{
Name: "kv.rangefeed.scheduled_processor.queue_timeout",
Help: "Number of times the RangeFeed processor shutdown because of a queue send timeout",
Measurement: "Failure Count",
Unit: metric.Unit_COUNT,
}
)

// Metrics are for production monitoring of RangeFeeds.
type Metrics struct {
RangeFeedCatchUpScanNanos *metric.Counter
RangeFeedBudgetExhausted *metric.Counter
RangefeedProcessorQueueTimeout *metric.Counter
RangeFeedBudgetBlocked *metric.Counter
RangeFeedRegistrations *metric.Gauge
RangeFeedClosedTimestampMaxBehindNanos *metric.Gauge
@@ -106,6 +113,7 @@ func (*Metrics) MetricStruct() {}
func NewMetrics() *Metrics {
return &Metrics{
RangeFeedCatchUpScanNanos: metric.NewCounter(metaRangeFeedCatchUpScanNanos),
RangefeedProcessorQueueTimeout: metric.NewCounter(metaQueueTimeout),
RangeFeedBudgetExhausted: metric.NewCounter(metaRangeFeedExhausted),
RangeFeedBudgetBlocked: metric.NewCounter(metaRangeFeedBudgetBlocked),
RangeFeedRegistrations: metric.NewGauge(metaRangeFeedRegistrations),
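
For reference, a minimal sketch of how the new counter plugs together end to end, using only the constructors and methods visible in this diff (metric.Metadata, metric.NewCounter, Counter.Inc); the Count() accessor in the final line is an assumption about CockroachDB's util/metric package and is not part of this change:

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/util/metric"
)

func main() {
	// Mirrors the metaQueueTimeout metadata added in metrics.go above.
	meta := metric.Metadata{
		Name:        "kv.rangefeed.scheduled_processor.queue_timeout",
		Help:        "Number of times the RangeFeed processor shut down because of a queue send timeout",
		Measurement: "Failure Count",
		Unit:        metric.Unit_COUNT,
	}

	c := metric.NewCounter(meta) // what NewMetrics does for RangefeedProcessorQueueTimeout
	c.Inc(1)                     // what enqueueEventInternal does when ctx.Done() fires
	fmt.Println(c.Count())       // Count() is assumed here; prints 1
}
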
2 changes: 2 additions & 0 deletions pkg/kv/kvserver/rangefeed/scheduled_processor.go
@@ -499,6 +499,7 @@ func (p *ScheduledProcessor) enqueueEventInternal(
case <-p.stoppedC:
// Already stopped. Do nothing.
case <-ctx.Done():
p.Metrics.RangefeedProcessorQueueTimeout.Inc(1)
p.sendStop(newErrBufferCapacityExceeded())
return false
}
@@ -528,6 +529,7 @@ func (p *ScheduledProcessor) enqueueEventInternal(
case <-ctx.Done():
// Sending on the eventC channel would have blocked.
// Instead, tear down the processor and return immediately.
p.Metrics.RangefeedProcessorQueueTimeout.Inc(1)
p.sendStop(newErrBufferCapacityExceeded())
return false
}
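
The two guarded sends in enqueueEventInternal follow the same pattern: try to put the event on the processor's queue, and if the caller's context expires before the send succeeds, bump the new counter and tear the processor down instead of blocking indefinitely. Below is a self-contained sketch of that pattern using a plain channel and an atomic counter in place of the real ScheduledProcessor types; names like enqueue, eventC, and stop are illustrative only:

package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// queueTimeouts stands in for the RangefeedProcessorQueueTimeout counter.
var queueTimeouts atomic.Int64

// enqueue tries to put ev on eventC; if ctx expires first, it records the
// failure, signals shutdown via stop (analogous to p.sendStop(...)), and
// reports that the event was dropped.
func enqueue(ctx context.Context, eventC chan<- int, stop chan<- struct{}, ev int) bool {
	select {
	case eventC <- ev:
		return true
	case <-ctx.Done():
		queueTimeouts.Add(1)
		close(stop)
		return false
	}
}

func main() {
	eventC := make(chan int) // unbuffered: the send below will block
	stop := make(chan struct{})

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	ok := enqueue(ctx, eventC, stop, 42)
	fmt.Println(ok, queueTimeouts.Load()) // false 1
}
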
