-
Notifications
You must be signed in to change notification settings - Fork 919
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
test: make tests deterministic with -scheduler=threads #4640
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -3,6 +3,7 @@ package main | |
import ( | ||
"runtime" | ||
"sync" | ||
"sync/atomic" | ||
"time" | ||
) | ||
|
||
|
@@ -70,11 +71,13 @@ func main() { | |
// Test multi-receiver. | ||
ch = make(chan int) | ||
wg.Add(3) | ||
go fastreceiver(ch) | ||
go fastreceiver(ch) | ||
go fastreceiver(ch) | ||
var result atomic.Uint32 | ||
go fastreceiveradd(ch, &result) | ||
go fastreceiveradd(ch, &result) | ||
go fastreceiveradd(ch, &result) | ||
slowsender(ch) | ||
wg.Wait() | ||
println("sum of sums:", result.Load()) | ||
|
||
// Test iterator style channel. | ||
ch = make(chan int) | ||
|
@@ -88,7 +91,10 @@ func main() { | |
println("sum(100):", sum) | ||
|
||
// Test simple selects. | ||
go selectDeadlock() // cannot use waitGroup here - never terminates | ||
wg.Add(1) | ||
go selectDeadlock() | ||
wg.Wait() | ||
|
||
wg.Add(1) | ||
go selectNoOp() | ||
wg.Wait() | ||
|
@@ -244,7 +250,7 @@ func receive(ch <-chan int) { | |
func sender(ch chan int) { | ||
for i := 1; i <= 8; i++ { | ||
if i == 4 { | ||
time.Sleep(time.Microsecond) | ||
time.Sleep(time.Millisecond) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This seems to be long enough to make the test deterministic with the Go toolchain. |
||
println("slept") | ||
} | ||
ch <- i | ||
|
@@ -290,6 +296,16 @@ func fastreceiver(ch chan int) { | |
wg.Done() | ||
} | ||
|
||
func fastreceiveradd(ch chan int, result *atomic.Uint32) { | ||
sum := 0 | ||
for i := 0; i < 2; i++ { | ||
n := <-ch | ||
sum += n | ||
} | ||
result.Add(uint32(sum)) | ||
wg.Done() | ||
} | ||
|
||
func iterator(ch chan int, top int) { | ||
for i := 0; i < top; i++ { | ||
ch <- i | ||
|
@@ -300,6 +316,7 @@ func iterator(ch chan int, top int) { | |
|
||
func selectDeadlock() { | ||
println("deadlocking") | ||
wg.Done() | ||
select {} | ||
println("unreachable") | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -12,9 +12,7 @@ received num: 8 | |
recv from closed channel: 0 false | ||
complex128: (+7.000000e+000+1.050000e+001i) | ||
sum of n: 149 | ||
sum: 25 | ||
sum: 29 | ||
sum: 33 | ||
sum of sums: 87 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. These numbers were spread out differently when using threading. Sometimes the right numbers came out, but other times the numbers were balanced differently. Of course that's all correct: the important part is that every number is received by exactly one receiver. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The mathematician in me really wants this to switch from adding integers to multiplying unique primes so that we can be sure — by the Fundamental Theorem of Arithmetic (https://en.wikipedia.org/wiki/Fundamental_theorem_of_arithmetic) — that everything was received exactly once. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Actually that's not a bad idea! |
||
sum(100): 4950 | ||
deadlocking | ||
select no-op | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Interestingly this was easily fixable using a WaitGroup.