diff --git a/cmd/curio/calc.go b/cmd/curio/calc.go
index 6bc4de347..7069499d9 100644
--- a/cmd/curio/calc.go
+++ b/cmd/curio/calc.go
@@ -25,7 +25,11 @@ var calcCmd = &cli.Command{
 
 var calcBatchCpuCmd = &cli.Command{
     Name:  "batch-cpu",
-    Usage: "See layout of batch sealer threads",
+    Usage: "Analyze and display the layout of batch sealer threads",
+    Description: `Analyze and display the layout of batch sealer threads on your CPU.
+
+It provides detailed information about CPU utilization for batch sealing operations, including core allocation and thread
+distribution for different batch sizes.`,
     Flags: []cli.Flag{
         &cli.BoolFlag{Name: "dual-hashers", Value: true},
     },
@@ -61,14 +65,18 @@ var calcBatchCpuCmd = &cli.Command{
         printForBatchSize := func(batchSize int) {
             fmt.Printf("Batch Size: %s sectors\n", color.CyanString("%d", batchSize))
             fmt.Println()
 
-            fmt.Printf("Required Threads: %d\n", batchSize/sectorsPerThread)
-            requiredCCX := (batchSize + sectorsPerCCX - 1) / sectorsPerCCX
-            fmt.Printf("Required CCX: %d\n", requiredCCX)
-            requiredCores := requiredCCX + batchSize/sectorsPerThread/info.ThreadsPerCore
-            fmt.Printf("Required Cores: %d hasher (+4 minimum for non-hashers)\n", requiredCores)
+            config, err := sealsupra.GenerateSupraSealConfig(*info, cctx.Bool("dual-hashers"), batchSize, nil)
+            if err != nil {
+                fmt.Printf("Error generating config: %s\n", err)
+                return
+            }
 
-            enoughCores := requiredCores <= info.CoreCount
+            fmt.Printf("Required Threads: %d\n", config.RequiredThreads)
+            fmt.Printf("Required CCX: %d\n", config.RequiredCCX)
+            fmt.Printf("Required Cores: %d hasher (+4 minimum for non-hashers)\n", config.RequiredCores)
+
+            enoughCores := config.RequiredCores <= info.CoreCount
             if enoughCores {
                 fmt.Printf("Enough cores available for hashers %s\n", color.GreenString("✔"))
             } else {
@@ -76,142 +84,40 @@ var calcBatchCpuCmd = &cli.Command{
                 return
             }
 
-            coresLeftover := info.CoreCount - requiredCores
-            fmt.Printf("Non-hasher cores: %d\n", coresLeftover)
-
-            const minOverheadCores = 4
-
-            type CoreNum = int // core number, 0-based
-
-            var (
-                // core assignments for non-hasher work
-                // defaults are the absolutely worst case of just 4 cores available
-
-                pc1writer       CoreNum = 1
-                pc1reader       CoreNum = 2
-                pc1orchestrator CoreNum = 3
-
-                pc2reader     CoreNum = 0
-                pc2hasher     CoreNum = 1
-                pc2hasher_cpu CoreNum = 0
-                pc2writer     CoreNum = 0
-
-                c1reader CoreNum = 0
-
-                pc2writer_cores int = 1
-            )
-
-            if coresLeftover < minOverheadCores {
-                fmt.Printf("Not enough cores for coordination %s\n", color.RedString("✘"))
-                return
-            } else {
-                fmt.Printf("Enough cores for coordination %s\n", color.GreenString("✔"))
-            }
-
-            nextFreeCore := minOverheadCores
+            fmt.Printf("Non-hasher cores: %d\n", info.CoreCount-config.RequiredCores)
 
-            // first move pc2 to individual cores
-            if coresLeftover > nextFreeCore {
-                pc2writer = nextFreeCore
-                nextFreeCore++
-            } else {
+            if config.P2WrRdOverlap {
                 color.Yellow("! P2 writer will share a core with P2 reader, performance may be impacted")
             }
-
-            if coresLeftover > nextFreeCore {
-                pc2hasher = nextFreeCore
-                nextFreeCore++
-            } else {
+            if config.P2HsP1WrOverlap {
                 color.Yellow("! P2 hasher will share a core with P1 writer, performance may be impacted")
             }
-
-            if coresLeftover > nextFreeCore {
-                pc2hasher_cpu = nextFreeCore
-                nextFreeCore++
-            } else {
+            if config.P2HcP2RdOverlap {
                 color.Yellow("! P2 hasher_cpu will share a core with P2 reader, performance may be impacted")
             }
 
-            if coresLeftover > nextFreeCore {
-                // might be fine to sit on core0, but let's not do that
-                pc2reader = nextFreeCore
-                c1reader = nextFreeCore
-                nextFreeCore++
-            }
-
-            // add p2 writer cores, up to 8 total
-            if coresLeftover > nextFreeCore {
-                // swap pc2reader with pc2writer
-                pc2writer, pc2reader = pc2reader, pc2writer
-
-                for i := 0; i < 7; i++ {
-                    if coresLeftover > nextFreeCore {
-                        pc2writer_cores++
-                        nextFreeCore++
-                    }
-                }
-            }
-
             fmt.Println()
-            fmt.Printf("pc1 writer: %d\n", pc1writer)
-            fmt.Printf("pc1 reader: %d\n", pc1reader)
-            fmt.Printf("pc1 orchestrator: %d\n", pc1orchestrator)
+            fmt.Printf("pc1 writer: %d\n", config.Topology.PC1Writer)
+            fmt.Printf("pc1 reader: %d\n", config.Topology.PC1Reader)
+            fmt.Printf("pc1 orchestrator: %d\n", config.Topology.PC1Orchestrator)
             fmt.Println()
-            fmt.Printf("pc2 reader: %d\n", pc2reader)
-            fmt.Printf("pc2 hasher: %d\n", pc2hasher)
-            fmt.Printf("pc2 hasher_cpu: %d\n", pc2hasher_cpu)
-            fmt.Printf("pc2 writer: %d\n", pc2writer)
-            fmt.Printf("pc2 writer_cores: %d\n", pc2writer_cores)
+            fmt.Printf("pc2 reader: %d\n", config.Topology.PC2Reader)
+            fmt.Printf("pc2 hasher: %d\n", config.Topology.PC2Hasher)
+            fmt.Printf("pc2 hasher_cpu: %d\n", config.Topology.PC2HasherCPU)
+            fmt.Printf("pc2 writer: %d\n", config.Topology.PC2Writer)
+            fmt.Printf("pc2 writer_cores: %d\n", config.Topology.PC2WriterCores)
             fmt.Println()
-            fmt.Printf("c1 reader: %d\n", c1reader)
+            fmt.Printf("c1 reader: %d\n", config.Topology.C1Reader)
             fmt.Println()
 
-            unoccupiedCores := coresLeftover - nextFreeCore
-            fmt.Printf("Unoccupied Cores: %d\n\n", unoccupiedCores)
-
-            var ccxCores []CoreNum // first core in each CCX
-            for i := 0; i < info.CoreCount; i += info.CoresPerL3 {
-                ccxCores = append(ccxCores, i)
-            }
-
-            type sectorCoreConfig struct {
-                core    CoreNum // coordinator core
-                hashers CoreNum // number of hasher cores
-            }
-            var coreConfigs []sectorCoreConfig
-
-            for i := requiredCores; i > 0; {
-                firstCCXCoreNum := ccxCores[len(ccxCores)-1]
-                toAssign := min(i, info.CoresPerL3)
-
-                // shift up the first core if possible so that cores on the right are used first
-                coreNum := firstCCXCoreNum + info.CoresPerL3 - toAssign
-
-                coreConfigs = append(coreConfigs, sectorCoreConfig{
-                    core:    coreNum,
-                    hashers: (toAssign - 1) * info.ThreadsPerCore,
-                })
-
-                i -= toAssign
-                if toAssign == info.CoresPerL3 {
-                    ccxCores = ccxCores[:len(ccxCores)-1]
-                    if len(ccxCores) == 0 {
-                        break
-                    }
-                }
-            }
-
-            // reverse the order
-            for i, j := 0, len(coreConfigs)-1; i < j; i, j = i+1, j-1 {
-                coreConfigs[i], coreConfigs[j] = coreConfigs[j], coreConfigs[i]
-            }
+            fmt.Printf("Unoccupied Cores: %d\n\n", config.UnoccupiedCores)
 
             fmt.Println("{")
             fmt.Printf("  sectors = %d;\n", batchSize)
             fmt.Println("  coordinators = (")
-            for i, config := range coreConfigs {
-                fmt.Printf("    { core = %d;\n      hashers = %d; }", config.core, config.hashers)
-                if i < len(coreConfigs)-1 {
+            for i, coord := range config.Topology.SectorConfigs[0].Coordinators {
+                fmt.Printf("    { core = %d;\n      hashers = %d; }", coord.Core, coord.Hashers)
+                if i < len(config.Topology.SectorConfigs[0].Coordinators)-1 {
                     fmt.Println(",")
                 } else {
                     fmt.Println()
@@ -235,6 +141,10 @@ var calcBatchCpuCmd = &cli.Command{
 var calcSuprasealConfigCmd = &cli.Command{
     Name:  "supraseal-config",
     Usage: "Generate a supra_seal configuration",
+    Description: `Generate a supra_seal configuration for a given batch size.
+
+This command outputs a configuration in the format expected by SupraSeal. Its main purpose is debugging and testing:
+the config can be used directly with SupraSeal binaries without involving Curio.`,
     Flags: []cli.Flag{
         &cli.BoolFlag{
             Name: "dual-hashers",
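Both calc commands now delegate to the sealsupra helpers changed later in this patch instead of duplicating the core-layout math. A minimal sketch of the equivalent programmatic flow (the import path is an assumption based on this repo's module layout; the signatures and fields are the ones used above):

    package main

    import (
        "fmt"

        "github.com/filecoin-project/curio/tasks/sealsupra" // assumed import path
    )

    func main() {
        // Probe core count, cores per L3 (CCX), and threads per core.
        info, err := sealsupra.GetSystemInfo()
        if err != nil {
            panic(err)
        }

        // nil NVMe device list: only the topology and diagnostic fields are needed.
        cfg, err := sealsupra.GenerateSupraSealConfig(*info, true, 32, nil)
        if err != nil {
            panic(err)
        }

        fmt.Printf("threads=%d ccx=%d cores=%d unoccupied=%d\n",
            cfg.RequiredThreads, cfg.RequiredCCX, cfg.RequiredCores, cfg.UnoccupiedCores)
        fmt.Print(sealsupra.FormatSupraSealConfig(cfg))
    }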
diff --git a/commit-phase1-output b/commit-phase1-output
deleted file mode 100644
index f5094e070..000000000
Binary files a/commit-phase1-output and /dev/null differ
diff --git a/deps/config/types.go b/deps/config/types.go
index 566d2d741..4e344ce56 100644
--- a/deps/config/types.go
+++ b/deps/config/types.go
@@ -478,11 +478,13 @@ type CurioSealConfig struct {
     // Set to false for older CPUs (Zen 2 and before).
     SingleHasherPerThread bool
 
-    // LayerNVMEDevices is a list of pcie device addresses that should be used for layer storage.
+    // LayerNVMEDevices is a list of pcie device addresses that should be used for SDR layer storage.
     // The required storage is 11 * BatchSealBatchSize * BatchSealSectorSize * BatchSealPipelines
     // Total Read IOPS for optimal performance should be 10M+.
     // The devices MUST be NVMe devices, not used for anything else. Any data on the devices will be lost!
     //
+    // It's recommended to define these settings in a per-machine layer, as the devices are machine-specific.
+    //
     // Example: ["0000:01:00.0", "0000:01:00.1"]
     LayerNVMEDevices []string
 }
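As a worked example of the storage rule in that comment: with a hypothetical batch size of 32, 32GiB sectors, and 2 pipelines (all assumed values), the listed devices must hold 11 * 32 * 32GiB * 2 = 22TiB of layer data:

    package main

    import "fmt"

    func main() {
        const (
            layers     = 11       // SDR layers stored per sector
            batchSize  = 32       // BatchSealBatchSize (assumed value)
            sectorSize = 32 << 30 // BatchSealSectorSize, 32GiB (assumed value)
            pipelines  = 2        // BatchSealPipelines (assumed value)
        )
        need := int64(layers) * batchSize * sectorSize * pipelines
        fmt.Printf("required layer storage: %d bytes (%.0f TiB)\n", need, float64(need)/(1<<40))
    }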
diff --git a/lib/proof/porep_vproof_bin_decode.go b/lib/proof/porep_vproof_bin_decode.go
index 2473e3b01..985d6a87e 100644
--- a/lib/proof/porep_vproof_bin_decode.go
+++ b/lib/proof/porep_vproof_bin_decode.go
@@ -6,6 +6,10 @@ import (
     "io"
 )
 
+// This file contains a bincode decoder for Commit1OutRaw.
+// This is the format output by the C++ supraseal C1 implementation.
+// bincode - https://github.com/bincode-org/bincode
+
 func ReadLE[T any](r io.Reader) (T, error) {
     var out T
     err := binary.Read(r, binary.LittleEndian, &out)
diff --git a/lib/proof/porep_vproof_bin_test.go b/lib/proof/porep_vproof_bin_test.go
index df46c0798..d5662d2aa 100644
--- a/lib/proof/porep_vproof_bin_test.go
+++ b/lib/proof/porep_vproof_bin_test.go
@@ -2,7 +2,9 @@ package proof
 
 import (
     "bytes"
+    "compress/gzip"
     "encoding/json"
+    "io"
     "os"
     "testing"
 
@@ -10,14 +12,32 @@ import (
 )
 
 func TestDecode(t *testing.T) {
+    if os.Getenv("EXPENSIVE_TESTS") == "" {
+        t.Skip()
+    }
+
     //binFile := "../../extern/supra_seal/demos/c2-test/resources/test/commit-phase1-output"
-    binFile := "../../commit-phase1-output"
+    binFile := "../../commit-phase1-output.gz"
 
-    rawData, err := os.ReadFile(binFile)
+    gzData, err := os.ReadFile(binFile)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    gzReader, err := gzip.NewReader(bytes.NewReader(gzData))
     if err != nil {
         t.Fatal(err)
     }
 
+    rawData, err := io.ReadAll(gzReader)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    if err := gzReader.Close(); err != nil {
+        t.Fatal(err)
+    }
+
     dec, err := DecodeCommit1OutRaw(bytes.NewReader(rawData))
     if err != nil {
         t.Fatal(err)
     }
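For readers new to bincode: in its default (fixint, little-endian) form, integers are fixed-width little-endian and sequences are prefixed with a u64 element count, which is why ReadLE-style primitives are enough to walk the C1 output. A sketch of the recurring Vec-decoding shape (an illustration of the format under those assumptions, not a copy of DecodeCommit1OutRaw's internals):

    // DecodeVec reads a bincode-style Vec<T>: a little-endian uint64 element
    // count followed by that many fixed-size T values. (Illustrative; assumes
    // it sits next to ReadLE in package proof.)
    func DecodeVec[T any](r io.Reader) ([]T, error) {
        n, err := ReadLE[uint64](r)
        if err != nil {
            return nil, err
        }
        out := make([]T, 0, n)
        for i := uint64(0); i < n; i++ {
            v, err := ReadLE[T](r)
            if err != nil {
                return nil, err
            }
            out = append(out, v)
        }
        return out, nil
    }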
diff --git a/lib/proof/porep_vproof_challenges.go b/lib/proof/porep_vproof_challenges.go
index 19e221827..1124cf461 100644
--- a/lib/proof/porep_vproof_challenges.go
+++ b/lib/proof/porep_vproof_challenges.go
@@ -7,6 +7,9 @@ import (
     "github.com/minio/sha256-simd"
 )
 
+// TODO: This file is a placeholder with links to the original implementation in Rust. Eventually we want to
+// have our own implementation for generating PoRep vanilla proofs in Go.
+
 // https://github.com/filecoin-project/rust-fil-proofs/blob/8f5bd86be36a55e33b9b293ba22ea13ca1f28163/storage-proofs-porep/src/stacked/vanilla/challenges.rs#L21
 
 func DeriveInteractiveChallenges(
diff --git a/lib/proof/porep_vproof_types.go b/lib/proof/porep_vproof_types.go
index b854f44e7..24a829af9 100644
--- a/lib/proof/porep_vproof_types.go
+++ b/lib/proof/porep_vproof_types.go
@@ -1,9 +1,10 @@
 package proof
 
-// This file contains some type definitions from
+// This file contains PoRep vanilla proof type definitions from
 // - https://github.com/filecoin-project/rust-fil-proofs/tree/master/storage-proofs-core/src/merkle
 // - https://github.com/filecoin-project/rust-fil-proofs/tree/master/storage-proofs-porep/src/stacked/vanilla
 // - https://github.com/filecoin-project/rust-filecoin-proofs-api/tree/master/src
+// The JSON representation of these types matches the representation expected by rust-fil-proofs.
 
 // core
diff --git a/lib/proof/porep_vproof_vanilla.go b/lib/proof/porep_vproof_vanilla.go
index faad6031e..2bcea8cc9 100644
--- a/lib/proof/porep_vproof_vanilla.go
+++ b/lib/proof/porep_vproof_vanilla.go
@@ -1,5 +1,8 @@
 package proof
 
+// TODO: This file is a placeholder with links to the original implementation in Rust. Eventually we want to
+// have our own implementation for generating PoRep vanilla proofs in Go.
+
 // https://github.com/filecoin-project/rust-fil-proofs/blob/8f5bd86be36a55e33b9b293ba22ea13ca1f28163/storage-proofs-porep/src/stacked/vanilla/proof_scheme.rs#L60
 
 func ProveAllPartitions() {
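Until a Go implementation exists, the workable pipeline is to decode supraseal's bincode output with this package and hand the result to the Rust side as JSON, which (per the note in porep_vproof_types.go) is the representation rust-fil-proofs expects. An illustrative helper; the function name and file handling are assumptions, only DecodeCommit1OutRaw comes from this patch:

    // c1BinToJSON re-encodes supraseal C1 bincode output as JSON for
    // rust-fil-proofs. (Illustrative; assumes "os" and "encoding/json"
    // are imported in package proof.)
    func c1BinToJSON(binPath, jsonPath string) error {
        f, err := os.Open(binPath)
        if err != nil {
            return err
        }
        defer f.Close()

        c1out, err := DecodeCommit1OutRaw(f)
        if err != nil {
            return err
        }

        j, err := json.Marshal(c1out)
        if err != nil {
            return err
        }
        return os.WriteFile(jsonPath, j, 0644)
    }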
diff --git a/lib/proof/sn-comp-sector-seal/main.go b/lib/proof/sn-comp-sector-seal/main.go
index dbd918d75..b10d6c243 100644
--- a/lib/proof/sn-comp-sector-seal/main.go
+++ b/lib/proof/sn-comp-sector-seal/main.go
@@ -17,47 +17,14 @@ import (
     "github.com/filecoin-project/go-state-types/abi"
 )
 
-func ReplicaId(sector abi.SectorNumber, ticket []byte, commd []byte) ([32]byte, error) {
-    // https://github.com/filecoin-project/rust-fil-proofs/blob/5b46d4ac88e19003416bb110e2b2871523cc2892/storage-proofs-porep/src/stacked/vanilla/params.rs#L758-L775
-
-    pi := [32]byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
-    porepID, err := abi.RegisteredSealProof_StackedDrg32GiBV1_1.PoRepID()
-    if err != nil {
-        return [32]byte{}, err
-    }
-
-    if len(ticket) != 32 {
-        return [32]byte{}, xerrors.Errorf("invalid ticket length %d", len(ticket))
-    }
-    if len(commd) != 32 {
-        return [32]byte{}, xerrors.Errorf("invalid commd length %d", len(commd))
+func main() {
+    if len(os.Args) != 2 {
+        fmt.Println("This tool creates a 32GiB sector compatible with SupraSeal demo sector output")
+        fmt.Println("Useful only for development purposes.")
+        fmt.Printf("Usage: %s <output-path>\n", os.Args[0])
+        return
     }
 
-    var sectorID [8]byte
-    binary.BigEndian.PutUint64(sectorID[:], uint64(sector))
-
-    s := sha256.New()
-
-    // sha256 writes never error
-    _, _ = s.Write(pi[:])
-    _, _ = s.Write(sectorID[:])
-    _, _ = s.Write(ticket)
-    _, _ = s.Write(commd)
-    _, _ = s.Write(porepID[:])
-
-    return bytesIntoFr32Safe(s.Sum(nil)), nil
-}
-
-func bytesIntoFr32Safe(in []byte) [32]byte {
-    var out [32]byte
-    copy(out[:], in)
-
-    out[31] &= 0b0011_1111
-
-    return out
-}
-
-func main() {
     outPath := os.Args[1]
 
     /*const ssize = 32 << 30
@@ -207,5 +174,44 @@ func main() {
     }
 
     fmt.Println("done!")
+}
+
+func ReplicaId(sector abi.SectorNumber, ticket []byte, commd []byte) ([32]byte, error) {
+    // https://github.com/filecoin-project/rust-fil-proofs/blob/5b46d4ac88e19003416bb110e2b2871523cc2892/storage-proofs-porep/src/stacked/vanilla/params.rs#L758-L775
+
+    pi := [32]byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
+    porepID, err := abi.RegisteredSealProof_StackedDrg32GiBV1_1.PoRepID()
+    if err != nil {
+        return [32]byte{}, err
+    }
+
+    if len(ticket) != 32 {
+        return [32]byte{}, xerrors.Errorf("invalid ticket length %d", len(ticket))
+    }
+    if len(commd) != 32 {
+        return [32]byte{}, xerrors.Errorf("invalid commd length %d", len(commd))
+    }
+
+    var sectorID [8]byte
+    binary.BigEndian.PutUint64(sectorID[:], uint64(sector))
+    s := sha256.New()
+
+    // sha256 writes never error
+    _, _ = s.Write(pi[:])
+    _, _ = s.Write(sectorID[:])
+    _, _ = s.Write(ticket)
+    _, _ = s.Write(commd)
+    _, _ = s.Write(porepID[:])
+
+    return bytesIntoFr32Safe(s.Sum(nil)), nil
+}
+
+func bytesIntoFr32Safe(in []byte) [32]byte {
+    var out [32]byte
+    copy(out[:], in)
+
+    out[31] &= 0b0011_1111
+
+    return out
 }
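ReplicaId above mirrors the linked rust-fil-proofs derivation: SHA-256 over a fixed 0x09-filled prover-id placeholder, the big-endian sector number, the ticket, CommD, and the PoRep ID, with the top bits of the last byte masked by bytesIntoFr32Safe so the result fits the field. A usage sketch with dummy inputs:

    // Illustrative, in the same package as ReplicaId (needs "bytes" and "fmt" imports).
    func exampleReplicaId() {
        ticket := bytes.Repeat([]byte{1}, 32) // dummy 32-byte ticket
        commd := bytes.Repeat([]byte{2}, 32)  // dummy 32-byte CommD digest
        rid, err := ReplicaId(abi.SectorNumber(1), ticket, commd)
        if err != nil {
            panic(err)
        }
        fmt.Printf("replica-id: %x\n", rid[:])
    }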
diff --git a/lib/proof/sn_c1_write_path.txt b/lib/proof/sn_c1_write_path.txt
deleted file mode 100644
index def74b876..000000000
--- a/lib/proof/sn_c1_write_path.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-void C1::WriteProofs(const char* filename, bool do_tree, bool do_node) {
-  mmap_t file_ptr;
-
-
-  uint64_t vp_outer_length = C::GetNumPartitions();
-  uint64_t vp_inner_length = challenges_count_;
-  std::memcpy(&file_ptr[0] + buf_index, &vp_outer_length, sizeof(uint64_t));
-
-
-  for (uint64_t i = 0; i < vp_outer_length; ++i) {
-    std::memcpy(&file_ptr[0] + buf_index, &vp_inner_length, sizeof(uint64_t));
-    buf_index += sizeof(uint64_t);
-
-    for (uint64_t j = 0; j < vp_inner_length; ++j) {
-
-      buf_index = challenge.WriteProof(&file_ptr[0], buf_index, &tree_r[0],
-                                       &tree_c[0], tree_d);
-
-size_t C1Challenge::WriteProof(uint8_t* file_ptr, size_t buf_index,
-                               node_t** tree_r_bufs, node_t** tree_c_bufs,
-                               node_t* tree_d_buf) {
-  buf_index = WriteTreeProof(file_ptr, buf_index, tree_r_bufs, tree_d_buf);
-
-size_t C1Challenge::WriteTreeProof(uint8_t* file_ptr, size_t buf_index,
-                                   node_t** tree_r_bufs, node_t* tree_d_buf) {
-
-  TreeDCCProof tree_d(C::GetNumTreeDArity(),
-                      C::GetNumTreeDLevels(), nullptr, 0, 0);
-  tree_d.GenInclusionPath(challenge_, (node_t*) CC_TREE_D_NODE_VALUES);
-  buf_index = tree_d.WriteProof(file_ptr, buf_index, SINGLE_PROOF_DATA);
-
-size_t TreeProof::WriteProof(uint8_t* file_ptr, size_t buf_index,
-                             uint32_t proof_type) {
-  std::memcpy(file_ptr + buf_index, &proof_type, sizeof(uint32_t));
-  buf_index += sizeof(uint32_t);
-  if (proof_type == 0) {
-    // Root
-    std::memcpy(file_ptr + buf_index, root_, sizeof(node_t));
-    buf_index += sizeof(node_t);
-
-    // Leaf
-    std::memcpy(file_ptr + buf_index, leaf_, sizeof(node_t));
-    buf_index += sizeof(node_t);
-
-    // Proof size
-    std::memcpy(file_ptr + buf_index, &levels_, sizeof(uint64_t));
-    buf_index += sizeof(uint64_t);
-
-    // Proofs
-    for (size_t i = 0; i < levels_; ++i) {
-      buf_index = path_[i]->Write(file_ptr, buf_index);
-    }
-
-
diff --git a/lib/supraffi/gosupra/main.go b/lib/supraffi/gosupra/main.go
deleted file mode 100644
index 0e8e53966..000000000
--- a/lib/supraffi/gosupra/main.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package main
-
-import (
-    "os"
-    "strconv"
-
-    "github.com/filecoin-project/curio/lib/supraffi"
-)
-
-func main() {
-    // call ./gosupra [sector size] [config]
-    if len(os.Args) != 3 {
-        panic("Usage: ./gosupra [sector size] [config]")
-    }
-
-    sectorSize, err := strconv.ParseInt(os.Args[1], 10, 64)
-    if err != nil {
-        panic(err)
-    }
-
-    supraffi.SupraSealInit(uint64(sectorSize), os.Args[2])
-}
diff --git a/lib/supraffi/seal.go b/lib/supraffi/seal.go
index 25807233e..2af840d22 100644
--- a/lib/supraffi/seal.go
+++ b/lib/supraffi/seal.go
@@ -29,6 +29,7 @@ root = {SRCDIR}/../../extern/supra_seal/
 -Iposeidon -Ideps/sppark -Ideps/sppark/util -Ideps/blst/src -c sealing/supra_seal.cpp -o obj/supra_seal.o -Wno-subobject-linkage
 
 ---
+NOTE: The lines below match the top of this file, just in a moderately more readable form.
 
 -#cgo LDFLAGS: -fno-omit-frame-pointer
diff --git a/tasks/sealsupra/supra_config.go b/tasks/sealsupra/supra_config.go
index ade197e6a..5382bc652 100644
--- a/tasks/sealsupra/supra_config.go
+++ b/tasks/sealsupra/supra_config.go
@@ -3,6 +3,7 @@ package sealsupra
 import (
     "bufio"
     "fmt"
+    "github.com/samber/lo"
    "os/exec"
    "regexp"
    "strconv"
@@ -42,6 +43,16 @@ type TopologyConfig struct {
 type SupraSealConfig struct {
     NVMeDevices []string
     Topology    TopologyConfig
+
+    // Diagnostic fields (not part of the config)
+    RequiredThreads int
+    RequiredCCX     int
+    RequiredCores   int
+    UnoccupiedCores int
+
+    P2WrRdOverlap   bool
+    P2HsP1WrOverlap bool
+    P2HcP2RdOverlap bool
 }
 
 func GetSystemInfo() (*SystemInfo, error) {
@@ -107,16 +118,23 @@ func GenerateSupraSealConfig(info SystemInfo, dualHashers bool, batchSize int, n
     config := SupraSealConfig{
         NVMeDevices: nvmeDevices,
         Topology: TopologyConfig{
+            // Start with a somewhat optimal layout for top-level P1 processes
             PC1Writer:       1,
-            PC1Reader:       2,
-            PC1Orchestrator: 3,
-            PC2Reader:       0,
-            PC2Hasher:       1,
-            PC2HasherCPU:    0,
-            PC2Writer:       0,
-            PC2WriterCores:  1,
-            C1Reader:        0,
+            PC1Reader:       2, // High load
+            PC1Orchestrator: 3, // High load
+
+            // Now cram P2 processes into the remaining ~2 cores
+            PC2Reader:      0,
+            PC2Hasher:      1, // High load
+            PC2HasherCPU:   0,
+            PC2Writer:      0, // High load
+            PC2WriterCores: 1, // ^
+            C1Reader:       0,
         },
+
+        P2WrRdOverlap:   true,
+        P2HsP1WrOverlap: true,
+        P2HcP2RdOverlap: true,
     }
 
     sectorsPerThread := 1
@@ -128,15 +146,15 @@ func GenerateSupraSealConfig(info SystemInfo, dualHashers bool, batchSize int, n
     ccxFreeThreads := ccxFreeCores * info.ThreadsPerCore
     sectorsPerCCX := ccxFreeThreads * sectorsPerThread
 
-    requiredThreads := batchSize / sectorsPerThread
-    requiredCCX := (batchSize + sectorsPerCCX - 1) / sectorsPerCCX
-    requiredCores := requiredCCX + requiredThreads/info.ThreadsPerCore
+    config.RequiredThreads = batchSize / sectorsPerThread
+    config.RequiredCCX = (batchSize + sectorsPerCCX - 1) / sectorsPerCCX
+    config.RequiredCores = config.RequiredCCX + config.RequiredThreads/info.ThreadsPerCore
 
-    if requiredCores > info.CoreCount {
+    if config.RequiredCores > info.CoreCount {
         return config, fmt.Errorf("not enough cores available for hashers")
     }
 
-    coresLeftover := info.CoreCount - requiredCores
+    coresLeftover := info.CoreCount - config.RequiredCores
 
     const minOverheadCores = 4
     if coresLeftover < minOverheadCores {
@@ -148,16 +166,19 @@ func GenerateSupraSealConfig(info SystemInfo, dualHashers bool, batchSize int, n
     // Assign cores for PC2 processes
     if coresLeftover > nextFreeCore {
         config.Topology.PC2Writer = nextFreeCore
+        config.P2WrRdOverlap = false
         nextFreeCore++
     }
 
     if coresLeftover > nextFreeCore {
         config.Topology.PC2Hasher = nextFreeCore
+        config.P2HsP1WrOverlap = false
         nextFreeCore++
     }
 
     if coresLeftover > nextFreeCore {
         config.Topology.PC2HasherCPU = nextFreeCore
+        config.P2HcP2RdOverlap = false
         nextFreeCore++
     }
 
@@ -177,6 +198,8 @@ func GenerateSupraSealConfig(info SystemInfo, dualHashers bool, batchSize int, n
         }
     }
 
+    config.UnoccupiedCores = coresLeftover - nextFreeCore
+
     sectorConfig := SectorConfig{
         Sectors:      batchSize,
         Coordinators: []CoordinatorConfig{},
     }
@@ -187,7 +210,7 @@ func GenerateSupraSealConfig(info SystemInfo, dualHashers bool, batchSize int, n
         ccxCores = append(ccxCores, i)
     }
 
-    for i := requiredCores; i > 0; {
+    for i := config.RequiredCores; i > 0; {
         firstCCXCoreNum := ccxCores[len(ccxCores)-1]
         toAssign := min(i, info.CoresPerL3)
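To make the sizing arithmetic above concrete, a worked example with assumed hardware numbers (8 cores per CCX/L3, 2 threads per core, dual hashers giving 2 sectors per thread, and one coordinator core reserved per CCX; the ccxFreeCores derivation sits outside these hunks, so these values are illustrative only):

    func exampleSizing() {
        const (
            coresPerL3       = 8  // assumed CCX size
            threadsPerCore   = 2  // assumed SMT width
            sectorsPerThread = 2  // dual hashers (assumed doubling)
            batchSize        = 32
        )

        ccxFreeCores := coresPerL3 - 1                                    // assumption: one coordinator core per CCX
        sectorsPerCCX := ccxFreeCores * threadsPerCore * sectorsPerThread // 7*2*2 = 28
        requiredThreads := batchSize / sectorsPerThread                   // 32/2 = 16
        requiredCCX := (batchSize + sectorsPerCCX - 1) / sectorsPerCCX    // ceil(32/28) = 2
        requiredCores := requiredCCX + requiredThreads/threadsPerCore     // 2 + 16/2 = 10
        fmt.Println(requiredThreads, requiredCCX, requiredCores)
    }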
@@ -220,78 +243,72 @@ func GenerateSupraSealConfig(info SystemInfo, dualHashers bool, batchSize int, n
 func FormatSupraSealConfig(config SupraSealConfig) string {
     var sb strings.Builder
 
-    sb.WriteString("# Configuration for supra_seal\n")
-    sb.WriteString("spdk: {\n")
-    sb.WriteString("  # PCIe identifiers of NVMe drives to use to store layers\n")
-    sb.WriteString("  nvme = [ \n")
-
-    for i, device := range config.NVMeDevices {
-        sb.WriteString(fmt.Sprintf("           \"%s\"", device))
-
-        // comma if not last
-        if i < len(config.NVMeDevices)-1 {
-            sb.WriteString(",\n")
-        } else {
-            sb.WriteString("\n")
-        }
-    }
-    sb.WriteString("  ];\n")
-    sb.WriteString("}\n\n")
-
-    sb.WriteString("# CPU topology for various parallel sector counts\n")
-    sb.WriteString("topology:\n")
-    sb.WriteString("{\n")
-    sb.WriteString("  pc1: {\n")
-    sb.WriteString(fmt.Sprintf("    writer = %d;\n", config.Topology.PC1Writer))
-    sb.WriteString(fmt.Sprintf("    reader = %d;\n", config.Topology.PC1Reader))
-    sb.WriteString(fmt.Sprintf("    orchestrator = %d;\n", config.Topology.PC1Orchestrator))
-    sb.WriteString("    qpair_reader = 0;\n")
-    sb.WriteString("    qpair_writer = 1;\n")
-    sb.WriteString("    reader_sleep_time = 250;\n")
-    sb.WriteString("    writer_sleep_time = 500;\n")
-    sb.WriteString("    hashers_per_core = 2;\n\n")
-
-    sb.WriteString("    sector_configs: (\n")
+    w := func(s string) { sb.WriteString(s); sb.WriteByte('\n') }
+
+    w("# Configuration for supra_seal")
+    w("spdk: {")
+    w("  # PCIe identifiers of NVMe drives to use to store layers")
+    w("  nvme = [ ")
+
+    quotedNvme := lo.Map(config.NVMeDevices, func(d string, i int) string { return `           "` + d + `"` })
+    w(strings.Join(quotedNvme, ",\n"))
+
+    w("  ];")
+    w("}")
+    w("")
+    w("# CPU topology for various parallel sector counts")
+    w("topology:")
+    w("{")
+    w("  pc1: {")
+    w(fmt.Sprintf("    writer = %d;", config.Topology.PC1Writer))
+    w(fmt.Sprintf("    reader = %d;", config.Topology.PC1Reader))
+    w(fmt.Sprintf("    orchestrator = %d;", config.Topology.PC1Orchestrator))
+    w("    qpair_reader = 0;")
+    w("    qpair_writer = 1;")
+    w("    reader_sleep_time = 250;")
+    w("    writer_sleep_time = 500;")
+    w("    hashers_per_core = 2;")
+    w("")
+    w("    sector_configs: (")
 
     for i, sectorConfig := range config.Topology.SectorConfigs {
-        sb.WriteString("      {\n")
-        sb.WriteString(fmt.Sprintf("        sectors = %d;\n", sectorConfig.Sectors))
-        sb.WriteString("        coordinators = (\n")
+        w("      {")
+        w(fmt.Sprintf("        sectors = %d;", sectorConfig.Sectors))
+        w("        coordinators = (")
 
         for i, coord := range sectorConfig.Coordinators {
            sb.WriteString(fmt.Sprintf("          { core = %d;\n", coord.Core))
            sb.WriteString(fmt.Sprintf("            hashers = %d; }", coord.Hashers))
            if i < len(sectorConfig.Coordinators)-1 {
                sb.WriteString(",")
            }
-            sb.WriteString("\n")
+            sb.WriteByte('\n')
        }
-        sb.WriteString("        )\n")
+        w("        )")
        sb.WriteString("      }")
 
-        // if not last, add a comma
        if i < len(config.Topology.SectorConfigs)-1 {
            sb.WriteString(",")
        }
-        sb.WriteString("\n")
+        sb.WriteByte('\n')
    }
 
-    sb.WriteString("    )\n")
-    sb.WriteString("  },\n")
-
-    sb.WriteString("  pc2: {\n")
-    sb.WriteString(fmt.Sprintf("    reader = %d;\n", config.Topology.PC2Reader))
-    sb.WriteString(fmt.Sprintf("    hasher = %d;\n", config.Topology.PC2Hasher))
-    sb.WriteString(fmt.Sprintf("    hasher_cpu = %d;\n", config.Topology.PC2HasherCPU))
-    sb.WriteString(fmt.Sprintf("    writer = %d;\n", config.Topology.PC2Writer))
-    sb.WriteString(fmt.Sprintf("    writer_cores = %d;\n", config.Topology.PC2WriterCores))
-    sb.WriteString("    sleep_time = 200;\n")
-    sb.WriteString("    qpair = 2;\n")
-    sb.WriteString("  },\n")
-
-    sb.WriteString("  c1: {\n")
-    sb.WriteString(fmt.Sprintf("    reader = %d;\n", config.Topology.C1Reader))
-    sb.WriteString("    sleep_time = 200;\n")
-    sb.WriteString("    qpair = 3;\n")
-    sb.WriteString("  }\n")
-
-    sb.WriteString("}\n")
+    w("    )")
+    w("  },")
+    w("")
+    w("  pc2: {")
+    w(fmt.Sprintf("    reader = %d;", config.Topology.PC2Reader))
+    w(fmt.Sprintf("    hasher = %d;", config.Topology.PC2Hasher))
+    w(fmt.Sprintf("    hasher_cpu = %d;", config.Topology.PC2HasherCPU))
+    w(fmt.Sprintf("    writer = %d;", config.Topology.PC2Writer))
+    w(fmt.Sprintf("    writer_cores = %d;", config.Topology.PC2WriterCores))
+    w("    sleep_time = 200;")
+    w("    qpair = 2;")
+    w("  },")
+    w("")
+    w("  c1: {")
+    w(fmt.Sprintf("    reader = %d;", config.Topology.C1Reader))
+    w("    sleep_time = 200;")
+    w("    qpair = 3;")
+    w("  }")
+    w("}")
 
     return sb.String()
 }
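A usage sketch for the reworked formatter, constructing a SupraSealConfig by hand (all values are dummies; the field names are the ones used above, and the sealsupra import path is assumed):

    func exampleFormat() {
        cfg := sealsupra.SupraSealConfig{
            NVMeDevices: []string{"0000:01:00.0", "0000:01:00.1"},
            Topology: sealsupra.TopologyConfig{
                PC1Writer: 1, PC1Reader: 2, PC1Orchestrator: 3,
                PC2Reader: 4, PC2Hasher: 5, PC2HasherCPU: 6,
                PC2Writer: 7, PC2WriterCores: 2, C1Reader: 8,
                SectorConfigs: []sealsupra.SectorConfig{{
                    Sectors: 32,
                    Coordinators: []sealsupra.CoordinatorConfig{
                        {Core: 10, Hashers: 14},
                        {Core: 16, Hashers: 16},
                    },
                }},
            },
        }
        fmt.Print(sealsupra.FormatSupraSealConfig(cfg))
    }

The output is the libconfig-style text consumed by supra_seal, with the spdk.nvme device list and the pc1/pc2/c1 topology blocks emitted by the function body above.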