From 37f0dbdb926e33b97dd98494786e1934c58026ab Mon Sep 17 00:00:00 2001 From: vsoch Date: Sun, 31 Mar 2024 00:05:21 -0600 Subject: [PATCH 1/3] python: updates to sdk to support range experiment Problem: we need to update the python sdk to support range Solution: update the protos and the config. This also comments out a lot of verbosity (for now). I would like to have a more hardened logging approach that is tied to a command line flag / config parameter / envar or similar. Signed-off-by: vsoch --- README.md | 1 + .../range/rainbow-config.yaml | 2 +- plugins/algorithms/range/range.go | 4 +-- plugins/backends/memory/dfs.go | 20 ++++--------- python/v1/rainbow/backends/memory.py | 4 +-- python/v1/rainbow/client.py | 2 +- python/v1/rainbow/config.py | 30 ++++++++++++++++++- python/v1/rainbow/schema.py | 18 +++++++++-- python/v1/setup.py | 2 +- 9 files changed, 58 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index c6bd0d6..03ad7de 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ For more information: ## TODO +- nice logger with actual levels that I like - subsystems - make also a function to delete subsystems - ephemeral case - actual nodes don't exist, but instead rules for requests and limits. Need to develop this and means to authenticate to use it. diff --git a/docs/examples/match-algorithms/range/rainbow-config.yaml b/docs/examples/match-algorithms/range/rainbow-config.yaml index ec1654a..d4f98c1 100644 --- a/docs/examples/match-algorithms/range/rainbow-config.yaml +++ b/docs/examples/match-algorithms/range/rainbow-config.yaml @@ -8,7 +8,7 @@ scheduler: name: match cluster: name: spack-builder - secret: 85e59eea-c427-4f55-9668-4ed418de9be8 + secret: bd72a288-cc3d-4659-910b-d665fd95f1a3 graphdatabase: name: memory host: 127.0.0.1:50051 diff --git a/plugins/algorithms/range/range.go b/plugins/algorithms/range/range.go index d0b8eb9..fd87d4b 100644 --- a/plugins/algorithms/range/range.go +++ b/plugins/algorithms/range/range.go @@ -74,7 +74,7 @@ func (req *RangeRequest) Satisfies(value string) (bool, error) { } if req.Min != "" { // Is the version provided greater than the min requested? - c, err := semver.NewConstraint(fmt.Sprintf("> %s", req.Min)) + c, err := semver.NewConstraint(fmt.Sprintf(">= %s", req.Min)) if err != nil { // fmt.Printf(" => Error parsing min constraint %s\n", err) return false, err @@ -89,7 +89,7 @@ func (req *RangeRequest) Satisfies(value string) (bool, error) { } if req.Max != "" { // Is the version provided less than the max requested? 
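		// With inclusive constraints, a version equal to the requested bound is accepted:
		// for example, value 1.2.3 satisfies ">= 1.2.3" and "<= 1.2.3", where the strict
		// "> " and "< " constraints used before rejected the endpoints of the range.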
- c, err := semver.NewConstraint(fmt.Sprintf("< %s", req.Max)) + c, err := semver.NewConstraint(fmt.Sprintf("<= %s", req.Max)) if err != nil { // fmt.Printf(" => Error parsing max constraint %s\n", err) return false, err diff --git a/plugins/backends/memory/dfs.go b/plugins/backends/memory/dfs.go index 7cbc50e..7161103 100644 --- a/plugins/backends/memory/dfs.go +++ b/plugins/backends/memory/dfs.go @@ -54,7 +54,7 @@ func (g *ClusterGraph) DFSForMatch( } // Get the summary metrics for the subsystem - fmt.Println(ss.Metrics.ResourceCounts) + // fmt.Println(ss.Metrics.ResourceCounts) isMatch := true for resourceType, needed := range totals { @@ -90,9 +90,7 @@ func (g *ClusterGraph) depthFirstSearch( ) (bool, error) { // Note that in the experimental version we have one task and thus one slot - if !g.quiet { - fmt.Printf(" šŸŽ°ļø Slots that need to be satisfied with matcher %s\n", matcher.Name()) - } + // fmt.Printf(" šŸŽ°ļø Slots that need to be satisfied with matcher %s\n", matcher.Name()) slots := map[string]*v1.Task{} // If a slot isn't defined for the task, assume the slot is at the top level @@ -106,9 +104,7 @@ func (g *ClusterGraph) depthFirstSearch( // If we don't have jobspec.Task.Resources, no slot to search for. // Return early based on top level counts if len(jobspec.Task.Resources) == 0 { - if !g.quiet { - fmt.Printf(" šŸŽ°ļø No resources defined, top level counts satisfied so cluster is match\n") - } + // fmt.Printf(" šŸŽ°ļø No resources defined, top level counts satisfied so cluster is match\n") return true, nil } @@ -143,22 +139,18 @@ func (g *ClusterGraph) depthFirstSearch( } // Subsystem edges are here, separate from dominant ones (so search is smaller) - for sName, edges := range vtx.Subsystems { + for _, edges := range vtx.Subsystems { // fmt.Printf(" => Searching for %s and resource type %s in subsystem %v with %d subsystem edges\n", lookingFor, resource.Type, sName, len(edges)) for _, child := range edges { - if !g.quiet { - fmt.Printf(" Found subsystem edge %s with type %s\n", sName, child.Vertex.Type) - } + // fmt.Printf(" Found subsystem edge %s with type %s\n", sName, child.Vertex.Type) // Check if the subsystem edge satisfies the needs of the slot // This will update the slotNeeds.Satisfied matcher.CheckSubsystemEdge(slotNeeds, child, vtx) // Return early if minimum needs are satsified if slotNeeds.Satisfied { - if !g.quiet { - fmt.Printf(" Minimum slot needs are satisfied at %s for %s at %s, returning early.\n", vtx.Type, child.Subsystem, child.Vertex.Type) - } + // fmt.Printf(" Minimum slot needs are satisfied at %s for %s at %s, returning early.\n", vtx.Type, child.Subsystem, child.Vertex.Type) return slotsFound + vtx.Size } } diff --git a/python/v1/rainbow/backends/memory.py b/python/v1/rainbow/backends/memory.py index 9b4c88a..52e8192 100644 --- a/python/v1/rainbow/backends/memory.py +++ b/python/v1/rainbow/backends/memory.py @@ -15,13 +15,13 @@ class MemoryBackend(GraphBackend): This graph database backend is primarily for development. """ - def satisfies(self, jobspec): + def satisfies(self, jobspec, matcher="match"): """ Determine if a jobspec can be satisfied by the graph. 
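         The matcher argument names the match algorithm for the graph database to use
         (for example "match" or "range") and is forwarded with the satisfy request, e.g.

             backend.satisfies(jobspec, matcher="range")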
""" # Prepare a satisfy request with the jobspec # TODO if auth is in the graph, that needs to be done here too - request = memory_pb2.SatisfyRequest(payload=jobspec.to_str()) + request = memory_pb2.SatisfyRequest(payload=jobspec.to_str(), matcher=matcher) # Host should be set from the database_options from the client with grpc.insecure_channel(self.host) as channel: diff --git a/python/v1/rainbow/client.py b/python/v1/rainbow/client.py index 8e02ea2..abdd727 100644 --- a/python/v1/rainbow/client.py +++ b/python/v1/rainbow/client.py @@ -161,7 +161,7 @@ def submit_jobspec(self, jobspec): it custom with your own special logic. """ # Ask the database backend if our jobspec can be satisfied - response = self.backend.satisfies(jobspec) + response = self.backend.satisfies(jobspec, self.cfg.match_algorithm) matches = response.clusters # No matches? diff --git a/python/v1/rainbow/config.py b/python/v1/rainbow/config.py index 763c956..2f38569 100644 --- a/python/v1/rainbow/config.py +++ b/python/v1/rainbow/config.py @@ -11,7 +11,7 @@ def new_rainbow_config(host, cluster, secret, scheduler_name="rainbow"): "scheduler": { "name": scheduler_name, "secret": secret, - "algorithm": {"name": "random"}, + "algorithms": {"selection": {"name": "random"}, "match": {"name": "match"}}, }, "cluster": { "name": cluster, @@ -52,6 +52,34 @@ def cfg(self): self.load() return self._cfg + @property + def match_algorithm(self): + """ + Get the match algorithm + """ + matcher = self._cfg.get("scheduler", {}).get("algorithms", {}).get("match", {}).get("name") + if not matcher: + matcher = "match" + return matcher + + def set_match_algorithm(self, name): + """ + Get the match algorithm + """ + self._set_algorithm("match", name) + + def set_selection_algorithm(self, name): + """ + Get the match algorithm + """ + self._set_algorithm("selection", name) + + def _set_algorithm(self, typ, name): + """ + Get the match algorithm + """ + self._cfg["scheduler"]["algorithms"][typ]["name"] = name + def load(self, config_file=None): """ Load a rainbow config diff --git a/python/v1/rainbow/schema.py b/python/v1/rainbow/schema.py index fe8faec..d32dd4a 100644 --- a/python/v1/rainbow/schema.py +++ b/python/v1/rainbow/schema.py @@ -12,11 +12,23 @@ "properties": { "name": {"type": "string"}, "secret": {"type": "string"}, - "algorithm": { + "algorithms": { "type": "object", "properties": { - "name": {"type": "string"}, - "options": {"type": "object"}, + "selection": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "options": {"type": "object"}, + }, + }, + "match": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "options": {"type": "object"}, + }, + }, }, }, "user": {"type": "object"}, diff --git a/python/v1/setup.py b/python/v1/setup.py index bf399a1..9e91888 100644 --- a/python/v1/setup.py +++ b/python/v1/setup.py @@ -18,7 +18,7 @@ if __name__ == "__main__": setup( name="rainbow-scheduler", - version="0.0.14rc1", + version="0.0.14", author="Vanessasaurus", author_email="vsoch@users.noreply.github.com", maintainer="Vanessasaurus", From ef4376eacdb67c0cbf2295ab1269f7b12a91b5fd Mon Sep 17 00:00:00 2001 From: vsoch Date: Mon, 1 Apr 2024 01:37:23 -0600 Subject: [PATCH 2/3] feat: custom rainbow logger and updates to rainbow python Signed-off-by: vsoch --- README.md | 1 - cmd/server/server.go | 30 ++- docs/algorithms.md | 7 +- .../range/jobspec-invalid-range.yaml | 2 +- docs/examples/scheduler/jobspec-io.yaml | 3 +- docs/examples/scheduler/rainbow-config.yaml | 2 +- go.mod | 3 + go.sum | 9 + 
 pkg/logger/logger.go                          | 178 ++++++++++++++++++
 plugins/algorithms/match/match.go             |  86 +++++++--
 plugins/algorithms/range/range.go             |  31 +--
 plugins/backends/memory/cluster.go            |   8 +-
 plugins/backends/memory/dfs.go                |  34 ++--
 plugins/backends/memory/graph.go              |  17 +-
 plugins/backends/memory/memory.go             |   7 -
 plugins/backends/memory/nodes.go              |   7 -
 plugins/backends/memory/service/memory.pb.go  |  95 ++++++----
 plugins/backends/memory/service/memory.proto  |   3 +
 python/v1/README.md                           |   2 +-
 python/v1/examples/flux/submit-job.py         |   6 +-
 python/v1/examples/flux/submit-jobspec.py     |   5 +-
 python/v1/rainbow/client.py                   |  19 +-
 python/v1/rainbow/protos/memory_pb2.py        |  20 +-
 python/v1/rainbow/protos/memory_pb2.pyi       |  10 +-
 python/v1/rainbow/schema.py                   |   1 +
 python/v1/rainbow/types.py                    |  10 +
 26 files changed, 443 insertions(+), 153 deletions(-)
 create mode 100644 pkg/logger/logger.go
 create mode 100644 python/v1/rainbow/types.py

diff --git a/README.md b/README.md
index 03ad7de..c6bd0d6 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,6 @@ For more information:
 
 ## TODO
 
-- nice logger with actual levels that I like
 - subsystems
   - make also a function to delete subsystems
 - ephemeral case - actual nodes don't exist, but instead rules for requests and limits. Need to develop this and means to authenticate to use it.
diff --git a/cmd/server/server.go b/cmd/server/server.go
index fd5354f..ebd0d62 100644
--- a/cmd/server/server.go
+++ b/cmd/server/server.go
@@ -6,6 +6,7 @@ import (
 	"log"
 
 	"github.com/converged-computing/rainbow/pkg/config"
+	rlog "github.com/converged-computing/rainbow/pkg/logger"
 	"github.com/converged-computing/rainbow/pkg/server"
 	"github.com/converged-computing/rainbow/pkg/types"
 
@@ -17,16 +18,19 @@ import (
 )
 
 var (
-	host        string
-	name        = "rainbow"
-	sqliteFile  = "rainbow.db"
-	configFile  = ""
-	matchAlgo   = "match"
-	selectAlgo  = "random"
-	database    = ""
-	cleanup     = false
-	secret      = "chocolate-cookies"
-	globalToken = ""
+	host string
+
+	// default logging level (0 to 5 scale, see pkg/logger)
+	loggingLevel = 3
+	name         = "rainbow"
+	sqliteFile   = "rainbow.db"
+	configFile   = ""
+	matchAlgo    = "match"
+	selectAlgo   = "random"
+	database     = ""
+	cleanup      = false
+	secret       = "chocolate-cookies"
+	globalToken  = ""
 )
 
 func main() {
@@ -39,9 +43,15 @@ func main() {
 	flag.StringVar(&selectAlgo, "select-algorithm", selectAlgo, "selection algorithm for final cluster selection (defaults to random)")
 	flag.StringVar(&matchAlgo, "match-algorithm", matchAlgo, "match algorithm for graph (defaults to random)")
 	flag.StringVar(&configFile, "config", configFile, "rainbow config file")
+	flag.IntVar(&loggingLevel, "loglevel", loggingLevel, "rainbow logging level (0 to 5)")
 	flag.BoolVar(&cleanup, "cleanup", cleanup, "cleanup previous sqlite database (default: false)")
 	flag.Parse()
 
+	// If the logging level isn't the default, set it
+	if loggingLevel != rlog.DefaultLevel {
+		rlog.SetLevel(loggingLevel)
+	}
+
 	// Load (or generate a default) config file here, if provided
 	cfg, err := config.NewRainbowClientConfig(configFile, name, secret, database, selectAlgo, matchAlgo)
 	if err != nil {
diff --git a/docs/algorithms.md b/docs/algorithms.md
index 34c98c1..3891928 100644
--- a/docs/algorithms.md
+++ b/docs/algorithms.md
@@ -121,7 +121,7 @@ I understand this is likely not perfect for what everyone wants, but I believe i
 
 ### Match
 
-The expliciy "match" type is going to look exactly at the type of a subsystem node, and return true (match) if it matches what the subsystem needs.
For example, given this task:
+The explicit "match" type looks for an exact value for a field in the metadata. It will return true (match) if it matches what the subsystem needs. For example, given this task:
 
 ```yaml
 task:
@@ -133,10 +133,11 @@ task:
   resources:
     io:
       match:
-      - type: shm
+      - field: type
+        value: shm
 ```
 
-We would look for a node of type "shm" in the io subsystem that is directly attached (an edge) to a node in the dominant subsystem graph.
+We would look for a node with field "type" and value "shm" in the io subsystem that is directly attached (an edge) to a node in the dominant subsystem graph.
 
 ### Range
 
diff --git a/docs/examples/match-algorithms/range/jobspec-invalid-range.yaml b/docs/examples/match-algorithms/range/jobspec-invalid-range.yaml
index 0ab5381..15179c6 100644
--- a/docs/examples/match-algorithms/range/jobspec-invalid-range.yaml
+++ b/docs/examples/match-algorithms/range/jobspec-invalid-range.yaml
@@ -11,7 +11,7 @@ resources:
     type: core
 task:
   command:
-  - ior
+  - spack
   slot: default
   count:
     per_slot: 1
diff --git a/docs/examples/scheduler/jobspec-io.yaml b/docs/examples/scheduler/jobspec-io.yaml
index c411d95..5984ae7 100644
--- a/docs/examples/scheduler/jobspec-io.yaml
+++ b/docs/examples/scheduler/jobspec-io.yaml
@@ -18,4 +18,5 @@ task:
   resources:
     io:
       match:
-      - type: shm
\ No newline at end of file
+      - field: type
+        value: shm
\ No newline at end of file
diff --git a/docs/examples/scheduler/rainbow-config.yaml b/docs/examples/scheduler/rainbow-config.yaml
index 422a875..316602f 100644
--- a/docs/examples/scheduler/rainbow-config.yaml
+++ b/docs/examples/scheduler/rainbow-config.yaml
@@ -8,7 +8,7 @@ scheduler:
     name: match
 cluster:
   name: keebler
-  secret: 3994c1e7-9cc7-4b81-ab75-3b00128eda16
+  secret: 79643f8e-6408-4cd1-bd1a-76aa499d5864
 graphdatabase:
   name: memory
   host: 127.0.0.1:50051
diff --git a/go.mod b/go.mod
index 4f50281..5e869b6 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ require (
 	github.com/akamensky/argparse v1.4.0
 	github.com/compspec/jobspec-go v0.0.0-20240319000127-8020a01a65da
 	github.com/converged-computing/jsongraph-go v0.0.0-20240229082022-c6887a5a00fe
+	github.com/fatih/color v1.16.0
 	github.com/google/uuid v1.6.0
 	github.com/mattn/go-sqlite3 v1.14.22
 	github.com/pkg/errors v0.9.1
@@ -17,6 +18,8 @@ require (
 
 require (
 	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
 	golang.org/x/net v0.21.0 // indirect
 	golang.org/x/sys v0.17.0 // indirect
diff --git a/go.sum b/go.sum
index 411776e..7f9c643 100644
--- a/go.sum
+++ b/go.sum
@@ -6,6 +6,8 @@ github.com/compspec/jobspec-go v0.0.0-20240319000127-8020a01a65da h1:Uvfk4IgWMIi
 github.com/compspec/jobspec-go v0.0.0-20240319000127-8020a01a65da/go.mod h1:BaJyxaOhESe2DD4lqBdwTEWOw0TaTZVJGPrFh6KyXQM=
 github.com/converged-computing/jsongraph-go v0.0.0-20240229082022-c6887a5a00fe h1:Tk//RW3uKn4A7N8gpHRXs+ZGlR7Fxkwh+4/Iml0GBV4=
 github.com/converged-computing/jsongraph-go v0.0.0-20240229082022-c6887a5a00fe/go.mod h1:+DhVyLXGVfBsfta4185jd33jqa94inshCcdvsXK2Irk=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -14,6 +16,11 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -22,6 +29,8 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6Ng github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go new file mode 100644 index 0000000..3c92edf --- /dev/null +++ b/pkg/logger/logger.go @@ -0,0 +1,178 @@ +package logger + +// TODO would like to use slog when we can use go 1.21! + +import ( + "fmt" + "log" + "os" + + "github.com/fatih/color" +) + +const ( + LevelNone = iota + LevelInfo + LevelWarning + LevelError + LevelVerbose + LevelDebug +) + +var ( + DefaultLevel = 3 + logger *RainbowLogger +) + +type RainbowLogger struct { + level int + Filename string + handle *os.File +} + +// Start opens a file handle, if it's desired to write to file +func (l *RainbowLogger) Start() (*log.Logger, error) { + f, err := os.OpenFile(l.Filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, os.ModePerm) + if err != nil { + return nil, err + } + logger := log.New(f, "", 0) + l.handle = f + return logger, nil +} + +// Stop closes the file handle, if defined +func (l *RainbowLogger) Stop() error { + if l.handle != nil { + return l.handle.Close() + } + return nil +} + +// Logging functions with formatting +func Infof(message ...any) error { + return logger.logFormat(LevelInfo, message...) +} + +func Errorf(message ...any) error { + color.Set(color.FgRed) + err := logger.logFormat(LevelError, message...) + color.Unset() + return err +} +func Debugf(message ...any) error { + color.Set(color.FgBlue) + err := logger.logFormat(LevelDebug, message...) + color.Unset() + return err + +} +func Verbosef(message ...any) error { + color.Set(color.FgMagenta) + err := logger.logFormat(LevelVerbose, message...) 
+	color.Unset()
+	return err
+
+}
+func Warningf(message ...any) error {
+	color.Set(color.FgYellow)
+	err := logger.logFormat(LevelWarning, message...)
+	color.Unset()
+	return err
+}
+
+// And the same logging functions, without formatting
+func Info(message string) error {
+	return logger.log(LevelInfo, message)
+}
+func Error(message string) error {
+	color.Set(color.FgRed)
+	err := logger.log(LevelError, message)
+	color.Unset()
+	return err
+
+}
+func Debug(message string) error {
+	color.Set(color.FgBlue)
+	err := logger.log(LevelDebug, message)
+	color.Unset()
+	return err
+}
+func Verbose(message string) error {
+	color.Set(color.FgMagenta)
+	err := logger.log(LevelVerbose, message)
+	color.Unset()
+	return err
+}
+func Warning(message string) error {
+	color.Set(color.FgYellow)
+	err := logger.log(LevelWarning, message)
+	color.Unset()
+	return err
+}
+
+// log prints (without formatting) to the log
+func (l *RainbowLogger) log(level int, message string) error {
+	if l.Filename != "" {
+		l.logToFile(level, message)
+	}
+	// Print when the message level is within the configured verbosity
+	if level <= l.level {
+		fmt.Println(message)
+	}
+	return nil
+}
+
+// logFormat is the shared class function for actually printing to the log
+func (l *RainbowLogger) logFormat(level int, message ...any) error {
+	if l.Filename != "" {
+		// Expand the variadic messages so the prolog stays at index 0
+		l.logFormatToFile(level, message...)
+	}
+	// Otherwise just print! Simple and dumb
+	prolog := message[0].(string)
+	rest := message[1:]
+	if level <= l.level {
+		fmt.Printf(prolog, rest...)
+	}
+	return nil
+}
+
+// logFormatToFile writes to file if the rainbow logger is set to do that
+func (l *RainbowLogger) logFormatToFile(level int, message ...any) error {
+	logger, err := l.Start()
+	if err != nil {
+		return err
+	}
+	// Assume the prolog (to be formatted) is at index 0
+	prolog := message[0].(string)
+	rest := message[1:]
+	if level <= l.level {
+		logger.Printf(prolog, rest...)
+	}
+	return l.Stop()
+}
+
+// logToFile writes to file if the rainbow logger is set to do that
+func (l *RainbowLogger) logToFile(level int, message string) error {
+	logger, err := l.Start()
+	if err != nil {
+		return err
+	}
+	if level <= l.level {
+		logger.Println(message)
+	}
+	return l.Stop()
+}
+
+// WriteToFile will set the global level and filename
+func WriteToFile(filename string) {
+	logger.Filename = filename
+}
+
+// SetLevel sets the global logging level
+func SetLevel(level int) {
+	logger.level = level
+}
+
+func init() {
+	logger = &RainbowLogger{level: LevelWarning}
+}
diff --git a/plugins/algorithms/match/match.go b/plugins/algorithms/match/match.go
index fd080cc..277f8a7 100644
--- a/plugins/algorithms/match/match.go
+++ b/plugins/algorithms/match/match.go
@@ -2,9 +2,11 @@ package match
 
 import (
 	"fmt"
+	"strings"
 
 	v1 "github.com/compspec/jobspec-go/pkg/jobspec/experimental"
 	"github.com/converged-computing/rainbow/pkg/graph/algorithm"
+	rlog "github.com/converged-computing/rainbow/pkg/logger"
 	"github.com/converged-computing/rainbow/pkg/types"
 )
 
@@ -15,6 +17,11 @@ var (
 	matcherName = "match"
 )
 
+type MatchRequest struct {
+	Field string
+	Value string
+}
+
 func (s MatchType) Name() string {
 	return matcherName
 }
@@ -23,6 +30,26 @@ func (s MatchType) Description() string {
 	return description
 }
 
+// Compress the match request into a parseable field
+func (req *MatchRequest) Compress() string {
+	value := fmt.Sprintf("match||field=%s", req.Field)
+	value = fmt.Sprintf("%s||value=%s", value, req.Value)
+	return value
+}
+
+func NewMatchRequest(value string) *MatchRequest {
+	req := MatchRequest{}
+	pieces := strings.Split(value, "||")
+	for _, piece := range pieces {
+		if strings.HasPrefix(piece, "field=") {
+			req.Field = strings.ReplaceAll(piece, "field=", "")
+		} else if strings.HasPrefix(piece, "value=") {
+			req.Value = strings.ReplaceAll(piece, "value=", "")
+		}
+	}
+	return &req
+}
+
 // getSlotResource needs assumes a subsystem request as follows:
 /*
 task:
   command:
@@ -68,22 +95,29 @@ func (m MatchType) GetSlotResourceNeeds(slot *v1.Task) *types.SlotResourceNeeds
 			if !ok {
 				continue
 			}
+
+			req := MatchRequest{}
 			for key, value := range entry {
 				value, ok := value.(string)
-				// This algorithm only knows how to match based on type
-				if key != "type" {
-					continue
+				// We only know how to parse these
+				if key == "field" && ok {
+					req.Field = value
+				} else if key == "value" && ok {
+					req.Value = value
 				}
-				if ok {
-					_, ok := sNeeds[subsystem]
-					if !ok {
-						sNeeds[subsystem] = map[string]bool{}
-					}
+			}
 
-					// This sets the starting state that the value is not satisfied
-					sNeeds[subsystem][value] = false
+			// If we get here and we have both a field and a value,
+			// we can add it to our needs
+			// This is a bit janky - compressing with || separators
+			if req.Field != "" && (req.Value != "") {
+				_, ok := sNeeds[subsystem]
+				if !ok {
+					sNeeds[subsystem] = map[string]bool{}
 				}
+				// This sets the starting state that the match is not satisfied
+				sNeeds[subsystem][req.Compress()] = false
 			}
 		}
 	}
@@ -99,7 +133,7 @@ func (m MatchType) GetSlotResourceNeeds(slot *v1.Task) *types.SlotResourceNeeds
 	if len(needs) == 0 {
 		slotNeeds.Satisfied = true
 	}
-	// fmt.Printf("   => Assessing needs for slot: %v\n", slotNeeds)
+	rlog.Debugf("   => Assessing needs for slot: %v\n", slotNeeds)
 	return slotNeeds
 }
 
@@ -116,26 +150,42 @@ func (m MatchType) CheckSubsystemEdge(slotNeeds *types.SlotResourceNeeds, edge *
 
 	// Nested for loops are not great - this will be improved with a more robust graph
 	// that isn't
artisinal avocado toast developed by me :)
-	fmt.Printf("Looking at edge %s->%s\n", edge.Relation, edge.Vertex.Type)
+	rlog.Debugf("Looking at edge %s->%s\n", edge.Relation, edge.Vertex.Type)
 
 	// TODO Keep a record if all are satisfied so we stop searching
 	// earlier if this is the case on subsequent calls
 	for i, subsys := range slotNeeds.Subsystems {
-		fmt.Printf("      => Looking in subsystem %s\n", edge.Subsystem)
+		rlog.Debugf("      => Looking in subsystem %s\n", edge.Subsystem)
 
 		// The subsystem has an edge defined here!
 		if subsys.Name == edge.Subsystem {
-			fmt.Printf("      => Found matching subsystem %s for %s\n", subsys.Name, edge.Subsystem)
+			rlog.Debugf("      => Found matching subsystem %s for %s\n", subsys.Name, edge.Subsystem)
 
 			// Yuck, this needs to be a query! Oh well.
 			for k := range subsys.Attributes {
-				// fmt.Printf("         => Looking at edge %s '%s' for %s that needs %s\n", edge.Subsystem, edge.Vertex.Type, subsys.Name, k)
+				rlog.Debugf("         => Looking at edge %s '%s' for %s that needs %s\n", edge.Subsystem, edge.Vertex.Type, subsys.Name, k)
+
+				// We care if the attribute is marked as a match
+				if strings.HasPrefix(k, "match") {
 
-				if edge.Vertex.Type == k {
-					fmt.Printf("      => Resource '%s' has edge '%s' satisfies subsystem %s %s\n", vtx.Type, edge.Vertex.Type, subsys.Name, k)
-					subsys.Attributes[k] = true
+					rlog.Debugf("         => Found %s and inspecting edge metadata %v\n", k, edge.Vertex.Metadata.Elements)
+					req := NewMatchRequest(k)
+
+					// Get the field requested by the jobspec
+					toMatch, err := edge.Vertex.Metadata.GetStringElement(req.Field)
+					if err != nil {
+						continue
+					}
+
+					rlog.Debugf("         => Found field requested for match %s\n", toMatch)
+					// These are the conditions of being satisfied: the value we got from the vertex
+					// matches the value provided in the slot request
+					if toMatch == req.Value {
+						rlog.Debugf("      => Resource '%s' has edge '%s' satisfies subsystem %s %s\n", vtx.Type, edge.Vertex.Type, subsys.Name, k)
+						subsys.Attributes[k] = true
+					}
 				}
+
 			}
 		}
 		slotNeeds.Subsystems[i] = subsys
diff --git a/plugins/algorithms/range/range.go b/plugins/algorithms/range/range.go
index fd87d4b..38a0c73 100644
--- a/plugins/algorithms/range/range.go
+++ b/plugins/algorithms/range/range.go
@@ -9,6 +9,8 @@ import (
 	semver "github.com/Masterminds/semver/v3"
 	v1 "github.com/compspec/jobspec-go/pkg/jobspec/experimental"
 	"github.com/converged-computing/rainbow/pkg/graph/algorithm"
+
+	rlog "github.com/converged-computing/rainbow/pkg/logger"
 	"github.com/converged-computing/rainbow/pkg/types"
 )
 
@@ -69,20 +71,20 @@ func (req *RangeRequest) Satisfies(value string) (bool, error) {
 	// We already have the value for the field from the graph, now just use semver to match
 	matchVersion, err := semver.NewVersion(value)
 	if err != nil {
-		// fmt.Printf("      => Error parsing semver for match value %s\n", err)
+		rlog.Debugf("      => Error parsing semver for match value %s\n", err)
 		return false, err
 	}
 	if req.Min != "" {
 		// Is the version provided greater than the min requested?
 		c, err := semver.NewConstraint(fmt.Sprintf(">= %s", req.Min))
 		if err != nil {
-			// fmt.Printf("      => Error parsing min constraint %s\n", err)
+			// rlog.Debug("      => Error parsing min constraint %s\n", err)
 			return false, err
 		}
 		// Check if the version meets the constraints. The a variable will be true.
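		// That is, satisfied below is true when the provided version meets the minimum constraint.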
satisfied := c.Check(matchVersion) if !satisfied { - // fmt.Printf(" => Not satisfied\n") + rlog.Debugf(" => Not satisfied\n") return false, err } @@ -91,13 +93,13 @@ func (req *RangeRequest) Satisfies(value string) (bool, error) { // Is the version provided less than the max requested? c, err := semver.NewConstraint(fmt.Sprintf("<= %s", req.Max)) if err != nil { - // fmt.Printf(" => Error parsing max constraint %s\n", err) + // rlog.Debug(" => Error parsing max constraint %s\n", err) return false, err } // Check if the version meets the constraints. The a variable will be true. satisfied := c.Check(matchVersion) if !satisfied { - // fmt.Printf(" => Not satisfied\n") + // rlog.Debug(" => Not satisfied\n") return false, err } } @@ -191,7 +193,7 @@ func (m RangeType) GetSlotResourceNeeds(slot *v1.Task) *types.SlotResourceNeeds if len(needs) == 0 { slotNeeds.Satisfied = true } - fmt.Printf(" => Assessing needs for slot: %v\n", slotNeeds) + // rlog.Debug(" => Assessing needs for slot: %v\n", slotNeeds) return slotNeeds } @@ -210,41 +212,40 @@ func (m RangeType) CheckSubsystemEdge( } // Determine if our slot needs can be met - // fmt.Printf("Looking at edge %s->%s\n", edge.Relation, edge.Vertex.Type) + rlog.Debugf("Looking at edge %s->%s\n", edge.Relation, edge.Vertex.Type) // TODO Keep a record if all are satisfied so we stop searching // earlier if this is the case on subsequent calls for i, subsys := range slotNeeds.Subsystems { - //fmt.Printf(" => Looking in subsystem %s\n", edge.Subsystem) + rlog.Debugf(" => Looking in subsystem %s\n", edge.Subsystem) // The subsystem has an edge defined here! if subsys.Name == edge.Subsystem { - // fmt.Printf(" => Found matching subsystem %s for %s\n", subsys.Name, edge.Subsystem) + rlog.Debugf(" => Found matching subsystem %s for %s\n", subsys.Name, edge.Subsystem) // This would match the top level subsystem name for k := range subsys.Attributes { - // fmt.Printf(" => Looking at edge %s '%s' for %s that needs %s\n", edge.Subsystem, edge.Vertex.Type, subsys.Name, k) + rlog.Debugf(" => Looking at edge %s '%s' for %s that needs %s\n", edge.Subsystem, edge.Vertex.Type, subsys.Name, k) // We care if the attribute is marked as a range if strings.HasPrefix(k, "range") { - - // fmt.Printf(" => Found %s and inspecting edge metadata %v\n", k, edge.Vertex.Metadata.Elements) - + rlog.Debugf(" => Found %s and inspecting edge metadata %v\n", k, edge.Vertex.Metadata.Elements) req := NewRangeRequest(k) + // Get the field requested by the jobspec toMatch, err := edge.Vertex.Metadata.GetStringElement(req.Field) if err != nil { continue } - // fmt.Printf(" => Found field requested for range match %s\n", toMatch) + rlog.Debugf(" => Found field requested for range match %s\n", toMatch) satisfied, err := req.Satisfies(toMatch) if err != nil { continue } if satisfied { - fmt.Printf(" => Resource '%s' has edge '%s' satisfies subsystem %s %s\n", vtx.Type, edge.Vertex.Type, subsys.Name, k) + rlog.Debugf(" => Resource '%s' has edge '%s' satisfies subsystem %s %s\n", vtx.Type, edge.Vertex.Type, subsys.Name, k) subsys.Attributes[k] = true } } diff --git a/plugins/backends/memory/cluster.go b/plugins/backends/memory/cluster.go index a0a63d6..d03ac09 100644 --- a/plugins/backends/memory/cluster.go +++ b/plugins/backends/memory/cluster.go @@ -6,6 +6,7 @@ import ( "sync" jgf "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" + rlog "github.com/converged-computing/rainbow/pkg/logger" ) var ( @@ -25,7 +26,6 @@ type ClusterGraph struct { // The dominant subsystem is a lookup 
in the subsystem map // It defaults to nodes (node resources) dominantSubsystem string - quiet bool } // Dominant subsystem gets the dominant subsystem @@ -72,13 +72,14 @@ func (g *ClusterGraph) LoadClusterNodes( if !ok { return fmt.Errorf("destination %s is defined as an edge, but missing as node in graph", edge.Label) } - // fmt.Printf("Adding edge from %s -%s-> %s\n", ss.Vertices[src].Type, edge.Relation, ss.Vertices[dest].Type) + rlog.Debugf("Adding edge from %s -%s-> %s\n", ss.Vertices[src].Type, edge.Relation, ss.Vertices[dest].Type) err := ss.AddInternalEdge(src, dest, 0, edge.Relation, subsystem) if err != nil { return err } } log.Printf("We have made an in memory graph (subsystem %s) with %d vertices!", subsystem, ss.CountVertices()) + // Show metrics ss.Metrics.Show() return nil @@ -97,7 +98,7 @@ func (c *ClusterGraph) validateNodes(nodes *jgf.JsonGraph) (error, int, int) { // NewClusterGraph creates a new cluster graph with a dominant subsystem // We assume the dominant is hard coded to be containment -func NewClusterGraph(name string, domSubsystem string, quiet bool) *ClusterGraph { +func NewClusterGraph(name string, domSubsystem string) *ClusterGraph { // If not defined, set the dominant subsystem if domSubsystem == "" { @@ -112,7 +113,6 @@ func NewClusterGraph(name string, domSubsystem string, quiet bool) *ClusterGraph Name: name, subsystem: subsystems, dominantSubsystem: defaultDominantSubsystem, - quiet: quiet, } return g } diff --git a/plugins/backends/memory/dfs.go b/plugins/backends/memory/dfs.go index 7161103..ad97745 100644 --- a/plugins/backends/memory/dfs.go +++ b/plugins/backends/memory/dfs.go @@ -5,6 +5,7 @@ import ( v1 "github.com/compspec/jobspec-go/pkg/jobspec/experimental" "github.com/converged-computing/rainbow/pkg/graph/algorithm" + rlog "github.com/converged-computing/rainbow/pkg/logger" "github.com/converged-computing/rainbow/pkg/types" ) @@ -53,9 +54,6 @@ func (g *ClusterGraph) DFSForMatch( checkResource(&resource) } - // Get the summary metrics for the subsystem - // fmt.Println(ss.Metrics.ResourceCounts) - isMatch := true for resourceType, needed := range totals { @@ -90,7 +88,7 @@ func (g *ClusterGraph) depthFirstSearch( ) (bool, error) { // Note that in the experimental version we have one task and thus one slot - // fmt.Printf(" šŸŽ°ļø Slots that need to be satisfied with matcher %s\n", matcher.Name()) + rlog.Debugf(" šŸŽ°ļø Slots that need to be satisfied with matcher %s\n", matcher.Name()) slots := map[string]*v1.Task{} // If a slot isn't defined for the task, assume the slot is at the top level @@ -104,20 +102,18 @@ func (g *ClusterGraph) depthFirstSearch( // If we don't have jobspec.Task.Resources, no slot to search for. 
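	// (the top-level resource totals were already checked in DFSForMatch above)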
// Return early based on top level counts if len(jobspec.Task.Resources) == 0 { - // fmt.Printf(" šŸŽ°ļø No resources defined, top level counts satisfied so cluster is match\n") + rlog.Debugf(" šŸŽ°ļø No resources defined, top level counts satisfied so cluster is match\n") return true, nil } // From this point on we assume we MUST satisfy the slot // Sanity check what we are trying to match for rname, rslot := range jobspec.Task.Resources { - fmt.Printf(" %s: %s\n", rname, rslot) + rlog.Verbosef(" %s: %s\n", rname, rslot) } // Look through our potential matching clusters - if !g.quiet { - fmt.Printf("\n šŸ”ļø Exploring cluster %s deeper with depth first search\n", g.Name) - } + rlog.Debugf("\n šŸ”ļø Exploring cluster %s deeper with depth first search\n", g.Name) // This is the root vertex of the cluster "cluster" we start with it // We can store this instead, but for now we can assume the index 0 // is the root, as it is the first one made / added @@ -139,18 +135,18 @@ func (g *ClusterGraph) depthFirstSearch( } // Subsystem edges are here, separate from dominant ones (so search is smaller) - for _, edges := range vtx.Subsystems { - // fmt.Printf(" => Searching for %s and resource type %s in subsystem %v with %d subsystem edges\n", lookingFor, resource.Type, sName, len(edges)) + for sName, edges := range vtx.Subsystems { + rlog.Debugf(" => Searching for %s and resource type %s in subsystem %v with %d subsystem edges\n", lookingFor, resource.Type, sName, len(edges)) - for _, child := range edges { - // fmt.Printf(" Found subsystem edge %s with type %s\n", sName, child.Vertex.Type) + for eName, child := range edges { + rlog.Debugf(" Found subsystem edge %s with type %s\n", eName, child.Vertex.Type) // Check if the subsystem edge satisfies the needs of the slot // This will update the slotNeeds.Satisfied matcher.CheckSubsystemEdge(slotNeeds, child, vtx) // Return early if minimum needs are satsified if slotNeeds.Satisfied { - // fmt.Printf(" Minimum slot needs are satisfied at %s for %s at %s, returning early.\n", vtx.Type, child.Subsystem, child.Vertex.Type) + rlog.Debugf(" Minimum slot needs are satisfied at %s for %s at %s, returning early.\n", vtx.Type, child.Subsystem, child.Vertex.Type) return slotsFound + vtx.Size } } @@ -158,7 +154,7 @@ func (g *ClusterGraph) depthFirstSearch( // Otherwise, we haven't found the right level of the graph, keep going for _, child := range vtx.Edges { - //fmt.Printf(" => Searching for %s and resource type %s %s->%s\n", lookingFor, resource.Type, child.Relation, child.Vertex.Type) + rlog.Debugf(" => Searching for %s and resource type %s %s->%s\n", lookingFor, resource.Type, child.Relation, child.Vertex.Type) // Only keep going if we aren't stopping here // This is also traversing the dominant subsystem @@ -170,10 +166,10 @@ func (g *ClusterGraph) depthFirstSearch( // Stop here is true when we are at the slot -> one level below, and // have done the subsystem assessment on this level. 
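		// Either report the slots counted under this vertex (when its needs were satisfied)
		// or contribute nothing from this subtree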
if slotNeeds.Satisfied { - //fmt.Printf(" slotNeeds are satsified for %v, returning %d slots matched\n", slotNeeds.Subsystems, slotsFound) + rlog.Debugf(" slotNeeds are satsified for %v, returning %d slots matched\n", slotNeeds.Subsystems, slotsFound) return slotsFound } - //fmt.Printf(" slotNeeds are not satsified for %v, returning 0 slots matched\n", slotNeeds.Subsystems) + rlog.Debugf(" slotNeeds are not satsified for %v, returning 0 slots matched\n", slotNeeds.Subsystems) return 0 } @@ -211,9 +207,7 @@ func (g *ClusterGraph) depthFirstSearch( if resource.With != nil { for _, subresource := range resource.With { slotsFound += findSlots(vertex, &subresource, slotResourceNeeds, slotsFound) - if !g.quiet { - fmt.Printf("Slots found %d/%d for vertex %s\n", slotsFound, slotsNeeded, vertex.Type) - } + rlog.Debugf("Slots found %d/%d for vertex %s\n", slotsFound, slotsNeeded, vertex.Type) } } // The slot is satisfied and we can continue searching resources diff --git a/plugins/backends/memory/graph.go b/plugins/backends/memory/graph.go index cd35465..199c710 100644 --- a/plugins/backends/memory/graph.go +++ b/plugins/backends/memory/graph.go @@ -15,6 +15,7 @@ import ( jgf "github.com/converged-computing/jsongraph-go/jsongraph/v2/graph" "github.com/converged-computing/rainbow/pkg/graph" "github.com/converged-computing/rainbow/pkg/graph/algorithm" + rlog "github.com/converged-computing/rainbow/pkg/logger" "github.com/converged-computing/rainbow/pkg/utils" "github.com/converged-computing/rainbow/plugins/backends/memory/service" ) @@ -24,7 +25,6 @@ type Graph struct { Clusters map[string]*ClusterGraph lock sync.RWMutex backupFile string - quiet bool // The dominant subsystem for all clusters, if desired to set dominantSubsystem string @@ -99,7 +99,7 @@ func (g *Graph) LoadClusterNodes( } // Create a new ClusterGraph - clusterG := NewClusterGraph(clusterName, subsystem, g.quiet) + clusterG := NewClusterGraph(clusterName, subsystem) err := clusterG.LoadClusterNodes(nodes, subsystem) if err != nil { return err @@ -126,10 +126,8 @@ func (g *Graph) Satisfies( } // Tell the user /logs we are looking for a match - if !g.quiet { - fmt.Printf("\nšŸ‡ļø Satisfy request to Graph šŸ‡ļø\n") - fmt.Printf(" jobspec: %s\n", payload) - } + rlog.Debugf("\nšŸ‡ļø Satisfy request to Graph šŸ‡ļø\n") + rlog.Debugf(" jobspec: %s\n", payload) matches := []string{} notMatches := []string{} @@ -146,9 +144,7 @@ func (g *Graph) Satisfies( matches = append(matches, clusterName) } else { notMatches = append(notMatches, clusterName) - if !g.quiet { - fmt.Printf(" match: šŸŽÆļø cluster %s does not have sufficient resources and is NOT a match\n", clusterName) - } + // fmt.Printf(" match: šŸŽÆļø cluster %s does not have sufficient resources and is NOT a match\n", clusterName) } } @@ -162,6 +158,9 @@ func (g *Graph) Satisfies( } // Add the matches to the response response.Clusters = matches + response.TotalClusters = int32(len(g.Clusters)) + response.TotalMatches = int32(len(matches)) + response.TotalMismatches = int32(len(notMatches)) response.Status = service.SatisfyResponse_RESULT_TYPE_SUCCESS return &response, nil } diff --git a/plugins/backends/memory/memory.go b/plugins/backends/memory/memory.go index b4bb118..1017ba8 100644 --- a/plugins/backends/memory/memory.go +++ b/plugins/backends/memory/memory.go @@ -116,13 +116,6 @@ func (g MemoryGraph) Init( graphClient.backupFile = backupFile } - quiet, ok := options["quiet"] - if ok { - if quiet == "true" || quiet == "yes" { - graphClient.quiet = true - } - } - // Warning: 
this assumes one client running with one graph host host, ok := options["host"] if ok { diff --git a/plugins/backends/memory/nodes.go b/plugins/backends/memory/nodes.go index 44e3808..e1b2f4b 100644 --- a/plugins/backends/memory/nodes.go +++ b/plugins/backends/memory/nodes.go @@ -35,13 +35,6 @@ func (g *ClusterGraph) addNodes( defer g.lock.Unlock() log.Printf("Preparing to load %d nodes and %d edges\n", nNodes, nEdges) - // Get the root vertex, every new subsystem starts there! - // The root vertex is named according to the subsystem - // root, exists := ss.GetNode(subsystem) - // if !exists { - // return ss, lookup, fmt.Errorf("root node does not exist for subsystem %s, this should not happen", subsystem) - // } - // Create an empty resource counter for the subsystem ss.Metrics.NewResource(subsystem) diff --git a/plugins/backends/memory/service/memory.pb.go b/plugins/backends/memory/service/memory.pb.go index 97cb4a1..fb88111 100644 --- a/plugins/backends/memory/service/memory.pb.go +++ b/plugins/backends/memory/service/memory.pb.go @@ -242,8 +242,11 @@ type SatisfyResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Clusters []string `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` - Status SatisfyResponse_ResultType `protobuf:"varint,2,opt,name=status,proto3,enum=service.SatisfyResponse_ResultType" json:"status,omitempty"` + Clusters []string `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + Status SatisfyResponse_ResultType `protobuf:"varint,2,opt,name=status,proto3,enum=service.SatisfyResponse_ResultType" json:"status,omitempty"` + TotalClusters int32 `protobuf:"varint,3,opt,name=total_clusters,json=totalClusters,proto3" json:"total_clusters,omitempty"` + TotalMatches int32 `protobuf:"varint,4,opt,name=total_matches,json=totalMatches,proto3" json:"total_matches,omitempty"` + TotalMismatches int32 `protobuf:"varint,5,opt,name=total_mismatches,json=totalMismatches,proto3" json:"total_mismatches,omitempty"` } func (x *SatisfyResponse) Reset() { @@ -292,6 +295,27 @@ func (x *SatisfyResponse) GetStatus() SatisfyResponse_ResultType { return SatisfyResponse_RESULT_TYPE_UNSPECIFIED } +func (x *SatisfyResponse) GetTotalClusters() int32 { + if x != nil { + return x.TotalClusters + } + return 0 +} + +func (x *SatisfyResponse) GetTotalMatches() int32 { + if x != nil { + return x.TotalMatches + } + return 0 +} + +func (x *SatisfyResponse) GetTotalMismatches() int32 { + if x != nil { + return x.TotalMismatches + } + return 0 +} + // Testing response - the server's response to a request. 
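// Only a result status is carried here; the satisfy totals above live on SatisfyResponse.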
type Response struct { state protoimpl.MessageState @@ -354,43 +378,50 @@ var file_memory_proto_rawDesc = []byte{ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x22, 0xc5, 0x01, 0x0a, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x22, 0xbc, 0x02, 0x0a, 0x0f, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x59, 0x0a, 0x0a, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x53, 0x55, 0x4c, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, - 0x11, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x10, 0x02, 0x22, 0x9b, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x59, 0x0a, 0x0a, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x52, - 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, - 0x10, 0x02, 0x32, 0x88, 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x47, 0x72, 0x61, - 0x70, 0x68, 0x12, 0x3e, 0x0a, 0x07, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x79, 0x12, 0x17, 0x2e, + 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x6d, + 0x69, 0x73, 0x6d, 0x61, 
0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, + 0x22, 0x59, 0x0a, 0x0a, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x17, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x52, + 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0x9b, 0x01, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x59, + 0x0a, 0x0a, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, + 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45, 0x53, + 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, + 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x53, 0x55, 0x4c, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x32, 0x88, 0x01, 0x0a, 0x0b, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x3e, 0x0a, 0x07, 0x53, 0x61, 0x74, + 0x69, 0x73, 0x66, 0x79, 0x12, 0x17, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, + 0x61, 0x74, 0x69, 0x73, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x53, 0x61, 0x74, 0x69, 0x73, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x39, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x18, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x40, 0x5a, - 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x76, - 0x65, 0x72, 0x67, 0x65, 0x64, 0x2d, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, - 0x72, 0x61, 0x69, 0x6e, 0x62, 0x6f, 0x77, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, - 0x2f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x08, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x11, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 
0x65, 0x22, 0x00, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x67, 0x65, 0x64, 0x2d, 0x63, 0x6f, 0x6d, + 0x70, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x2f, 0x72, 0x61, 0x69, 0x6e, 0x62, 0x6f, 0x77, 0x2f, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x73, 0x2f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/plugins/backends/memory/service/memory.proto b/plugins/backends/memory/service/memory.proto index 2d3fda0..a4ee411 100644 --- a/plugins/backends/memory/service/memory.proto +++ b/plugins/backends/memory/service/memory.proto @@ -30,6 +30,9 @@ message SatisfyResponse { repeated string clusters = 1; ResultType status = 2; + int32 total_clusters = 3; + int32 total_matches = 4; + int32 total_mismatches = 5; } diff --git a/python/v1/README.md b/python/v1/README.md index 746eec3..8fcc897 100644 --- a/python/v1/README.md +++ b/python/v1/README.md @@ -222,7 +222,7 @@ options: And then request and accept jobs: ```console - python examples/flux/receive-jobs.py --config-path ./rainbow-config.yaml +python examples/flux/receive-jobs.py --config-path ./rainbow-config.yaml Status: REQUEST_JOBS_SUCCESS Received 1 jobs to accept... ``` diff --git a/python/v1/examples/flux/submit-job.py b/python/v1/examples/flux/submit-job.py index 2c242a3..3bcb1f8 100644 --- a/python/v1/examples/flux/submit-job.py +++ b/python/v1/examples/flux/submit-job.py @@ -5,8 +5,8 @@ import json import argparse from rainbow.client import RainbowClient -import rainbow.jobspec.converter as converter -import rainbow.jobspec as js +import jobspec.core.converter as converter +from jobspec.core import Jobspec def get_parser(): parser = argparse.ArgumentParser(description="šŸŒˆļø Rainbow scheduler submit") @@ -35,7 +35,7 @@ def main(): print(json.dumps(raw, indent=4)) # This loads and validates - jobspec = js.Jobspec(raw) + jobspec = Jobspec(raw) response = cli.submit_jobspec(jobspec) print(response) diff --git a/python/v1/examples/flux/submit-jobspec.py b/python/v1/examples/flux/submit-jobspec.py index 86d4e08..0207bcb 100644 --- a/python/v1/examples/flux/submit-jobspec.py +++ b/python/v1/examples/flux/submit-jobspec.py @@ -5,8 +5,7 @@ import json import argparse from rainbow.client import RainbowClient -import rainbow.jobspec.converter as converter -import rainbow.jobspec as js +from jobspec.core import Jobspec def get_parser(): parser = argparse.ArgumentParser(description="šŸŒˆļø Rainbow scheduler submit") @@ -25,7 +24,7 @@ def main(): # Generate the jobspec here so we can json dump it for the user # Note that this can be done with cli.submit_job(command, nodes, tasks) - jobspec = js.Jobspec(args.jobspec) + jobspec = Jobspec(args.jobspec) print(json.dumps(jobspec.jobspec, indent=4)) response = cli.submit_jobspec(jobspec) print(response) diff --git a/python/v1/rainbow/client.py b/python/v1/rainbow/client.py index abdd727..d2ffc22 100644 --- a/python/v1/rainbow/client.py +++ b/python/v1/rainbow/client.py @@ -8,6 +8,7 @@ import rainbow.backends as backends import rainbow.config as config import rainbow.defaults as defaults +import rainbow.types as types import rainbow.utils as utils from rainbow.protos import rainbow_pb2, rainbow_pb2_grpc @@ -161,13 +162,13 @@ def submit_jobspec(self, jobspec): it custom with your own special logic. 
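        The return value is a rainbow.types.SatisfyResponse that combines the satisfy
        totals from the graph database with the final submission status.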
""" # Ask the database backend if our jobspec can be satisfied - response = self.backend.satisfies(jobspec, self.cfg.match_algorithm) - matches = response.clusters + satisfy_response = self.backend.satisfies(jobspec, self.cfg.match_algorithm) + matches = satisfy_response.clusters # No matches? if not matches: print("No clusters match the request") - return response + return satisfy_response # TODO these need to have (again) the token and name checked # This is backwards because we check the token AFTER getting it, and it needs @@ -179,7 +180,7 @@ def submit_jobspec(self, jobspec): rainbow_pb2.SubmitJobRequest.Cluster(name=match["name"], token=match["token"]) ) - # THEN contact rainbwo with clusters + # THEN contact rainbow with clusters # These are submit variables. A more substantial submit script would have argparse, etc. submitRequest = rainbow_pb2.SubmitJobRequest( name=jobspec.name, @@ -190,7 +191,15 @@ def submit_jobspec(self, jobspec): with grpc.insecure_channel(self.host) as channel: stub = rainbow_pb2_grpc.RainbowSchedulerStub(channel) response = stub.SubmitJob(submitRequest) - return response + + res = types.SatisfyResponse( + cluster=response.cluster, + total_matches=satisfy_response.total_matches, + total_mismatches=satisfy_response.total_mismatches, + total_clusters=satisfy_response.total_clusters, + status=response.status, + ) + return res def submit_job(self, command, nodes=1, tasks=1): """ diff --git a/python/v1/rainbow/protos/memory_pb2.py b/python/v1/rainbow/protos/memory_pb2.py index 4540da6..cbc9eec 100644 --- a/python/v1/rainbow/protos/memory_pb2.py +++ b/python/v1/rainbow/protos/memory_pb2.py @@ -14,7 +14,7 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n\x0cmemory.proto\x12\x07service"C\n\x0fRegisterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\t\x12\x11\n\tsubsystem\x18\x03 \x01(\t"2\n\x0eSatisfyRequest\x12\x0f\n\x07payload\x18\x01 \x01(\t\x12\x0f\n\x07matcher\x18\x02 \x01(\t"\xb3\x01\n\x0fSatisfyResponse\x12\x10\n\x08\x63lusters\x18\x01 \x03(\t\x12\x33\n\x06status\x18\x02 \x01(\x0e\x32#.service.SatisfyResponse.ResultType"Y\n\nResultType\x12\x1b\n\x17RESULT_TYPE_UNSPECIFIED\x10\x00\x12\x17\n\x13RESULT_TYPE_SUCCESS\x10\x01\x12\x15\n\x11RESULT_TYPE_ERROR\x10\x02"\x93\x01\n\x08Response\x12,\n\x06status\x18\x01 \x01(\x0e\x32\x1c.service.Response.ResultType"Y\n\nResultType\x12\x1b\n\x17RESULT_TYPE_UNSPECIFIED\x10\x00\x12\x17\n\x13RESULT_TYPE_SUCCESS\x10\x01\x12\x15\n\x11RESULT_TYPE_ERROR\x10\x02\x32\x88\x01\n\x0bMemoryGraph\x12>\n\x07Satisfy\x12\x17.service.SatisfyRequest\x1a\x18.service.SatisfyResponse"\x00\x12\x39\n\x08Register\x12\x18.service.RegisterRequest\x1a\x11.service.Response"\x00\x42@Z>github.com/converged-computing/rainbow/backends/memory/serviceb\x06proto3' + b'\n\x0cmemory.proto\x12\x07service"C\n\x0fRegisterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x02 \x01(\t\x12\x11\n\tsubsystem\x18\x03 \x01(\t"2\n\x0eSatisfyRequest\x12\x0f\n\x07payload\x18\x01 \x01(\t\x12\x0f\n\x07matcher\x18\x02 \x01(\t"\xfc\x01\n\x0fSatisfyResponse\x12\x10\n\x08\x63lusters\x18\x01 \x03(\t\x12\x33\n\x06status\x18\x02 \x01(\x0e\x32#.service.SatisfyResponse.ResultType\x12\x16\n\x0etotal_clusters\x18\x03 \x01(\x05\x12\x15\n\rtotal_matches\x18\x04 \x01(\x05\x12\x18\n\x10total_mismatches\x18\x05 \x01(\x05"Y\n\nResultType\x12\x1b\n\x17RESULT_TYPE_UNSPECIFIED\x10\x00\x12\x17\n\x13RESULT_TYPE_SUCCESS\x10\x01\x12\x15\n\x11RESULT_TYPE_ERROR\x10\x02"\x93\x01\n\x08Response\x12,\n\x06status\x18\x01 
\x01(\x0e\x32\x1c.service.Response.ResultType"Y\n\nResultType\x12\x1b\n\x17RESULT_TYPE_UNSPECIFIED\x10\x00\x12\x17\n\x13RESULT_TYPE_SUCCESS\x10\x01\x12\x15\n\x11RESULT_TYPE_ERROR\x10\x02\x32\x88\x01\n\x0bMemoryGraph\x12>\n\x07Satisfy\x12\x17.service.SatisfyRequest\x1a\x18.service.SatisfyResponse"\x00\x12\x39\n\x08Register\x12\x18.service.RegisterRequest\x1a\x11.service.Response"\x00\x42@Z>github.com/converged-computing/rainbow/backends/memory/serviceb\x06proto3' ) _globals = globals() @@ -30,13 +30,13 @@ _globals["_SATISFYREQUEST"]._serialized_start = 94 _globals["_SATISFYREQUEST"]._serialized_end = 144 _globals["_SATISFYRESPONSE"]._serialized_start = 147 - _globals["_SATISFYRESPONSE"]._serialized_end = 326 - _globals["_SATISFYRESPONSE_RESULTTYPE"]._serialized_start = 237 - _globals["_SATISFYRESPONSE_RESULTTYPE"]._serialized_end = 326 - _globals["_RESPONSE"]._serialized_start = 329 - _globals["_RESPONSE"]._serialized_end = 476 - _globals["_RESPONSE_RESULTTYPE"]._serialized_start = 237 - _globals["_RESPONSE_RESULTTYPE"]._serialized_end = 326 - _globals["_MEMORYGRAPH"]._serialized_start = 479 - _globals["_MEMORYGRAPH"]._serialized_end = 615 + _globals["_SATISFYRESPONSE"]._serialized_end = 399 + _globals["_SATISFYRESPONSE_RESULTTYPE"]._serialized_start = 310 + _globals["_SATISFYRESPONSE_RESULTTYPE"]._serialized_end = 399 + _globals["_RESPONSE"]._serialized_start = 402 + _globals["_RESPONSE"]._serialized_end = 549 + _globals["_RESPONSE_RESULTTYPE"]._serialized_start = 310 + _globals["_RESPONSE_RESULTTYPE"]._serialized_end = 399 + _globals["_MEMORYGRAPH"]._serialized_start = 552 + _globals["_MEMORYGRAPH"]._serialized_end = 688 # @@protoc_insertion_point(module_scope) diff --git a/python/v1/rainbow/protos/memory_pb2.pyi b/python/v1/rainbow/protos/memory_pb2.pyi index 75288f3..3ccc780 100644 --- a/python/v1/rainbow/protos/memory_pb2.pyi +++ b/python/v1/rainbow/protos/memory_pb2.pyi @@ -25,7 +25,7 @@ class SatisfyRequest(_message.Message): def __init__(self, payload: _Optional[str] = ..., matcher: _Optional[str] = ...) -> None: ... class SatisfyResponse(_message.Message): - __slots__ = ("clusters", "status") + __slots__ = ("clusters", "status", "total_clusters", "total_matches", "total_mismatches") class ResultType(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = () RESULT_TYPE_UNSPECIFIED: _ClassVar[SatisfyResponse.ResultType] @@ -36,9 +36,15 @@ class SatisfyResponse(_message.Message): RESULT_TYPE_ERROR: SatisfyResponse.ResultType CLUSTERS_FIELD_NUMBER: _ClassVar[int] STATUS_FIELD_NUMBER: _ClassVar[int] + TOTAL_CLUSTERS_FIELD_NUMBER: _ClassVar[int] + TOTAL_MATCHES_FIELD_NUMBER: _ClassVar[int] + TOTAL_MISMATCHES_FIELD_NUMBER: _ClassVar[int] clusters: _containers.RepeatedScalarFieldContainer[str] status: SatisfyResponse.ResultType - def __init__(self, clusters: _Optional[_Iterable[str]] = ..., status: _Optional[_Union[SatisfyResponse.ResultType, str]] = ...) -> None: ... + total_clusters: int + total_matches: int + total_mismatches: int + def __init__(self, clusters: _Optional[_Iterable[str]] = ..., status: _Optional[_Union[SatisfyResponse.ResultType, str]] = ..., total_clusters: _Optional[int] = ..., total_matches: _Optional[int] = ..., total_mismatches: _Optional[int] = ...) -> None: ... 
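The three total_* counters added above let an SDK client report how selective a satisfy request was, not just which clusters matched. A minimal sketch of reading them, assuming the regenerated memory_pb2 module; the field values below are invented for illustration:

```python
from rainbow.protos import memory_pb2

# Hypothetical result, shaped like what the memory backend returns
response = memory_pb2.SatisfyResponse(
    clusters=["spack-builder"],
    total_clusters=3,
    total_matches=1,
    total_mismatches=2,
)

# Report how many clusters were considered versus matched
print(f"{response.total_matches}/{response.total_clusters} clusters matched")
for cluster in response.clusters:
    print(f" => {cluster}")
```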
diff --git a/python/v1/rainbow/schema.py b/python/v1/rainbow/schema.py
index d32dd4a..8d7dd02 100644
--- a/python/v1/rainbow/schema.py
+++ b/python/v1/rainbow/schema.py
@@ -39,6 +39,7 @@
     "description": "metadata for the rainbow graph database",
     "type": "object",
     "properties": {
+        "host": {"type": "string"},
         "name": {"type": "string"},
         "options": {"type": "object"},
     },
diff --git a/python/v1/rainbow/types.py b/python/v1/rainbow/types.py
new file mode 100644
index 0000000..a642392
--- /dev/null
+++ b/python/v1/rainbow/types.py
@@ -0,0 +1,10 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class SatisfyResponse:
+    cluster: str
+    total_matches: int
+    total_mismatches: int
+    total_clusters: int
+    status: int

From b0a73519d77e21adb36932da82ea79ed774d7b1a Mon Sep 17 00:00:00 2001
From: vsoch
Date: Mon, 1 Apr 2024 01:41:47 -0600
Subject: [PATCH 3/3] hack: add in range run example script

Signed-off-by: vsoch
---
 .../match-algorithms/range/rainbow-config.yaml |  2 +-
 hack/run-range-register.sh                     | 12 ++++++++++++
 plugins/algorithms/range/range.go              |  8 ++++----
 3 files changed, 17 insertions(+), 5 deletions(-)
 create mode 100755 hack/run-range-register.sh

diff --git a/docs/examples/match-algorithms/range/rainbow-config.yaml b/docs/examples/match-algorithms/range/rainbow-config.yaml
index d4f98c1..7ae1b9e 100644
--- a/docs/examples/match-algorithms/range/rainbow-config.yaml
+++ b/docs/examples/match-algorithms/range/rainbow-config.yaml
@@ -8,7 +8,7 @@ scheduler:
     name: match
 cluster:
   name: spack-builder
-  secret: bd72a288-cc3d-4659-910b-d665fd95f1a3
+  secret: 37e5b798-189f-4c38-bc1c-0a14877acbcf
 graphdatabase:
   name: memory
   host: 127.0.0.1:50051
diff --git a/hack/run-range-register.sh b/hack/run-range-register.sh
new file mode 100755
index 0000000..52a7da7
--- /dev/null
+++ b/hack/run-range-register.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+go run cmd/rainbow/rainbow.go config init --cluster-name spack-builder --config-path ./docs/examples/match-algorithms/range/rainbow-config.yaml --match-algorithm range
+
+# Register your nodes
+go run cmd/rainbow/rainbow.go register cluster --cluster-name spack-builder --nodes-json ./docs/examples/match-algorithms/range/cluster-nodes.json --config-path ./docs/examples/match-algorithms/range/rainbow-config.yaml --save
+
+# Register the subsystem
+go run cmd/rainbow/rainbow.go register subsystem --subsystem spack --nodes-json ./docs/examples/match-algorithms/range/spack-subsystem.json --config-path ./docs/examples/match-algorithms/range/rainbow-config.yaml
+
+# Submit a job that asks for a valid range
+go run ./cmd/rainbow/rainbow.go submit --config-path ./docs/examples/match-algorithms/range/rainbow-config.yaml --jobspec ./docs/examples/match-algorithms/range/jobspec-valid-range.yaml --match-algorithm range
diff --git a/plugins/algorithms/range/range.go b/plugins/algorithms/range/range.go
index 38a0c73..ed1f3bc 100644
--- a/plugins/algorithms/range/range.go
+++ b/plugins/algorithms/range/range.go
@@ -78,7 +78,7 @@ func (req *RangeRequest) Satisfies(value string) (bool, error) {
 		// Is the version provided greater than the min requested?
 		c, err := semver.NewConstraint(fmt.Sprintf(">= %s", req.Min))
 		if err != nil {
-			// rlog.Debug(" => Error parsing min constraint %s\n", err)
+			rlog.Debugf(" => Error parsing min constraint %s\n", err)
 			return false, err
 		}
 		// Check if the version meets the constraints. The a variable will be true.
@@ -93,13 +93,13 @@ func (req *RangeRequest) Satisfies(value string) (bool, error) {
 		// Is the version provided less than the max requested?
 		c, err := semver.NewConstraint(fmt.Sprintf("<= %s", req.Max))
 		if err != nil {
-			// rlog.Debug(" => Error parsing max constraint %s\n", err)
+			rlog.Debugf(" => Error parsing max constraint %s\n", err)
 			return false, err
 		}
 		// Check if the version meets the constraints. The a variable will be true.
 		satisfied := c.Check(matchVersion)
 		if !satisfied {
-			// rlog.Debug(" => Not satisfied\n")
+			rlog.Debug(" => Not satisfied")
 			return false, err
 		}
 	}
@@ -193,7 +193,7 @@ func (m RangeType) GetSlotResourceNeeds(slot *v1.Task) *types.SlotResourceNeeds
 	if len(needs) == 0 {
 		slotNeeds.Satisfied = true
 	}
 
-	// rlog.Debug(" => Assessing needs for slot: %v\n", slotNeeds)
+	rlog.Debugf(" => Assessing needs for slot: %v\n", slotNeeds)
 	return slotNeeds
 }
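The range checks above reduce to two semver constraints: when either bound is set, the candidate version must satisfy ">= min" and "<= max", and both comparisons are inclusive. For intuition, here is the same logic sketched in Python with the packaging library; this is an illustrative stand-in, not the plugin's actual code, which uses Masterminds/semver in Go:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version


def satisfies(value: str, minimum: str = "", maximum: str = "") -> bool:
    """Mirror RangeRequest.Satisfies: value must fall within [minimum, maximum]."""
    spec = SpecifierSet()
    if minimum:
        spec &= SpecifierSet(f">={minimum}")
    if maximum:
        spec &= SpecifierSet(f"<={maximum}")
    return Version(value) in spec


# A version inside the requested (inclusive) range matches, one outside does not
assert satisfies("0.19.0", minimum="0.19.0", maximum="0.20.0")
assert not satisfies("0.21.0", minimum="0.19.0", maximum="0.20.0")
```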
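On the Python side, submit_jobspec now hands back the rainbow.types.SatisfyResponse dataclass from PATCH 2/3 rather than a raw protobuf message, so callers see the match counters alongside the assigned cluster. A sketch of the consuming side, assuming a running rainbow server and a registered cluster; the constructor argument and file paths here are illustrative, not exact:

```python
from jobspec.core import Jobspec
from rainbow.client import RainbowClient

# Hypothetical config path; see the flux examples for the real flags
cli = RainbowClient(config_file="./rainbow-config.yaml")

# Load and validate a jobspec, then submit it (as in submit-jobspec.py)
jobspec = Jobspec("./jobspec-valid-range.yaml")
response = cli.submit_jobspec(jobspec)

# The result is the plain dataclass, so fields are simple attributes
print(f"assigned cluster: {response.cluster} (status {response.status})")
print(
    f"{response.total_matches} matches, {response.total_mismatches} mismatches "
    f"across {response.total_clusters} clusters"
)
```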
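For a quick end-to-end check of the range matcher, the new hack script can be run from the repository root, assuming a rainbow server is already listening on the host configured above:

```console
./hack/run-range-register.sh
```

The script initializes a config, registers the cluster and the spack subsystem, and submits a jobspec that should match the registered range.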