From eb3c530fe8e6225f74d3bbedd403be8a5aa499b2 Mon Sep 17 00:00:00 2001
From: Ondra Machacek
Date: Wed, 16 Oct 2024 15:42:23 +0200
Subject: [PATCH] Remove collector container and merge it with planner

Since the collector is a simple golang app, we don't need to run it as a
standalone container. This PR changes the collector to be run from the
planner-agent.

Signed-off-by: Ondra Machacek
---
 Containerfile.agent                          |   2 +-
 Containerfile.collector                      |  25 ----
 Makefile                                     |   8 +-
 .../README.md => planner-agent/COLLECTOR.md} |  24 +++-
 data/ignition.template                       |  77 +++---------
 doc/agentvm.md                               |  39 ++----
 doc/deployment.md                            |   4 +-
 internal/agent/agent.go                      |   3 +
 .../main.go => internal/agent/collector.go   | 115 ++++++++++--------
 internal/agent/inventory.go                  |   4 +-
 internal/image/ova.go                        |  18 ++-
 11 files changed, 123 insertions(+), 196 deletions(-)
 delete mode 100644 Containerfile.collector
 rename cmd/{collector/README.md => planner-agent/COLLECTOR.md} (56%)
 rename cmd/collector/main.go => internal/agent/collector.go (87%)

diff --git a/Containerfile.agent b/Containerfile.agent
index 9399180..967a55c 100644
--- a/Containerfile.agent
+++ b/Containerfile.agent
@@ -15,7 +15,7 @@ RUN go mod download
 COPY . .
 
 USER 0
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -buildvcs=false -o /planner-agent cmd/planner-agent/main.go
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -buildvcs=false -o /planner-agent cmd/planner-agent/main.go
 
 FROM registry.access.redhat.com/ubi9/ubi-micro
 
diff --git a/Containerfile.collector b/Containerfile.collector
deleted file mode 100644
index c3b3a79..0000000
--- a/Containerfile.collector
+++ /dev/null
@@ -1,25 +0,0 @@
-# Builder container
-FROM registry.access.redhat.com/ubi9/go-toolset as builder
-
-WORKDIR /app
-COPY go.mod go.sum ./
-RUN go mod download
-
-COPY . .
-
-USER 0
-RUN CGO_ENABLED=1 GOOS=linux go build -buildvcs=false -o /collector cmd/collector/main.go
-
-FROM registry.access.redhat.com/ubi9/ubi-micro
-
-WORKDIR /app
-
-COPY --from=builder /collector /app/
-
-# Use non-root user
-RUN chown -R 1001:0 /app
-USER 1001
-
-# Run the server
-EXPOSE 3333
-ENTRYPOINT ["/app/collector"]
diff --git a/Makefile b/Makefile
index da5f023..1cb2cc1 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,6 @@ GO_CACHE := -v $${HOME}/go/migration-planner-go-cache:/opt/app-root/src/go:Z -v
 TIMEOUT ?= 30m
 VERBOSE ?= false
 MIGRATION_PLANNER_AGENT_IMAGE ?= quay.io/kubev2v/migration-planner-agent
-MIGRATION_PLANNER_COLLECTOR_IMAGE ?= quay.io/kubev2v/migration-planner-collector
 MIGRATION_PLANNER_API_IMAGE ?= quay.io/kubev2v/migration-planner-api
 MIGRATION_PLANNER_UI_IMAGE ?= quay.io/kubev2v/migration-planner-ui
 DOWNLOAD_RHCOS ?= true
@@ -81,23 +80,18 @@ build-api: bin
 
 bin/.migration-planner-agent-container: bin Containerfile.agent go.mod go.sum $(GO_FILES)
 	podman build -f Containerfile.agent -t $(MIGRATION_PLANNER_AGENT_IMAGE):latest
 
-bin/.migration-planner-collector-container: bin Containerfile.collector go.mod go.sum $(GO_FILES)
-	podman build -f Containerfile.collector -t $(MIGRATION_PLANNER_COLLECTOR_IMAGE):latest
-
 bin/.migration-planner-api-container: bin Containerfile.api go.mod go.sum $(GO_FILES)
 	podman build -f Containerfile.api -t $(MIGRATION_PLANNER_API_IMAGE):latest
 
 migration-planner-api-container: bin/.migration-planner-api-container
-migration-planner-collector-container: bin/.migration-planner-collector-container
 migration-planner-agent-container: bin/.migration-planner-agent-container
 
-build-containers: migration-planner-api-container migration-planner-agent-container migration-planner-collector-container
+build-containers: migration-planner-api-container migration-planner-agent-container
 .PHONY: build-containers
 
 push-containers: build-containers
 	podman push $(MIGRATION_PLANNER_API_IMAGE):latest
-	podman push $(MIGRATION_PLANNER_COLLECTOR_IMAGE):latest
 	podman push $(MIGRATION_PLANNER_AGENT_IMAGE):latest
 
 deploy-on-openshift:
diff --git a/cmd/collector/README.md b/cmd/planner-agent/COLLECTOR.md
similarity index 56%
rename from cmd/collector/README.md
rename to cmd/planner-agent/COLLECTOR.md
index 4e2e978..1fe22be 100644
--- a/cmd/collector/README.md
+++ b/cmd/planner-agent/COLLECTOR.md
@@ -4,11 +4,29 @@ To run the collector locally here are the steps.
 
 ## Prepare
 Prepare the dependencies.
 
+### Configuration
+Create the planner-agent configuration file:
+
+```
+$ mkdir /tmp/config
+$ mkdir /tmp/data
+$ cat << EOF > ~/.planner-agent/config.yaml
+config-dir: /tmp/config
+data-dir: /tmp/data
+log-level: debug
+source-id: 9195e61d-e56d-407d-8b29-ff2fb7986928
+update-interval: 5s
+planner-service:
+  service:
+    server: http://127.0.0.1:7443
+EOF
+```
+
 ### Credentials
 Create VMware credentials file.
 
 ```
-cat << EOF > /tmp/creds.json
+cat << EOF > /tmp/data/creds.json
 {
   "username": "user@example.com",
   "password": "userpassword",
@@ -28,7 +46,7 @@ podman run -p 8181:8181 -d --name opa --entrypoint '/usr/bin/opa' quay.io/kubev2
 
 Build & run the collector code, specifying the credentials file as the first argument and the path to the inventory file (where the data should be written) as the second.
 ```
-go run cmd/collector/main.go /tmp/creds.json /tmp/inventory.json
+go run cmd/planner-agent/main.go -config ~/.planner-agent/config.yaml
 ```
 
-Explore `/tmp/inventory.json`
+Explore `/tmp/data/inventory.json`
diff --git a/data/ignition.template b/data/ignition.template
index 0dfba3f..dc40c3f 100644
--- a/data/ignition.template
+++ b/data/ignition.template
@@ -22,19 +22,19 @@ storage:
         name: core
       group:
         name: core
-    - path: /home/core/vol
+    - path: /home/core/.migration-planner
       overwrite: true
       user:
         name: core
       group:
         name: core
-    - path: /home/core/vol/config
+    - path: /home/core/.migration-planner/config
       overwrite: true
       user:
         name: core
       group:
         name: core
-    - path: /home/core/vol/data
+    - path: /home/core/.migration-planner/data
       overwrite: true
       user:
         name: core
@@ -46,7 +46,7 @@ storage:
       contents:
         inline: |
           PasswordAuthentication yes
-    - path: /home/core/vol/config.yaml
+    - path: /home/core/.migration-planner/config/config.yaml
       contents:
         inline: |
           config-dir: /agent/config
@@ -63,7 +63,7 @@ storage:
         name: core
       group:
         name: core
-    - path: /home/core/.config/containers/systemd/collector.network
+    - path: /home/core/.config/containers/systemd/agent.network
      contents:
        inline: |
          [Network]
@@ -71,50 +71,24 @@ storage:
         name: core
       group:
         name: core
-    - path: /home/core/.config/containers/systemd/planner.volume
-      contents:
-        inline: |
-          [Volume]
-          VolumeName=planner.volume
-      user:
-        name: core
-      group:
-        name: core
-    - path: /home/core/.config/containers/systemd/planner-setup.container
-      mode: 0644
-      contents:
-        inline: |
-          [Unit]
-          Description=Prepare data volume for the container
-          Before=planner-agent.service
-
-          [Container]
-          Image=registry.access.redhat.com/ubi9/ubi-micro
-          Exec=sh -c "cp -r /mnt/* /agent/ && chmod -R a+rwx /agent"
-          Volume=planner.volume:/agent
-          Volume=/home/core/vol:/mnt:Z
-
-          [Service]
-          Type=oneshot
-          RemainAfterExit=yes
-
-          [Install]
-          WantedBy=multi-user.target default.target
     - path: /home/core/.config/containers/systemd/planner-agent.container
       mode: 0644
       contents:
        inline: |
          [Unit]
          Description=Planner agent quadlet
-          Wants=planner-setup.service
+          Wants=planner-agent-opa.service
 
          [Container]
          Image={{.MigrationPlannerAgentImage}}
          ContainerName=planner-agent
          AutoUpdate=registry
-          Exec= -config /agent/config.yaml
+          Exec= -config /agent/config/config.yaml
          PublishPort=3333:3333
-          Volume=planner.volume:/agent
+          Volume=/home/core/.migration-planner:/agent:Z
+          Environment=OPA_SERVER=opa:8181
+          Network=agent.network
+          UserNS=keep-id:uid=1001
 
          [Install]
          WantedBy=multi-user.target default.target
@@ -123,8 +97,8 @@ storage:
       contents:
        inline: |
          [Unit]
-          Description=Collector quadlet
-          Before=planner-agent-collector.service
+          Description=OPA quadlet
+          Before=planner-agent.service
 
          [Container]
          ContainerName=opa
@@ -132,30 +106,7 @@ storage:
          Entrypoint=/usr/bin/opa
          PublishPort=8181:8181
          Exec=run --server /usr/share/opa/policies
-          Network=collector.network
-
-          [Install]
-          WantedBy=multi-user.target default.target
-
-    - path: /home/core/.config/containers/systemd/planner-agent-collector.container
-      mode: 0644
-      contents:
-        inline: |
-          [Unit]
-          Description=Collector quadlet
-          Wants=planner-agent-opa.service
-
-          [Container]
-          Image={{.MigrationPlannerCollectorImage}}
-          ContainerName=migration-planner-collector
-          AutoUpdate=registry
-          Exec=/vol/data/credentials.json /vol/data/inventory.json
-          Volume=planner.volume:/vol
-          Environment=OPA_SERVER=opa:8181
-          Network=collector.network
-
-          [Service]
-          Restart=on-failure
+          Network=agent.network
 
          [Install]
          WantedBy=multi-user.target default.target
diff --git a/doc/agentvm.md b/doc/agentvm.md
index 33f9dbc..6d39b4f 100644
--- a/doc/agentvm.md
+++ b/doc/agentvm.md
@@ -4,23 +4,17 @@ The VM is initialized using ignition, which configures multiple containers that
 
 ## Systemd services
 The following are a list of systemd services that can be found on agent virtual machines. All of the services
-are defined as quadlets. Quadlet configuration can be found in the [ignition template file](../data/config.ign.template).
-The Agent containerfile can be found [here](../Containerfile.agent). The collector containerfile is [here](../Containerfile.collector).
-
-### planner-setup
-Planner-setup service is responsible for initializing the volume with data that is shared between the `planner-agent` and the `planner-agent-collector`.
+are defined as quadlets. Quadlet configuration can be found in the [ignition template file](../data/ignition.template).
+The Agent containerfile can be found [here](../Containerfile.agent).
 
 ### planner-agent
-Planner-agent is a service that reports the status to the Agent service. The URL of the Agent service is configured in the file `$HOME/vol/config.yaml`, which is injected via ignition.
+Planner-agent is a service that reports the status to the Agent service. The URL of the Agent service is configured in the file `$HOME/.migration-planner/config/config.yaml`, which is injected via ignition.
 
-The Planner-agent contains a web application that is exposed via tcp port 3333. Once the user accesses the web application and enters the credentials of their vCenter, the `credentials.json` file is created on the shared volume and the `planner-agent-collector` container is spawned.
+The Planner-agent contains a web application that is exposed via tcp port 3333. Once the user accesses the web application and enters the credentials of their vCenter, the `credentials.json` file is created in the data directory and the `collector` goroutine is spawned, which fetches the vCenter data. The data is stored in `$HOME/.migration-planner/data/inventory.json`. Once `inventory.json` is created, the `planner-agent` service sends the data over to the Agent service.
 
 ### planner-agent-opa
 Planner-agent-opa is a service that re-uses the [forklift validation](https://github.com/kubev2v/forklift/blob/main/validation/README.adoc) container. The forklift validation container is responsible for vCenter data validation. When the `planner-agent-collector` fetches vCenter data, it's validated against the OPA server and the report is shared back to the Agent Service.
 
-### planner-agent-collector
-Planner-agent-collector service waits until the user enters the vCenter credentials in the `planner-agent` web application. Once the credentials are entered, the vCenter data is collected. The data is stored in `$HOME/vol/data/inventory.json`. Once `inventory.json` is created, the `planner-agent` service sends the data over to Agent service.
-
 ### podman-auto-update
 Podman auto update is responsible for updating the image of the containers in case there is a new image release. The default `podman-auto-update.timer` is used, which executes `podman-auto-update` every 24 hours.
 
@@ -32,36 +26,21 @@ Useful commands to troubleshoot the Agent VM. Note that all the containers are r
 $ podman ps
 ```
 
-### Checking the status of all our services
-```
-$ systemctl --user status planner-*
-```
-
-### Inspecting the shared volume
-A shared volume is created between containers, so that information can be shared between the `planner-agent-collector` and `planner-agent` containers.
-In order to export the data stored in the volume, find the mountpoint of the volume:
-```
-$ podman volume inspect planner.volume | jq .[0].Mountpoint
-```
-
-And then the relevant data can be explored, such as: `config.yaml`, `credentials.json`, `inventory.json`, etc.
+### Checking the status of planner-agent service
 ```
-$ ls /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data
-$ cat /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data/config.yaml
-$ cat /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data/data/credentials.json
-$ cat /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data/data/inventory.json
+$ systemctl --user status planner-agent
 ```
 
 ### Inspecting the host directory with data
-The ignition create a `vol` directory in `core` user home directory.
+The ignition creates a `.migration-planner` directory in the `core` user's home directory.
 This directory should contain all relevant data, so in order to find misconfiguration please search in this directory.
 ```
-$ ls -l vol
+$ ls -l .migration-planner
 ```
 
 ### Check logs of the services
 ```
-$ journalctl --user -f -u planner-*
+$ journalctl --user -f -u planner-agent
 ```
 
 ### Status is `Not connected` after VM is booted.
diff --git a/doc/deployment.md b/doc/deployment.md
index 951e4ab..e8a4e40 100644
--- a/doc/deployment.md
+++ b/doc/deployment.md
@@ -31,8 +31,6 @@ Agent images are defined in the ignition file. In order to modify the images of
 
 ```
 env:
-  - name: MIGRATION_PLANNER_COLLECTOR_IMAGE
-    value: quay.io/$USER/migration-planner-collector
   - name: MIGRATION_PLANNER_AGENT_IMAGE
     value: quay.io/$USER/migration-planner-agent
-```
\ No newline at end of file
+```
diff --git a/internal/agent/agent.go b/internal/agent/agent.go
index a716c12..2fad1f0 100644
--- a/internal/agent/agent.go
+++ b/internal/agent/agent.go
@@ -92,6 +92,9 @@ func (a *Agent) Run(ctx context.Context) error {
 	}
 	healthChecker.Start(healthCheckCh)
 
+	collector := NewCollector(a.log, a.config.DataDir)
+	collector.collect(ctx)
+
 	inventoryUpdater := NewInventoryUpdater(a.log, a.config, client)
 	inventoryUpdater.UpdateServiceWithInventory(ctx)
 
diff --git a/cmd/collector/main.go b/internal/agent/collector.go
similarity index 87%
rename from cmd/collector/main.go
rename to internal/agent/collector.go
index 8f22b7e..a73dcb7 100644
--- a/cmd/collector/main.go
+++ b/internal/agent/collector.go
@@ -1,16 +1,17 @@
-package main
+package agent
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"math"
 	"net/http"
 	"os"
 	"path/filepath"
 	"slices"
+	"sync"
 	"time"
 
 	api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
@@ -21,113 +22,128 @@ import (
 	libmodel "github.com/konveyor/forklift-controller/pkg/lib/inventory/model"
 	apiplanner "github.com/kubev2v/migration-planner/api/v1alpha1"
 	"github.com/kubev2v/migration-planner/internal/util"
+	"github.com/kubev2v/migration-planner/pkg/log"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type VCenterCreds struct {
-	Url      string `json:"url"`
-	Username string `json:"username"`
-	Password string `json:"password"`
+type Collector struct {
+	log     *log.PrefixLogger
+	dataDir string
+	once    sync.Once
 }
 
-func main() {
-	logger := log.New(os.Stdout, "*********** Collector: ", log.Ldate|log.Ltime|log.Lshortfile)
-	// Parse command-line arguments
-	if len(os.Args) < 3 {
-		fmt.Println("Usage: collector ")
-		os.Exit(1)
+func NewCollector(log *log.PrefixLogger, dataDir string) *Collector {
+	return &Collector{
+		log:     log,
+		dataDir: dataDir,
 	}
-	credsFile := os.Args[1]
-	outputFile := os.Args[2]
+}
+
+func (c *Collector) collect(ctx context.Context) {
+	c.once.Do(func() {
+		go func() {
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				default:
+					c.run()
+					return
+				}
+			}
+		}()
+	})
+}
 
-	logger.Println("Wait for credentials")
-	waitForFile(credsFile)
+func (c *Collector) run() {
+	credentialsFilePath := filepath.Join(c.dataDir, CredentialsFile)
+	c.log.Infof("Waiting for credentials")
+	waitForFile(credentialsFilePath)
 
-	logger.Println("Load credentials from file")
-	credsData, err := os.ReadFile(credsFile)
+	credsData, err := os.ReadFile(credentialsFilePath)
 	if err != nil {
-		fmt.Printf("Error reading credentials file: %v\n", err)
-		os.Exit(1)
+		c.log.Errorf("Error reading credentials file: %v\n", err)
+		return
 	}
 
-	var creds VCenterCreds
+	var creds Credentials
 	if err := json.Unmarshal(credsData, &creds); err != nil {
-		fmt.Printf("Error parsing credentials JSON: %v\n", err)
-		os.Exit(1)
+		c.log.Errorf("Error parsing credentials JSON: %v\n", err)
+		return
 	}
 
 	opaServer := util.GetEnv("OPA_SERVER", "127.0.0.1:8181")
 
-	logger.Println("Create Provider")
+	c.log.Infof("Create Provider")
 	provider := getProvider(creds)
 
-	logger.Println("Create Secret")
+	c.log.Infof("Create Secret")
 	secret := getSecret(creds)
 
-	logger.Println("Check if opaServer is responding")
+	c.log.Infof("Check if opaServer is responding")
 	resp, err := http.Get("http://" + opaServer + "/health")
 	if err != nil || resp.StatusCode != http.StatusOK {
-		fmt.Println("OPA server " + opaServer + " is not responding")
+		c.log.Errorf("OPA server %s is not responding", opaServer)
 		return
 	}
 	defer resp.Body.Close()
 
-	logger.Println("Create DB")
+	c.log.Infof("Create DB")
 	db, err := createDB(provider)
 	if err != nil {
-		fmt.Println("Error creating DB.", err)
+		c.log.Errorf("Error creating DB: %s", err)
 		return
 	}
 
-	logger.Println("vSphere collector")
+	c.log.Infof("vSphere collector")
 	collector, err := createCollector(db, provider, secret)
 	if err != nil {
-		fmt.Println("Error creating collector.", err)
+		c.log.Errorf("Error creating collector: %s", err)
 		return
 	}
 
 	defer collector.DB().Close(true)
 	defer collector.Shutdown()
 
-	logger.Println("List VMs")
+	c.log.Infof("List VMs")
 	vms := &[]vspheremodel.VM{}
 	err = collector.DB().List(vms, libmodel.FilterOptions{Detail: 1})
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error listing database: %s", err)
 		return
 	}
 
-	logger.Println("List Hosts")
+	c.log.Infof("List Hosts")
 	hosts := &[]vspheremodel.Host{}
 	err = collector.DB().List(hosts, libmodel.FilterOptions{Detail: 1})
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error listing database: %s", err)
 		return
 	}
 
-	logger.Println("List Clusters")
+	c.log.Infof("List Clusters")
 	clusters := &[]vspheremodel.Cluster{}
 	err = collector.DB().List(clusters, libmodel.FilterOptions{Detail: 1})
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error listing database: %s", err)
 		return
 	}
 
-	logger.Println("Create inventory")
+	c.log.Infof("Create inventory")
 	inv := createBasicInventoryObj(vms, collector, hosts, clusters)
 
-	logger.Println("Run the validation of VMs")
+	c.log.Infof("Run the validation of VMs")
 	vms, err = validation(vms, opaServer)
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error running validation: %s", err)
 		return
 	}
 
-	logger.Println("Fill the inventory object with more data")
+	c.log.Infof("Fill the inventory object with more data")
 	fillInventoryObjectWithMoreData(vms, inv)
 
-	logger.Println("Write the inventory to output file")
-	if err := createOuput(outputFile, inv); err != nil {
-		fmt.Println("Error writing output:", err)
+	c.log.Infof("Write the inventory to output file")
+	if err := createOuput(filepath.Join(c.dataDir, InventoryFile), inv); err != nil {
+		c.log.Errorf("Error writing output: %s", err)
 		return
 	}
 }
@@ -214,17 +230,17 @@ func createBasicInventoryObj(vms *[]vspheremodel.VM, collector *vsphere.Collecto
 	}
 }
 
-func getProvider(creds VCenterCreds) *api.Provider {
+func getProvider(creds Credentials) *api.Provider {
 	vsphereType := api.VSphere
 	return &api.Provider{
 		Spec: api.ProviderSpec{
-			URL:  creds.Url,
+			URL:  creds.URL,
 			Type: &vsphereType,
 		},
 	}
 }
 
-func getSecret(creds VCenterCreds) *core.Secret {
+func getSecret(creds Credentials) *core.Secret {
 	return &core.Secret{
 		ObjectMeta: meta.ObjectMeta{
 			Name: "vsphere-secret",
@@ -560,8 +576,3 @@ type VMResult struct {
 type VMValidation struct {
 	Result []VMResult `json:"result"`
 }
-
-type InventoryData struct {
-	Inventory apiplanner.Inventory `json:"inventory"`
-	Error     string               `json:"error"`
-}
diff --git a/internal/agent/inventory.go b/internal/agent/inventory.go
index dd28b1b..b9aefb4 100644
--- a/internal/agent/inventory.go
+++ b/internal/agent/inventory.go
@@ -28,8 +28,8 @@ type InventoryUpdater struct {
 }
 
 type InventoryData struct {
-	Inventory api.Inventory
-	Error     string
+	Inventory api.Inventory `json:"inventory"`
+	Error     string        `json:"error"`
 }
 
 func NewInventoryUpdater(log *log.PrefixLogger, config *Config, client client.Planner) *InventoryUpdater {
diff --git a/internal/image/ova.go b/internal/image/ova.go
index 519febe..e55b76b 100644
--- a/internal/image/ova.go
+++ b/internal/image/ova.go
@@ -29,11 +29,10 @@ type Ova struct {
 
 // IgnitionData defines modifiable fields in ignition config
 type IgnitionData struct {
-	SourceId                       string
-	SshKey                         string
-	PlannerService                 string
-	MigrationPlannerCollectorImage string
-	MigrationPlannerAgentImage     string
+	SourceId                   string
+	SshKey                     string
+	PlannerService             string
+	MigrationPlannerAgentImage string
 }
 
 type Image interface {
@@ -123,11 +122,10 @@ func writeOvf(tw *tar.Writer) error {
 
 func (o *Ova) generateIgnition() (string, error) {
 	ignData := IgnitionData{
-		SourceId:                       o.Id.String(),
-		SshKey:                         o.SshKey,
-		PlannerService:                 util.GetEnv("CONFIG_SERVER", "http://127.0.0.1:7443"),
-		MigrationPlannerCollectorImage: util.GetEnv("MIGRATION_PLANNER_COLLECTOR_IMAGE", "quay.io/kubev2v/migration-planner-collector"),
-		MigrationPlannerAgentImage:     util.GetEnv("MIGRATION_PLANNER_AGENT_IMAGE", "quay.io/kubev2v/migration-planner-agent"),
+		SourceId:                   o.Id.String(),
+		SshKey:                     o.SshKey,
+		PlannerService:             util.GetEnv("CONFIG_SERVER", "http://127.0.0.1:7443"),
+		MigrationPlannerAgentImage: util.GetEnv("MIGRATION_PLANNER_AGENT_IMAGE", "quay.io/kubev2v/migration-planner-agent"),
 	}
 
 	var buf bytes.Buffer
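
A note on the concurrency pattern this patch introduces: `Collector.collect` wraps the goroutine launch in a `sync.Once`, so repeated calls from `Agent.Run` can never start a second collection. Below is a minimal, self-contained sketch of that guard with generic names; it does not use the repository's types, and `onceCollector` and its methods are illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// onceCollector mimics the guard used by the planner-agent collector:
// no matter how many times Start is called, the background work runs once.
type onceCollector struct {
	once sync.Once
}

func (c *onceCollector) Start(ctx context.Context) {
	c.once.Do(func() {
		go func() {
			select {
			case <-ctx.Done():
				return // caller is shutting down; skip the collection
			default:
				fmt.Println("collecting inventory...") // stand-in for run()
			}
		}()
	})
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	c := &onceCollector{}
	c.Start(ctx)
	c.Start(ctx) // second call is a no-op; the Once has already fired

	time.Sleep(100 * time.Millisecond) // let the single goroutine finish
}
```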