diff --git a/go.mod b/go.mod index 3240d612d..2ffc61e11 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( k8s.io/api v0.29.1 k8s.io/apimachinery v0.29.1 k8s.io/client-go v0.29.1 + k8s.io/component-helpers v0.29.1 sigs.k8s.io/yaml v1.4.0 ) diff --git a/go.sum b/go.sum index 6a1966373..e16a259b9 100644 --- a/go.sum +++ b/go.sum @@ -57,6 +57,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -245,6 +247,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -514,6 +518,8 @@ k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/component-helpers v0.29.1 h1:54MMEDu6xeJmMtAKztsPwu0kJKr4+jCUzaEIn2UXRoc= +k8s.io/component-helpers v0.29.1/go.mod h1:+I7xz4kfUgxWAPJIVKrqe4ml4rb9UGpazlOmhXYo+cY= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= diff --git a/pkg/kapp/cmd/app/deploy.go b/pkg/kapp/cmd/app/deploy.go index 0eac63d6e..d930fd846 100644 --- a/pkg/kapp/cmd/app/deploy.go +++ b/pkg/kapp/cmd/app/deploy.go @@ -4,6 +4,7 @@ package app import ( + "context" "fmt" "io/fs" "os" @@ -27,6 +28,7 @@ import ( ctldiffui "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/diffui" "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/logger" ctllogs "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/logs" + "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/preflight" ctlres "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/resources" ctlresm "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/resourcesmisc" ) @@ -50,11 +52,13 @@ type DeployOptions struct { ResourceTypesFlags ResourceTypesFlags LabelFlags LabelFlags + PreflightChecks *preflight.Registry + 
FileSystem fs.FS } -func NewDeployOptions(ui ui.UI, depsFactory cmdcore.DepsFactory, logger logger.Logger) *DeployOptions { - return &DeployOptions{ui: ui, depsFactory: depsFactory, logger: logger} +func NewDeployOptions(ui ui.UI, depsFactory cmdcore.DepsFactory, logger logger.Logger, preflights *preflight.Registry) *DeployOptions { + return &DeployOptions{ui: ui, depsFactory: depsFactory, logger: logger, PreflightChecks: preflights} } func NewDeployCmd(o *DeployOptions, flagsFactory cmdcore.FlagsFactory) *cobra.Command { @@ -91,6 +95,7 @@ func NewDeployCmd(o *DeployOptions, flagsFactory cmdcore.FlagsFactory) *cobra.Co o.ResourceTypesFlags.Set(cmd) o.LabelFlags.Set(cmd) o.PrevAppFlags.Set(cmd) + o.PreflightChecks.AddFlags(cmd.Flags()) return cmd } @@ -194,6 +199,13 @@ func (o *DeployOptions) Run() error { return nil } + if o.PreflightChecks != nil { + err = o.PreflightChecks.Run(context.Background(), clusterChangesGraph) + if err != nil { + return fmt.Errorf("preflight checks failed: %w", err) + } + } + err = o.ui.AskForConfirmation() if err != nil { return err diff --git a/pkg/kapp/cmd/app/logs.go b/pkg/kapp/cmd/app/logs.go index 69f63876c..0b80ac008 100644 --- a/pkg/kapp/cmd/app/logs.go +++ b/pkg/kapp/cmd/app/logs.go @@ -74,7 +74,7 @@ func (o *LogsOptions) Run() error { supportObjs.IdentifiedResources.PodResources(labelSelector, nil), } - contFilter := func(pod corev1.Pod) []string { + contFilter := func(_ corev1.Pod) []string { return o.LogsFlags.ContainerNames } diff --git a/pkg/kapp/cmd/appgroup/deploy.go b/pkg/kapp/cmd/appgroup/deploy.go index 107b8bf45..3b55ee745 100644 --- a/pkg/kapp/cmd/appgroup/deploy.go +++ b/pkg/kapp/cmd/appgroup/deploy.go @@ -15,6 +15,7 @@ import ( cmdcore "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/cmd/core" cmdtools "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/cmd/tools" "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/logger" + "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/preflight" ) type DeployOptions struct { @@ -22,9 +23,10 @@ type DeployOptions struct { depsFactory cmdcore.DepsFactory logger logger.Logger - AppGroupFlags Flags - DeployFlags DeployFlags - AppFlags DeployAppFlags + AppGroupFlags Flags + DeployFlags DeployFlags + AppFlags DeployAppFlags + PreflightChecks *preflight.Registry } type DeployAppFlags struct { @@ -36,8 +38,8 @@ type DeployAppFlags struct { LabelFlags cmdapp.LabelFlags } -func NewDeployOptions(ui ui.UI, depsFactory cmdcore.DepsFactory, logger logger.Logger) *DeployOptions { - return &DeployOptions{ui: ui, depsFactory: depsFactory, logger: logger} +func NewDeployOptions(ui ui.UI, depsFactory cmdcore.DepsFactory, logger logger.Logger, preflights *preflight.Registry) *DeployOptions { + return &DeployOptions{ui: ui, depsFactory: depsFactory, logger: logger, PreflightChecks: preflights} } func NewDeployCmd(o *DeployOptions, flagsFactory cmdcore.FlagsFactory) *cobra.Command { @@ -56,6 +58,7 @@ func NewDeployCmd(o *DeployOptions, flagsFactory cmdcore.FlagsFactory) *cobra.Co o.AppFlags.DeleteApplyFlags.SetWithDefaults("delete", cmdapp.ApplyFlagsDeleteDefaults, cmd) o.AppFlags.DeployFlags.Set(cmd) o.AppFlags.LabelFlags.Set(cmd) + o.PreflightChecks.AddFlags(cmd.Flags()) return cmd } @@ -146,7 +149,7 @@ func (o *DeployOptions) deployApp(app appGroupApp) error { o.ui.PrintLinef("--- deploying app '%s' (namespace: %s) from %s", app.Name, o.appNamespace(), app.Path) - deployOpts := cmdapp.NewDeployOptions(o.ui, o.depsFactory, o.logger) + deployOpts := cmdapp.NewDeployOptions(o.ui, o.depsFactory, o.logger, o.PreflightChecks) 
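+	// configure the nested per-app deploy; it reuses the group's shared preflight registry, so a single --preflight flag applies to every app in the group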
deployOpts.AppFlags = cmdapp.Flags{ Name: app.Name, NamespaceFlags: o.AppGroupFlags.NamespaceFlags, diff --git a/pkg/kapp/cmd/core/deps_factory.go b/pkg/kapp/cmd/core/deps_factory.go index 1f4955bc7..5aeb7f642 100644 --- a/pkg/kapp/cmd/core/deps_factory.go +++ b/pkg/kapp/cmd/core/deps_factory.go @@ -9,15 +9,20 @@ import ( "sync" "github.com/cppforlife/go-cli-ui/ui" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" ) type DepsFactory interface { DynamicClient(opts DynamicClientOpts) (dynamic.Interface, error) CoreClient() (kubernetes.Interface, error) + RESTMapper() (meta.RESTMapper, error) ConfigureWarnings(warnings bool) } @@ -83,6 +88,25 @@ func (f *DepsFactoryImpl) CoreClient() (kubernetes.Interface, error) { return clientset, nil } +func (f *DepsFactoryImpl) RESTMapper() (meta.RESTMapper, error) { + config, err := f.configFactory.RESTConfig() + if err != nil { + return nil, err + } + + disc, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + + cachedDisc := memory.NewMemCacheClient(disc) + mapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDisc) + + f.printTarget(config) + + return mapper, nil +} + func (f *DepsFactoryImpl) ConfigureWarnings(warnings bool) { f.Warnings = warnings } diff --git a/pkg/kapp/cmd/kapp.go b/pkg/kapp/cmd/kapp.go index ced445651..049167623 100644 --- a/pkg/kapp/cmd/kapp.go +++ b/pkg/kapp/cmd/kapp.go @@ -17,6 +17,8 @@ import ( cmdsa "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/cmd/serviceaccount" cmdtools "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/cmd/tools" "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/logger" + "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/permissions" + "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/preflight" "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/version" ) @@ -32,23 +34,34 @@ type KappOptions struct { KubeconfigFlags cmdcore.KubeconfigFlags WarningFlags WarningFlags ProfilingFlags ProfilingFlags + + PreflightChecks *preflight.Registry } func NewKappOptions(ui *ui.ConfUI, configFactory cmdcore.ConfigFactory, - depsFactory cmdcore.DepsFactory) *KappOptions { + depsFactory cmdcore.DepsFactory, preflights *preflight.Registry) *KappOptions { return &KappOptions{ui: ui, logger: logger.NewUILogger(ui), - configFactory: configFactory, depsFactory: depsFactory} + configFactory: configFactory, depsFactory: depsFactory, PreflightChecks: preflights} } func NewDefaultKappCmd(ui *ui.ConfUI) *cobra.Command { configFactory := cmdcore.NewConfigFactoryImpl() depsFactory := cmdcore.NewDepsFactoryImpl(configFactory, ui) - options := NewKappOptions(ui, configFactory, depsFactory) + preflights := defaultKappPreflightRegistry(depsFactory) + options := NewKappOptions(ui, configFactory, depsFactory, preflights) flagsFactory := cmdcore.NewFlagsFactory(configFactory, depsFactory) return NewKappCmd(options, flagsFactory) } +func defaultKappPreflightRegistry(depsFactory cmdcore.DepsFactory) *preflight.Registry { + registry := preflight.NewRegistry(map[string]preflight.Check{ + "PermissionValidation": permissions.NewPreflight(depsFactory, false), + }) + + return registry +} + func NewKappCmd(o *KappOptions, flagsFactory cmdcore.FlagsFactory) *cobra.Command { cmd := &cobra.Command{ Use: "kapp", @@ -95,7 +108,7 @@ func NewKappCmd(o *KappOptions, flagsFactory cmdcore.FlagsFactory) *cobra.Comman 
cmd.AddCommand(cmdapp.NewListCmd(cmdapp.NewListOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) cmd.AddCommand(cmdapp.NewInspectCmd(cmdapp.NewInspectOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) - cmd.AddCommand(cmdapp.NewDeployCmd(cmdapp.NewDeployOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) + cmd.AddCommand(cmdapp.NewDeployCmd(cmdapp.NewDeployOptions(o.ui, o.depsFactory, o.logger, o.PreflightChecks), flagsFactory)) cmd.AddCommand(cmdapp.NewDeployConfigCmd(cmdapp.NewDeployConfigOptions(o.ui, o.depsFactory), flagsFactory)) cmd.AddCommand(cmdapp.NewDeleteCmd(cmdapp.NewDeleteOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) cmd.AddCommand(cmdapp.NewRenameCmd(cmdapp.NewRenameOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) @@ -103,7 +116,7 @@ func NewKappCmd(o *KappOptions, flagsFactory cmdcore.FlagsFactory) *cobra.Comman cmd.AddCommand(cmdapp.NewLabelCmd(cmdapp.NewLabelOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) agCmd := cmdag.NewCmd() - agCmd.AddCommand(cmdag.NewDeployCmd(cmdag.NewDeployOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) + agCmd.AddCommand(cmdag.NewDeployCmd(cmdag.NewDeployOptions(o.ui, o.depsFactory, o.logger, o.PreflightChecks), flagsFactory)) agCmd.AddCommand(cmdag.NewDeleteCmd(cmdag.NewDeleteOptions(o.ui, o.depsFactory, o.logger), flagsFactory)) cmd.AddCommand(agCmd) diff --git a/pkg/kapp/permissions/basic.go b/pkg/kapp/permissions/basic.go new file mode 100644 index 000000000..8cc66a5ff --- /dev/null +++ b/pkg/kapp/permissions/basic.go @@ -0,0 +1,47 @@ +// Copyright 2024 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package permissions + +import ( + "context" + + ctlres "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/resources" + authv1 "k8s.io/api/authorization/v1" + "k8s.io/apimachinery/pkg/api/meta" + authv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" +) + +// BasicValidator is a basic validator useful for +// validating basic CRUD permissions for resources. It has no knowledge +// of how to handle permission evaluation for specific +// GroupVersionKinds +type BasicValidator struct { + ssarClient authv1client.SelfSubjectAccessReviewInterface + mapper meta.RESTMapper +} + +var _ Validator = (*BasicValidator)(nil) + +func NewBasicValidator(ssarClient authv1client.SelfSubjectAccessReviewInterface, mapper meta.RESTMapper) *BasicValidator { + return &BasicValidator{ + ssarClient: ssarClient, + mapper: mapper, + } +} + +func (bv *BasicValidator) Validate(ctx context.Context, res ctlres.Resource, verb string) error { + mapping, err := bv.mapper.RESTMapping(res.GroupKind(), res.GroupVersion().Version) + if err != nil { + return err + } + + return ValidatePermissions(ctx, bv.ssarClient, &authv1.ResourceAttributes{ + Group: mapping.Resource.Group, + Version: mapping.Resource.Version, + Resource: mapping.Resource.Resource, + Namespace: res.Namespace(), + Name: res.Name(), + Verb: verb, + }) +} diff --git a/pkg/kapp/permissions/binding.go b/pkg/kapp/permissions/binding.go new file mode 100644 index 000000000..823715e73 --- /dev/null +++ b/pkg/kapp/permissions/binding.go @@ -0,0 +1,130 @@ +// Copyright 2024 VMware, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package permissions + +import ( + "context" + "errors" + "fmt" + + ctlres "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/resources" + authv1 "k8s.io/api/authorization/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + authv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" + "k8s.io/component-helpers/auth/rbac/validation" +) + +// BindingValidator is a Validator implementation +// for validating permissions required to CRUD +// Kubernetes (Cluster)RoleBinding resources +type BindingValidator struct { + ssarClient authv1client.SelfSubjectAccessReviewInterface + rbacClient rbacv1client.RbacV1Interface + mapper meta.RESTMapper +} + +var _ Validator = (*BindingValidator)(nil) + +func NewBindingValidator(ssarClient authv1client.SelfSubjectAccessReviewInterface, rbacClient rbacv1client.RbacV1Interface, mapper meta.RESTMapper) *BindingValidator { + return &BindingValidator{ + rbacClient: rbacClient, + ssarClient: ssarClient, + mapper: mapper, + } +} + +func (bv *BindingValidator) Validate(ctx context.Context, res ctlres.Resource, verb string) error { + mapping, err := bv.mapper.RESTMapping(res.GroupKind(), res.GroupVersion().Version) + if err != nil { + return err + } + + switch verb { + case "create", "update": + // do early validation on create / update to see if a user has + // the "bind" permissions which allows them to perform + // privilege escalation and create any (Cluster)Role + err := ValidatePermissions(ctx, bv.ssarClient, &authv1.ResourceAttributes{ + Group: mapping.Resource.Group, + Version: mapping.Resource.Version, + Resource: mapping.Resource.Resource, + Namespace: res.Namespace(), + Name: res.Name(), + Verb: "bind", + }) + // if the error is nil, the user has the "bind" permissions so we should + // return early. Otherwise, they don't have the "bind" permissions and + // we need to continue our validations. + if err == nil { + return nil + } + + // Check if user has permissions to even create/update the resource + err = ValidatePermissions(ctx, bv.ssarClient, &authv1.ResourceAttributes{ + Group: mapping.Resource.Group, + Version: mapping.Resource.Version, + Resource: mapping.Resource.Resource, + Namespace: res.Namespace(), + Name: res.Name(), + Verb: verb, + }) + if err != nil { + return err + } + + // If user doesn't have "bind" permissions then they can + // only create (Cluster)RolesBindings where the referenced (Cluster)Role + // contains permissions that they already have. + // Loop through all the defined policies and determine + // if a user has the appropriate permissions + rules, err := RulesForBinding(ctx, bv.rbacClient, res) + if err != nil { + return fmt.Errorf("fetching rules for binding: %w", err) + } + + errorSet := []error{} + for _, rule := range rules { + // breakdown the rules into the subset of + // rules such that the subrules contain + // at most one verb, one group, and one resource + // source at: https://github.com/kubernetes/component-helpers/blob/9a5801419916272fc9cec7a7822ed525721b99d3/auth/rbac/validation/policy_comparator.go#L56-L84 + var subrules []rbacv1.PolicyRule = validation.BreakdownRule(rule) + for _, subrule := range subrules { + // TODO: validation checks on all subrule values? 
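+				// only the first resource name (if any) is checked here; an empty ResourceNames list means the rule applies to all resource names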
+ resourceName := "" + if len(subrule.ResourceNames) > 0 { + resourceName = subrule.ResourceNames[0] + } + err := ValidatePermissions(ctx, bv.ssarClient, &authv1.ResourceAttributes{ + Group: subrule.APIGroups[0], + Resource: subrule.Resources[0], + Namespace: res.Namespace(), + Name: resourceName, + Verb: subrule.Verbs[0], + }) + if err != nil { + errorSet = append(errorSet, err) + } + } + } + + if len(errorSet) > 0 { + baseErr := fmt.Errorf("potential privilege escalation, not permitted to %q %s", verb, res.GroupVersion().WithKind(res.Kind()).String()) + return errors.Join(append([]error{baseErr}, errorSet...)...) + } + default: + return ValidatePermissions(ctx, bv.ssarClient, &authv1.ResourceAttributes{ + Group: mapping.Resource.Group, + Version: mapping.Resource.Version, + Resource: mapping.Resource.Resource, + Namespace: res.Namespace(), + Name: res.Name(), + Verb: verb, + }) + } + + return nil +} diff --git a/pkg/kapp/permissions/composite.go b/pkg/kapp/permissions/composite.go new file mode 100644 index 000000000..73451f9e4 --- /dev/null +++ b/pkg/kapp/permissions/composite.go @@ -0,0 +1,36 @@ +// Copyright 2024 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package permissions + +import ( + "context" + + ctlres "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/resources" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ Validator = (*CompositeValidator)(nil) + +// CompositeValidator implements Validator and is used +// for composing multiple validators into a single validator +// that can handle specifying unique validators for different +// GroupVersionKinds +type CompositeValidator struct { + validators map[schema.GroupVersionKind]Validator + defaultValidator Validator +} + +func NewCompositeValidator(defaultValidator Validator, validators map[schema.GroupVersionKind]Validator) *CompositeValidator { + return &CompositeValidator{ + validators: validators, + defaultValidator: defaultValidator, + } +} + +func (cv *CompositeValidator) Validate(ctx context.Context, res ctlres.Resource, verb string) error { + if validator, ok := cv.validators[res.GroupVersion().WithKind(res.Kind())]; ok { + return validator.Validate(ctx, res, verb) + } + return cv.defaultValidator.Validate(ctx, res, verb) +} diff --git a/pkg/kapp/permissions/preflight.go b/pkg/kapp/permissions/preflight.go new file mode 100644 index 000000000..3783fad97 --- /dev/null +++ b/pkg/kapp/permissions/preflight.go @@ -0,0 +1,89 @@ +// Copyright 2024 VMware, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package permissions + +import ( + "context" + "errors" + + cmdcore "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/cmd/core" + ctldgraph "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/diffgraph" + "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/preflight" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Preflight is an implementation of preflight.Check +// to make it easier to add permission validation +// as a preflight check +type Preflight struct { + depsFactory cmdcore.DepsFactory + enabled bool +} + +func NewPreflight(depsFactory cmdcore.DepsFactory, enabled bool) preflight.Check { + return &Preflight{ + depsFactory: depsFactory, + enabled: enabled, + } +} + +func (p *Preflight) Enabled() bool { + return p.enabled +} + +func (p *Preflight) SetEnabled(enabled bool) { + p.enabled = enabled +} + +func (p *Preflight) Run(ctx context.Context, changeGraph *ctldgraph.ChangeGraph) error { + client, err := p.depsFactory.CoreClient() + if err != nil { + return err + } + + mapper, err := p.depsFactory.RESTMapper() + if err != nil { + return err + } + + roleValidator := NewRoleValidator(client.AuthorizationV1().SelfSubjectAccessReviews(), mapper) + bindingValidator := NewBindingValidator(client.AuthorizationV1().SelfSubjectAccessReviews(), client.RbacV1(), mapper) + basicValidator := NewBasicValidator(client.AuthorizationV1().SelfSubjectAccessReviews(), mapper) + + validator := NewCompositeValidator(basicValidator, map[schema.GroupVersionKind]Validator{ + rbacv1.SchemeGroupVersion.WithKind("Role"): roleValidator, + rbacv1.SchemeGroupVersion.WithKind("ClusterRole"): roleValidator, + rbacv1.SchemeGroupVersion.WithKind("RoleBinding"): bindingValidator, + rbacv1.SchemeGroupVersion.WithKind("ClusterRoleBinding"): bindingValidator, + }) + + errorSet := []error{} + for _, change := range changeGraph.All() { + switch change.Change.Op() { + case ctldgraph.ActualChangeOpDelete: + err = validator.Validate(ctx, change.Change.Resource(), "delete") + if err != nil { + errorSet = append(errorSet, err) + } + case ctldgraph.ActualChangeOpUpsert: + // Check both create and update permissions + err = validator.Validate(ctx, change.Change.Resource(), "create") + if err != nil { + errorSet = append(errorSet, err) + } + + err = validator.Validate(ctx, change.Change.Resource(), "update") + if err != nil { + errorSet = append(errorSet, err) + } + } + } + + if len(errorSet) > 0 { + return errors.Join(errorSet...) + } + + return nil +} diff --git a/pkg/kapp/permissions/role.go b/pkg/kapp/permissions/role.go new file mode 100644 index 000000000..7f7e5ba9a --- /dev/null +++ b/pkg/kapp/permissions/role.go @@ -0,0 +1,124 @@ +// Copyright 2024 VMware, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package permissions + +import ( + "context" + "errors" + "fmt" + + ctlres "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/resources" + authv1 "k8s.io/api/authorization/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + authv1client "k8s.io/client-go/kubernetes/typed/authorization/v1" + "k8s.io/component-helpers/auth/rbac/validation" +) + +// RoleValidator is a Validator implementation +// for validating permissions required to CRUD +// Kubernetes (Cluster)Role resources +type RoleValidator struct { + ssarClient authv1client.SelfSubjectAccessReviewInterface + mapper meta.RESTMapper +} + +var _ Validator = (*RoleValidator)(nil) + +func NewRoleValidator(ssarClient authv1client.SelfSubjectAccessReviewInterface, mapper meta.RESTMapper) *RoleValidator { + return &RoleValidator{ + ssarClient: ssarClient, + mapper: mapper, + } +} + +func (rv *RoleValidator) Validate(ctx context.Context, res ctlres.Resource, verb string) error { + mapping, err := rv.mapper.RESTMapping(res.GroupKind(), res.GroupVersion().Version) + if err != nil { + return err + } + + switch verb { + case "create", "update": + // do early validation on create / update to see if a user has + // the "escalate" permissions which allows them to perform + // privilege escalation and create any (Cluster)Role + err := ValidatePermissions(ctx, rv.ssarClient, &authv1.ResourceAttributes{ + Group: mapping.Resource.Group, + Version: mapping.Resource.Version, + Resource: mapping.Resource.Resource, + Namespace: res.Namespace(), + Name: res.Name(), + Verb: "escalate", + }) + // if the error is nil, the user has the "escalate" permissions so we should + // return early. Otherwise, they don't have the "escalate" permissions and + // we need to continue our validations. + if err == nil { + return nil + } + + // Check if user has permissions to even create/update the resource + err = ValidatePermissions(ctx, rv.ssarClient, &authv1.ResourceAttributes{ + Group: mapping.Resource.Group, + Version: mapping.Resource.Version, + Resource: mapping.Resource.Resource, + Namespace: res.Namespace(), + Name: res.Name(), + Verb: verb, + }) + if err != nil { + return err + } + // If user doesn't have "escalate" permissions then they can + // only create (Cluster)Roles that contain permissions they already have. 
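+		// (this mirrors the RBAC escalation-prevention check the Kubernetes API server itself performs on Role writes)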
+		// Loop through all the defined policies and determine
+		// if a user has the appropriate permissions
+		rules, err := RulesForRole(res)
+		if err != nil {
+			return fmt.Errorf("parsing rules for role: %w", err)
+		}
+
+		errorSet := []error{}
+		for _, rule := range rules {
+			// breakdown the rules into the subset of
+			// rules such that the subrules contain
+			// at most one verb, one group, and one resource
+			// source at: https://github.com/kubernetes/component-helpers/blob/9a5801419916272fc9cec7a7822ed525721b99d3/auth/rbac/validation/policy_comparator.go#L56-L84
+			var subrules []rbacv1.PolicyRule = validation.BreakdownRule(rule)
+			for _, subrule := range subrules {
+				resourceName := ""
+				if len(subrule.ResourceNames) > 0 {
+					resourceName = subrule.ResourceNames[0]
+				}
+				err := ValidatePermissions(ctx, rv.ssarClient, &authv1.ResourceAttributes{
+					Group:     subrule.APIGroups[0],
+					Resource:  subrule.Resources[0],
+					Namespace: res.Namespace(),
+					Name:      resourceName,
+					Verb:      subrule.Verbs[0],
+				})
+				if err != nil {
+					errorSet = append(errorSet, err)
+				}
+			}
+		}
+
+		if len(errorSet) > 0 {
+			baseErr := fmt.Errorf("potential privilege escalation, not permitted to %q %s", verb, res.GroupVersion().WithKind(res.Kind()).String())
+			return errors.Join(append([]error{baseErr}, errorSet...)...)
+		}
+	default:
+		return ValidatePermissions(ctx, rv.ssarClient, &authv1.ResourceAttributes{
+			Group:     mapping.Resource.Group,
+			Version:   mapping.Resource.Version,
+			Resource:  mapping.Resource.Resource,
+			Namespace: res.Namespace(),
+			Name:      res.Name(),
+			Verb:      verb,
+		})
+	}
+
+	return nil
+}
diff --git a/pkg/kapp/permissions/validator.go b/pkg/kapp/permissions/validator.go
new file mode 100644
index 000000000..1d3d611cc
--- /dev/null
+++ b/pkg/kapp/permissions/validator.go
@@ -0,0 +1,154 @@
+// Copyright 2024 VMware, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+package permissions
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	ctlres "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/resources"
+	authv1 "k8s.io/api/authorization/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	authv1client "k8s.io/client-go/kubernetes/typed/authorization/v1"
+	rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1"
+)
+
+type Validator interface {
+	Validate(context.Context, ctlres.Resource, string) error
+}
+
+// ValidatePermissions takes in all the parameters necessary to validate permissions using a
+// SelfSubjectAccessReview. It returns an error if the SelfSubjectAccessReview indicates that
+// the permissions are not present or are unable to be determined. A nil error is returned if
+// the SelfSubjectAccessReview indicates that the permissions are present.
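+// The check is evaluated server-side, so it reflects RBAC as well as any other configured authorizers.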
+// TODO: Look into using SelfSubjectRulesReview instead of SelfSubjectAccessReview
+func ValidatePermissions(ctx context.Context, ssarClient authv1client.SelfSubjectAccessReviewInterface, resourceAttributes *authv1.ResourceAttributes) error {
+	ssar := &authv1.SelfSubjectAccessReview{
+		Spec: authv1.SelfSubjectAccessReviewSpec{
+			ResourceAttributes: resourceAttributes,
+		},
+	}
+
+	retSsar, err := ssarClient.Create(ctx, ssar, v1.CreateOptions{})
+	if err != nil {
+		return err
+	}
+
+	if retSsar == nil {
+		return errors.New("unable to validate permissions: returned SelfSubjectAccessReview is nil")
+	}
+
+	if retSsar.Status.EvaluationError != "" {
+		return fmt.Errorf("unable to validate permissions: %s", retSsar.Status.EvaluationError)
+	}
+
+	if !retSsar.Status.Allowed {
+		gvr := schema.GroupVersionResource{
+			Group:    resourceAttributes.Group,
+			Version:  resourceAttributes.Version,
+			Resource: resourceAttributes.Resource,
+		}
+		return fmt.Errorf("not permitted to %q %s",
+			resourceAttributes.Verb,
+			gvr.String())
+	}
+
+	return nil
+}
+
+// RulesForRole will return a slice of rbacv1.PolicyRule objects
+// that are representative of a provided (Cluster)Role's rules.
+// It returns an error if one occurs during the process of fetching this
+// information or if it is unable to determine the kind of role this is
+func RulesForRole(res ctlres.Resource) ([]rbacv1.PolicyRule, error) {
+	switch res.Kind() {
+	case "Role":
+		role := &rbacv1.Role{}
+		err := res.AsTypedObj(role)
+		if err != nil {
+			return nil, fmt.Errorf("converting resource to typed Role object: %w", err)
+		}
+
+		return role.Rules, nil
+
+	case "ClusterRole":
+		role := &rbacv1.ClusterRole{}
+		err := res.AsTypedObj(role)
+		if err != nil {
+			return nil, fmt.Errorf("converting resource to typed ClusterRole object: %w", err)
+		}
+
+		return role.Rules, nil
+	}
+
+	return nil, fmt.Errorf("unknown role kind %q", res.Kind())
+}
+
+// RulesForBinding will return a slice of rbacv1.PolicyRule objects
+// that are representative of the (Cluster)Role rules that a (Cluster)RoleBinding
+// references. It returns an error if one occurs during the process of fetching this
+// information or if it is unable to determine the kind of binding this is
+func RulesForBinding(ctx context.Context, rbacClient rbacv1client.RbacV1Interface, res ctlres.Resource) ([]rbacv1.PolicyRule, error) {
+	switch res.Kind() {
+	case "RoleBinding":
+		roleBinding := &rbacv1.RoleBinding{}
+		err := res.AsTypedObj(roleBinding)
+		if err != nil {
+			return nil, fmt.Errorf("converting resource to typed RoleBinding object: %w", err)
+		}
+
+		return RulesForRoleBinding(ctx, rbacClient, roleBinding)
+	case "ClusterRoleBinding":
+		roleBinding := &rbacv1.ClusterRoleBinding{}
+		err := res.AsTypedObj(roleBinding)
+		if err != nil {
+			return nil, fmt.Errorf("converting resource to typed ClusterRoleBinding object: %w", err)
+		}
+
+		return RulesForClusterRoleBinding(ctx, rbacClient, roleBinding)
+	}
+
+	return nil, fmt.Errorf("unknown binding kind %q", res.Kind())
+}
+
+// RulesForRoleBinding will return a slice of rbacv1.PolicyRule objects
+// that are representative of the (Cluster)Role rules that a RoleBinding
+// references. It returns an error if one occurs during the process of fetching this
+// information.
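+// The binding's RoleRef may reference either a ClusterRole or a Role in the binding's namespace.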
+func RulesForRoleBinding(ctx context.Context, rbacClient rbacv1client.RbacV1Interface, rb *rbacv1.RoleBinding) ([]rbacv1.PolicyRule, error) { + switch rb.RoleRef.Kind { + case "ClusterRole": + role, err := rbacClient.ClusterRoles().Get(ctx, rb.RoleRef.Name, v1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("fetching ClusterRole %q for RoleBinding %q: %w", rb.RoleRef.Name, rb.Name, err) + } + + return role.Rules, nil + case "Role": + role, err := rbacClient.Roles(rb.Namespace).Get(ctx, rb.RoleRef.Name, v1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("fetching Role %q for RoleBinding %q: %w", rb.RoleRef.Name, rb.Name, err) + } + + return role.Rules, nil + } + + return nil, fmt.Errorf("unknown role reference kind: %q", rb.RoleRef.Kind) +} + +// RulesForClusterRoleBinding will return a slice of rbacv1.PolicyRule objects +// that are representative of the ClusterRole rules that a ClusterRoleBinding +// references. It returns an error if one occurs during the process of fetching this +// information. +func RulesForClusterRoleBinding(ctx context.Context, crGetter rbacv1client.ClusterRolesGetter, crb *rbacv1.ClusterRoleBinding) ([]rbacv1.PolicyRule, error) { + role, err := crGetter.ClusterRoles().Get(ctx, crb.RoleRef.Name, v1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("fetching ClusterRole %q for ClusterRoleBinding %q: %w", crb.RoleRef.Name, crb.Name, err) + } + + return role.Rules, nil +} diff --git a/pkg/kapp/preflight/check.go b/pkg/kapp/preflight/check.go new file mode 100644 index 000000000..fedf2ec32 --- /dev/null +++ b/pkg/kapp/preflight/check.go @@ -0,0 +1,41 @@ +// Copyright 2024 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package preflight + +import ( + "context" + ctldgraph "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/diffgraph" +) + +type CheckFunc func(context.Context, *ctldgraph.ChangeGraph) error + +type Check interface { + Enabled() bool + SetEnabled(bool) + Run(context.Context, *ctldgraph.ChangeGraph) error +} + +type checkImpl struct { + enabled bool + checkFunc CheckFunc +} + +func NewCheck(cf CheckFunc, enabled bool) Check { + return &checkImpl{ + enabled: enabled, + checkFunc: cf, + } +} + +func (cf *checkImpl) Enabled() bool { + return cf.enabled +} + +func (cf *checkImpl) SetEnabled(enabled bool) { + cf.enabled = enabled +} + +func (cf *checkImpl) Run(ctx context.Context, changeGraph *ctldgraph.ChangeGraph) error { + return cf.checkFunc(ctx, changeGraph) +} diff --git a/pkg/kapp/preflight/registry.go b/pkg/kapp/preflight/registry.go new file mode 100644 index 000000000..607944f8d --- /dev/null +++ b/pkg/kapp/preflight/registry.go @@ -0,0 +1,120 @@ +// Copyright 2024 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package preflight + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/spf13/pflag" + ctldgraph "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/diffgraph" +) + +const preflightFlag = "preflight" + +// Registry is a collection of preflight checks +type Registry struct { + known map[string]Check +} + +// NewRegistry will return a new *Registry with the +// provided set of preflight checks added to the registry +func NewRegistry(checks map[string]Check) *Registry { + registry := &Registry{} + for name, check := range checks { + registry.AddCheck(name, check) + } + return registry +} + +// String returns a string representation of the +// preflight checks. It follows the format: +// CheckName={true||false},... 
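+// for example: "PermissionValidation=true"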
+// This method is needed so Registry implements +// the pflag.Value interface +func (c *Registry) String() string { + defaults := []string{} + for k, v := range c.known { + defaults = append(defaults, fmt.Sprintf("%s=%v", k, v.Enabled())) + } + return strings.Join(defaults, ",") +} + +// Type returns a string representing the type +// of the Registry. It is needed to implement the +// pflag.Value interface +func (c *Registry) Type() string { + return fmt.Sprintf("%T", c) +} + +// Set takes in a string in the format of +// CheckName={true||false},... +// and sets the specified preflight check +// as enabled if true, disabled if false +// Returns an error if there is a problem +// parsing the preflight checks +func (c *Registry) Set(s string) error { + if c.known == nil { + return nil + } + + mappings := strings.Split(s, ",") + for _, mapping := range mappings { + set := strings.SplitN(mapping, "=", 2) + if len(set) != 2 { + return fmt.Errorf("unable to parse check definition %q, too many '='. Must follow the format check={true||false}", mapping) + } + key, value := set[0], set[1] + + if _, ok := c.known[key]; !ok { + return fmt.Errorf("unknown preflight check %q specified", key) + } + + enabled, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("unable to parse boolean representation of %q: %w", mapping, err) + } + c.known[key].SetEnabled(enabled) + } + return nil +} + +// AddFlags adds the --preflight flag to a +// pflag.FlagSet and configures the preflight +// checks in the registry based on the user provided +// values. If no values are provided by a user the +// default values are used. +func (c *Registry) AddFlags(flags *pflag.FlagSet) { + knownChecks := []string{} + for name := range c.known { + knownChecks = append(knownChecks, name) + } + flags.Var(c, preflightFlag, fmt.Sprintf("preflight checks to run. Available preflight checks are [%s]", strings.Join(knownChecks, ","))) +} + +// AddCheck adds a new preflight check to the registry. +// The name provided will map to the provided Check. +func (c *Registry) AddCheck(name string, check Check) { + if c.known == nil { + c.known = make(map[string]Check) + } + c.known[name] = check +} + +// Run will execute any enabled preflight checks. The provided +// Context and ChangeGraph will be passed to the preflight checks +// that are being executed. +func (c *Registry) Run(ctx context.Context, cg *ctldgraph.ChangeGraph) error { + for name, check := range c.known { + if check.Enabled() { + err := check.Run(ctx, cg) + if err != nil { + return fmt.Errorf("running preflight check %q: %w", name, err) + } + } + } + return nil +} diff --git a/pkg/kapp/preflight/registry_test.go b/pkg/kapp/preflight/registry_test.go new file mode 100644 index 000000000..9dc1cf2fb --- /dev/null +++ b/pkg/kapp/preflight/registry_test.go @@ -0,0 +1,118 @@ +// Copyright 2024 VMware, Inc. 
+// SPDX-License-Identifier: Apache-2.0 +package preflight + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/diffgraph" +) + +func TestRegistrySet(t *testing.T) { + testCases := []struct { + name string + preflights string + registry *Registry + shouldErr bool + }{ + { + name: "no preflight checks registered, parsing skipped, any value can be provided", + preflights: "someCheck=true", + registry: &Registry{}, + }, + { + name: "preflight checks registered, invalid check format in flag, error returned", + preflights: "some=check=something=true", + registry: &Registry{ + known: map[string]Check{ + "some": nil, + }, + }, + shouldErr: true, + }, + { + name: "preflight checks registered, unknown preflight check specified, error returned", + preflights: "nonexistent=true", + registry: &Registry{ + known: map[string]Check{ + "exists": nil, + }, + }, + shouldErr: true, + }, + { + name: "preflight checks registered, known check specified, non-boolean value provided, error returned", + preflights: "someCheck=enabled", + registry: &Registry{ + known: map[string]Check{ + "someCheck": nil, + }, + }, + shouldErr: true, + }, + { + name: "preflight checks registered, valid input, no error returned", + preflights: "someCheck=true", + registry: &Registry{ + known: map[string]Check{ + "someCheck": NewCheck(func(_ context.Context, _ *diffgraph.ChangeGraph) error { return nil }, true), + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.registry.Set(tc.preflights) + require.Equal(t, tc.shouldErr, err != nil) + }) + } +} + +func TestRegistryRun(t *testing.T) { + testCases := []struct { + name string + registry *Registry + shouldErr bool + }{ + { + name: "no preflight checks registered, no error returned", + registry: &Registry{}, + }, + { + name: "preflight checks registered, disabled checks don't run", + registry: &Registry{ + known: map[string]Check{ + "disabledCheck": NewCheck(func(_ context.Context, _ *diffgraph.ChangeGraph) error { return errors.New("should be disabled") }, false), + }, + }, + }, + { + name: "preflight checks registered, enabled check returns an error, error returned", + registry: &Registry{ + known: map[string]Check{ + "errorCheck": NewCheck(func(_ context.Context, _ *diffgraph.ChangeGraph) error { return errors.New("error") }, true), + }, + }, + shouldErr: true, + }, + { + name: "preflight checks registered, enabled checks successful, no error returned", + registry: &Registry{ + known: map[string]Check{ + "someCheck": NewCheck(func(_ context.Context, _ *diffgraph.ChangeGraph) error { return nil }, true), + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := tc.registry.Run(nil, nil) + require.Equal(t, tc.shouldErr, err != nil) + }) + } +} diff --git a/test/e2e/deploy_filesystem_test.go b/test/e2e/deploy_filesystem_test.go index 703e4e703..8bc5c5ba3 100644 --- a/test/e2e/deploy_filesystem_test.go +++ b/test/e2e/deploy_filesystem_test.go @@ -21,6 +21,7 @@ import ( "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/cmd/app" "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/cmd/core" "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/logger" + "github.com/vmware-tanzu/carvel-kapp/pkg/kapp/preflight" ) func TestDeployFilesystem(t *testing.T) { @@ -55,7 +56,7 @@ func TestDeployFilesystem(t *testing.T) { depsFactory := core.NewDepsFactoryImpl(configFactory, theUI) log := logger.NewUILogger(theUI) - deployOptions := 
app.NewDeployOptions(theUI, depsFactory, log) + deployOptions := app.NewDeployOptions(theUI, depsFactory, log, &preflight.Registry{}) deployOptions.AppFlags.NamespaceFlags.Name = env.Namespace deployOptions.AppFlags.Name = appName deployOptions.FileFlags.Files = []string{ diff --git a/test/e2e/preflight_permission_validation_escalation_test.go b/test/e2e/preflight_permission_validation_escalation_test.go new file mode 100644 index 000000000..ef34474fd --- /dev/null +++ b/test/e2e/preflight_permission_validation_escalation_test.go @@ -0,0 +1,137 @@ +// Copyright 2024 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "fmt" + "strings" + "testing" +) + +func TestPreflightPermissionValidationEscalation(t *testing.T) { + env := BuildEnv(t) + logger := Logger{} + kapp := Kapp{t, env.Namespace, env.KappBinaryPath, logger} + kubectl := Kubectl{t, env.Namespace, logger} + + testName := "preflight-permission-validation-escalation" + + base := ` +--- +apiVersion: v1 +kind: Namespace +metadata: + name: __test-name__ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scoped-sa + namespace: __ns__ +--- +apiVersion: v1 +kind: Secret +metadata: + name: scoped-sa + namespace: __ns__ + annotations: + kubernetes.io/service-account.name: scoped-sa +type: kubernetes.io/service-account-token +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["list"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings", "clusterroles", "clusterrolebindings"] + verbs: ["get", "list", "create", "update", "delete", "escalate", "bind"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +subjects: +- kind: ServiceAccount + name: scoped-sa + namespace: __ns__ +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: __test-name__ +` + + base = strings.ReplaceAll(base, "__test-name__", testName) + base = strings.ReplaceAll(base, "__ns__", env.Namespace) + baseName := "preflight-permission-validation-base-app" + appName := "preflight-permission-validation-app" + scopedContext := "scoped-context" + scopedUser := "scoped-user" + + cleanUp := func() { + kapp.Run([]string{"delete", "-a", baseName}) + kapp.Run([]string{"delete", "-a", appName}) + RemoveClusterResource(t, "ns", testName, "", kubectl) + } + cleanUp() + defer cleanUp() + + kapp.RunWithOpts([]string{"deploy", "-a", baseName, "-f", "-"}, RunOpts{StdinReader: strings.NewReader(base)}) + cleanUpContext := ScopedContext(t, kubectl, testName, scopedContext, scopedUser) + defer cleanUpContext() + + roleResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: __test-name__ + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["*"] +` + + roleResource = strings.ReplaceAll(roleResource, "__test-name__", testName) + logger.Section("deploy app with privilege escalation Role", func() { + kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(roleResource)}) + + NewPresentClusterResource("role", testName, testName, kubectl) + }) + + bindingResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 
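+# binds the built-in "admin" ClusterRole to the test namespace's default ServiceAccount;
+# permitted for the scoped user only because its ClusterRole grants the "bind" verb on RBAC resources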
+kind: RoleBinding +metadata: + namespace: __test-name__ + name: __test-name__ +subjects: + - kind: ServiceAccount + namespace: __test-name__ + name: default +roleRef: + kind: ClusterRole + name: admin + apiGroup: rbac.authorization.k8s.io +` + bindingResource = strings.ReplaceAll(bindingResource, "__test-name__", testName) + logger.Section("deploy app with privilege escalation RoleBinding", func() { + kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(bindingResource)}) + + NewPresentClusterResource("rolebinding", testName, testName, kubectl) + }) +} diff --git a/test/e2e/preflight_permission_validation_failed_escalation_test.go b/test/e2e/preflight_permission_validation_failed_escalation_test.go new file mode 100644 index 000000000..527e6218e --- /dev/null +++ b/test/e2e/preflight_permission_validation_failed_escalation_test.go @@ -0,0 +1,141 @@ +// Copyright 2024 VMware, Inc. +// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPreflightPermissionValidationFailedEscalation(t *testing.T) { + env := BuildEnv(t) + logger := Logger{} + kapp := Kapp{t, env.Namespace, env.KappBinaryPath, logger} + kubectl := Kubectl{t, env.Namespace, logger} + + testName := "preflight-permission-validation-failed-escalation" + + base := ` +--- +apiVersion: v1 +kind: Namespace +metadata: + name: __test-name__ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scoped-sa + namespace: __ns__ +--- +apiVersion: v1 +kind: Secret +metadata: + name: scoped-sa + namespace: __ns__ + annotations: + kubernetes.io/service-account.name: scoped-sa +type: kubernetes.io/service-account-token +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["list"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings", "clusterroles", "clusterrolebindings"] + verbs: ["get", "list", "create", "update", "delete"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +subjects: +- kind: ServiceAccount + name: scoped-sa + namespace: __ns__ +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: __test-name__ +` + + base = strings.ReplaceAll(base, "__test-name__", testName) + base = strings.ReplaceAll(base, "__ns__", env.Namespace) + baseName := "preflight-permission-validation-base-app" + appName := "preflight-permission-validation-app" + scopedContext := "scoped-context" + scopedUser := "scoped-user" + + cleanUp := func() { + kapp.Run([]string{"delete", "-a", baseName}) + kapp.Run([]string{"delete", "-a", appName}) + RemoveClusterResource(t, "ns", testName, "", kubectl) + } + cleanUp() + defer cleanUp() + + kapp.RunWithOpts([]string{"deploy", "-a", baseName, "-f", "-"}, RunOpts{StdinReader: strings.NewReader(base)}) + cleanUpContext := ScopedContext(t, kubectl, testName, scopedContext, scopedUser) + defer cleanUpContext() + + roleResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: __test-name__ + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["*"] +` + + roleResource = 
strings.ReplaceAll(roleResource, "__test-name__", testName) + logger.Section("attempt to deploy app with privilege escalation Role without privilege escalation permissions", func() { + _, err := kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(roleResource), AllowError: true}) + require.Error(t, err) + require.Contains(t, err.Error(), "running preflight check \"PermissionValidation\": potential privilege escalation, not permitted to \"create\" rbac.authorization.k8s.io/v1, Kind=Role:") + NewMissingClusterResource(t, "role", testName, testName, kubectl) + }) + + bindingResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + namespace: __test-name__ + name: __test-name__ +subjects: + - kind: ServiceAccount + namespace: __test-name__ + name: default +roleRef: + kind: ClusterRole + name: admin + apiGroup: rbac.authorization.k8s.io +` + bindingResource = strings.ReplaceAll(bindingResource, "__test-name__", testName) + logger.Section("attempt deploy app with privilege escalation RoleBinding without privilege escalation permissions", func() { + _, err := kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(bindingResource), AllowError: true}) + require.Error(t, err) + require.Contains(t, err.Error(), "running preflight check \"PermissionValidation\": potential privilege escalation, not permitted to \"create\" rbac.authorization.k8s.io/v1, Kind=RoleBinding:") + NewMissingClusterResource(t, "rolebinding", testName, testName, kubectl) + }) +} diff --git a/test/e2e/preflight_permission_validation_missing_test.go b/test/e2e/preflight_permission_validation_missing_test.go new file mode 100644 index 000000000..bf46b3812 --- /dev/null +++ b/test/e2e/preflight_permission_validation_missing_test.go @@ -0,0 +1,168 @@ +// Copyright 2024 VMware, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPreflightPermissionValidationMissingPermissions(t *testing.T) { + env := BuildEnv(t) + logger := Logger{} + kapp := Kapp{t, env.Namespace, env.KappBinaryPath, logger} + kubectl := Kubectl{t, env.Namespace, logger} + + testName := "preflight-permission-validation-missing-permissions" + + base := ` +--- +apiVersion: v1 +kind: Namespace +metadata: + name: __test-name__ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scoped-sa + namespace: __ns__ +--- +apiVersion: v1 +kind: Secret +metadata: + name: scoped-sa + namespace: __ns__ + annotations: + kubernetes.io/service-account.name: scoped-sa +type: kubernetes.io/service-account-token +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["list"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] + verbs: ["get", "list"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +subjects: +- kind: ServiceAccount + name: scoped-sa + namespace: __ns__ +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: __test-name__ +` + + base = strings.ReplaceAll(base, "__test-name__", testName) + base = strings.ReplaceAll(base, "__ns__", env.Namespace) + baseName := "preflight-permission-validation-base-app" + appName := "preflight-permission-validation-app" + scopedContext := "scoped-context" + scopedUser := "scoped-user" + + cleanUp := func() { + kapp.Run([]string{"delete", "-a", baseName}) + kapp.Run([]string{"delete", "-a", appName}) + RemoveClusterResource(t, "ns", testName, "", kubectl) + } + cleanUp() + defer cleanUp() + + kapp.RunWithOpts([]string{"deploy", "-a", baseName, "-f", "-"}, RunOpts{StdinReader: strings.NewReader(base)}) + cleanUpContext := ScopedContext(t, kubectl, testName, scopedContext, scopedUser) + defer cleanUpContext() + + basicResource := ` +--- +apiVersion: v1 +kind: Pod +metadata: + name: __test-name__ + namespace: __test-name__ +spec: + containers: + - name: simple-app + image: docker.io/dkalinin/k8s-simple-app@sha256:4c8b96d4fffdfae29258d94a22ae4ad1fe36139d47288b8960d9958d1e63a9d0 + env: + - name: HELLO_MSG + value: stranger +` + basicResource = strings.ReplaceAll(basicResource, "__test-name__", testName) + logger.Section("attempt to deploy app with a Pod and missing permissions to create Pods", func() { + _, err := kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(basicResource), AllowError: true}) + + require.Error(t, err) + require.Contains(t, err.Error(), "running preflight check \"PermissionValidation\": not permitted to \"create\" /v1, Resource=pods") + NewMissingClusterResource(t, "pod", testName, testName, kubectl) + }) + + roleResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: __test-name__ + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["create", "update", "delete"] +` + + roleResource = strings.ReplaceAll(roleResource, "__test-name__", testName) + logger.Section("attempt to deploy app with a 
Role and missing permissions to create Roles", func() { + _, err := kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(roleResource), AllowError: true}) + + require.Error(t, err) + require.Contains(t, err.Error(), "running preflight check \"PermissionValidation\": not permitted to \"create\" rbac.authorization.k8s.io/v1, Resource=roles") + NewMissingClusterResource(t, "role", testName, testName, kubectl) + }) + + bindingResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + namespace: __test-name__ + name: __test-name__ +subjects: + - kind: ServiceAccount + namespace: __test-name__ + name: default +roleRef: + kind: ClusterRole + name: admin + apiGroup: rbac.authorization.k8s.io +` + bindingResource = strings.ReplaceAll(bindingResource, "__test-name__", testName) + logger.Section("attempt to deploy app with a RoleBinding and missing permissions to create RoleBindings", func() { + _, err := kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(bindingResource), AllowError: true}) + + require.Error(t, err) + require.Contains(t, err.Error(), "running preflight check \"PermissionValidation\": not permitted to \"create\" rbac.authorization.k8s.io/v1, Resource=rolebindings") + NewMissingClusterResource(t, "rolebinding", testName, testName, kubectl) + }) +} diff --git a/test/e2e/preflight_permission_validation_test.go b/test/e2e/preflight_permission_validation_test.go new file mode 100644 index 000000000..58535d861 --- /dev/null +++ b/test/e2e/preflight_permission_validation_test.go @@ -0,0 +1,160 @@ +// Copyright 2024 VMware, Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package e2e + +import ( + "fmt" + "strings" + "testing" +) + +func TestPreflightPermissionValidation(t *testing.T) { + env := BuildEnv(t) + logger := Logger{} + kapp := Kapp{t, env.Namespace, env.KappBinaryPath, logger} + kubectl := Kubectl{t, env.Namespace, logger} + + testName := "preflight-permission-validation" + + base := ` +--- +apiVersion: v1 +kind: Namespace +metadata: + name: __test-name__ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: scoped-sa + namespace: __ns__ +--- +apiVersion: v1 +kind: Secret +metadata: + name: scoped-sa + namespace: __ns__ + annotations: + kubernetes.io/service-account.name: scoped-sa +type: kubernetes.io/service-account-token +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["list"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "create", "update", "delete"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["roles", "rolebindings"] + verbs: ["get", "list", "create", "update", "delete"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: __test-name__ +subjects: +- kind: ServiceAccount + name: scoped-sa + namespace: __ns__ +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: __test-name__ +` + + base = strings.ReplaceAll(base, "__test-name__", testName) + base = strings.ReplaceAll(base, "__ns__", env.Namespace) + baseName := "preflight-permission-validation-base-app" + appName := "preflight-permission-validation-app" + scopedContext := "scoped-context" + scopedUser := "scoped-user" + + cleanUp := func() { + kapp.Run([]string{"delete", "-a", baseName}) + kapp.Run([]string{"delete", "-a", appName}) + RemoveClusterResource(t, "ns", testName, "", kubectl) + } + cleanUp() + defer cleanUp() + + kapp.RunWithOpts([]string{"deploy", "-a", baseName, "-f", "-"}, RunOpts{StdinReader: strings.NewReader(base)}) + cleanUpContext := ScopedContext(t, kubectl, testName, scopedContext, scopedUser) + defer cleanUpContext() + + basicResource := ` +--- +apiVersion: v1 +kind: Pod +metadata: + name: __test-name__ + namespace: __test-name__ +spec: + containers: + - name: simple-app + image: docker.io/dkalinin/k8s-simple-app@sha256:4c8b96d4fffdfae29258d94a22ae4ad1fe36139d47288b8960d9958d1e63a9d0 + env: + - name: HELLO_MSG + value: stranger +` + basicResource = strings.ReplaceAll(basicResource, "__test-name__", testName) + logger.Section("deploy app with Pod with permissions to create Pods", func() { + kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(basicResource)}) + + NewPresentClusterResource("pod", testName, testName, kubectl) + }) + + roleResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + namespace: __test-name__ + name: __test-name__ +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["create", "update"] +` + + roleResource = strings.ReplaceAll(roleResource, "__test-name__", testName) + logger.Section("deploy app with Role with permissions to create Roles", func() { + kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: 
strings.NewReader(roleResource)}) + + NewPresentClusterResource("role", testName, testName, kubectl) + }) + + bindingResource := ` +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + namespace: __test-name__ + name: __test-name__ +subjects: + - kind: ServiceAccount + namespace: __test-name__ + name: default +roleRef: + kind: Role + name: __test-name__ + apiGroup: rbac.authorization.k8s.io +` + bindingResource = strings.ReplaceAll(bindingResource, "__test-name__", testName) + logger.Section("deploy app with Pod with permissions to create RoleBindings", func() { + kapp.RunWithOpts([]string{"deploy", "--preflight=PermissionValidation=true", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)}, + RunOpts{StdinReader: strings.NewReader(roleResource + bindingResource)}) + + NewPresentClusterResource("rolebinding", testName, testName, kubectl) + }) +} diff --git a/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go new file mode 100644 index 000000000..3829b3cc0 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go @@ -0,0 +1,332 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memory + +import ( + "errors" + "fmt" + "sync" + "syscall" + + openapi_v2 "github.com/google/gnostic-models/openapiv2" + + errorsutil "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + "k8s.io/client-go/openapi" + cachedopenapi "k8s.io/client-go/openapi/cached" + restclient "k8s.io/client-go/rest" + "k8s.io/klog/v2" +) + +type cacheEntry struct { + resourceList *metav1.APIResourceList + err error +} + +// memCacheClient can Invalidate() to stay up-to-date with discovery +// information. +// +// TODO: Switch to a watch interface. Right now it will poll after each +// Invalidate() call. +type memCacheClient struct { + delegate discovery.DiscoveryInterface + + lock sync.RWMutex + groupToServerResources map[string]*cacheEntry + groupList *metav1.APIGroupList + cacheValid bool + openapiClient openapi.Client + receivedAggregatedDiscovery bool +} + +// Error Constants +var ( + ErrCacheNotFound = errors.New("not found") +) + +// Server returning empty ResourceList for Group/Version. +type emptyResponseError struct { + gv string +} + +func (e *emptyResponseError) Error() string { + return fmt.Sprintf("received empty response for: %s", e.gv) +} + +var _ discovery.CachedDiscoveryInterface = &memCacheClient{} + +// isTransientConnectionError checks whether given error is "Connection refused" or +// "Connection reset" error which usually means that apiserver is temporarily +// unavailable. 
+func isTransientConnectionError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == syscall.ECONNREFUSED || errno == syscall.ECONNRESET + } + return false +} + +func isTransientError(err error) bool { + if isTransientConnectionError(err) { + return true + } + + if t, ok := err.(errorsutil.APIStatus); ok && t.Status().Code >= 500 { + return true + } + + return errorsutil.IsTooManyRequests(err) +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *memCacheClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + cachedVal, ok := d.groupToServerResources[groupVersion] + if !ok { + return nil, ErrCacheNotFound + } + + if cachedVal.err != nil && isTransientError(cachedVal.err) { + r, err := d.serverResourcesForGroupVersion(groupVersion) + if err != nil { + // Don't log "empty response" as an error; it is a common response for metrics. + if _, emptyErr := err.(*emptyResponseError); emptyErr { + // Log at same verbosity as disk cache. + klog.V(3).Infof("%v", err) + } else { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", groupVersion, err)) + } + } + cachedVal = &cacheEntry{r, err} + d.groupToServerResources[groupVersion] = cachedVal + } + + return cachedVal.resourceList, cachedVal.err +} + +// ServerGroupsAndResources returns the groups and supported resources for all groups and versions. +func (d *memCacheClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return discovery.ServerGroupsAndResources(d) +} + +// GroupsAndMaybeResources returns the list of APIGroups, and possibly the map of group/version +// to resources. The returned groups will never be nil, but the resources map can be nil +// if there are no cached resources. +func (d *memCacheClient) GroupsAndMaybeResources() (*metav1.APIGroupList, map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error, error) { + d.lock.Lock() + defer d.lock.Unlock() + + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, nil, nil, err + } + } + // Build the resourceList from the cache? 
+ var resourcesMap map[schema.GroupVersion]*metav1.APIResourceList + var failedGVs map[schema.GroupVersion]error + if d.receivedAggregatedDiscovery && len(d.groupToServerResources) > 0 { + resourcesMap = map[schema.GroupVersion]*metav1.APIResourceList{} + failedGVs = map[schema.GroupVersion]error{} + for gv, cacheEntry := range d.groupToServerResources { + groupVersion, err := schema.ParseGroupVersion(gv) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to parse group version (%v): %v", gv, err) + } + if cacheEntry.err != nil { + failedGVs[groupVersion] = cacheEntry.err + } else { + resourcesMap[groupVersion] = cacheEntry.resourceList + } + } + } + return d.groupList, resourcesMap, failedGVs, nil +} + +func (d *memCacheClient) ServerGroups() (*metav1.APIGroupList, error) { + groups, _, _, err := d.GroupsAndMaybeResources() + if err != nil { + return nil, err + } + return groups, nil +} + +func (d *memCacheClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *memCacheClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredResources(d) +} + +func (d *memCacheClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredNamespacedResources(d) +} + +func (d *memCacheClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *memCacheClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *memCacheClient) OpenAPIV3() openapi.Client { + // Must take lock since Invalidate call may modify openapiClient + d.lock.Lock() + defer d.lock.Unlock() + + if d.openapiClient == nil { + d.openapiClient = cachedopenapi.NewClient(d.delegate.OpenAPIV3()) + } + + return d.openapiClient +} + +func (d *memCacheClient) Fresh() bool { + d.lock.RLock() + defer d.lock.RUnlock() + // Return whether the cache is populated at all. It is still possible that + // a single entry is missing due to transient errors and the attempt to read + // that entry will trigger retry. + return d.cacheValid +} + +// Invalidate enforces that no cached data that is older than the current time +// is used. +func (d *memCacheClient) Invalidate() { + d.lock.Lock() + defer d.lock.Unlock() + d.cacheValid = false + d.groupToServerResources = nil + d.groupList = nil + d.openapiClient = nil + d.receivedAggregatedDiscovery = false + if ad, ok := d.delegate.(discovery.CachedDiscoveryInterface); ok { + ad.Invalidate() + } +} + +// refreshLocked refreshes the state of cache. The caller must hold d.lock for +// writing. +func (d *memCacheClient) refreshLocked() error { + // TODO: Could this multiplicative set of calls be replaced by a single call + // to ServerResources? If it's possible for more than one resulting + // APIResourceList to have the same GroupVersion, the lists would need merged. + var gl *metav1.APIGroupList + var err error + + if ad, ok := d.delegate.(discovery.AggregatedDiscoveryInterface); ok { + var resources map[schema.GroupVersion]*metav1.APIResourceList + var failedGVs map[schema.GroupVersion]error + gl, resources, failedGVs, err = ad.GroupsAndMaybeResources() + if resources != nil && err == nil { + // Cache the resources. 
+ d.groupToServerResources = map[string]*cacheEntry{} + d.groupList = gl + for gv, resources := range resources { + d.groupToServerResources[gv.String()] = &cacheEntry{resources, nil} + } + // Cache GroupVersion discovery errors + for gv, err := range failedGVs { + d.groupToServerResources[gv.String()] = &cacheEntry{nil, err} + } + d.receivedAggregatedDiscovery = true + d.cacheValid = true + return nil + } + } else { + gl, err = d.delegate.ServerGroups() + } + if err != nil || len(gl.Groups) == 0 { + utilruntime.HandleError(fmt.Errorf("couldn't get current server API group list: %v", err)) + return err + } + + wg := &sync.WaitGroup{} + resultLock := &sync.Mutex{} + rl := map[string]*cacheEntry{} + for _, g := range gl.Groups { + for _, v := range g.Versions { + gv := v.GroupVersion + wg.Add(1) + go func() { + defer wg.Done() + defer utilruntime.HandleCrash() + + r, err := d.serverResourcesForGroupVersion(gv) + if err != nil { + // Don't log "empty response" as an error; it is a common response for metrics. + if _, emptyErr := err.(*emptyResponseError); emptyErr { + // Log at same verbosity as disk cache. + klog.V(3).Infof("%v", err) + } else { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", gv, err)) + } + } + + resultLock.Lock() + defer resultLock.Unlock() + rl[gv] = &cacheEntry{r, err} + }() + } + } + wg.Wait() + + d.groupToServerResources, d.groupList = rl, gl + d.cacheValid = true + return nil +} + +func (d *memCacheClient) serverResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + r, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return r, err + } + if len(r.APIResources) == 0 { + return r, &emptyResponseError{gv: groupVersion} + } + return r, nil +} + +// WithLegacy returns current memory-cached discovery client; +// current client does not support legacy-only discovery. +func (d *memCacheClient) WithLegacy() discovery.DiscoveryInterface { + return d +} + +// NewMemCacheClient creates a new CachedDiscoveryInterface which caches +// discovery information in memory and will stay up-to-date if Invalidate is +// called with regularity. +// +// NOTE: The client will NOT resort to live lookups on cache misses. +func NewMemCacheClient(delegate discovery.DiscoveryInterface) discovery.CachedDiscoveryInterface { + return &memCacheClient{ + delegate: delegate, + groupToServerResources: map[string]*cacheEntry{}, + receivedAggregatedDiscovery: false, + } +} diff --git a/vendor/k8s.io/client-go/openapi/cached/client.go b/vendor/k8s.io/client-go/openapi/cached/client.go new file mode 100644 index 000000000..17f63ed26 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/cached/client.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cached + +import ( + "sync" + + "k8s.io/client-go/openapi" +) + +type client struct { + delegate openapi.Client + + once sync.Once + result map[string]openapi.GroupVersion + err error +} + +func NewClient(other openapi.Client) openapi.Client { + return &client{ + delegate: other, + } +} + +func (c *client) Paths() (map[string]openapi.GroupVersion, error) { + c.once.Do(func() { + uncached, err := c.delegate.Paths() + if err != nil { + c.err = err + return + } + + result := make(map[string]openapi.GroupVersion, len(uncached)) + for k, v := range uncached { + result[k] = newGroupVersion(v) + } + c.result = result + }) + return c.result, c.err +} diff --git a/vendor/k8s.io/client-go/openapi/cached/groupversion.go b/vendor/k8s.io/client-go/openapi/cached/groupversion.go new file mode 100644 index 000000000..65a4189f7 --- /dev/null +++ b/vendor/k8s.io/client-go/openapi/cached/groupversion.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cached + +import ( + "sync" + + "k8s.io/client-go/openapi" +) + +type groupversion struct { + delegate openapi.GroupVersion + + lock sync.Mutex + docs map[string]docInfo +} + +type docInfo struct { + data []byte + err error +} + +func newGroupVersion(delegate openapi.GroupVersion) *groupversion { + return &groupversion{ + delegate: delegate, + } +} + +func (g *groupversion) Schema(contentType string) ([]byte, error) { + g.lock.Lock() + defer g.lock.Unlock() + + cachedInfo, ok := g.docs[contentType] + if !ok { + if g.docs == nil { + g.docs = make(map[string]docInfo) + } + + cachedInfo.data, cachedInfo.err = g.delegate.Schema(contentType) + g.docs[contentType] = cachedInfo + } + + return cachedInfo.data, cachedInfo.err +} diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go new file mode 100644 index 000000000..484e4c839 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// CategoryExpander maps category strings to GroupResources. +// Categories are classification or 'tag' of a group of resources. 
+type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. +type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. In case of any error or no category found (which likely +// means we're at a cluster prior to categories support, fallback to the expander provided. +func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { + if client == nil { + panic("Please provide discovery client to shortcut expander") + } + return discoveryCategoryExpander{discoveryClient: client} +} + +// Expand fulfills CategoryExpander +func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. + _, apiResourceLists, _ := e.discoveryClient.ServerGroupsAndResources() + if len(apiResourceLists) == 0 { + return nil, false + } + + discoveredExpansions := map[string][]schema.GroupResource{} + for _, apiResourceList := range apiResourceLists { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + continue + } + // Collect GroupVersions by categories + for _, apiResource := range apiResourceList.APIResources { + if categories := apiResource.Categories; len(categories) > 0 { + for _, category := range categories { + groupResource := schema.GroupResource{ + Group: gv.Group, + Resource: apiResource.Name, + } + discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) + } + } + } + } + + ret, ok := discoveredExpansions[category] + return ret, ok +} + +// UnionCategoryExpander implements CategoryExpander interface. +// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. +type UnionCategoryExpander []CategoryExpander + +// Expand fulfills CategoryExpander +func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret := []schema.GroupResource{} + ok := false + + // Expand the category for each CategoryExpander in the list and merge/combine the results. + for _, expansion := range u { + curr, currOk := expansion.Expand(category) + + for _, currGR := range curr { + found := false + for _, existing := range ret { + if existing == currGR { + found = true + break + } + } + if !found { + ret = append(ret, currGR) + } + } + ok = ok || currOk + } + + return ret, ok +} diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go new file mode 100644 index 000000000..3505178b6 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -0,0 +1,338 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + + "k8s.io/klog/v2" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. +type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + // Make sure the preferred version comes first + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + + // if we have a slash, then this is a subresource and we shouldn't create mappings for those. 
+ if strings.Contains(resource.Name, "/") { + continue + } + + plural := gv.WithResource(resource.Name) + singular := gv.WithResource(resource.SingularName) + // this is for legacy resources and servers which don't list singular forms. For those we must still guess. + if len(resource.SingularName) == 0 { + _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) + } + + versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) + versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { + gs, rs, err := cl.ServerGroupsAndResources() + if rs == nil || gs == nil { + return nil, err + // TODO track the errors and update callers to handle partial errors. + } + rsm := map[string]*metav1.APIResourceList{} + for _, r := range rs { + rsm[r.GroupVersion] = r + } + + var result []*APIGroupResources + for _, group := range gs { + groupResources := &APIGroupResources{ + Group: *group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, ok := rsm[version.GroupVersion] + if !ok { + continue + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl discovery.CachedDiscoveryInterface +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewDiscoveryRESTMapper(groupResources) + return d.delegate, nil +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. 
+func (d *DeferredDiscoveryRESTMapper) Reset() { + klog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. +func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. +func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. +func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. +func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). 
+func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.ResettableRESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go new file mode 100644 index 000000000..ca517a01d --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -0,0 +1,211 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "fmt" + "strings" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the resource first, then invokes the wrapped +type shortcutExpander struct { + RESTMapper meta.RESTMapper + + discoveryClient discovery.DiscoveryInterface + + warningHandler func(string) +} + +var _ meta.ResettableRESTMapper = shortcutExpander{} + +// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface, warningHandler func(string)) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client, warningHandler: warningHandler} +} + +// KindFor fulfills meta.RESTMapper +func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + // expandResourceShortcut works with current API resources as read from discovery cache. + // In case of new CRDs this means we potentially don't have current state of discovery. + // In the current wiring in k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go#toRESTMapper, + // we are using DeferredDiscoveryRESTMapper which on KindFor failure will clear the + // cache and fetch all data from a cluster (see vendor/k8s.io/client-go/restmapper/discovery.go#KindFor). + // Thus another call to expandResourceShortcut, after a NoMatchError should successfully + // read Kind to the user or an error. 
+ gvk, err := e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) + if meta.IsNoMatchError(err) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) + } + return gvk, err +} + +// KindsFor fulfills meta.RESTMapper +func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) +} + +// ResourcesFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) +} + +// ResourceFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) +} + +// ResourceSingularizer fulfills meta.RESTMapper +func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) +} + +// RESTMapping fulfills meta.RESTMapper +func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) +} + +// RESTMappings fulfills meta.RESTMapper +func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk, versions...) +} + +// getShortcutMappings returns a set of tuples which holds short names for resources. +// First the list of potential resources will be taken from the API server. +// Next we will append the hardcoded list of resources - to be backward compatible with old servers. +// NOTE that the list is ordered by group priority. +func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) { + res := []resourceShortcuts{} + // get server resources + // This can return an error *and* the results it was able to find. We don't need to fail on the error. + _, apiResList, err := e.discoveryClient.ServerGroupsAndResources() + if err != nil { + klog.V(1).Infof("Error loading discovery information: %v", err) + } + for _, apiResources := range apiResList { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + continue + } + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + rs := resourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, + } + res = append(res, rs) + } + } + } + + return apiResList, res, nil +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. If no match has been found, we will match on group prefixing. +// Lastly we will return resource unmodified. +func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { + // get the shortcut mappings and return on first match. 
+ if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil { + // avoid expanding if there's an exact match to a full resource name + for _, apiResources := range allResources { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + continue + } + if len(resource.Group) != 0 && resource.Group != gv.Group { + continue + } + for _, apiRes := range apiResources.APIResources { + if resource.Resource == apiRes.Name { + return resource + } + if resource.Resource == apiRes.SingularName { + return resource + } + } + } + + found := false + var rsc schema.GroupVersionResource + warnedAmbiguousShortcut := make(map[schema.GroupResource]bool) + for _, item := range shortcutResources { + if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { + continue + } + if resource.Resource == item.ShortForm.Resource { + if found { + if item.LongForm.Group == rsc.Group && item.LongForm.Resource == rsc.Resource { + // It is common and acceptable that group/resource has multiple + // versions registered in cluster. This does not introduce ambiguity + // in terms of shortname usage. + continue + } + if !warnedAmbiguousShortcut[item.LongForm] { + if e.warningHandler != nil { + e.warningHandler(fmt.Sprintf("short name %q could also match lower priority resource %s", resource.Resource, item.LongForm.String())) + } + warnedAmbiguousShortcut[item.LongForm] = true + } + continue + } + rsc.Resource = item.LongForm.Resource + rsc.Group = item.LongForm.Group + found = true + } + } + if found { + return rsc + } + + // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling + if len(resource.Group) == 0 { + return resource + } + for _, item := range shortcutResources { + if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + } + + return resource +} + +func (e shortcutExpander) Reset() { + meta.MaybeResetRESTMapper(e.RESTMapper) +} + +// ResourceShortcuts represents a structure that holds the information how to +// transition from resource's shortcut to its full name. +type resourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} diff --git a/vendor/k8s.io/component-helpers/LICENSE b/vendor/k8s.io/component-helpers/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/component-helpers/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/component-helpers/auth/rbac/validation/policy_comparator.go b/vendor/k8s.io/component-helpers/auth/rbac/validation/policy_comparator.go new file mode 100644 index 000000000..7a0268b5e --- /dev/null +++ b/vendor/k8s.io/component-helpers/auth/rbac/validation/policy_comparator.go @@ -0,0 +1,173 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "strings" + + rbacv1 "k8s.io/api/rbac/v1" +) + +// Covers determines whether or not the ownerRules cover the servantRules in terms of allowed actions. +// It returns whether or not the ownerRules cover and a list of the rules that the ownerRules do not cover. +func Covers(ownerRules, servantRules []rbacv1.PolicyRule) (bool, []rbacv1.PolicyRule) { + // 1. Break every servantRule into individual rule tuples: group, verb, resource, resourceName + // 2. Compare the mini-rules against each owner rule. Because the breakdown is down to the most atomic level, we're guaranteed that each mini-servant rule will be either fully covered or not covered by a single owner rule + // 3. Any left over mini-rules means that we are not covered and we have a nice list of them. + // TODO: it might be nice to collapse the list down into something more human readable + + subrules := []rbacv1.PolicyRule{} + for _, servantRule := range servantRules { + subrules = append(subrules, BreakdownRule(servantRule)...) + } + + uncoveredRules := []rbacv1.PolicyRule{} + for _, subrule := range subrules { + covered := false + for _, ownerRule := range ownerRules { + if ruleCovers(ownerRule, subrule) { + covered = true + break + } + } + + if !covered { + uncoveredRules = append(uncoveredRules, subrule) + } + } + + return (len(uncoveredRules) == 0), uncoveredRules +} + +// BreadownRule takes a rule and builds an equivalent list of rules that each have at most one verb, one +// resource, and one resource name +func BreakdownRule(rule rbacv1.PolicyRule) []rbacv1.PolicyRule { + subrules := []rbacv1.PolicyRule{} + for _, group := range rule.APIGroups { + for _, resource := range rule.Resources { + for _, verb := range rule.Verbs { + if len(rule.ResourceNames) > 0 { + for _, resourceName := range rule.ResourceNames { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}, ResourceNames: []string{resourceName}}) + } + + } else { + subrules = append(subrules, rbacv1.PolicyRule{APIGroups: []string{group}, Resources: []string{resource}, Verbs: []string{verb}}) + } + + } + } + } + + // Non-resource URLs are unique because they only combine with verbs. 
+ for _, nonResourceURL := range rule.NonResourceURLs { + for _, verb := range rule.Verbs { + subrules = append(subrules, rbacv1.PolicyRule{NonResourceURLs: []string{nonResourceURL}, Verbs: []string{verb}}) + } + } + + return subrules +} + +func has(set []string, ele string) bool { + for _, s := range set { + if s == ele { + return true + } + } + return false +} + +func hasAll(set, contains []string) bool { + owning := make(map[string]struct{}, len(set)) + for _, ele := range set { + owning[ele] = struct{}{} + } + for _, ele := range contains { + if _, ok := owning[ele]; !ok { + return false + } + } + return true +} + +func resourceCoversAll(setResources, coversResources []string) bool { + // if we have a star or an exact match on all resources, then we match + if has(setResources, rbacv1.ResourceAll) || hasAll(setResources, coversResources) { + return true + } + + for _, path := range coversResources { + // if we have an exact match, then we match. + if has(setResources, path) { + continue + } + // if we're not a subresource, then we definitely don't match. fail. + if !strings.Contains(path, "/") { + return false + } + tokens := strings.SplitN(path, "/", 2) + resourceToCheck := "*/" + tokens[1] + if !has(setResources, resourceToCheck) { + return false + } + } + + return true +} + +func nonResourceURLsCoversAll(set, covers []string) bool { + for _, path := range covers { + covered := false + for _, owner := range set { + if nonResourceURLCovers(owner, path) { + covered = true + break + } + } + if !covered { + return false + } + } + return true +} + +func nonResourceURLCovers(ownerPath, subPath string) bool { + if ownerPath == subPath { + return true + } + return strings.HasSuffix(ownerPath, "*") && strings.HasPrefix(subPath, strings.TrimRight(ownerPath, "*")) +} + +// ruleCovers determines whether the ownerRule (which may have multiple verbs, resources, and resourceNames) covers +// the subrule (which may only contain at most one verb, resource, and resourceName) +func ruleCovers(ownerRule, subRule rbacv1.PolicyRule) bool { + verbMatches := has(ownerRule.Verbs, rbacv1.VerbAll) || hasAll(ownerRule.Verbs, subRule.Verbs) + groupMatches := has(ownerRule.APIGroups, rbacv1.APIGroupAll) || hasAll(ownerRule.APIGroups, subRule.APIGroups) + resourceMatches := resourceCoversAll(ownerRule.Resources, subRule.Resources) + nonResourceURLMatches := nonResourceURLsCoversAll(ownerRule.NonResourceURLs, subRule.NonResourceURLs) + + resourceNameMatches := false + + if len(subRule.ResourceNames) == 0 { + resourceNameMatches = (len(ownerRule.ResourceNames) == 0) + } else { + resourceNameMatches = (len(ownerRule.ResourceNames) == 0) || hasAll(ownerRule.ResourceNames, subRule.ResourceNames) + } + + return verbMatches && groupMatches && resourceMatches && resourceNameMatches && nonResourceURLMatches +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5f25f784e..103fc4215 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -385,6 +385,7 @@ k8s.io/client-go/applyconfigurations/storage/v1 k8s.io/client-go/applyconfigurations/storage/v1alpha1 k8s.io/client-go/applyconfigurations/storage/v1beta1 k8s.io/client-go/discovery +k8s.io/client-go/discovery/cached/memory k8s.io/client-go/dynamic k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/scheme @@ -440,6 +441,7 @@ k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/openapi +k8s.io/client-go/openapi/cached 
k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install k8s.io/client-go/pkg/apis/clientauthentication/v1 @@ -452,6 +454,7 @@ k8s.io/client-go/plugin/pkg/client/auth/gcp k8s.io/client-go/plugin/pkg/client/auth/oidc k8s.io/client-go/rest k8s.io/client-go/rest/watch +k8s.io/client-go/restmapper k8s.io/client-go/tools/auth k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api @@ -466,6 +469,9 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue +# k8s.io/component-helpers v0.29.1 +## explicit; go 1.21 +k8s.io/component-helpers/auth/rbac/validation # k8s.io/klog/v2 v2.110.1 ## explicit; go 1.13 k8s.io/klog/v2
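
Note on how the newly vendored packages fit together (illustration only, not part of the diff above): this change pulls in k8s.io/client-go/discovery/cached/memory, k8s.io/client-go/restmapper, and k8s.io/component-helpers/auth/rbac/validation, which is consistent with a PermissionValidation preflight that needs to resolve resource mappings and to decide whether the deployer's RBAC permissions cover any Role or RoleBinding being created. The sketch below shows one way these pieces can be combined; the helper name newCachedRESTMapper, the *rest.Config argument, and the example rules are hypothetical, and the real wiring lives in pkg/kapp/preflight, which is not shown in this excerpt.

// Hypothetical sketch, assuming a *rest.Config is obtained elsewhere in the program.
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/component-helpers/auth/rbac/validation"
)

// newCachedRESTMapper builds a RESTMapper backed by the in-memory cached
// discovery client, using the packages vendored in this change.
func newCachedRESTMapper(cfg *rest.Config) (meta.RESTMapper, error) {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, err
	}
	cached := memory.NewMemCacheClient(dc)
	return restmapper.NewDeferredDiscoveryRESTMapper(cached), nil
}

func main() {
	// Permissions the deploying user holds (hypothetical example).
	ownerRules := []rbacv1.PolicyRule{
		{APIGroups: []string{""}, Resources: []string{"pods", "configmaps"}, Verbs: []string{"get", "list", "create", "update", "delete"}},
	}
	// Permissions the Role in the manifest would grant.
	servantRules := []rbacv1.PolicyRule{
		{APIGroups: []string{""}, Resources: []string{"pods"}, Verbs: []string{"create", "update"}},
		{APIGroups: []string{""}, Resources: []string{"secrets"}, Verbs: []string{"get"}},
	}

	// Covers (vendored above from component-helpers) reports whether ownerRules
	// fully cover servantRules and, if not, returns the uncovered sub-rules.
	covered, uncovered := validation.Covers(ownerRules, servantRules)
	fmt.Println("covered:", covered) // false: the user cannot grant "get" on secrets
	for _, r := range uncovered {
		fmt.Printf("not covered: verbs=%v groups=%v resources=%v\n", r.Verbs, r.APIGroups, r.Resources)
	}
}

kapp's actual check may differ in detail: the e2e failures asserted earlier ("not permitted to \"create\" ... roles/rolebindings") come from verb-level permission checks, while Covers answers the related question of whether the rules being granted exceed what the deployer itself holds.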