diff --git a/apis/dataplex/v1beta1/zz_generated.conversion_hubs.go b/apis/dataplex/v1beta1/zz_generated.conversion_hubs.go index a6d8d8adf..91d499e46 100755 --- a/apis/dataplex/v1beta1/zz_generated.conversion_hubs.go +++ b/apis/dataplex/v1beta1/zz_generated.conversion_hubs.go @@ -8,3 +8,12 @@ package v1beta1 // Hub marks this type as a conversion hub. func (tr *AspectType) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LakeIAMBinding) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LakeIAMMember) Hub() {} + +// Hub marks this type as a conversion hub. +func (tr *LakeIAMPolicy) Hub() {} diff --git a/apis/dataplex/v1beta1/zz_generated.deepcopy.go b/apis/dataplex/v1beta1/zz_generated.deepcopy.go index 7e5181e23..b5c7c67b6 100644 --- a/apis/dataplex/v1beta1/zz_generated.deepcopy.go +++ b/apis/dataplex/v1beta1/zz_generated.deepcopy.go @@ -814,6 +814,96 @@ func (in *AssetStatusParameters) DeepCopy() *AssetStatusParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionInitParameters) DeepCopyInto(out *ConditionInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionInitParameters. +func (in *ConditionInitParameters) DeepCopy() *ConditionInitParameters { + if in == nil { + return nil + } + out := new(ConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionObservation) DeepCopyInto(out *ConditionObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionObservation. +func (in *ConditionObservation) DeepCopy() *ConditionObservation { + if in == nil { + return nil + } + out := new(ConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionParameters) DeepCopyInto(out *ConditionParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionParameters. 
+func (in *ConditionParameters) DeepCopy() *ConditionParameters { + if in == nil { + return nil + } + out := new(ConditionParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CsvOptionsInitParameters) DeepCopyInto(out *CsvOptionsInitParameters) { *out = *in @@ -1461,6 +1551,846 @@ func (in *Lake) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMBinding) DeepCopyInto(out *LakeIAMBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMBinding. +func (in *LakeIAMBinding) DeepCopy() *LakeIAMBinding { + if in == nil { + return nil + } + out := new(LakeIAMBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LakeIAMBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMBindingInitParameters) DeepCopyInto(out *LakeIAMBindingInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMBindingInitParameters. +func (in *LakeIAMBindingInitParameters) DeepCopy() *LakeIAMBindingInitParameters { + if in == nil { + return nil + } + out := new(LakeIAMBindingInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMBindingList) DeepCopyInto(out *LakeIAMBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LakeIAMBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMBindingList. +func (in *LakeIAMBindingList) DeepCopy() *LakeIAMBindingList { + if in == nil { + return nil + } + out := new(LakeIAMBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LakeIAMBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMBindingObservation) DeepCopyInto(out *LakeIAMBindingObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMBindingObservation. +func (in *LakeIAMBindingObservation) DeepCopy() *LakeIAMBindingObservation { + if in == nil { + return nil + } + out := new(LakeIAMBindingObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMBindingParameters) DeepCopyInto(out *LakeIAMBindingParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(ConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]*string, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(string) + **out = **in + } + } + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMBindingParameters. +func (in *LakeIAMBindingParameters) DeepCopy() *LakeIAMBindingParameters { + if in == nil { + return nil + } + out := new(LakeIAMBindingParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMBindingSpec) DeepCopyInto(out *LakeIAMBindingSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMBindingSpec. 
+func (in *LakeIAMBindingSpec) DeepCopy() *LakeIAMBindingSpec { + if in == nil { + return nil + } + out := new(LakeIAMBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMBindingStatus) DeepCopyInto(out *LakeIAMBindingStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMBindingStatus. +func (in *LakeIAMBindingStatus) DeepCopy() *LakeIAMBindingStatus { + if in == nil { + return nil + } + out := new(LakeIAMBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMember) DeepCopyInto(out *LakeIAMMember) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMember. +func (in *LakeIAMMember) DeepCopy() *LakeIAMMember { + if in == nil { + return nil + } + out := new(LakeIAMMember) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LakeIAMMember) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMemberConditionInitParameters) DeepCopyInto(out *LakeIAMMemberConditionInitParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberConditionInitParameters. +func (in *LakeIAMMemberConditionInitParameters) DeepCopy() *LakeIAMMemberConditionInitParameters { + if in == nil { + return nil + } + out := new(LakeIAMMemberConditionInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMemberConditionObservation) DeepCopyInto(out *LakeIAMMemberConditionObservation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberConditionObservation. +func (in *LakeIAMMemberConditionObservation) DeepCopy() *LakeIAMMemberConditionObservation { + if in == nil { + return nil + } + out := new(LakeIAMMemberConditionObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LakeIAMMemberConditionParameters) DeepCopyInto(out *LakeIAMMemberConditionParameters) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Title != nil { + in, out := &in.Title, &out.Title + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberConditionParameters. +func (in *LakeIAMMemberConditionParameters) DeepCopy() *LakeIAMMemberConditionParameters { + if in == nil { + return nil + } + out := new(LakeIAMMemberConditionParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMemberInitParameters) DeepCopyInto(out *LakeIAMMemberInitParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(LakeIAMMemberConditionInitParameters) + (*in).DeepCopyInto(*out) + } + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberInitParameters. +func (in *LakeIAMMemberInitParameters) DeepCopy() *LakeIAMMemberInitParameters { + if in == nil { + return nil + } + out := new(LakeIAMMemberInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMemberList) DeepCopyInto(out *LakeIAMMemberList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LakeIAMMember, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberList. +func (in *LakeIAMMemberList) DeepCopy() *LakeIAMMemberList { + if in == nil { + return nil + } + out := new(LakeIAMMemberList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LakeIAMMemberList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LakeIAMMemberObservation) DeepCopyInto(out *LakeIAMMemberObservation) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(LakeIAMMemberConditionObservation) + (*in).DeepCopyInto(*out) + } + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberObservation. +func (in *LakeIAMMemberObservation) DeepCopy() *LakeIAMMemberObservation { + if in == nil { + return nil + } + out := new(LakeIAMMemberObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMemberParameters) DeepCopyInto(out *LakeIAMMemberParameters) { + *out = *in + if in.Condition != nil { + in, out := &in.Condition, &out.Condition + *out = new(LakeIAMMemberConditionParameters) + (*in).DeepCopyInto(*out) + } + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.Member != nil { + in, out := &in.Member, &out.Member + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.Role != nil { + in, out := &in.Role, &out.Role + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberParameters. +func (in *LakeIAMMemberParameters) DeepCopy() *LakeIAMMemberParameters { + if in == nil { + return nil + } + out := new(LakeIAMMemberParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMemberSpec) DeepCopyInto(out *LakeIAMMemberSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberSpec. +func (in *LakeIAMMemberSpec) DeepCopy() *LakeIAMMemberSpec { + if in == nil { + return nil + } + out := new(LakeIAMMemberSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMMemberStatus) DeepCopyInto(out *LakeIAMMemberStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMMemberStatus. 
+func (in *LakeIAMMemberStatus) DeepCopy() *LakeIAMMemberStatus { + if in == nil { + return nil + } + out := new(LakeIAMMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMPolicy) DeepCopyInto(out *LakeIAMPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMPolicy. +func (in *LakeIAMPolicy) DeepCopy() *LakeIAMPolicy { + if in == nil { + return nil + } + out := new(LakeIAMPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LakeIAMPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMPolicyInitParameters) DeepCopyInto(out *LakeIAMPolicyInitParameters) { + *out = *in + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.LakeRef != nil { + in, out := &in.LakeRef, &out.LakeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LakeSelector != nil { + in, out := &in.LakeSelector, &out.LakeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.ProjectRef != nil { + in, out := &in.ProjectRef, &out.ProjectRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProjectSelector != nil { + in, out := &in.ProjectSelector, &out.ProjectSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMPolicyInitParameters. +func (in *LakeIAMPolicyInitParameters) DeepCopy() *LakeIAMPolicyInitParameters { + if in == nil { + return nil + } + out := new(LakeIAMPolicyInitParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMPolicyList) DeepCopyInto(out *LakeIAMPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LakeIAMPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMPolicyList. +func (in *LakeIAMPolicyList) DeepCopy() *LakeIAMPolicyList { + if in == nil { + return nil + } + out := new(LakeIAMPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LakeIAMPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LakeIAMPolicyObservation) DeepCopyInto(out *LakeIAMPolicyObservation) { + *out = *in + if in.Etag != nil { + in, out := &in.Etag, &out.Etag + *out = new(string) + **out = **in + } + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMPolicyObservation. +func (in *LakeIAMPolicyObservation) DeepCopy() *LakeIAMPolicyObservation { + if in == nil { + return nil + } + out := new(LakeIAMPolicyObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMPolicyParameters) DeepCopyInto(out *LakeIAMPolicyParameters) { + *out = *in + if in.Lake != nil { + in, out := &in.Lake, &out.Lake + *out = new(string) + **out = **in + } + if in.LakeRef != nil { + in, out := &in.LakeRef, &out.LakeRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LakeSelector != nil { + in, out := &in.LakeSelector, &out.LakeSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.Location != nil { + in, out := &in.Location, &out.Location + *out = new(string) + **out = **in + } + if in.LocationRef != nil { + in, out := &in.LocationRef, &out.LocationRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.LocationSelector != nil { + in, out := &in.LocationSelector, &out.LocationSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } + if in.PolicyData != nil { + in, out := &in.PolicyData, &out.PolicyData + *out = new(string) + **out = **in + } + if in.Project != nil { + in, out := &in.Project, &out.Project + *out = new(string) + **out = **in + } + if in.ProjectRef != nil { + in, out := &in.ProjectRef, &out.ProjectRef + *out = new(v1.Reference) + (*in).DeepCopyInto(*out) + } + if in.ProjectSelector != nil { + in, out := &in.ProjectSelector, &out.ProjectSelector + *out = new(v1.Selector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMPolicyParameters. +func (in *LakeIAMPolicyParameters) DeepCopy() *LakeIAMPolicyParameters { + if in == nil { + return nil + } + out := new(LakeIAMPolicyParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LakeIAMPolicySpec) DeepCopyInto(out *LakeIAMPolicySpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) + in.InitProvider.DeepCopyInto(&out.InitProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMPolicySpec. +func (in *LakeIAMPolicySpec) DeepCopy() *LakeIAMPolicySpec { + if in == nil { + return nil + } + out := new(LakeIAMPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LakeIAMPolicyStatus) DeepCopyInto(out *LakeIAMPolicyStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LakeIAMPolicyStatus. +func (in *LakeIAMPolicyStatus) DeepCopy() *LakeIAMPolicyStatus { + if in == nil { + return nil + } + out := new(LakeIAMPolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LakeInitParameters) DeepCopyInto(out *LakeInitParameters) { *out = *in diff --git a/apis/dataplex/v1beta1/zz_generated.managed.go b/apis/dataplex/v1beta1/zz_generated.managed.go index c2a8fd2d8..6b11ba235 100644 --- a/apis/dataplex/v1beta1/zz_generated.managed.go +++ b/apis/dataplex/v1beta1/zz_generated.managed.go @@ -187,6 +187,186 @@ func (mg *Lake) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { mg.Spec.WriteConnectionSecretToReference = r } +// GetCondition of this LakeIAMBinding. +func (mg *LakeIAMBinding) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LakeIAMBinding. +func (mg *LakeIAMBinding) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LakeIAMBinding. +func (mg *LakeIAMBinding) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LakeIAMBinding. +func (mg *LakeIAMBinding) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LakeIAMBinding. +func (mg *LakeIAMBinding) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LakeIAMBinding. +func (mg *LakeIAMBinding) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LakeIAMBinding. +func (mg *LakeIAMBinding) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LakeIAMBinding. +func (mg *LakeIAMBinding) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LakeIAMBinding. +func (mg *LakeIAMBinding) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LakeIAMBinding. +func (mg *LakeIAMBinding) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LakeIAMBinding. +func (mg *LakeIAMBinding) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LakeIAMBinding. +func (mg *LakeIAMBinding) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LakeIAMMember. +func (mg *LakeIAMMember) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LakeIAMMember. +func (mg *LakeIAMMember) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LakeIAMMember. 
+func (mg *LakeIAMMember) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LakeIAMMember. +func (mg *LakeIAMMember) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LakeIAMMember. +func (mg *LakeIAMMember) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LakeIAMMember. +func (mg *LakeIAMMember) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LakeIAMMember. +func (mg *LakeIAMMember) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LakeIAMMember. +func (mg *LakeIAMMember) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LakeIAMMember. +func (mg *LakeIAMMember) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LakeIAMMember. +func (mg *LakeIAMMember) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LakeIAMMember. +func (mg *LakeIAMMember) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LakeIAMMember. +func (mg *LakeIAMMember) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + +// GetCondition of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetManagementPolicies of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) GetManagementPolicies() xpv1.ManagementPolicies { + return mg.Spec.ManagementPolicies +} + +// GetProviderConfigReference of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +// GetPublishConnectionDetailsTo of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetManagementPolicies of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) SetManagementPolicies(r xpv1.ManagementPolicies) { + mg.Spec.ManagementPolicies = r +} + +// SetProviderConfigReference of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +// SetPublishConnectionDetailsTo of this LakeIAMPolicy. 
+func (mg *LakeIAMPolicy) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this LakeIAMPolicy. +func (mg *LakeIAMPolicy) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} + // GetCondition of this Zone. func (mg *Zone) GetCondition(ct xpv1.ConditionType) xpv1.Condition { return mg.Status.GetCondition(ct) diff --git a/apis/dataplex/v1beta1/zz_generated.managedlist.go b/apis/dataplex/v1beta1/zz_generated.managedlist.go index c7c5b94f6..5bb88ed95 100644 --- a/apis/dataplex/v1beta1/zz_generated.managedlist.go +++ b/apis/dataplex/v1beta1/zz_generated.managedlist.go @@ -25,6 +25,33 @@ func (l *AssetList) GetItems() []resource.Managed { return items } +// GetItems of this LakeIAMBindingList. +func (l *LakeIAMBindingList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LakeIAMMemberList. +func (l *LakeIAMMemberList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + +// GetItems of this LakeIAMPolicyList. +func (l *LakeIAMPolicyList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} + // GetItems of this LakeList. func (l *LakeList) GetItems() []resource.Managed { items := make([]resource.Managed, len(l.Items)) diff --git a/apis/dataplex/v1beta1/zz_generated.resolvers.go b/apis/dataplex/v1beta1/zz_generated.resolvers.go index 7f52a70bd..da4f17fab 100644 --- a/apis/dataplex/v1beta1/zz_generated.resolvers.go +++ b/apis/dataplex/v1beta1/zz_generated.resolvers.go @@ -9,6 +9,7 @@ package v1beta1 import ( "context" reference "github.com/crossplane/crossplane-runtime/pkg/reference" + resource "github.com/crossplane/upjet/pkg/resource" errors "github.com/pkg/errors" xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" @@ -116,6 +117,113 @@ func (mg *Asset) ResolveReferences(ctx context.Context, c client.Reader) error { return nil } +// ResolveReferences of this LakeIAMPolicy. 
+func (mg *LakeIAMPolicy) ResolveReferences(ctx context.Context, c client.Reader) error { + var m xpresource.Managed + var l xpresource.ManagedList + r := reference.NewAPIResolver(c, mg) + + var rsp reference.ResolutionResponse + var err error + { + m, l, err = apisresolver.GetManagedResource("dataplex.gcp.upbound.io", "v1beta2", "Lake", "LakeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Lake), + Extract: reference.ExternalName(), + Reference: mg.Spec.ForProvider.LakeRef, + Selector: mg.Spec.ForProvider.LakeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Lake") + } + mg.Spec.ForProvider.Lake = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LakeRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("dataplex.gcp.upbound.io", "v1beta2", "Lake", "LakeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Location), + Extract: resource.ExtractParamPath("location", false), + Reference: mg.Spec.ForProvider.LocationRef, + Selector: mg.Spec.ForProvider.LocationSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Location") + } + mg.Spec.ForProvider.Location = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.LocationRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("dataplex.gcp.upbound.io", "v1beta2", "Lake", "LakeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.ForProvider.Project), + Extract: resource.ExtractParamPath("project", false), + Reference: mg.Spec.ForProvider.ProjectRef, + Selector: mg.Spec.ForProvider.ProjectSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.ForProvider.Project") + } + mg.Spec.ForProvider.Project = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.ForProvider.ProjectRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("dataplex.gcp.upbound.io", "v1beta2", "Lake", "LakeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Lake), + Extract: reference.ExternalName(), + Reference: mg.Spec.InitProvider.LakeRef, + Selector: mg.Spec.InitProvider.LakeSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Lake") + } + mg.Spec.InitProvider.Lake = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.LakeRef = rsp.ResolvedReference + { + m, l, err = apisresolver.GetManagedResource("dataplex.gcp.upbound.io", "v1beta2", "Lake", "LakeList") + if err != nil { + return errors.Wrap(err, "failed to get the reference target managed resource and its list for reference 
resolution") + } + + rsp, err = r.Resolve(ctx, reference.ResolutionRequest{ + CurrentValue: reference.FromPtrValue(mg.Spec.InitProvider.Project), + Extract: resource.ExtractParamPath("project", false), + Reference: mg.Spec.InitProvider.ProjectRef, + Selector: mg.Spec.InitProvider.ProjectSelector, + To: reference.To{List: l, Managed: m}, + }) + } + if err != nil { + return errors.Wrap(err, "mg.Spec.InitProvider.Project") + } + mg.Spec.InitProvider.Project = reference.ToPtrValue(rsp.ResolvedValue) + mg.Spec.InitProvider.ProjectRef = rsp.ResolvedReference + + return nil +} + // ResolveReferences of this Zone. func (mg *Zone) ResolveReferences(ctx context.Context, c client.Reader) error { var m xpresource.Managed diff --git a/apis/dataplex/v1beta1/zz_lakeiambinding_terraformed.go b/apis/dataplex/v1beta1/zz_lakeiambinding_terraformed.go new file mode 100755 index 000000000..c2ba98d0d --- /dev/null +++ b/apis/dataplex/v1beta1/zz_lakeiambinding_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LakeIAMBinding +func (mg *LakeIAMBinding) GetTerraformResourceType() string { + return "google_dataplex_lake_iam_binding" +} + +// GetConnectionDetailsMapping for this LakeIAMBinding +func (tr *LakeIAMBinding) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LakeIAMBinding +func (tr *LakeIAMBinding) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LakeIAMBinding +func (tr *LakeIAMBinding) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LakeIAMBinding +func (tr *LakeIAMBinding) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LakeIAMBinding +func (tr *LakeIAMBinding) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LakeIAMBinding +func (tr *LakeIAMBinding) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LakeIAMBinding +func (tr *LakeIAMBinding) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LakeIAMBinding +func (tr *LakeIAMBinding) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if 
!shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LakeIAMBinding using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LakeIAMBinding) LateInitialize(attrs []byte) (bool, error) { + params := &LakeIAMBindingParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LakeIAMBinding) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataplex/v1beta1/zz_lakeiambinding_types.go b/apis/dataplex/v1beta1/zz_lakeiambinding_types.go new file mode 100755 index 000000000..a865d4e38 --- /dev/null +++ b/apis/dataplex/v1beta1/zz_lakeiambinding_types.go @@ -0,0 +1,162 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type ConditionInitParameters struct { + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type ConditionObservation struct { + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type ConditionParameters struct { + + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // +kubebuilder:validation:Optional + Expression *string `json:"expression" tf:"expression,omitempty"` + + // +kubebuilder:validation:Optional + Title *string `json:"title" tf:"title,omitempty"` +} + +type LakeIAMBindingInitParameters struct { + Condition *ConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type LakeIAMBindingObservation struct { + Condition *ConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type LakeIAMBindingParameters struct { + + // +kubebuilder:validation:Optional + Condition *ConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // +kubebuilder:validation:Optional + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=set + Members []*string `json:"members,omitempty" tf:"members,omitempty"` + + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// LakeIAMBindingSpec defines the desired state of LakeIAMBinding +type LakeIAMBindingSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LakeIAMBindingParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. 
This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LakeIAMBindingInitParameters `json:"initProvider,omitempty"` +} + +// LakeIAMBindingStatus defines the observed state of LakeIAMBinding. +type LakeIAMBindingStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LakeIAMBindingObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// LakeIAMBinding is the Schema for the LakeIAMBindings API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type LakeIAMBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.lake) || (has(self.initProvider) && has(self.initProvider.lake))",message="spec.forProvider.lake is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.members) || (has(self.initProvider) && has(self.initProvider.members))",message="spec.forProvider.members is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec LakeIAMBindingSpec `json:"spec"` + Status LakeIAMBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LakeIAMBindingList contains a list of LakeIAMBindings +type LakeIAMBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LakeIAMBinding `json:"items"` +} + +// Repository type metadata. +var ( + LakeIAMBinding_Kind = "LakeIAMBinding" + LakeIAMBinding_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LakeIAMBinding_Kind}.String() + LakeIAMBinding_KindAPIVersion = LakeIAMBinding_Kind + "." + CRDGroupVersion.String() + LakeIAMBinding_GroupVersionKind = CRDGroupVersion.WithKind(LakeIAMBinding_Kind) +) + +func init() { + SchemeBuilder.Register(&LakeIAMBinding{}, &LakeIAMBindingList{}) +} diff --git a/apis/dataplex/v1beta1/zz_lakeiammember_terraformed.go b/apis/dataplex/v1beta1/zz_lakeiammember_terraformed.go new file mode 100755 index 000000000..bf7a7f4fb --- /dev/null +++ b/apis/dataplex/v1beta1/zz_lakeiammember_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LakeIAMMember +func (mg *LakeIAMMember) GetTerraformResourceType() string { + return "google_dataplex_lake_iam_member" +} + +// GetConnectionDetailsMapping for this LakeIAMMember +func (tr *LakeIAMMember) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LakeIAMMember +func (tr *LakeIAMMember) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LakeIAMMember +func (tr *LakeIAMMember) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LakeIAMMember +func (tr *LakeIAMMember) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LakeIAMMember +func (tr *LakeIAMMember) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LakeIAMMember +func (tr *LakeIAMMember) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LakeIAMMember +func (tr *LakeIAMMember) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LakeIAMMember +func (tr *LakeIAMMember) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LakeIAMMember using its observed tfState. +// returns True if there are any spec changes for the resource. 
+func (tr *LakeIAMMember) LateInitialize(attrs []byte) (bool, error) { + params := &LakeIAMMemberParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LakeIAMMember) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataplex/v1beta1/zz_lakeiammember_types.go b/apis/dataplex/v1beta1/zz_lakeiammember_types.go new file mode 100755 index 000000000..6f616d6ca --- /dev/null +++ b/apis/dataplex/v1beta1/zz_lakeiammember_types.go @@ -0,0 +1,159 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LakeIAMMemberConditionInitParameters struct { + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type LakeIAMMemberConditionObservation struct { + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + Expression *string `json:"expression,omitempty" tf:"expression,omitempty"` + + Title *string `json:"title,omitempty" tf:"title,omitempty"` +} + +type LakeIAMMemberConditionParameters struct { + + // +kubebuilder:validation:Optional + Description *string `json:"description,omitempty" tf:"description,omitempty"` + + // +kubebuilder:validation:Optional + Expression *string `json:"expression" tf:"expression,omitempty"` + + // +kubebuilder:validation:Optional + Title *string `json:"title" tf:"title,omitempty"` +} + +type LakeIAMMemberInitParameters struct { + Condition *LakeIAMMemberConditionInitParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type LakeIAMMemberObservation struct { + Condition *LakeIAMMemberConditionObservation `json:"condition,omitempty" tf:"condition,omitempty"` + + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +type LakeIAMMemberParameters struct { + + // +kubebuilder:validation:Optional + Condition *LakeIAMMemberConditionParameters `json:"condition,omitempty" tf:"condition,omitempty"` + + // +kubebuilder:validation:Optional + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + // 
+kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // +kubebuilder:validation:Optional + Member *string `json:"member,omitempty" tf:"member,omitempty"` + + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // +kubebuilder:validation:Optional + Role *string `json:"role,omitempty" tf:"role,omitempty"` +} + +// LakeIAMMemberSpec defines the desired state of LakeIAMMember +type LakeIAMMemberSpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LakeIAMMemberParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LakeIAMMemberInitParameters `json:"initProvider,omitempty"` +} + +// LakeIAMMemberStatus defines the observed state of LakeIAMMember. +type LakeIAMMemberStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LakeIAMMemberObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// LakeIAMMember is the Schema for the LakeIAMMembers API. +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type LakeIAMMember struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.lake) || (has(self.initProvider) && has(self.initProvider.lake))",message="spec.forProvider.lake is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.member) || (has(self.initProvider) && has(self.initProvider.member))",message="spec.forProvider.member is a required parameter" + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.role) || (has(self.initProvider) && has(self.initProvider.role))",message="spec.forProvider.role is a required parameter" + Spec LakeIAMMemberSpec `json:"spec"` + Status LakeIAMMemberStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LakeIAMMemberList contains a list of LakeIAMMembers +type LakeIAMMemberList struct { + metav1.TypeMeta `json:",inline"` + 
metav1.ListMeta `json:"metadata,omitempty"` + Items []LakeIAMMember `json:"items"` +} + +// Repository type metadata. +var ( + LakeIAMMember_Kind = "LakeIAMMember" + LakeIAMMember_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LakeIAMMember_Kind}.String() + LakeIAMMember_KindAPIVersion = LakeIAMMember_Kind + "." + CRDGroupVersion.String() + LakeIAMMember_GroupVersionKind = CRDGroupVersion.WithKind(LakeIAMMember_Kind) +) + +func init() { + SchemeBuilder.Register(&LakeIAMMember{}, &LakeIAMMemberList{}) +} diff --git a/apis/dataplex/v1beta1/zz_lakeiampolicy_terraformed.go b/apis/dataplex/v1beta1/zz_lakeiampolicy_terraformed.go new file mode 100755 index 000000000..947dc0bfe --- /dev/null +++ b/apis/dataplex/v1beta1/zz_lakeiampolicy_terraformed.go @@ -0,0 +1,129 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + "dario.cat/mergo" + "github.com/pkg/errors" + + "github.com/crossplane/upjet/pkg/resource" + "github.com/crossplane/upjet/pkg/resource/json" +) + +// GetTerraformResourceType returns Terraform resource type for this LakeIAMPolicy +func (mg *LakeIAMPolicy) GetTerraformResourceType() string { + return "google_dataplex_lake_iam_policy" +} + +// GetConnectionDetailsMapping for this LakeIAMPolicy +func (tr *LakeIAMPolicy) GetConnectionDetailsMapping() map[string]string { + return nil +} + +// GetObservation of this LakeIAMPolicy +func (tr *LakeIAMPolicy) GetObservation() (map[string]any, error) { + o, err := json.TFParser.Marshal(tr.Status.AtProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(o, &base) +} + +// SetObservation for this LakeIAMPolicy +func (tr *LakeIAMPolicy) SetObservation(obs map[string]any) error { + p, err := json.TFParser.Marshal(obs) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Status.AtProvider) +} + +// GetID returns ID of underlying Terraform resource of this LakeIAMPolicy +func (tr *LakeIAMPolicy) GetID() string { + if tr.Status.AtProvider.ID == nil { + return "" + } + return *tr.Status.AtProvider.ID +} + +// GetParameters of this LakeIAMPolicy +func (tr *LakeIAMPolicy) GetParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.ForProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// SetParameters for this LakeIAMPolicy +func (tr *LakeIAMPolicy) SetParameters(params map[string]any) error { + p, err := json.TFParser.Marshal(params) + if err != nil { + return err + } + return json.TFParser.Unmarshal(p, &tr.Spec.ForProvider) +} + +// GetInitParameters of this LakeIAMPolicy +func (tr *LakeIAMPolicy) GetInitParameters() (map[string]any, error) { + p, err := json.TFParser.Marshal(tr.Spec.InitProvider) + if err != nil { + return nil, err + } + base := map[string]any{} + return base, json.TFParser.Unmarshal(p, &base) +} + +// GetInitParameters of this LakeIAMPolicy +func (tr *LakeIAMPolicy) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error) { + params, err := tr.GetParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get parameters for resource '%q'", tr.GetName()) + } + if !shouldMergeInitProvider { + return params, nil + } + + initParams, err := tr.GetInitParameters() + if err != nil { + return nil, errors.Wrapf(err, "cannot get init parameters for resource '%q'", tr.GetName()) + } + + // Note(lsviben): 
mergo.WithSliceDeepCopy is needed to merge the + // slices from the initProvider to forProvider. As it also sets + // overwrite to true, we need to set it back to false, we don't + // want to overwrite the forProvider fields with the initProvider + // fields. + err = mergo.Merge(¶ms, initParams, mergo.WithSliceDeepCopy, func(c *mergo.Config) { + c.Overwrite = false + }) + if err != nil { + return nil, errors.Wrapf(err, "cannot merge spec.initProvider and spec.forProvider parameters for resource '%q'", tr.GetName()) + } + + return params, nil +} + +// LateInitialize this LakeIAMPolicy using its observed tfState. +// returns True if there are any spec changes for the resource. +func (tr *LakeIAMPolicy) LateInitialize(attrs []byte) (bool, error) { + params := &LakeIAMPolicyParameters{} + if err := json.TFParser.Unmarshal(attrs, params); err != nil { + return false, errors.Wrap(err, "failed to unmarshal Terraform state parameters for late-initialization") + } + opts := []resource.GenericLateInitializerOption{resource.WithZeroValueJSONOmitEmptyFilter(resource.CNameWildcard)} + + li := resource.NewGenericLateInitializer(opts...) + return li.LateInitialize(&tr.Spec.ForProvider, params) +} + +// GetTerraformSchemaVersion returns the associated Terraform schema version +func (tr *LakeIAMPolicy) GetTerraformSchemaVersion() int { + return 0 +} diff --git a/apis/dataplex/v1beta1/zz_lakeiampolicy_types.go b/apis/dataplex/v1beta1/zz_lakeiampolicy_types.go new file mode 100755 index 000000000..9b0cfe906 --- /dev/null +++ b/apis/dataplex/v1beta1/zz_lakeiampolicy_types.go @@ -0,0 +1,179 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/crossplane/crossplane-runtime/apis/common/v1" +) + +type LakeIAMPolicyInitParameters struct { + + // Used to find the parent resource to bind the IAM policy to + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/dataplex/v1beta2.Lake + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + // Reference to a Lake in dataplex to populate lake. + // +kubebuilder:validation:Optional + LakeRef *v1.Reference `json:"lakeRef,omitempty" tf:"-"` + + // Selector for a Lake in dataplex to populate lake. + // +kubebuilder:validation:Optional + LakeSelector *v1.Selector `json:"lakeSelector,omitempty" tf:"-"` + + // The policy data generated by + // a google_iam_policy data source. + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/dataplex/v1beta2.Lake + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("project",false) + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Reference to a Lake in dataplex to populate project. + // +kubebuilder:validation:Optional + ProjectRef *v1.Reference `json:"projectRef,omitempty" tf:"-"` + + // Selector for a Lake in dataplex to populate project. 
+ // +kubebuilder:validation:Optional + ProjectSelector *v1.Selector `json:"projectSelector,omitempty" tf:"-"` +} + +type LakeIAMPolicyObservation struct { + + // (Computed) The etag of the IAM policy. + Etag *string `json:"etag,omitempty" tf:"etag,omitempty"` + + ID *string `json:"id,omitempty" tf:"id,omitempty"` + + // Used to find the parent resource to bind the IAM policy to + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // The policy data generated by + // a google_iam_policy data source. + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + Project *string `json:"project,omitempty" tf:"project,omitempty"` +} + +type LakeIAMPolicyParameters struct { + + // Used to find the parent resource to bind the IAM policy to + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/dataplex/v1beta2.Lake + // +kubebuilder:validation:Optional + Lake *string `json:"lake,omitempty" tf:"lake,omitempty"` + + // Reference to a Lake in dataplex to populate lake. + // +kubebuilder:validation:Optional + LakeRef *v1.Reference `json:"lakeRef,omitempty" tf:"-"` + + // Selector for a Lake in dataplex to populate lake. + // +kubebuilder:validation:Optional + LakeSelector *v1.Selector `json:"lakeSelector,omitempty" tf:"-"` + + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/dataplex/v1beta2.Lake + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("location",false) + // +kubebuilder:validation:Optional + Location *string `json:"location,omitempty" tf:"location,omitempty"` + + // Reference to a Lake in dataplex to populate location. + // +kubebuilder:validation:Optional + LocationRef *v1.Reference `json:"locationRef,omitempty" tf:"-"` + + // Selector for a Lake in dataplex to populate location. + // +kubebuilder:validation:Optional + LocationSelector *v1.Selector `json:"locationSelector,omitempty" tf:"-"` + + // The policy data generated by + // a google_iam_policy data source. + // +kubebuilder:validation:Optional + PolicyData *string `json:"policyData,omitempty" tf:"policy_data,omitempty"` + + // The ID of the project in which the resource belongs. + // If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + // +crossplane:generate:reference:type=github.com/upbound/provider-gcp/apis/dataplex/v1beta2.Lake + // +crossplane:generate:reference:extractor=github.com/crossplane/upjet/pkg/resource.ExtractParamPath("project",false) + // +kubebuilder:validation:Optional + Project *string `json:"project,omitempty" tf:"project,omitempty"` + + // Reference to a Lake in dataplex to populate project. + // +kubebuilder:validation:Optional + ProjectRef *v1.Reference `json:"projectRef,omitempty" tf:"-"` + + // Selector for a Lake in dataplex to populate project. 
+ // +kubebuilder:validation:Optional + ProjectSelector *v1.Selector `json:"projectSelector,omitempty" tf:"-"` +} + +// LakeIAMPolicySpec defines the desired state of LakeIAMPolicy +type LakeIAMPolicySpec struct { + v1.ResourceSpec `json:",inline"` + ForProvider LakeIAMPolicyParameters `json:"forProvider"` + // THIS IS A BETA FIELD. It will be honored + // unless the Management Policies feature flag is disabled. + // InitProvider holds the same fields as ForProvider, with the exception + // of Identifier and other resource reference fields. The fields that are + // in InitProvider are merged into ForProvider when the resource is created. + // The same fields are also added to the terraform ignore_changes hook, to + // avoid updating them after creation. This is useful for fields that are + // required on creation, but we do not desire to update them after creation, + // for example because of an external controller is managing them, like an + // autoscaler. + InitProvider LakeIAMPolicyInitParameters `json:"initProvider,omitempty"` +} + +// LakeIAMPolicyStatus defines the observed state of LakeIAMPolicy. +type LakeIAMPolicyStatus struct { + v1.ResourceStatus `json:",inline"` + AtProvider LakeIAMPolicyObservation `json:"atProvider,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// LakeIAMPolicy is the Schema for the LakeIAMPolicys API. Collection of resources to manage IAM policy for Dataplex Lake +// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,gcp} +type LakeIAMPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.policyData) || (has(self.initProvider) && has(self.initProvider.policyData))",message="spec.forProvider.policyData is a required parameter" + Spec LakeIAMPolicySpec `json:"spec"` + Status LakeIAMPolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// LakeIAMPolicyList contains a list of LakeIAMPolicys +type LakeIAMPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LakeIAMPolicy `json:"items"` +} + +// Repository type metadata. +var ( + LakeIAMPolicy_Kind = "LakeIAMPolicy" + LakeIAMPolicy_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: LakeIAMPolicy_Kind}.String() + LakeIAMPolicy_KindAPIVersion = LakeIAMPolicy_Kind + "." 
+ CRDGroupVersion.String() + LakeIAMPolicy_GroupVersionKind = CRDGroupVersion.WithKind(LakeIAMPolicy_Kind) +) + +func init() { + SchemeBuilder.Register(&LakeIAMPolicy{}, &LakeIAMPolicyList{}) +} diff --git a/config/externalname.go b/config/externalname.go index 474391cc2..fe9953bf8 100644 --- a/config/externalname.go +++ b/config/externalname.go @@ -426,6 +426,12 @@ var terraformPluginSDKExternalNameConfigs = map[string]config.ExternalName{ // // Imported by using the following format: projects/{{project}}/locations/{{location}}/lakes/{{name}} "google_dataplex_lake": config.TemplatedStringAsIdentifier("name", "projects/{{ .setup.configuration.project }}/locations/{{ .parameters.location }}/lakes/{{ .external_name }}"), + // Imported by using the following format: projects/{{project}}/locations/{{location}}/lakes/{{lake}} roles/viewer + "google_dataplex_lake_iam_binding": config.IdentifierFromProvider, + // Imported by using the following format: projects/{{project}}/locations/{{location}}/lakes/{{lake}} roles/viewer user:jane@example.com + "google_dataplex_lake_iam_member": config.IdentifierFromProvider, + // Imported by using the following format: projects/{{project}}/locations/{{location}}/lakes/{{lake}} + "google_dataplex_lake_iam_policy": config.TemplatedStringAsIdentifier("", "projects/{{ .setup.configuration.project }}/locations/{{ .parameters.location }}/lakes/{{ .external_name }}"), // Imported by using the following projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{dataplex_zone}}/assets/{{name}} "google_dataplex_asset": config.TemplatedStringAsIdentifier("name", "projects/{{ .setup.configuration.project }}/locations/{{ .parameters.location }}/lakes/{{ .parameters.lake }}/zones/{{ .parameters.dataplex_zone }}/assets/{{ .external_name }}"), // Imported by using the following projects/{{project}}/locations/{{location}}/lakes/{{lake}}/zones/{{name}} diff --git a/config/generated.lst b/config/generated.lst index 556d552f6..82f9a26a9 100644 --- a/config/generated.lst +++ b/config/generated.lst @@ -1 +1 @@ 
-["google_access_context_manager_access_level","google_access_context_manager_access_level_condition","google_access_context_manager_access_policy","google_access_context_manager_access_policy_iam_member","google_access_context_manager_service_perimeter","google_access_context_manager_service_perimeter_resource","google_active_directory_domain","google_alloydb_backup","google_alloydb_cluster","google_alloydb_instance","google_apigee_addons_config","google_apigee_endpoint_attachment","google_apigee_envgroup","google_apigee_envgroup_attachment","google_apigee_environment","google_apigee_environment_iam_member","google_apigee_instance","google_apigee_instance_attachment","google_apigee_nat_address","google_apigee_organization","google_apigee_sync_authorization","google_app_engine_application","google_app_engine_application_url_dispatch_rules","google_app_engine_firewall_rule","google_app_engine_service_network_settings","google_app_engine_standard_app_version","google_artifact_registry_repository","google_artifact_registry_repository_iam_member","google_beyondcorp_app_connection","google_beyondcorp_app_connector","google_beyondcorp_app_gateway","google_bigquery_analytics_hub_data_exchange","google_bigquery_analytics_hub_data_exchange_iam_member","google_bigquery_analytics_hub_listing","google_bigquery_connection","google_bigquery_data_transfer_config","google_bigquery_dataset","google_bigquery_dataset_access","google_bigquery_dataset_iam_binding","google_bigquery_dataset_iam_member","google_bigquery_dataset_iam_policy","google_bigquery_job","google_bigquery_reservation","google_bigquery_reservation_assignment","google_bigquery_routine","google_bigquery_table","google_bigquery_table_iam_binding","google_bigquery_table_iam_member","google_bigquery_table_iam_policy","google_bigtable_app_profile","google_bigtable_gc_policy","google_bigtable_instance","google_bigtable_instance_iam_binding","google_bigtable_instance_iam_member","google_bigtable_instance_iam_policy","google_bigtable_table","google_bigtable_table_iam_binding","google_bigtable_table_iam_member","google_bigtable_table_iam_policy","google_binary_authorization_attestor","google_binary_authorization_policy","google_certificate_manager_certificate","google_certificate_manager_certificate_map","google_certificate_manager_certificate_map_entry","google_certificate_manager_dns_authorization","google_certificate_manager_trust_config","google_cloud_ids_endpoint","google_cloud_run_domain_mapping","google_cloud_run_service","google_cloud_run_service_iam_member","google_cloud_run_v2_job","google_cloud_run_v2_service","google_cloud_scheduler_job","google_cloud_tasks_queue","google_cloudbuild_trigger","google_cloudbuild_worker_pool","google_cloudfunctions2_function","google_cloudfunctions_function","google_cloudfunctions_function_iam_member","google_composer_environment","google_compute_address","google_compute_attached_disk","google_compute_autoscaler","google_compute_backend_bucket","google_compute_backend_bucket_signed_url_key","google_compute_backend_service","google_compute_backend_service_signed_url_key","google_compute_disk","google_compute_disk_iam_member","google_compute_disk_resource_policy_attachment","google_compute_external_vpn_gateway","google_compute_firewall","google_compute_firewall_policy","google_compute_firewall_policy_association","google_compute_firewall_policy_rule","google_compute_forwarding_rule","google_compute_global_address","google_compute_global_forwarding_rule","google_compute_global_network_endpoint","google_compute_gl
obal_network_endpoint_group","google_compute_ha_vpn_gateway","google_compute_health_check","google_compute_http_health_check","google_compute_https_health_check","google_compute_image","google_compute_image_iam_member","google_compute_instance","google_compute_instance_from_template","google_compute_instance_group","google_compute_instance_group_manager","google_compute_instance_group_named_port","google_compute_instance_iam_member","google_compute_instance_template","google_compute_interconnect_attachment","google_compute_managed_ssl_certificate","google_compute_network","google_compute_network_endpoint","google_compute_network_endpoint_group","google_compute_network_firewall_policy","google_compute_network_firewall_policy_association","google_compute_network_peering","google_compute_network_peering_routes_config","google_compute_node_group","google_compute_node_template","google_compute_packet_mirroring","google_compute_per_instance_config","google_compute_project_default_network_tier","google_compute_project_metadata","google_compute_project_metadata_item","google_compute_region_autoscaler","google_compute_region_backend_service","google_compute_region_disk","google_compute_region_disk_iam_member","google_compute_region_disk_resource_policy_attachment","google_compute_region_health_check","google_compute_region_instance_group_manager","google_compute_region_network_endpoint","google_compute_region_network_endpoint_group","google_compute_region_network_firewall_policy","google_compute_region_network_firewall_policy_association","google_compute_region_per_instance_config","google_compute_region_ssl_certificate","google_compute_region_target_http_proxy","google_compute_region_target_https_proxy","google_compute_region_target_tcp_proxy","google_compute_region_url_map","google_compute_reservation","google_compute_resource_policy","google_compute_route","google_compute_router","google_compute_router_interface","google_compute_router_nat","google_compute_router_peer","google_compute_security_policy","google_compute_service_attachment","google_compute_shared_vpc_host_project","google_compute_shared_vpc_service_project","google_compute_snapshot","google_compute_snapshot_iam_member","google_compute_ssl_certificate","google_compute_ssl_policy","google_compute_subnetwork","google_compute_subnetwork_iam_member","google_compute_target_grpc_proxy","google_compute_target_http_proxy","google_compute_target_https_proxy","google_compute_target_instance","google_compute_target_pool","google_compute_target_ssl_proxy","google_compute_target_tcp_proxy","google_compute_url_map","google_compute_vpn_gateway","google_compute_vpn_tunnel","google_container_analysis_note","google_container_attached_cluster","google_container_aws_cluster","google_container_aws_node_pool","google_container_azure_client","google_container_azure_cluster","google_container_azure_node_pool","google_container_cluster","google_container_node_pool","google_container_registry","google_data_catalog_entry","google_data_catalog_entry_group","google_data_catalog_policy_tag","google_data_catalog_tag","google_data_catalog_tag_template","google_data_catalog_taxonomy","google_data_fusion_instance","google_data_loss_prevention_deidentify_template","google_data_loss_prevention_inspect_template","google_data_loss_prevention_job_trigger","google_data_loss_prevention_stored_info_type","google_dataflow_job","google_dataplex_aspect_type","google_dataplex_asset","google_dataplex_lake","google_dataplex_zone","google_dataproc_autoscaling_policy","google_datapro
c_cluster","google_dataproc_job","google_dataproc_metastore_service","google_dataproc_workflow_template","google_datastore_index","google_datastream_connection_profile","google_datastream_private_connection","google_dialogflow_cx_agent","google_dialogflow_cx_entity_type","google_dialogflow_cx_environment","google_dialogflow_cx_flow","google_dialogflow_cx_intent","google_dialogflow_cx_page","google_dialogflow_cx_version","google_dialogflow_cx_webhook","google_dns_managed_zone","google_dns_managed_zone_iam_member","google_dns_policy","google_dns_record_set","google_dns_response_policy","google_dns_response_policy_rule","google_document_ai_processor","google_essential_contacts_contact","google_eventarc_channel","google_eventarc_google_channel_config","google_eventarc_trigger","google_filestore_backup","google_filestore_instance","google_filestore_snapshot","google_firebaserules_release","google_firebaserules_ruleset","google_folder","google_folder_iam_member","google_gke_backup_backup_plan","google_gke_hub_membership","google_gke_hub_membership_iam_member","google_healthcare_consent_store","google_healthcare_dataset","google_healthcare_dataset_iam_member","google_iam_workload_identity_pool","google_iam_workload_identity_pool_provider","google_iap_app_engine_service_iam_member","google_iap_app_engine_version_iam_member","google_iap_tunnel_iam_member","google_iap_web_backend_service_iam_member","google_iap_web_iam_member","google_iap_web_type_app_engine_iam_member","google_iap_web_type_compute_iam_member","google_identity_platform_default_supported_idp_config","google_identity_platform_inbound_saml_config","google_identity_platform_oauth_idp_config","google_identity_platform_project_default_config","google_identity_platform_tenant","google_identity_platform_tenant_default_supported_idp_config","google_identity_platform_tenant_inbound_saml_config","google_identity_platform_tenant_oauth_idp_config","google_kms_crypto_key","google_kms_crypto_key_iam_member","google_kms_crypto_key_version","google_kms_key_ring","google_kms_key_ring_iam_member","google_kms_key_ring_import_job","google_kms_secret_ciphertext","google_logging_folder_bucket_config","google_logging_folder_exclusion","google_logging_folder_sink","google_logging_log_view","google_logging_metric","google_logging_project_bucket_config","google_logging_project_exclusion","google_logging_project_sink","google_memcache_instance","google_ml_engine_model","google_monitoring_alert_policy","google_monitoring_custom_service","google_monitoring_dashboard","google_monitoring_group","google_monitoring_metric_descriptor","google_monitoring_notification_channel","google_monitoring_service","google_monitoring_slo","google_monitoring_uptime_check_config","google_network_connectivity_hub","google_network_connectivity_service_connection_policy","google_network_connectivity_spoke","google_network_management_connectivity_test","google_notebooks_environment","google_notebooks_instance","google_notebooks_instance_iam_member","google_notebooks_runtime","google_notebooks_runtime_iam_member","google_org_policy_policy","google_organization_iam_audit_config","google_organization_iam_custom_role","google_organization_iam_member","google_os_config_os_policy_assignment","google_os_config_patch_deployment","google_os_login_ssh_public_key","google_privateca_ca_pool","google_privateca_ca_pool_iam_member","google_privateca_certificate","google_privateca_certificate_authority","google_privateca_certificate_template","google_privateca_certificate_template_iam_member","google_p
roject","google_project_default_service_accounts","google_project_iam_audit_config","google_project_iam_custom_role","google_project_iam_member","google_project_service","google_project_usage_export_bucket","google_pubsub_lite_reservation","google_pubsub_lite_subscription","google_pubsub_lite_topic","google_pubsub_schema","google_pubsub_subscription","google_pubsub_subscription_iam_member","google_pubsub_topic","google_pubsub_topic_iam_member","google_redis_cluster","google_redis_instance","google_secret_manager_secret","google_secret_manager_secret_iam_member","google_secret_manager_secret_version","google_service_account","google_service_account_iam_member","google_service_account_key","google_service_networking_connection","google_service_networking_peered_dns_domain","google_sourcerepo_repository","google_sourcerepo_repository_iam_member","google_spanner_database","google_spanner_database_iam_member","google_spanner_instance","google_spanner_instance_iam_member","google_sql_database","google_sql_database_instance","google_sql_source_representation_instance","google_sql_ssl_cert","google_sql_user","google_storage_bucket","google_storage_bucket_access_control","google_storage_bucket_acl","google_storage_bucket_iam_member","google_storage_bucket_object","google_storage_default_object_access_control","google_storage_default_object_acl","google_storage_hmac_key","google_storage_notification","google_storage_object_access_control","google_storage_object_acl","google_storage_transfer_agent_pool","google_tags_tag_binding","google_tags_tag_key","google_tags_tag_value","google_tpu_node","google_vertex_ai_dataset","google_vertex_ai_featurestore","google_vertex_ai_featurestore_entitytype","google_vertex_ai_tensorboard","google_vpc_access_connector","google_workflows_workflow"] \ No newline at end of file 
+["google_access_context_manager_access_level","google_access_context_manager_access_level_condition","google_access_context_manager_access_policy","google_access_context_manager_access_policy_iam_member","google_access_context_manager_service_perimeter","google_access_context_manager_service_perimeter_resource","google_active_directory_domain","google_alloydb_backup","google_alloydb_cluster","google_alloydb_instance","google_apigee_addons_config","google_apigee_endpoint_attachment","google_apigee_envgroup","google_apigee_envgroup_attachment","google_apigee_environment","google_apigee_environment_iam_member","google_apigee_instance","google_apigee_instance_attachment","google_apigee_nat_address","google_apigee_organization","google_apigee_sync_authorization","google_app_engine_application","google_app_engine_application_url_dispatch_rules","google_app_engine_firewall_rule","google_app_engine_service_network_settings","google_app_engine_standard_app_version","google_artifact_registry_repository","google_artifact_registry_repository_iam_member","google_beyondcorp_app_connection","google_beyondcorp_app_connector","google_beyondcorp_app_gateway","google_bigquery_analytics_hub_data_exchange","google_bigquery_analytics_hub_data_exchange_iam_member","google_bigquery_analytics_hub_listing","google_bigquery_connection","google_bigquery_data_transfer_config","google_bigquery_dataset","google_bigquery_dataset_access","google_bigquery_dataset_iam_binding","google_bigquery_dataset_iam_member","google_bigquery_dataset_iam_policy","google_bigquery_job","google_bigquery_reservation","google_bigquery_reservation_assignment","google_bigquery_routine","google_bigquery_table","google_bigquery_table_iam_binding","google_bigquery_table_iam_member","google_bigquery_table_iam_policy","google_bigtable_app_profile","google_bigtable_gc_policy","google_bigtable_instance","google_bigtable_instance_iam_binding","google_bigtable_instance_iam_member","google_bigtable_instance_iam_policy","google_bigtable_table","google_bigtable_table_iam_binding","google_bigtable_table_iam_member","google_bigtable_table_iam_policy","google_binary_authorization_attestor","google_binary_authorization_policy","google_certificate_manager_certificate","google_certificate_manager_certificate_map","google_certificate_manager_certificate_map_entry","google_certificate_manager_dns_authorization","google_certificate_manager_trust_config","google_cloud_ids_endpoint","google_cloud_run_domain_mapping","google_cloud_run_service","google_cloud_run_service_iam_member","google_cloud_run_v2_job","google_cloud_run_v2_service","google_cloud_scheduler_job","google_cloud_tasks_queue","google_cloudbuild_trigger","google_cloudbuild_worker_pool","google_cloudfunctions2_function","google_cloudfunctions_function","google_cloudfunctions_function_iam_member","google_composer_environment","google_compute_address","google_compute_attached_disk","google_compute_autoscaler","google_compute_backend_bucket","google_compute_backend_bucket_signed_url_key","google_compute_backend_service","google_compute_backend_service_signed_url_key","google_compute_disk","google_compute_disk_iam_member","google_compute_disk_resource_policy_attachment","google_compute_external_vpn_gateway","google_compute_firewall","google_compute_firewall_policy","google_compute_firewall_policy_association","google_compute_firewall_policy_rule","google_compute_forwarding_rule","google_compute_global_address","google_compute_global_forwarding_rule","google_compute_global_network_endpoint","google_compute_gl
obal_network_endpoint_group","google_compute_ha_vpn_gateway","google_compute_health_check","google_compute_http_health_check","google_compute_https_health_check","google_compute_image","google_compute_image_iam_member","google_compute_instance","google_compute_instance_from_template","google_compute_instance_group","google_compute_instance_group_manager","google_compute_instance_group_named_port","google_compute_instance_iam_member","google_compute_instance_template","google_compute_interconnect_attachment","google_compute_managed_ssl_certificate","google_compute_network","google_compute_network_endpoint","google_compute_network_endpoint_group","google_compute_network_firewall_policy","google_compute_network_firewall_policy_association","google_compute_network_peering","google_compute_network_peering_routes_config","google_compute_node_group","google_compute_node_template","google_compute_packet_mirroring","google_compute_per_instance_config","google_compute_project_default_network_tier","google_compute_project_metadata","google_compute_project_metadata_item","google_compute_region_autoscaler","google_compute_region_backend_service","google_compute_region_disk","google_compute_region_disk_iam_member","google_compute_region_disk_resource_policy_attachment","google_compute_region_health_check","google_compute_region_instance_group_manager","google_compute_region_network_endpoint","google_compute_region_network_endpoint_group","google_compute_region_network_firewall_policy","google_compute_region_network_firewall_policy_association","google_compute_region_per_instance_config","google_compute_region_ssl_certificate","google_compute_region_target_http_proxy","google_compute_region_target_https_proxy","google_compute_region_target_tcp_proxy","google_compute_region_url_map","google_compute_reservation","google_compute_resource_policy","google_compute_route","google_compute_router","google_compute_router_interface","google_compute_router_nat","google_compute_router_peer","google_compute_security_policy","google_compute_service_attachment","google_compute_shared_vpc_host_project","google_compute_shared_vpc_service_project","google_compute_snapshot","google_compute_snapshot_iam_member","google_compute_ssl_certificate","google_compute_ssl_policy","google_compute_subnetwork","google_compute_subnetwork_iam_member","google_compute_target_grpc_proxy","google_compute_target_http_proxy","google_compute_target_https_proxy","google_compute_target_instance","google_compute_target_pool","google_compute_target_ssl_proxy","google_compute_target_tcp_proxy","google_compute_url_map","google_compute_vpn_gateway","google_compute_vpn_tunnel","google_container_analysis_note","google_container_attached_cluster","google_container_aws_cluster","google_container_aws_node_pool","google_container_azure_client","google_container_azure_cluster","google_container_azure_node_pool","google_container_cluster","google_container_node_pool","google_container_registry","google_data_catalog_entry","google_data_catalog_entry_group","google_data_catalog_policy_tag","google_data_catalog_tag","google_data_catalog_tag_template","google_data_catalog_taxonomy","google_data_fusion_instance","google_data_loss_prevention_deidentify_template","google_data_loss_prevention_inspect_template","google_data_loss_prevention_job_trigger","google_data_loss_prevention_stored_info_type","google_dataflow_job","google_dataplex_aspect_type","google_dataplex_asset","google_dataplex_lake","google_dataplex_lake_iam_binding","google_dataplex_lake_iam_member","googl
e_dataplex_lake_iam_policy","google_dataplex_zone","google_dataproc_autoscaling_policy","google_dataproc_cluster","google_dataproc_job","google_dataproc_metastore_service","google_dataproc_workflow_template","google_datastore_index","google_datastream_connection_profile","google_datastream_private_connection","google_dialogflow_cx_agent","google_dialogflow_cx_entity_type","google_dialogflow_cx_environment","google_dialogflow_cx_flow","google_dialogflow_cx_intent","google_dialogflow_cx_page","google_dialogflow_cx_version","google_dialogflow_cx_webhook","google_dns_managed_zone","google_dns_managed_zone_iam_member","google_dns_policy","google_dns_record_set","google_dns_response_policy","google_dns_response_policy_rule","google_document_ai_processor","google_essential_contacts_contact","google_eventarc_channel","google_eventarc_google_channel_config","google_eventarc_trigger","google_filestore_backup","google_filestore_instance","google_filestore_snapshot","google_firebaserules_release","google_firebaserules_ruleset","google_folder","google_folder_iam_member","google_gke_backup_backup_plan","google_gke_hub_membership","google_gke_hub_membership_iam_member","google_healthcare_consent_store","google_healthcare_dataset","google_healthcare_dataset_iam_member","google_iam_workload_identity_pool","google_iam_workload_identity_pool_provider","google_iap_app_engine_service_iam_member","google_iap_app_engine_version_iam_member","google_iap_tunnel_iam_member","google_iap_web_backend_service_iam_member","google_iap_web_iam_member","google_iap_web_type_app_engine_iam_member","google_iap_web_type_compute_iam_member","google_identity_platform_default_supported_idp_config","google_identity_platform_inbound_saml_config","google_identity_platform_oauth_idp_config","google_identity_platform_project_default_config","google_identity_platform_tenant","google_identity_platform_tenant_default_supported_idp_config","google_identity_platform_tenant_inbound_saml_config","google_identity_platform_tenant_oauth_idp_config","google_kms_crypto_key","google_kms_crypto_key_iam_member","google_kms_crypto_key_version","google_kms_key_ring","google_kms_key_ring_iam_member","google_kms_key_ring_import_job","google_kms_secret_ciphertext","google_logging_folder_bucket_config","google_logging_folder_exclusion","google_logging_folder_sink","google_logging_log_view","google_logging_metric","google_logging_project_bucket_config","google_logging_project_exclusion","google_logging_project_sink","google_memcache_instance","google_ml_engine_model","google_monitoring_alert_policy","google_monitoring_custom_service","google_monitoring_dashboard","google_monitoring_group","google_monitoring_metric_descriptor","google_monitoring_notification_channel","google_monitoring_service","google_monitoring_slo","google_monitoring_uptime_check_config","google_network_connectivity_hub","google_network_connectivity_service_connection_policy","google_network_connectivity_spoke","google_network_management_connectivity_test","google_notebooks_environment","google_notebooks_instance","google_notebooks_instance_iam_member","google_notebooks_runtime","google_notebooks_runtime_iam_member","google_org_policy_policy","google_organization_iam_audit_config","google_organization_iam_custom_role","google_organization_iam_member","google_os_config_os_policy_assignment","google_os_config_patch_deployment","google_os_login_ssh_public_key","google_privateca_ca_pool","google_privateca_ca_pool_iam_member","google_privateca_certificate","google_privateca_certificate_authorit
y","google_privateca_certificate_template","google_privateca_certificate_template_iam_member","google_project","google_project_default_service_accounts","google_project_iam_audit_config","google_project_iam_custom_role","google_project_iam_member","google_project_service","google_project_usage_export_bucket","google_pubsub_lite_reservation","google_pubsub_lite_subscription","google_pubsub_lite_topic","google_pubsub_schema","google_pubsub_subscription","google_pubsub_subscription_iam_member","google_pubsub_topic","google_pubsub_topic_iam_member","google_redis_cluster","google_redis_instance","google_secret_manager_secret","google_secret_manager_secret_iam_member","google_secret_manager_secret_version","google_service_account","google_service_account_iam_member","google_service_account_key","google_service_networking_connection","google_service_networking_peered_dns_domain","google_sourcerepo_repository","google_sourcerepo_repository_iam_member","google_spanner_database","google_spanner_database_iam_member","google_spanner_instance","google_spanner_instance_iam_member","google_sql_database","google_sql_database_instance","google_sql_source_representation_instance","google_sql_ssl_cert","google_sql_user","google_storage_bucket","google_storage_bucket_access_control","google_storage_bucket_acl","google_storage_bucket_iam_member","google_storage_bucket_object","google_storage_default_object_access_control","google_storage_default_object_acl","google_storage_hmac_key","google_storage_notification","google_storage_object_access_control","google_storage_object_acl","google_storage_transfer_agent_pool","google_tags_tag_binding","google_tags_tag_key","google_tags_tag_value","google_tpu_node","google_vertex_ai_dataset","google_vertex_ai_featurestore","google_vertex_ai_featurestore_entitytype","google_vertex_ai_tensorboard","google_vpc_access_connector","google_workflows_workflow"] \ No newline at end of file diff --git a/examples-generated/dataplex/v1beta1/lakeiampolicy.yaml b/examples-generated/dataplex/v1beta1/lakeiampolicy.yaml new file mode 100644 index 000000000..acce7e39c --- /dev/null +++ b/examples-generated/dataplex/v1beta1/lakeiampolicy.yaml @@ -0,0 +1,20 @@ +apiVersion: dataplex.gcp.upbound.io/v1beta1 +kind: LakeIAMPolicy +metadata: + annotations: + meta.upbound.io/example-id: dataplex/v1beta1/lakeiampolicy + labels: + testing.upbound.io/example-name: policy + name: policy +spec: + forProvider: + lakeSelector: + matchLabels: + testing.upbound.io/example-name: example + locationSelector: + matchLabels: + testing.upbound.io/example-name: example + policyData: ${data.google_iam_policy.admin.policy_data} + projectSelector: + matchLabels: + testing.upbound.io/example-name: example diff --git a/examples/dataplex/v1beta1/lakeiambinding.yaml b/examples/dataplex/v1beta1/lakeiambinding.yaml new file mode 100644 index 000000000..b744a8f72 --- /dev/null +++ b/examples/dataplex/v1beta1/lakeiambinding.yaml @@ -0,0 +1,32 @@ +apiVersion: dataplex.gcp.upbound.io/v1beta1 +kind: Lake +metadata: + annotations: + meta.upbound.io/example-id: dataplex/v1beta1/lake + labels: + testing.upbound.io/example-name: my-lake + name: my-lake +spec: + forProvider: + description: my Lake for DCL + displayName: my Lake for DCL + labels: + my-lake: exists + location: us-central1 +--- +apiVersion: dataplex.gcp.upbound.io/v1beta1 +kind: LakeIAMBinding +metadata: + annotations: + meta.upbound.io/example-id: dataplex/v1beta1/lakeiambinding + upjet.upbound.io/manual-intervention: "Dependent resource skipped: Policy data refers to 
an existing user account. Note: One can only use Predefined Dataplex roles or Basic roles for LakeIAMBinding policyData." + labels: + testing.upbound.io/example-name: binding + name: binding +spec: + forProvider: + role: roles/dataplex.editor + members: + - user:jane@example.com + lake: my-lake + location: us-central1 diff --git a/examples/dataplex/v1beta1/lakeiammember.yaml b/examples/dataplex/v1beta1/lakeiammember.yaml new file mode 100644 index 000000000..5ccb340a2 --- /dev/null +++ b/examples/dataplex/v1beta1/lakeiammember.yaml @@ -0,0 +1,31 @@ +apiVersion: dataplex.gcp.upbound.io/v1beta1 +kind: Lake +metadata: + annotations: + meta.upbound.io/example-id: dataplex/v1beta1/lake + labels: + testing.upbound.io/example-name: secondary + name: secondary +spec: + forProvider: + description: secondary Lake for DCL + displayName: secondary Lake for DCL + labels: + my-lake: exists + location: us-central1 +--- +apiVersion: dataplex.gcp.upbound.io/v1beta1 +kind: LakeIAMMember +metadata: + annotations: + meta.upbound.io/example-id: dataplex/v1beta1/lakeiammember + upjet.upbound.io/manual-intervention: "Dependent resource skipped: Policy data refers to an existing user account. Note: One can only use Predefined Dataplex roles or Basic roles for LakeIAMMember policyData." + labels: + testing.upbound.io/example-name: binding + name: binding +spec: + forProvider: + role: roles/dataplex.viewer + member: user:jane@example.com + lake: secondary + location: us-central1 \ No newline at end of file diff --git a/examples/dataplex/v1beta1/lakeiampolicy.yaml b/examples/dataplex/v1beta1/lakeiampolicy.yaml new file mode 100644 index 000000000..b801a0c19 --- /dev/null +++ b/examples/dataplex/v1beta1/lakeiampolicy.yaml @@ -0,0 +1,42 @@ +apiVersion: dataplex.gcp.upbound.io/v1beta1 +kind: Lake +metadata: + annotations: + meta.upbound.io/example-id: dataplex/v1beta1/lake + labels: + testing.upbound.io/example-name: my-second-lake + name: my-second-lake +spec: + forProvider: + description: my second Lake for DCL + displayName: my second Lake for DCL + labels: + my-second-lake: exists + location: us-central1 +--- +apiVersion: dataplex.gcp.upbound.io/v1beta1 +kind: LakeIAMPolicy +metadata: + annotations: + meta.upbound.io/example-id: dataplex/v1beta1/lakeiampolicy + upjet.upbound.io/manual-intervention: "Dependent resource skipped: Policy data refers to an existing user account. Note: One can only use Predefined Dataplex roles or Basic roles for LakeIAMPolicy policyData." + labels: + testing.upbound.io/example-name: binding + name: binding +spec: + forProvider: + lakeSelector: + matchLabels: + testing.upbound.io/example-name: my-second-lake + location: us-central1 + policyData: | + { + "bindings":[ + { + "members":[ + "user:jane@example.com" + ], + "role": "roles/dataplex.admin" + } + ] + } \ No newline at end of file diff --git a/internal/controller/dataplex/lakeiambinding/zz_controller.go b/internal/controller/dataplex/lakeiambinding/zz_controller.go new file mode 100755 index 000000000..873072c88 --- /dev/null +++ b/internal/controller/dataplex/lakeiambinding/zz_controller.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. 
+ +package lakeiambinding + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/dataplex/v1beta1" + features "github.com/upbound/provider-gcp/internal/features" +) + +// Setup adds a controller that reconciles LakeIAMBinding managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.LakeIAMBinding_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.LakeIAMBinding_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.LakeIAMBinding_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["google_dataplex_lake_iam_binding"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.LakeIAMBinding_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1beta1.LakeIAMBinding + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta1.LakeIAMBinding{}). 
+ Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1beta1.LakeIAMBinding") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.LakeIAMBindingList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.LakeIAMBindingList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.LakeIAMBinding_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1beta1.LakeIAMBinding{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/dataplex/lakeiammember/zz_controller.go b/internal/controller/dataplex/lakeiammember/zz_controller.go new file mode 100755 index 000000000..7d48b6c52 --- /dev/null +++ b/internal/controller/dataplex/lakeiammember/zz_controller.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package lakeiammember + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/dataplex/v1beta1" + features "github.com/upbound/provider-gcp/internal/features" +) + +// Setup adds a controller that reconciles LakeIAMMember managed resources. 
+func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.LakeIAMMember_GroupVersionKind.String()) + var initializers managed.InitializerChain + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.LakeIAMMember_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.LakeIAMMember_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["google_dataplex_lake_iam_member"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.LakeIAMMember_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1beta1.LakeIAMMember + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta1.LakeIAMMember{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1beta1.LakeIAMMember") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.LakeIAMMemberList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.LakeIAMMemberList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.LakeIAMMember_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1beta1.LakeIAMMember{}, eventHandler). 
+ Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/dataplex/lakeiampolicy/zz_controller.go b/internal/controller/dataplex/lakeiampolicy/zz_controller.go new file mode 100755 index 000000000..882f5b3ee --- /dev/null +++ b/internal/controller/dataplex/lakeiampolicy/zz_controller.go @@ -0,0 +1,92 @@ +// SPDX-FileCopyrightText: 2024 The Crossplane Authors +// +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by upjet. DO NOT EDIT. + +package lakeiampolicy + +import ( + "time" + + "github.com/crossplane/crossplane-runtime/pkg/connection" + "github.com/crossplane/crossplane-runtime/pkg/event" + "github.com/crossplane/crossplane-runtime/pkg/ratelimiter" + "github.com/crossplane/crossplane-runtime/pkg/reconciler/managed" + xpresource "github.com/crossplane/crossplane-runtime/pkg/resource" + "github.com/crossplane/crossplane-runtime/pkg/statemetrics" + tjcontroller "github.com/crossplane/upjet/pkg/controller" + "github.com/crossplane/upjet/pkg/controller/handler" + "github.com/crossplane/upjet/pkg/metrics" + "github.com/pkg/errors" + ctrl "sigs.k8s.io/controller-runtime" + + v1beta1 "github.com/upbound/provider-gcp/apis/dataplex/v1beta1" + features "github.com/upbound/provider-gcp/internal/features" +) + +// Setup adds a controller that reconciles LakeIAMPolicy managed resources. +func Setup(mgr ctrl.Manager, o tjcontroller.Options) error { + name := managed.ControllerName(v1beta1.LakeIAMPolicy_GroupVersionKind.String()) + var initializers managed.InitializerChain + initializers = append(initializers, managed.NewNameAsExternalName(mgr.GetClient())) + cps := []managed.ConnectionPublisher{managed.NewAPISecretPublisher(mgr.GetClient(), mgr.GetScheme())} + if o.SecretStoreConfigGVK != nil { + cps = append(cps, connection.NewDetailsManager(mgr.GetClient(), *o.SecretStoreConfigGVK, connection.WithTLSConfig(o.ESSOptions.TLSConfig))) + } + eventHandler := handler.NewEventHandler(handler.WithLogger(o.Logger.WithValues("gvk", v1beta1.LakeIAMPolicy_GroupVersionKind))) + ac := tjcontroller.NewAPICallbacks(mgr, xpresource.ManagedKind(v1beta1.LakeIAMPolicy_GroupVersionKind), tjcontroller.WithEventHandler(eventHandler), tjcontroller.WithStatusUpdates(false)) + opts := []managed.ReconcilerOption{ + managed.WithExternalConnecter( + tjcontroller.NewTerraformPluginSDKAsyncConnector(mgr.GetClient(), o.OperationTrackerStore, o.SetupFn, o.Provider.Resources["google_dataplex_lake_iam_policy"], + tjcontroller.WithTerraformPluginSDKAsyncLogger(o.Logger), + tjcontroller.WithTerraformPluginSDKAsyncConnectorEventHandler(eventHandler), + tjcontroller.WithTerraformPluginSDKAsyncCallbackProvider(ac), + tjcontroller.WithTerraformPluginSDKAsyncMetricRecorder(metrics.NewMetricRecorder(v1beta1.LakeIAMPolicy_GroupVersionKind, mgr, o.PollInterval)), + tjcontroller.WithTerraformPluginSDKAsyncManagementPolicies(o.Features.Enabled(features.EnableBetaManagementPolicies)))), + managed.WithLogger(o.Logger.WithValues("controller", name)), + managed.WithRecorder(event.NewAPIRecorder(mgr.GetEventRecorderFor(name))), + managed.WithFinalizer(tjcontroller.NewOperationTrackerFinalizer(o.OperationTrackerStore, xpresource.NewAPIFinalizer(mgr.GetClient(), managed.FinalizerName))), + managed.WithTimeout(3 * time.Minute), + managed.WithInitializers(initializers), + managed.WithConnectionPublishers(cps...), + managed.WithPollInterval(o.PollInterval), + } + if o.PollJitter != 0 { + opts = append(opts, managed.WithPollJitterHook(o.PollJitter)) + } + if 
o.Features.Enabled(features.EnableBetaManagementPolicies) { + opts = append(opts, managed.WithManagementPolicies()) + } + if o.MetricOptions != nil { + opts = append(opts, managed.WithMetricRecorder(o.MetricOptions.MRMetrics)) + } + + // register webhooks for the kind v1beta1.LakeIAMPolicy + // if they're enabled. + if o.StartWebhooks { + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta1.LakeIAMPolicy{}). + Complete(); err != nil { + return errors.Wrap(err, "cannot register webhook for the kind v1beta1.LakeIAMPolicy") + } + } + + if o.MetricOptions != nil && o.MetricOptions.MRStateMetrics != nil { + stateMetricsRecorder := statemetrics.NewMRStateRecorder( + mgr.GetClient(), o.Logger, o.MetricOptions.MRStateMetrics, &v1beta1.LakeIAMPolicyList{}, o.MetricOptions.PollStateMetricInterval, + ) + if err := mgr.Add(stateMetricsRecorder); err != nil { + return errors.Wrap(err, "cannot register MR state metrics recorder for kind v1beta1.LakeIAMPolicyList") + } + } + + r := managed.NewReconciler(mgr, xpresource.ManagedKind(v1beta1.LakeIAMPolicy_GroupVersionKind), opts...) + + return ctrl.NewControllerManagedBy(mgr). + Named(name). + WithOptions(o.ForControllerRuntime()). + WithEventFilter(xpresource.DesiredStateChanged()). + Watches(&v1beta1.LakeIAMPolicy{}, eventHandler). + Complete(ratelimiter.NewReconciler(name, r, o.GlobalRateLimiter)) +} diff --git a/internal/controller/zz_dataplex_setup.go b/internal/controller/zz_dataplex_setup.go index d8b5b1ff8..bee9b6328 100755 --- a/internal/controller/zz_dataplex_setup.go +++ b/internal/controller/zz_dataplex_setup.go @@ -12,6 +12,9 @@ import ( aspecttype "github.com/upbound/provider-gcp/internal/controller/dataplex/aspecttype" asset "github.com/upbound/provider-gcp/internal/controller/dataplex/asset" lake "github.com/upbound/provider-gcp/internal/controller/dataplex/lake" + lakeiambinding "github.com/upbound/provider-gcp/internal/controller/dataplex/lakeiambinding" + lakeiammember "github.com/upbound/provider-gcp/internal/controller/dataplex/lakeiammember" + lakeiampolicy "github.com/upbound/provider-gcp/internal/controller/dataplex/lakeiampolicy" zone "github.com/upbound/provider-gcp/internal/controller/dataplex/zone" ) @@ -22,6 +25,9 @@ func Setup_dataplex(mgr ctrl.Manager, o controller.Options) error { aspecttype.Setup, asset.Setup, lake.Setup, + lakeiambinding.Setup, + lakeiammember.Setup, + lakeiampolicy.Setup, zone.Setup, } { if err := setup(mgr, o); err != nil { diff --git a/internal/controller/zz_monolith_setup.go b/internal/controller/zz_monolith_setup.go index 8d8068985..eab2c08ae 100755 --- a/internal/controller/zz_monolith_setup.go +++ b/internal/controller/zz_monolith_setup.go @@ -223,6 +223,9 @@ import ( aspecttype "github.com/upbound/provider-gcp/internal/controller/dataplex/aspecttype" asset "github.com/upbound/provider-gcp/internal/controller/dataplex/asset" lake "github.com/upbound/provider-gcp/internal/controller/dataplex/lake" + lakeiambinding "github.com/upbound/provider-gcp/internal/controller/dataplex/lakeiambinding" + lakeiammember "github.com/upbound/provider-gcp/internal/controller/dataplex/lakeiammember" + lakeiampolicy "github.com/upbound/provider-gcp/internal/controller/dataplex/lakeiampolicy" zone "github.com/upbound/provider-gcp/internal/controller/dataplex/zone" autoscalingpolicy "github.com/upbound/provider-gcp/internal/controller/dataproc/autoscalingpolicy" clusterdataproc "github.com/upbound/provider-gcp/internal/controller/dataproc/cluster" @@ -592,6 +595,9 @@ func Setup_monolith(mgr ctrl.Manager, o 
controller.Options) error { aspecttype.Setup, asset.Setup, lake.Setup, + lakeiambinding.Setup, + lakeiammember.Setup, + lakeiampolicy.Setup, zone.Setup, autoscalingpolicy.Setup, clusterdataproc.Setup, diff --git a/package/crds/dataplex.gcp.upbound.io_lakeiambindings.yaml b/package/crds/dataplex.gcp.upbound.io_lakeiambindings.yaml new file mode 100644 index 000000000..6d1e81c86 --- /dev/null +++ b/package/crds/dataplex.gcp.upbound.io_lakeiambindings.yaml @@ -0,0 +1,407 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: lakeiambindings.dataplex.gcp.upbound.io +spec: + group: dataplex.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: LakeIAMBinding + listKind: LakeIAMBindingList + plural: lakeiambindings + singular: lakeiambinding + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: LakeIAMBinding is the Schema for the LakeIAMBindings API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LakeIAMBindingSpec defines the desired state of LakeIAMBinding + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + condition: + properties: + description: + type: string + expression: + type: string + title: + type: string + type: object + lake: + type: string + location: + type: string + members: + items: + type: string + type: array + x-kubernetes-list-type: set + project: + type: string + role: + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. 
The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + condition: + properties: + description: + type: string + expression: + type: string + title: + type: string + type: object + lake: + type: string + location: + type: string + members: + items: + type: string + type: array + x-kubernetes-list-type: set + project: + type: string + role: + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. 
+ properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. 
+ type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.lake is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.lake) + || (has(self.initProvider) && has(self.initProvider.lake))' + - message: spec.forProvider.members is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.members) + || (has(self.initProvider) && has(self.initProvider.members))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: LakeIAMBindingStatus defines the observed state of LakeIAMBinding. + properties: + atProvider: + properties: + condition: + properties: + description: + type: string + expression: + type: string + title: + type: string + type: object + etag: + type: string + id: + type: string + lake: + type: string + location: + type: string + members: + items: + type: string + type: array + x-kubernetes-list-type: set + project: + type: string + role: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dataplex.gcp.upbound.io_lakeiammembers.yaml b/package/crds/dataplex.gcp.upbound.io_lakeiammembers.yaml new file mode 100644 index 000000000..c47108320 --- /dev/null +++ b/package/crds/dataplex.gcp.upbound.io_lakeiammembers.yaml @@ -0,0 +1,397 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: lakeiammembers.dataplex.gcp.upbound.io +spec: + group: dataplex.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: LakeIAMMember + listKind: LakeIAMMemberList + plural: lakeiammembers + singular: lakeiammember + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: LakeIAMMember is the Schema for the LakeIAMMembers API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LakeIAMMemberSpec defines the desired state of LakeIAMMember + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + condition: + properties: + description: + type: string + expression: + type: string + title: + type: string + type: object + lake: + type: string + location: + type: string + member: + type: string + project: + type: string + role: + type: string + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. 
+ The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + condition: + properties: + description: + type: string + expression: + type: string + title: + type: string + type: object + lake: + type: string + location: + type: string + member: + type: string + project: + type: string + role: + type: string + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. + ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.lake is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.lake) + || (has(self.initProvider) && has(self.initProvider.lake))' + - message: spec.forProvider.member is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.member) + || (has(self.initProvider) && has(self.initProvider.member))' + - message: spec.forProvider.role is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.role) + || (has(self.initProvider) && has(self.initProvider.role))' + status: + description: LakeIAMMemberStatus defines the observed state of LakeIAMMember. 
+ properties: + atProvider: + properties: + condition: + properties: + description: + type: string + expression: + type: string + title: + type: string + type: object + etag: + type: string + id: + type: string + lake: + type: string + location: + type: string + member: + type: string + project: + type: string + role: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/package/crds/dataplex.gcp.upbound.io_lakeiampolicies.yaml b/package/crds/dataplex.gcp.upbound.io_lakeiampolicies.yaml new file mode 100644 index 000000000..d1fbca4db --- /dev/null +++ b/package/crds/dataplex.gcp.upbound.io_lakeiampolicies.yaml @@ -0,0 +1,750 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: lakeiampolicies.dataplex.gcp.upbound.io +spec: + group: dataplex.gcp.upbound.io + names: + categories: + - crossplane + - managed + - gcp + kind: LakeIAMPolicy + listKind: LakeIAMPolicyList + plural: lakeiampolicies + singular: lakeiampolicy + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: LakeIAMPolicy is the Schema for the LakeIAMPolicys API. Collection + of resources to manage IAM policy for Dataplex Lake + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: LakeIAMPolicySpec defines the desired state of LakeIAMPolicy + properties: + deletionPolicy: + default: Delete + description: |- + DeletionPolicy specifies what will happen to the underlying external + when this managed resource is deleted - either "Delete" or "Orphan" the + external resource. + This field is planned to be deprecated in favor of the ManagementPolicies + field in a future release. Currently, both could be set independently and + non-default values would be honored if the feature flag is enabled. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + enum: + - Orphan + - Delete + type: string + forProvider: + properties: + lake: + description: Used to find the parent resource to bind the IAM + policy to + type: string + lakeRef: + description: Reference to a Lake in dataplex to populate lake. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lakeSelector: + description: Selector for a Lake in dataplex to populate lake. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + location: + type: string + locationRef: + description: Reference to a Lake in dataplex to populate location. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + locationSelector: + description: Selector for a Lake in dataplex to populate location. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyData: + description: |- + The policy data generated by + a google_iam_policy data source. + type: string + project: + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + type: string + projectRef: + description: Reference to a Lake in dataplex to populate project. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. 
Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + projectSelector: + description: Selector for a Lake in dataplex to populate project. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + initProvider: + description: |- + THIS IS A BETA FIELD. It will be honored + unless the Management Policies feature flag is disabled. + InitProvider holds the same fields as ForProvider, with the exception + of Identifier and other resource reference fields. The fields that are + in InitProvider are merged into ForProvider when the resource is created. + The same fields are also added to the terraform ignore_changes hook, to + avoid updating them after creation. This is useful for fields that are + required on creation, but we do not desire to update them after creation, + for example because of an external controller is managing them, like an + autoscaler. + properties: + lake: + description: Used to find the parent resource to bind the IAM + policy to + type: string + lakeRef: + description: Reference to a Lake in dataplex to populate lake. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + lakeSelector: + description: Selector for a Lake in dataplex to populate lake. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. 
+ type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + policyData: + description: |- + The policy data generated by + a google_iam_policy data source. + type: string + project: + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + type: string + projectRef: + description: Reference to a Lake in dataplex to populate project. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + projectSelector: + description: Selector for a Lake in dataplex to populate project. + properties: + matchControllerRef: + description: |- + MatchControllerRef ensures an object with the same controller reference + as the selecting object is selected. + type: boolean + matchLabels: + additionalProperties: + type: string + description: MatchLabels ensures an object with matching labels + is selected. + type: object + policy: + description: Policies for selection. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + type: object + type: object + managementPolicies: + default: + - '*' + description: |- + THIS IS A BETA FIELD. It is on by default but can be opted out + through a Crossplane feature flag. 
+ ManagementPolicies specify the array of actions Crossplane is allowed to + take on the managed and external resources. + This field is planned to replace the DeletionPolicy field in a future + release. Currently, both could be set independently and non-default + values would be honored if the feature flag is enabled. If both are + custom, the DeletionPolicy field will be ignored. + See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 + and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md + items: + description: |- + A ManagementAction represents an action that the Crossplane controllers + can take on an external resource. + enum: + - Observe + - Create + - Update + - Delete + - LateInitialize + - '*' + type: string + type: array + providerConfigRef: + default: + name: default + description: |- + ProviderConfigReference specifies how the provider that will be used to + create, observe, update, and delete this managed resource should be + configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: |- + PublishConnectionDetailsTo specifies the connection secret config which + contains a name, metadata and a reference to secret store config to + which any connection details for this managed resource should be written. + Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: |- + SecretStoreConfigRef specifies which secret store config should be used + for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: |- + Resolution specifies whether resolution of this reference is required. + The default is 'Required', which means the reconcile will fail if the + reference cannot be resolved. 'Optional' means this reference will be + a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: |- + Resolve specifies when this reference should be resolved. The default + is 'IfNotPresent', which will attempt to resolve the reference only when + the corresponding field is not present. Use 'Always' to resolve the + reference on every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations are the annotations to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.annotations". + - It is up to Secret Store implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: |- + Labels are the labels/tags to be added to connection secret. + - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store types. + type: object + type: + description: |- + Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: |- + WriteConnectionSecretToReference specifies the namespace and name of a + Secret to which any connection details for this managed resource should + be written. Connection details frequently include the endpoint, username, + and password required to connect to the managed resource. + This field is planned to be replaced in a future release in favor of + PublishConnectionDetailsTo. Currently, both could be set independently + and connection details would be published to both without affecting + each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + x-kubernetes-validations: + - message: spec.forProvider.policyData is a required parameter + rule: '!(''*'' in self.managementPolicies || ''Create'' in self.managementPolicies + || ''Update'' in self.managementPolicies) || has(self.forProvider.policyData) + || (has(self.initProvider) && has(self.initProvider.policyData))' + status: + description: LakeIAMPolicyStatus defines the observed state of LakeIAMPolicy. + properties: + atProvider: + properties: + etag: + description: (Computed) The etag of the IAM policy. + type: string + id: + type: string + lake: + description: Used to find the parent resource to bind the IAM + policy to + type: string + location: + type: string + policyData: + description: |- + The policy data generated by + a google_iam_policy data source. + type: string + project: + description: |- + The ID of the project in which the resource belongs. + If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time this condition transitioned from one + status to another. + format: date-time + type: string + message: + description: |- + A Message containing details about this condition's last transition from + one status to another, if any. + type: string + observedGeneration: + description: |- + ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: |- + Type of this condition. At most one of each condition type may apply to + a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: |- + ObservedGeneration is the latest metadata.generation + which resulted in either a ready state, or stalled due to error + it can not recover from without human intervention. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {}
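
For review context only (this is not part of the generated diff): the three CRDs added above are cluster-scoped and expose the usual spec.forProvider fields for Dataplex Lake IAM. A minimal sketch of manifests for the new kinds, assembled from the spec.forProvider schemas in these CRDs, might look like the following. Every concrete value (project, lake, location, role, principal, and the inline policy document) is a placeholder assumption; only the apiVersion, kind, and field names come from the CRDs in this change.

# Hypothetical example manifests for the new Dataplex Lake IAM kinds.
# All projects, lakes, roles, and principals below are placeholders.
---
apiVersion: dataplex.gcp.upbound.io/v1beta1
kind: LakeIAMBinding
metadata:
  name: example-lake-iam-binding        # placeholder name
spec:
  forProvider:
    lake: example-lake                  # placeholder lake ID
    location: us-central1               # placeholder location
    project: example-project            # placeholder project ID
    role: roles/viewer                  # placeholder role
    members:
      - user:jane@example.com           # placeholder principal
  providerConfigRef:
    name: default
---
apiVersion: dataplex.gcp.upbound.io/v1beta1
kind: LakeIAMMember
metadata:
  name: example-lake-iam-member         # placeholder name
spec:
  forProvider:
    lake: example-lake                  # placeholder lake ID
    location: us-central1               # placeholder location
    project: example-project            # placeholder project ID
    role: roles/viewer                  # placeholder role
    member: user:jane@example.com       # placeholder principal
  providerConfigRef:
    name: default
---
apiVersion: dataplex.gcp.upbound.io/v1beta1
kind: LakeIAMPolicy
metadata:
  name: example-lake-iam-policy         # placeholder name
spec:
  forProvider:
    # Unlike the binding/member kinds, LakeIAMPolicy can resolve the lake
    # through a reference or selector instead of a literal string.
    lakeRef:
      name: example-lake                # placeholder Lake managed resource name
    location: us-central1               # placeholder location
    project: example-project            # placeholder project ID
    # Placeholder policy document; per the CRD this is the policy data
    # normally produced by a google_iam_policy data source.
    policyData: '{"bindings":[{"role":"roles/viewer","members":["user:jane@example.com"]}]}'
  providerConfigRef:
    name: default

As with the underlying google_dataplex_lake_iam_* Terraform resources these kinds wrap (see the Provider.Resources keys in the new controllers), the policy kind is typically authoritative for the lake's entire IAM policy, the binding kind for a single role, and the member kind for a single principal within a role, so the three should generally not be mixed on the same lake.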