Merge remote-tracking branch 'origin/master' into u/cuza/making-skj-and-ckj-aware-of-downthenup-bounces-across-namespaces

* origin/master:
  Update paasta_tools/kubernetes_tools.py
  Released 0.205.1 via make release
  Use the correct registry_uri to check if we need to sudo
  Released 0.205.0 via make release
  Respect a service's docker_registry for adhoc spark-runs (#3728)
  Update requirements-dev-minimal.txt
  Making paasta playground not override existing confs
  add VSCode debugpy as dev requirement
  added paasta-crons make target
  Adding LimitRanges for paasta managed namespaces
cuza committed Oct 25, 2023
2 parents c24dc52 + 406bb5b commit ad79077
Showing 12 changed files with 148 additions and 12 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -50,6 +50,7 @@ general_itests/fake_etc_paasta/clusters.json
pip-wheel-metadata
debian/debhelper-build-stamp
unique-run
.vault-token

# Coverage artifacts
.coverage
20 changes: 20 additions & 0 deletions Makefile
@@ -158,13 +158,33 @@ setup-kubernetes-job: k8s_fake_cluster generate_deployments_for_service
export PAASTA_TEST_CLUSTER=kind-${USER}-k8s-test;\
.tox/py38-linux/bin/python -m paasta_tools.list_kubernetes_service_instances -d ./soa_config_playground --shuffle --group-lines 1 | xargs --no-run-if-empty .tox/py38-linux/bin/python -m paasta_tools.setup_kubernetes_job -d ./soa_config_playground -c kind-${USER}-k8s-test

.PHONY: cleanup-kubernetes-jobs
cleanup-kubernetes-jobs:
export KUBECONFIG=./k8s_itests/kubeconfig;\
export PAASTA_SYSTEM_CONFIG_DIR=./etc_paasta_playground/;\
export PAASTA_TEST_CLUSTER=kind-${USER}-k8s-test;\
.tox/py38-linux/bin/python -m paasta_tools.cleanup_kubernetes_jobs -d ./soa_config_playground -c kind-${USER}-k8s-test --force

.PHONY: paasta-secrets-sync
paasta-secrets-sync: setup-kubernetes-job .vault-token
export KUBECONFIG=./k8s_itests/kubeconfig;\
export PAASTA_SYSTEM_CONFIG_DIR=./etc_paasta_playground/;\
export PAASTA_TEST_CLUSTER=kind-${USER}-k8s-test;\
{ .tox/py38-linux/bin/python -m paasta_tools.list_kubernetes_service_instances -d ./soa_config_playground ; echo -n \ _shared; } | cut -f1 -d"." | uniq | shuf | xargs .tox/py38-linux/bin/python -m paasta_tools.kubernetes.bin.paasta_secrets_sync -v -d ./soa_config_playground -t ./.vault-token

define ANNOUNCE_CRONS_BODY
The following PaaSTA cron jobs will run in an infinite loop using the PaaSTA Playground k8s cluster:
- setup-kubernetes-job
- cleanup-kubernetes-jobs
- paasta-secrets-sync
- generate_deployments_for_service
endef
export ANNOUNCE_CRONS_BODY
.PHONY: paasta-crons
paasta-crons:
@echo "$$ANNOUNCE_CRONS_BODY"
while true; do make paasta-secrets-sync && make cleanup-kubernetes-jobs; sleep 5; done

.vault-token:
export VAULT_ADDR=https://vault-devc.yelpcorp.com:8200 ;\
export VAULT_SKIP_VERIFY=true ;\
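For readers following the playground setup, here is a minimal Python sketch (not part of the diff) of what the new paasta-crons recipe amounts to: re-run the secrets sync and job cleanup targets every few seconds until interrupted. It assumes `make` and the targets above are available, along with the kind cluster and .vault-token prerequisites they declare.

# Minimal sketch: the same loop as the paasta-crons recipe, via subprocess calls to make.
import subprocess
import time

while True:
    # Equivalent of `make paasta-secrets-sync && make cleanup-kubernetes-jobs`
    if subprocess.call(["make", "paasta-secrets-sync"]) == 0:
        subprocess.call(["make", "cleanup-kubernetes-jobs"])
    time.sleep(5)
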
21 changes: 21 additions & 0 deletions debian/changelog
@@ -1,3 +1,24 @@
paasta-tools (0.205.1) xenial; urgency=medium

* 0.205.1 tagged with 'make release'
Commit: Merge pull request #3730 from
Yelp/u/jfong/fix_spark_run_docker_reg Use the correct registry_uri
to check if we need to sudo

-- Jen Patague <[email protected]> Mon, 23 Oct 2023 17:08:53 -0700

paasta-tools (0.205.0) xenial; urgency=medium

* 0.205.0 tagged with 'make release'
Commit: Respect a service's docker_registry for adhoc spark-runs
(#3728) There are some services where we want to ensure that a
specialized docker registry is always used - even for adhoc
development runs. This change is loosely based on what we do in
push-to-registry, where we read service.yaml to see if a specialized
registry needs to be used.

-- Luis Perez <[email protected]> Mon, 23 Oct 2023 13:45:05 -0700

paasta-tools (0.204.2) xenial; urgency=medium

* 0.204.2 tagged with 'make release'
2 changes: 1 addition & 1 deletion paasta_tools/__init__.py
@@ -17,4 +17,4 @@
# setup phase, the dependencies may not exist on disk yet.
#
# Don't bump version manually. See `make release` docs in ./Makefile
__version__ = "0.204.2"
__version__ = "0.205.1"
20 changes: 17 additions & 3 deletions paasta_tools/cli/cmds/spark_run.py
@@ -17,6 +17,7 @@

import yaml
from boto3.exceptions import Boto3Error
from service_configuration_lib import read_service_configuration
from service_configuration_lib import spark_config
from service_configuration_lib.spark_config import get_aws_credentials
from service_configuration_lib.spark_config import get_grafana_url
@@ -218,7 +219,7 @@ def add_subparser(subparsers):
list_parser.add_argument(
"--docker-registry",
help="Docker registry to push the Spark image built.",
default=DEFAULT_SPARK_DOCKER_REGISTRY,
default=None,
)

list_parser.add_argument(
@@ -1036,6 +1037,14 @@ def get_docker_cmd(
return inject_spark_conf_str(original_docker_cmd, spark_conf_str)


def _get_adhoc_docker_registry(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> str:
if service is None:
raise NotImplementedError('"None" is not a valid service')

service_configuration = read_service_configuration(service, soa_dir)
return service_configuration.get("docker_registry", DEFAULT_SPARK_DOCKER_REGISTRY)


def build_and_push_docker_image(args: argparse.Namespace) -> Optional[str]:
"""
Build an image if the default Spark service image is not preferred.
@@ -1059,14 +1068,19 @@ def build_and_push_docker_image(args: argparse.Namespace) -> Optional[str]:
if cook_return != 0:
return None

docker_url = f"{args.docker_registry}/{docker_tag}"
registry_uri = args.docker_registry or _get_adhoc_docker_registry(
service=args.service,
soa_dir=args.yelpsoa_config_root,
)

docker_url = f"{registry_uri}/{docker_tag}"
command = f"docker tag {docker_tag} {docker_url}"
print(PaastaColors.grey(command))
retcode, _ = _run(command, stream=True)
if retcode != 0:
return None

if args.docker_registry != DEFAULT_SPARK_DOCKER_REGISTRY:
if registry_uri != DEFAULT_SPARK_DOCKER_REGISTRY:
command = "sudo -H docker push %s" % docker_url
else:
command = "docker push %s" % docker_url
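To summarize the spark_run change, a hedged sketch of the registry resolution order it introduces (resolve_registry and push_command are illustrative helper names and the default registry value is a placeholder, not the real constant): an explicit --docker-registry flag wins, otherwise the service's docker_registry from its service.yaml, otherwise the default registry; sudo is only used when pushing to a non-default registry.

# Illustrative helpers mirroring the resolution order in build_and_push_docker_image.
from service_configuration_lib import read_service_configuration

DEFAULT_SPARK_DOCKER_REGISTRY = "example-registry.invalid"  # placeholder default

def resolve_registry(cli_registry, service, soa_dir):
    # 1) an explicit --docker-registry flag always wins
    if cli_registry:
        return cli_registry
    # 2) otherwise fall back to the service's own docker_registry in soa-configs
    service_conf = read_service_configuration(service, soa_dir)
    return service_conf.get("docker_registry", DEFAULT_SPARK_DOCKER_REGISTRY)

def push_command(registry_uri, docker_url):
    # sudo is only needed for pushes to a specialized (non-default) registry
    if registry_uri != DEFAULT_SPARK_DOCKER_REGISTRY:
        return f"sudo -H docker push {docker_url}"
    return f"docker push {docker_url}"
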
1 change: 1 addition & 0 deletions paasta_tools/contrib/create_paasta_playground.py
@@ -46,6 +46,7 @@ def main():
src="./k8s_itests/deployments/paasta/fake_soa_config",
dst="soa_config_playground",
values=values_path,
overwrite=False,
)


14 changes: 8 additions & 6 deletions paasta_tools/contrib/render_template.py
@@ -31,23 +31,25 @@ def render_file(src, dst, values):
new.write(replace(old.read(), values))


def render(src, dst, values={}, exclude={}):
def render(src, dst, values={}, exclude={}, overwrite=True):
if os.path.isfile(src):
render_file(src, dst, values)
if overwrite:
render_file(src, dst, values)
return
for f in os.scandir(src):
if f.name.startswith(".") or f.path in exclude:
continue
if os.path.isfile(f.path):
render_file(f.path, dst, values)
if overwrite:
render_file(f.path, dst, values)
else:
new_dst = replace(f"{dst}/{f.name}", values)
try:
os.makedirs(new_dst, exist_ok=True)
except OSError as e:
if e.errno != os.errno.EEXIST:
raise
render(f.path, new_dst, values, exclude)
render(f.path, new_dst, values, exclude, overwrite)


def parse_args():
@@ -82,7 +84,7 @@ def parse_args():
return args


def render_values(src: str, dst: str, values: str) -> None:
def render_values(src: str, dst: str, values: str, overwrite=True) -> None:
if values is not None:
values = os.path.abspath(values)
# Validate src, dst, and values: dst must be a directory, src can be either a valid file or a directory, and values must be a valid file if provided.
@@ -108,7 +110,7 @@ def render_values(src: str, dst: str, values: str) -> None:
),
v,
)
render(src, dst, config_dict, {values})
render(src, dst, config_dict, {values}, overwrite)


def main():
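A hedged usage sketch of the new overwrite flag (the template and destination paths and the values map are illustrative): with overwrite=False, render() still walks the source tree and creates destination directories, but render_file() is skipped, so configs produced (or hand-edited) by an earlier run are not regenerated.

# Illustrative paths/values; shows the intent of overwrite=False for the playground configs.
from paasta_tools.contrib.render_template import render

values = {"cluster": "kind-user-k8s-test"}  # example substitution map

# First run: render every template file into the playground soa-configs dir.
render("./templates", "./soa_config_playground", values)

# Later runs: keep whatever is already there; only the directory tree is ensured.
render("./templates", "./soa_config_playground", values, overwrite=False)
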
46 changes: 45 additions & 1 deletion paasta_tools/kubernetes_tools.py
@@ -72,6 +72,9 @@
from kubernetes.client import V1KeyToPath
from kubernetes.client import V1LabelSelector
from kubernetes.client import V1Lifecycle
from kubernetes.client import V1LimitRange
from kubernetes.client import V1LimitRangeItem
from kubernetes.client import V1LimitRangeSpec
from kubernetes.client import V1Namespace
from kubernetes.client import V1Node
from kubernetes.client import V1NodeAffinity
@@ -2676,14 +2679,15 @@ def ensure_namespace(kube_client: KubeClient, namespace: str) -> None:
kube_client.core.create_namespace(body=paasta_namespace)

ensure_paasta_api_rolebinding(kube_client, namespace)
ensure_paasta_namespace_limits(kube_client, namespace)


def ensure_paasta_api_rolebinding(kube_client: KubeClient, namespace: str) -> None:
rolebindings = get_all_role_bindings(kube_client, namespace=namespace)
rolebinding_names = [item.metadata.name for item in rolebindings]
if "paasta-api-server-per-namespace" not in rolebinding_names:
log.warning(
f"Creating rolebinding paasta-api-server-per-namespace as it does not exist"
f"Creating rolebinding paasta-api-server-per-namespace on {namespace} namespace as it does not exist"
)
role_binding = V1RoleBinding(
metadata=V1ObjectMeta(
@@ -2707,6 +2711,39 @@ def ensure_paasta_api_rolebinding(kube_client: KubeClient, namespace: str) -> None:
)


def ensure_paasta_namespace_limits(kube_client: KubeClient, namespace: str) -> None:
limits = get_all_limit_ranges(kube_client, namespace=namespace)
limits_names = {item.metadata.name for item in limits}
if "limit-mem-cpu-disk-per-container" not in limits_names:
log.warning(
f"Creating limit: limit-mem-cpu-disk-per-container on {namespace} namespace as it does not exist"
)
limit = V1LimitRange(
metadata=V1ObjectMeta(
name="limit-mem-cpu-disk-per-container",
namespace=namespace,
),
spec=V1LimitRangeSpec(
limits=[
V1LimitRangeItem(
type="Container",
default={
"cpu": "1",
"memory": "1024Mi",
"ephemeral-storage": "1Gi",
},
default_request={
"cpu": "1",
"memory": "1024Mi",
"ephemeral-storage": "1Gi",
},
)
]
),
)
kube_client.core.create_namespaced_limit_range(namespace=namespace, body=limit)


def list_deployments_in_all_namespaces(
kube_client: KubeClient, label_selector: str
) -> List[KubeDeployment]:
@@ -3893,6 +3930,13 @@ def get_all_role_bindings(
return kube_client.rbac.list_namespaced_role_binding(namespace=namespace).items


def get_all_limit_ranges(
kube_client: KubeClient,
namespace: str,
) -> Sequence[V1LimitRange]:
return kube_client.core.list_namespaced_limit_range(namespace).items


_RE_NORMALIZE_IAM_ROLE = re.compile(r"[^0-9a-zA-Z]+")


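For context, a hedged sketch (the namespace name is hypothetical) of how the new LimitRange can be observed once ensure_namespace() has run: containers created in a paasta-managed namespace without explicit requests/limits now pick up the 1 CPU / 1024Mi memory / 1Gi ephemeral-storage defaults.

# Hypothetical verification against a playground cluster; assumes KubeClient can be
# constructed from the local kubeconfig as elsewhere in paasta_tools.
from paasta_tools.kubernetes_tools import KubeClient, ensure_namespace

kube_client = KubeClient()
ensure_namespace(kube_client, namespace="paastasvc-example")  # idempotent: creates only what is missing

limit_range = kube_client.core.read_namespaced_limit_range(
    name="limit-mem-cpu-disk-per-container",
    namespace="paastasvc-example",
)
# Expect {'cpu': '1', 'memory': '1024Mi', 'ephemeral-storage': '1Gi'}
print(limit_range.spec.limits[0].default)
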
3 changes: 3 additions & 0 deletions requirements-dev-minimal.txt
@@ -1,6 +1,9 @@
astroid
asynctest
coverage
# VSCode debugging requirement
# See https://code.visualstudio.com/docs/python/debugging#_local-script-debugging
debugpy
docutils
flake8
freezegun
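debugpy is the remote-debugging server that VS Code attaches to; a brief hedged sketch (the port choice is arbitrary, not a paasta convention) of how it is typically used during local development:

# Example only: start a debug server inside a paasta_tools script and attach to it
# from VS Code ("Remote Attach" on the chosen port).
import debugpy

debugpy.listen(("localhost", 5678))  # arbitrary example port
print("Waiting for VS Code to attach on port 5678...")
debugpy.wait_for_client()            # block until the IDE connects
debugpy.breakpoint()                 # pause here once attached
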
1 change: 1 addition & 0 deletions requirements-dev.txt
@@ -5,6 +5,7 @@ asynctest==0.12.0
Babel==2.9.1
cfgv==2.0.1
coverage==6.5.0
debugpy==1.8.0
distlib==0.3.4
exceptiongroup==1.1.2
filelock==3.0.12
29 changes: 29 additions & 0 deletions tests/test_kubernetes_tools.py
@@ -89,6 +89,7 @@
from paasta_tools.kubernetes_tools import create_stateful_set
from paasta_tools.kubernetes_tools import ensure_namespace
from paasta_tools.kubernetes_tools import ensure_paasta_api_rolebinding
from paasta_tools.kubernetes_tools import ensure_paasta_namespace_limits
from paasta_tools.kubernetes_tools import filter_nodes_by_blacklist
from paasta_tools.kubernetes_tools import filter_pods_by_service_instance
from paasta_tools.kubernetes_tools import force_delete_pods
@@ -3022,6 +3023,8 @@ def test_KubeClient():
def test_ensure_namespace_doesnt_create_if_namespace_exists():
with mock.patch(
"paasta_tools.kubernetes_tools.ensure_paasta_api_rolebinding", autospec=True
), mock.patch(
"paasta_tools.kubernetes_tools.ensure_paasta_namespace_limits", autospec=True
):
mock_metadata = mock.Mock()
type(mock_metadata).name = "paasta"
@@ -3036,6 +3039,8 @@ def test_ensure_namespace_doesnt_create_if_namespace_exists():
def test_ensure_namespace_kube_system():
with mock.patch(
"paasta_tools.kubernetes_tools.ensure_paasta_api_rolebinding", autospec=True
), mock.patch(
"paasta_tools.kubernetes_tools.ensure_paasta_namespace_limits", autospec=True
):
mock_metadata = mock.Mock()
type(mock_metadata).name = "kube-system"
@@ -3050,6 +3055,8 @@ def test_ensure_namespace_creates_namespace_if_doesnt_exist():
def test_ensure_namespace_creates_namespace_if_doesnt_exist():
with mock.patch(
"paasta_tools.kubernetes_tools.ensure_paasta_api_rolebinding", autospec=True
), mock.patch(
"paasta_tools.kubernetes_tools.ensure_paasta_namespace_limits", autospec=True
):
mock_namespaces = mock.Mock(items=[])
mock_client = mock.Mock(
@@ -3085,6 +3092,28 @@ def test_ensure_paasta_api_rolebinding_doesnt_create_if_exists():
assert not mock_client.rbac.create_namespaced_role_binding.called


def test_ensure_paasta_namespace_limits_creates_if_not_exist():
mock_limits = mock.Mock(items=[])
mock_client = mock.Mock(
core=mock.Mock(list_namespaced_limit_range=mock.Mock(return_value=mock_limits)),
)

ensure_paasta_namespace_limits(mock_client, namespace="paastasvc-cool-service-name")
assert mock_client.core.create_namespaced_limit_range.called


def test_ensure_paasta_namespace_limits_doesnt_create_if_exists():
mock_metadata = mock.Mock()
type(mock_metadata).name = "limit-mem-cpu-disk-per-container"
mock_limits = mock.Mock(items=[mock.Mock(metadata=mock_metadata)])
mock_client = mock.Mock(
core=mock.Mock(list_namespaced_limit_range=mock.Mock(return_value=mock_limits)),
)

ensure_paasta_namespace_limits(mock_client, namespace="paastasvc-cool-service-name")
assert not mock_client.core.create_namespaced_limit_range.called


@pytest.mark.parametrize(
"addl_labels,replicas",
(
2 changes: 1 addition & 1 deletion yelp_package/Makefile
@@ -13,7 +13,7 @@
# limitations under the License.

# Edit this release and run "make release"
RELEASE=0.204.2
RELEASE=0.205.1

SHELL=/bin/bash

