diff --git a/.bazelrc b/.bazelrc index a96ed18f2..33b532e7c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -2,7 +2,7 @@ # Required by envoy and its tests build --define=grpc_no_ares=true -build --cxxopt=-std=c++17 --host_cxxopt=-std=c++17 +build --cxxopt=-std=c++20 --host_cxxopt=-std=c++20 # We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace. build --define absl=1 diff --git a/Makefile b/Makefile index efc1b84e9..7405e9ef5 100644 --- a/Makefile +++ b/Makefile @@ -253,8 +253,13 @@ tools.buildifier: go install github.com/bazelbuild/buildtools/buildifier@latest; \ fi +# beautysh is already installed in the newer prow image with pipx +# TODO(shuoyang2016): remove this after we switch prow to the +# latest version. tools.beautysh: - @command -v beautysh >/dev/null ; if [ $$? -ne 0 ]; then \ + @command -v pipx >/dev/null;\ + PIPX_INSTALLED=$$?;\ + command -v beautysh >/dev/null; if [ $$? -ne 0 ] && [ $${PIPX_INSTALLED} -ne 0 ]; then \ echo "--> installing beautysh"; \ pip3 install --user beautysh; \ fi @@ -298,8 +303,13 @@ clang-format: @echo $(CPP_PROTO_FILES) | xargs clang-format-14 -i shell-format: tools.beautysh - @echo "--> formatting shell scripts with 'beautysh' tool" - @git ls-files "*.sh" | xargs ${HOME}/.local/bin/beautysh -i 2 + @command -v pipx >/dev/null;\ + PIPX_INSTALLED=$$?;\ + echo "--> formatting shell scripts with 'beautysh' tool"; \ + if [ $${PIPX_INSTALLED} -ne 0 ]; then \ + git ls-files "*.sh" | xargs ${HOME}/.local/bin/beautysh -i 2; \ + else git ls-files "*.sh" | xargs pipx run beautysh -i 2; \ + fi .PHONY: format.check format.check: tools.goimports diff --git a/WORKSPACE b/WORKSPACE index 1cdad11ca..56eb30c8b 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -39,9 +39,9 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # 3) Check if envoy_build_config/extensions_build_config.bzl is up-to-date. # Try to match it with the one in source/extensions and comment out unneeded extensions. -ENVOY_SHA1 = "6b9db09c69965d5bfb37bdd29693f8b7f9e9e9ec" # v1.27.1, 2023.10.11 +ENVOY_SHA1 = "816188b86a0a52095b116b107f576324082c7c02" # v1.30.1 -ENVOY_SHA256 = "d6cde20343d67fa4e25b9047bd805c522ece80b8058f1f311cb90ee7f3287f63" +ENVOY_SHA256 = "41064ee8fbafc2ac3fd0d6531a106139adbbea3585defff22fdb99e06d7862e5" http_archive( name = "envoy", @@ -105,9 +105,10 @@ pip_install( requirements = "@com_github_grpc_grpc//:requirements.bazel.txt", ) +load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps", "grpc_test_only_deps") + # ============================================================================== load("@com_github_grpc_grpc//bazel:grpc_python_deps.bzl", "grpc_python_deps") -load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps", "grpc_test_only_deps") grpc_python_deps() diff --git a/docker/Dockerfile-prow-env b/docker/Dockerfile-prow-env index 3e6083881..a263c434b 100644 --- a/docker/Dockerfile-prow-env +++ b/docker/Dockerfile-prow-env @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
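# [Editor's note] The Makefile edits above make the beautysh targets pipx-aware.
# A minimal sketch of that fallback logic, using only names that appear in the
# diff (the standalone-script form is illustrative, not part of the patch):
#
#   if command -v pipx >/dev/null 2>&1; then
#     git ls-files "*.sh" | xargs pipx run beautysh -i 2        # newer prow image
#   else
#     command -v beautysh >/dev/null 2>&1 || pip3 install --user beautysh
#     git ls-files "*.sh" | xargs "${HOME}/.local/bin/beautysh" -i 2
#   fi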
-FROM debian:buster
+FROM debian:bullseye
 LABEL maintainer="esp-eng@google.com"
 # add env we can debug with the image name:tag
@@ -22,13 +22,13 @@ ENV IMAGE=${IMAGE_ARG}
 RUN apt-get update -y
 RUN apt-get -y install \
-  wget make cmake python3 python3-pip pkg-config coreutils \
+  wget make cmake python3 python3-pip python3-venv pkg-config coreutils \
   zlib1g-dev curl libtool automake zip time rsync ninja-build \
   git bash-completion jq default-jdk python3-distutils libicu-dev libbrotli-dev
 # install nodejs, which is needed for integration tests
-RUN sh -c 'curl -sL https://deb.nodesource.com/setup_12.x | bash -'
+RUN sh -c 'curl -sL https://deb.nodesource.com/setup_20.x | bash -'
 RUN apt-get install -y nodejs
 # install Bazelisk
@@ -38,7 +38,7 @@ RUN wget -O /usr/local/bin/bazelisk https://github.com/bazelbuild/bazelisk/relea
 # install clang-14 and associated tools (new envoy)
 # see https://apt.llvm.org/ for exhaustive list of all llvm related packages
 RUN wget -O- https://apt.llvm.org/llvm-snapshot.gpg.key| apt-key add - && \
-  echo "deb https://apt.llvm.org/buster/ llvm-toolchain-buster-14 main" >> /etc/apt/sources.list && \
+  echo "deb https://apt.llvm.org/bullseye/ llvm-toolchain-bullseye-14 main" >> /etc/apt/sources.list && \
   apt-get update && \
   apt-get install -y llvm-14 llvm-14-dev libclang-14-dev clang-14 \
   lld-14 clang-tools-14 clang-format-14 libc++-dev xz-utils libclang-rt-14-dev
@@ -59,6 +59,11 @@ RUN wget -q "https://go.dev/dl/${GO_TARBALL}" && \
 # Install buildifier
 RUN go install github.com/bazelbuild/buildtools/buildifier@latest
+RUN pip install pipx
+
+# Install beautysh
+RUN pipx install beautysh
+
 # install gcloud package
 RUN curl https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz > /tmp/google-cloud-sdk.tar.gz
 RUN mkdir -p /usr/local/gcloud \
diff --git a/docker/Dockerfile-proxy.tmpl b/docker/Dockerfile-proxy.tmpl
index db2150757..4272dc200 100644
--- a/docker/Dockerfile-proxy.tmpl
+++ b/docker/Dockerfile-proxy.tmpl
@@ -18,10 +18,7 @@ FROM _ENVOY_IMAGE_SHA_NAME
 RUN apk add --update --no-cache openssl
 # Install python3
-ENV PYTHONUNBUFFERED=1
-RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
-RUN python3 -m ensurepip
-RUN pip3 install --no-cache --upgrade pip setuptools
+RUN apk add --no-cache python3 py3-pip
 ENV PATH /bin:$PATH
diff --git a/prow/e2e-gcloud-build-image.sh b/prow/e2e-gcloud-build-image.sh
index b8bf64ee0..d27a7d064 100755
--- a/prow/e2e-gcloud-build-image.sh
+++ b/prow/e2e-gcloud-build-image.sh
@@ -73,28 +73,28 @@ echo "=== Test 1: Specify a fully qualified version. ==="
 EXPECTED_IMAGE_NAME=$(formImageName "2.7.0")
 cleanupOldImage "${EXPECTED_IMAGE_NAME}"
 ${ROOT}/docker/serverless/gcloud_build_image \
-    -s "${SERVICE_NAME}" \
-    -c "${CONFIG_ID}" \
-    -p "${PROJECT_NAME}" \
-    -v "2.7.0"
+  -s "${SERVICE_NAME}" \
+  -c "${CONFIG_ID}" \
+  -p "${PROJECT_NAME}" \
+  -v "2.7.0"
 expectImage "${EXPECTED_IMAGE_NAME}"
 echo "=== Test 2: Specify a minor version. ==="
 EXPECTED_IMAGE_NAME=$(formImageName "2.4.0")
 cleanupOldImage "${EXPECTED_IMAGE_NAME}"
 ${ROOT}/docker/serverless/gcloud_build_image \
-    -s "${SERVICE_NAME}" \
-    -c "${CONFIG_ID}" \
-    -p "${PROJECT_NAME}" \
-    -v "2.4"
+  -s "${SERVICE_NAME}" \
+  -c "${CONFIG_ID}" \
+  -p "${PROJECT_NAME}" \
+  -v "2.4"
 expectImage "${EXPECTED_IMAGE_NAME}"
 echo "=== Test 3: Specify an invalid version fails. ==="
 if ${ROOT}/docker/serverless/gcloud_build_image \
-    -s "${SERVICE_NAME}" \
-    -c "${CONFIG_ID}" \
-    -p "${PROJECT_NAME}" \
-    -v "2.11.47"; then
+  -s "${SERVICE_NAME}" \
+  -c "${CONFIG_ID}" \
+  -p "${PROJECT_NAME}" \
+  -v "2.11.47"; then
   error_exit "Script should fail for invalid version."
 else
   echo "Script failed as expected."
@@ -104,21 +104,21 @@ echo "=== Test 4: Specify a custom image. ==="
 EXPECTED_IMAGE_NAME=$(formImageName "custom")
 cleanupOldImage "${EXPECTED_IMAGE_NAME}"
 ${ROOT}/docker/serverless/gcloud_build_image \
-    -s "${SERVICE_NAME}" \
-    -c "${CONFIG_ID}" \
-    -p "${PROJECT_NAME}" \
-    -i "gcr.io/cloudesf-testing/apiproxy-serverless:gcloud-build-image-test"
+  -s "${SERVICE_NAME}" \
+  -c "${CONFIG_ID}" \
+  -p "${PROJECT_NAME}" \
+  -i "gcr.io/cloudesf-testing/apiproxy-serverless:gcloud-build-image-test"
 expectImage "${EXPECTED_IMAGE_NAME}"
 echo "=== Test 5: Specify a GAR_REPOSITORY_IMAGE_PREFIX with -g flag. ==="
 EXPECTED_IMAGE_NAME=$(formGarImageName "2.30.3")
 cleanupOldImage "${EXPECTED_IMAGE_NAME}"
 ${ROOT}/docker/serverless/gcloud_build_image \
-    -s "${SERVICE_NAME}" \
-    -c "${CONFIG_ID}" \
-    -p "${PROJECT_NAME}" \
-    -v "2.30.3" \
-    -g "${GAR_REPOSITORY_IMAGE_PREFIX}"
+  -s "${SERVICE_NAME}" \
+  -c "${CONFIG_ID}" \
+  -p "${PROJECT_NAME}" \
+  -v "2.30.3" \
+  -g "${GAR_REPOSITORY_IMAGE_PREFIX}"
 expectImage "${EXPECTED_IMAGE_NAME}"
 echo "=== Test 6: When no ESP version is specified, the script uses the latest ESPv2 release. ==="
@@ -127,7 +127,7 @@ echo "=== Test 6: When no ESP version is specified, the script uses the latest E
 # That means we don't have a reliable way of checking if the output is correct.
 # So just test the script passes, and allow the developer to manually verify the output.
 ${ROOT}/docker/serverless/gcloud_build_image \
-    -s "${SERVICE_NAME}" \
-    -c "${CONFIG_ID}" \
-    -p "${PROJECT_NAME}"
+  -s "${SERVICE_NAME}" \
+  -c "${CONFIG_ID}" \
+  -p "${PROJECT_NAME}"
 echo ">>> WARNING: For the test above, manually verify the output version of the image is expected."
diff --git a/prow/gcpproxy-e2e.sh b/prow/gcpproxy-e2e.sh
index 444a7fac9..9eeea0aea 100755
--- a/prow/gcpproxy-e2e.sh
+++ b/prow/gcpproxy-e2e.sh
@@ -36,7 +36,7 @@ function runE2E() {
     g) local backend="${OPTARG}" ;;
     m) local apiproxy_image="${OPTARG}" ;;
     R) local rollout_strategy="${OPTARG}" ;;
-    S) local using_sa_cred='true';;
+    S) local using_sa_cred='true' ;;
     t) local test_type="$(echo ${OPTARG} | tr '[A-Z]' '[a-z]')" ;;
   esac
   done
@@ -89,7 +89,7 @@ case ${TEST_CASE} in
   "tight-http-bookstore-managed")
     runE2E -p "gke" -c "tight" -t "http" -g "bookstore" -R "managed" -m "$(get_proxy_image_name_with_sha)"
     ;;
-    "tight-http-bookstore-managed-using-sa-cred")
+  "tight-http-bookstore-managed-using-sa-cred")
     runE2E -p "gke" -c "tight" -t "http" -g "bookstore" -R "managed" -S -m "$(get_proxy_image_name_with_sha)"
     ;;
   "tight-grpc-echo-managed")
@@ -107,7 +107,7 @@ case ${TEST_CASE} in
   "cloud-run-cloud-function-http-bookstore")
    runE2E -p "cloud-run" -f "cloud-function" -t "http" -g "bookstore" -R "managed" -m "$(get_serverless_image_name_with_sha)"
     ;;
-    "cloud-run-app-engine-http-bookstore")
+  "cloud-run-app-engine-http-bookstore")
     runE2E -p "cloud-run" -f "app-engine" -t "http" -g "bookstore" -R "managed" -m "$(get_serverless_image_name_with_sha)"
     ;;
   "anthos-cloud-run-anthos-cloud-run-http-bookstore")
diff --git a/prow/gcpproxy-presubmit.sh b/prow/gcpproxy-presubmit.sh
index 01291dc9d..9af943298 100755
--- a/prow/gcpproxy-presubmit.sh
+++ b/prow/gcpproxy-presubmit.sh
@@ -63,7 +63,7 @@ make depend.install
 # GOOGLE_APPLICATION_CREDENTIALS will be set in our test environment but this env
 # var if set will be used by start_proxy.py as service_json_path.
- (unset GOOGLE_APPLICATION_CREDENTIALS; make test)
+(unset GOOGLE_APPLICATION_CREDENTIALS; make test)
 # c++ test
 echo '======================================================'
diff --git a/prow/janitor.sh b/prow/janitor.sh
index da538ca92..cd1b881ef 100755
--- a/prow/janitor.sh
+++ b/prow/janitor.sh
@@ -71,8 +71,8 @@ for PROJECT in ${PROJECT_IDS[@]}; do
   ### App Engines ###
   APP_ENGINES=$(gcloud app services list \
-      --filter="SERVICE ~ ^e2e-test-" \
-      --format="value(SERVICE)")
+    --filter="SERVICE ~ ^e2e-test-" \
+    --format="value(SERVICE)")
   for ap in ${APP_ENGINES} ; do
     echo "Deleting App Engine: ${ap}"
     gcloud app services delete "${ap}" \
@@ -86,7 +86,7 @@ for PROJECT in ${PROJECT_IDS[@]}; do
   FIREWALL_RULES=$(gcloud compute firewall-rules list \
     --filter="targetTags:(gke-e2e-cloud-run) \
       AND creationTimestamp < ${LIMIT_DATE}" \
-      --format="value(name)")
+    --format="value(name)")
   for rule in $FIREWALL_RULES ; do
     echo "Deleting Firewall rule: ${rule}"
@@ -101,19 +101,19 @@ for PROJECT in ${PROJECT_IDS[@]}; do
   TARGET_POOLS=$(gcloud compute target-pools list \
     --regions="${REGION}" \
-      --format='value(name)')
+    --format='value(name)')
   for targetpool in $TARGET_POOLS; do
     echo "Query Forwarding Rule for target pool ${targetpool}"
     forwardingitem=$(gcloud compute forwarding-rules list \
-        --filter=TARGET="https://www.googleapis.com/compute/v1/projects/$PROJECT/regions/$REGION/targetPools/$targetpool" \
+      --filter=TARGET="https://www.googleapis.com/compute/v1/projects/$PROJECT/regions/$REGION/targetPools/$targetpool" \
       --format='value(name)')
-      if [[ -z "$forwardingitem" ]]; then
-        echo "Deleting unused target pool ${targetpool}"
-        gcloud compute target-pools delete "${targetpool}" \
-          --region="${REGION}" \
-          --quiet
-      fi
+    if [[ -z "$forwardingitem" ]]; then
+      echo "Deleting unused target pool ${targetpool}"
+      gcloud compute target-pools delete "${targetpool}" \
+        --region="${REGION}" \
+        --quiet
+    fi
   done
   echo "Done cleaning up target pools without forwarding rules"
@@ -124,12 +124,12 @@ for PROJECT in ${PROJECT_IDS[@]}; do
   TARGET_POOLS=$(gcloud compute target-pools list \
     --regions="${REGION}" \
     --filter="creationTimestamp < ${LIMIT_DATE}" \
-      --format='value(name)')
+    --format='value(name)')
   for targetpool in $TARGET_POOLS; do
     echo "Detected cloud run target pool ${targetpool}, querying forwarding rule"
     forwardingitem=$(gcloud compute forwarding-rules list \
-        --filter=TARGET="https://www.googleapis.com/compute/v1/projects/$PROJECT/regions/$REGION/targetPools/$targetpool" \
+      --filter=TARGET="https://www.googleapis.com/compute/v1/projects/$PROJECT/regions/$REGION/targetPools/$targetpool" \
       --format='value(name)')
     echo "Deleting forwarding rule ${forwardingitem}"
     gcloud compute forwarding-rules delete "${forwardingitem}" \
@@ -149,8 +149,8 @@ for PROJECT in ${PROJECT_IDS[@]}; do
   ### Static IPs ###
   # Clean up static IPs that are reserved 1 day ago but not in use.
   STATIC_IPS=$(gcloud compute addresses list \
-      --filter="status=RESERVED AND creationTimestamp < ${LIMIT_DATE}" \
-      --regions="${REGION}" \
+    --filter="status=RESERVED AND creationTimestamp < ${LIMIT_DATE}" \
+    --regions="${REGION}" \
     --format="value(name)")
   for static_ip in $STATIC_IPS; do
     gcloud compute addresses delete ${static_ip} --region $REGION --quiet
diff --git a/scripts/format-examples.sh b/scripts/format-examples.sh
index b8097df8f..0eae5c8ff 100755
--- a/scripts/format-examples.sh
+++ b/scripts/format-examples.sh
@@ -23,12 +23,12 @@ shopt -s globstar
 ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
 for filename in $ROOT/examples/**/*.json; do
-    echo "Formatting $filename"
-    TEMP_FILE=$(mktemp)
+  echo "Formatting $filename"
+  TEMP_FILE=$(mktemp)
-    # jq is a common bash utility used to format/sort/filter json.
-    # Sort keys (-S) for all fields (.) in the input file and output to the temp file.
-    jq -S '.' "$filename" > "$TEMP_FILE"
-    cp -f "$TEMP_FILE" "$filename"
-    rm "$TEMP_FILE"
+  # jq is a common bash utility used to format/sort/filter json.
+  # Sort keys (-S) for all fields (.) in the input file and output to the temp file.
+  jq -S '.' "$filename" > "$TEMP_FILE"
+  cp -f "$TEMP_FILE" "$filename"
+  rm "$TEMP_FILE"
 done
diff --git a/scripts/release/release-changelog.sh b/scripts/release/release-changelog.sh
index 77e8ba7d3..72d6fd5dd 100755
--- a/scripts/release/release-changelog.sh
+++ b/scripts/release/release-changelog.sh
@@ -50,11 +50,11 @@ DIRECTORY="."
 while getopts :s:l:d:n: arg; do
   case ${arg} in
-    s) SHA="${OPTARG}";;
-    l) LAST_COMMIT="${OPTARG}";;
-    d) DIRECTORY="${OPTARG}";;
-    n) VERSION="${OPTARG}";;
-    *) usage "Invalid option: -${OPTARG}";;
+    s) SHA="${OPTARG}" ;;
+    l) LAST_COMMIT="${OPTARG}" ;;
+    d) DIRECTORY="${OPTARG}" ;;
+    n) VERSION="${OPTARG}" ;;
+    *) usage "Invalid option: -${OPTARG}" ;;
   esac
 done
@@ -113,7 +113,7 @@ EOF
 echo $(pwd)
 git log ${LAST_COMMIT}..${SHA} --pretty="- %s%w(76,2)" \
   | perl -pe'BEGIN {undef $/;} s/\s+Change-Id:[^\n]*\s+/\n/gs;' \
-    >> "${CHANGELOG}"
+  >> "${CHANGELOG}"
 cat <<EOF >> "${CHANGELOG}"
diff --git a/scripts/release/release-new-branch.sh b/scripts/release/release-new-branch.sh
index 3ab7cdf68..67fcdcb1e 100755
--- a/scripts/release/release-new-branch.sh
+++ b/scripts/release/release-new-branch.sh
@@ -38,8 +38,8 @@ SHA=''
 while getopts :n:s: arg; do
   case ${arg} in
-    s) SHA="${OPTARG}";;
-    *) usage "Invalid option: -${OPTARG}";;
+    s) SHA="${OPTARG}" ;;
+    *) usage "Invalid option: -${OPTARG}" ;;
   esac
 done
diff --git a/scripts/release/release-publish.sh b/scripts/release/release-publish.sh
index db3349d4d..6da87d000 100755
--- a/scripts/release/release-publish.sh
+++ b/scripts/release/release-publish.sh
@@ -45,11 +45,11 @@ SHA=""
 while getopts :g:u:s:n: arg; do
   case ${arg} in
-    g) GCLOUD="${OPTARG}";;
-    u) GSUTIL="${OPTARG}";;
-    s) SHA="${OPTARG}";;
-    n) VERSION="${OPTARG}";;
-    *) usage "Invalid option: -${OPTARG}";;
+    g) GCLOUD="${OPTARG}" ;;
+    u) GSUTIL="${OPTARG}" ;;
+    s) SHA="${OPTARG}" ;;
+    n) VERSION="${OPTARG}" ;;
+    *) usage "Invalid option: -${OPTARG}" ;;
  esac
 done
@@ -75,12 +75,12 @@ if RELEASE_BRANCH_SHA="$(git rev-parse upstream/${CURRENT_BRANCH})"; then
   if [[ "${SHA}" != "${RELEASE_BRANCH_SHA}" ]]; then
     printf "\e[31m
 WARNING: Release branch commit (${RELEASE_BRANCH_SHA}) doesn't match ${SHA}.
-\e[0m"
+  \e[0m"
   fi
 else
   printf "\e[31m
 WARNING: Cannot find release branch origin/release-${VERSION}.
-\e[0m"
+  \e[0m"
 fi
 function push_docker_image() {
diff --git a/scripts/release/release-qualify.sh b/scripts/release/release-qualify.sh
index 9a8f5cfed..16f4bb2c0 100755
--- a/scripts/release/release-qualify.sh
+++ b/scripts/release/release-qualify.sh
@@ -16,7 +16,7 @@ ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
 . ${ROOT}/scripts/all-utilities.sh || { echo "Cannot load Bash utilities";
-  exit 1; }
+exit 1; }
 function usage() {
   [[ -n "${1}" ]] && echo "${1}"
@@ -105,58 +105,58 @@ function count_stress_failures() {
       if (total > 0) {
         print "Failed/Total: ", (failed + non2xx) / total
       }
-    }' "${@}"
+  }' "${@}"
 }
 (
 echo "Release qualification of ${SHA}."
-echo "It is now: $(date)"
+  echo "It is now: $(date)"
-mkdir -p "${LOGS}/${SHA}"
+  mkdir -p "${LOGS}/${SHA}"
-echo "Downloading prow logs to '${LOGS}' directory."
-${GSUTIL} -m -q cp -r "gs://apiproxy-continuous-long-run/${SHA}/logs/*" "${LOGS}/${SHA}/" 2>&1 \
-  || error_exit "Failed to download logs from gs://apiproxy-continuous-long-run/${SHA}/logs/*"
+  echo "Downloading prow logs to '${LOGS}' directory."
+  ${GSUTIL} -m -q cp -r "gs://apiproxy-continuous-long-run/${SHA}/logs/*" "${LOGS}/${SHA}/" 2>&1 \
+    || error_exit "Failed to download logs from gs://apiproxy-continuous-long-run/${SHA}/logs/*"
-python3 "${ROOT}/scripts/release/validate_release.py" \
-  --commit_sha "${SHA}" \
-  --path "${LOGS}/${SHA}" \
-  || error_exit "Release is not qualified."
+  python3 "${ROOT}/scripts/release/validate_release.py" \
+    --commit_sha "${SHA}" \
+    --path "${LOGS}/${SHA}" \
+    || error_exit "Release is not qualified."
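# [Editor's note] The re-indented block below preserves the script's fd-3 read
# pattern; a self-contained sketch of the technique, with illustrative names:
#
#   while read -u3 f; do                      # loop input arrives on fd 3 ...
#     read -r -p "Inspect ${f}? [y/N] " ans   # ... so stdin stays free for the user
#   done 3< <(find . -type f -name '*.log')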
-RQ_TESTS=( )
+  RQ_TESTS=( )
-# This while loop reads from a redirect set up at the "done" clause.
-# Because we read user input inside the loop, we set up the input
-# coming from the "find" command on file descriptor 3. This is why
-# we use "read -u3" here.
+  # This while loop reads from a redirect set up at the "done" clause.
+  # Because we read user input inside the loop, we set up the input
+  # coming from the "find" command on file descriptor 3. This is why
+  # we use "read -u3" here.
-while read -u3 LOG_FILE; do
-  DIR="$(dirname "${LOG_FILE}")"
-  JSON_FILE="${LOG_FILE%.log}.json"
-  RUN="${DIR##*/}"
+  while read -u3 LOG_FILE; do
+    DIR="$(dirname "${LOG_FILE}")"
+    JSON_FILE="${LOG_FILE%.log}.json"
+    RUN="${DIR##*/}"
-  [[ -f "${JSON_FILE}" ]] \
-    || error_exit "Result of release qualification test ${JSON_FILE} not found."
+    [[ -f "${JSON_FILE}" ]] \
+      || error_exit "Result of release qualification test ${JSON_FILE} not found."
-  echo '*********************************************************************'
-  echo "Release qualification run: ${RUN}"
+    echo '*********************************************************************'
+    echo "Release qualification run: ${RUN}"
-  echo ''
-  echo "Checking ${JSON_FILE}"
-  echo ''
-  check_result "${JSON_FILE}" "${SHA}" || continue
+    echo ''
+    echo "Checking ${JSON_FILE}"
+    echo ''
+    check_result "${JSON_FILE}" "${SHA}" || continue
-  echo ''
-  echo "Checking ${LOG_FILE}"
-  echo ''
-  count_stress_failures "${LOG_FILE}"
+    echo ''
+    echo "Checking ${LOG_FILE}"
+    echo ''
+    count_stress_failures "${LOG_FILE}"
-  RQ_TESTS+=(${DIR})
+    RQ_TESTS+=(${DIR})
-# the ! -path ... excludes the root directory which is otherwise
-# included in the result
-done 3< <( find "${LOGS}/${SHA}" ! -path "${LOGS}/${SHA}" -type f -name 'long-run-test*.log' )
+  # the ! -path ... excludes the root directory which is otherwise
+  # included in the result
+  done 3< <( find "${LOGS}/${SHA}" ! -path "${LOGS}/${SHA}" -type f -name 'long-run-test*.log' )
-if [[ ${#RQ_TESTS[@]} -le 0 ]]; then
+  if [[ ${#RQ_TESTS[@]} -le 0 ]]; then
   echo '*********************************************************************'
   echo '* Release qualification INCOMPLETE. *'
   echo '* ********** *'
@@ -170,7 +170,7 @@ if [[ ${#RQ_TESTS[@]} -le 0 ]]; then
   exit 0
 fi
-#TODO(taoxuy):add envoy log check
+  #TODO(taoxuy):add envoy log check
 echo ''
 echo '*********************************************************************'
diff --git a/scripts/release/release-show.sh b/scripts/release/release-show.sh
index 985bf8e0c..289e244e8 100755
--- a/scripts/release/release-show.sh
+++ b/scripts/release/release-show.sh
@@ -33,7 +33,7 @@ END_USAGE
 while getopts :r: arg; do
   case ${arg} in
-    *) usage "Invalid option: -${OPTARG}";;
+    *) usage "Invalid option: -${OPTARG}" ;;
   esac
 done
diff --git a/scripts/release/release-stable.sh b/scripts/release/release-stable.sh
index 825b3f891..42660ef60 100755
--- a/scripts/release/release-stable.sh
+++ b/scripts/release/release-stable.sh
@@ -26,14 +26,10 @@ Usage: ${BASH_SOURCE[0]} [-n ]
 This script will release stable ESPv2 docker image with format of:
   $(get_proxy_image_release_name):\${MINOR_BASE_VERSION}
-  $(get_proxy_image_release_name):\${MAJOR_BASE_VERSION}
   $(get_serverless_image_release_name):\${MINOR_BASE_VERSION}
-  $(get_serverless_image_release_name):\${MAJOR_BASE_VERSION}
   $(get_gcsrunner_image_release_name):\${MINOR_BASE_VERSION}
-  $(get_gcsrunner_image_release_name):\${MAJOR_BASE_VERSION}
 where:
   MINOR_BASE_VERSION=major.minor
-  MAJOR_BASE_VERSION=major
 END_USAGE
 exit 1
@@ -42,8 +38,8 @@ END_USAGE
 while getopts :n: arg; do
   case ${arg} in
-    n) VERSION="${OPTARG}";;
-    *) usage "Invalid option: -${OPTARG}";;
+    n) VERSION="${OPTARG}" ;;
+    *) usage "Invalid option: -${OPTARG}" ;;
   esac
 done
 set -x
@@ -55,14 +51,12 @@ fi
 # Minor base is 1.33 if version is 1.33.0
 MINOR_BASE_VERSION=${VERSION%.*}
-# Major base is 1 if version is 1.33.0
-MAJOR_BASE_VERSION=${MINOR_BASE_VERSION%.*}
 function tag_stable_image() {
   local image=$1
   gcloud container images add-tag "${image}:${VERSION}" \
-    "${image}:${MINOR_BASE_VERSION}" "${image}:${MAJOR_BASE_VERSION}" --quiet
+    "${image}:${MINOR_BASE_VERSION}" --quiet
 }
 tag_stable_image "$(get_proxy_image_release_name)"
diff --git a/scripts/release/release-tag-git.sh b/scripts/release/release-tag-git.sh
index e8ce920ed..d96813bef 100755
--- a/scripts/release/release-tag-git.sh
+++ b/scripts/release/release-tag-git.sh
@@ -43,10 +43,10 @@ TAG_REF=''
 while getopts :b:t:n: arg; do
   case ${arg} in
-    b) BUILD_REF="${OPTARG}";;
-    t) TAG_REF="${OPTARG}";;
-    n) VERSION="${OPTARG}";;
-    *) usage "Invalid option: -${OPTARG}";;
+    b) BUILD_REF="${OPTARG}" ;;
+    t) TAG_REF="${OPTARG}" ;;
+    n) VERSION="${OPTARG}" ;;
+    *) usage "Invalid option: -${OPTARG}" ;;
   esac
 done
diff --git a/src/api_proxy/service_control/check_response_convert_utils_test.cc b/src/api_proxy/service_control/check_response_convert_utils_test.cc
index d0ab51c09..0c68be5a1 100644
--- a/src/api_proxy/service_control/check_response_convert_utils_test.cc
+++ b/src/api_proxy/service_control/check_response_convert_utils_test.cc
@@ -191,8 +191,7 @@ TEST_F(CheckResponseConverterTest, ConvertConsumerInfo) {
   EXPECT_EQ(info.consumer_number, std::to_string(consumer_number));
 }
-TEST_F(CheckResponseConverterTest,
-       ApiKeyUidCarriedInCheckResponseInfo) {
+TEST_F(CheckResponseConverterTest, ApiKeyUidCarriedInCheckResponseInfo) {
   CheckResponseInfo info;
   CheckResponse response;
   response.mutable_check_info()->set_api_key_uid("fake_api_key_uid");
diff --git a/src/api_proxy/service_control/request_builder.cc b/src/api_proxy/service_control/request_builder.cc
index 345c56174..4ce5ecf5a 100644
--- a/src/api_proxy/service_control/request_builder.cc
+++ b/src/api_proxy/service_control/request_builder.cc
@@ -348,7 +348,6 @@ constexpr char kServiceControlBackendProtocol[] =
 constexpr char kServiceControlConsumerProject[] =
     "serviceruntime.googleapis.com/consumer_project";
 constexpr char kApiKeyPrefix[] = "apikey:";
-
 // User agent label value
 // The value for kUserAgent should be configured at service control server.
diff --git a/src/envoy/BUILD b/src/envoy/BUILD
index 5ddf75e54..480a3ac0c 100644
--- a/src/envoy/BUILD
+++ b/src/envoy/BUILD
@@ -1,12 +1,12 @@
-package(
-    default_visibility = ["//visibility:public"],
-)
-
 load(
     "@envoy//bazel:envoy_build_system.bzl",
     "envoy_cc_binary",
 )
+package(
+    default_visibility = ["//visibility:public"],
+)
+
 alias(
     name = "backend_auth",
     actual = "//src/envoy/http/backend_auth:filter_factory",
diff --git a/src/envoy/http/backend_auth/config_parser_impl.cc b/src/envoy/http/backend_auth/config_parser_impl.cc
index d1652475c..459494e44 100644
--- a/src/envoy/http/backend_auth/config_parser_impl.cc
+++ b/src/envoy/http/backend_auth/config_parser_impl.cc
@@ -39,7 +39,7 @@ AudienceContext::AudienceContext(
     const FilterConfig& filter_config,
     const token::TokenSubscriberFactory& token_subscriber_factory,
     GetTokenFunc access_token_fn)
-    : tls_(context.threadLocal()) {
+    : tls_(context.serverFactoryContext().threadLocal()) {
   tls_.set(
       [](Envoy::Event::Dispatcher&) { return std::make_shared(); });
diff --git a/src/envoy/http/grpc_metadata_scrubber/BUILD b/src/envoy/http/grpc_metadata_scrubber/BUILD
index a34c5b092..d758421b8 100644
--- a/src/envoy/http/grpc_metadata_scrubber/BUILD
+++ b/src/envoy/http/grpc_metadata_scrubber/BUILD
@@ -16,7 +16,7 @@ envoy_cc_library(
     repository = "@envoy",
     deps = [
         ":filter_lib",
-        "@envoy//source/exe:envoy_common_lib",
+        "@envoy//source/exe:all_extensions_lib",
     ],
 )
diff --git a/src/envoy/http/header_sanitizer/BUILD b/src/envoy/http/header_sanitizer/BUILD
index 03843ff2a..bdd457503 100644
--- a/src/envoy/http/header_sanitizer/BUILD
+++ b/src/envoy/http/header_sanitizer/BUILD
@@ -34,6 +34,6 @@ envoy_cc_library(
     deps = [
         ":filter_lib",
         "//api/envoy/v12/http/header_sanitizer:config_proto_cc_proto",
-        "@envoy//source/exe:envoy_common_lib",
+        "@envoy//source/exe:all_extensions_lib",
     ],
 )
diff --git a/src/envoy/http/service_control/BUILD b/src/envoy/http/service_control/BUILD
index 541073f03..7aa76cd0f 100644
--- a/src/envoy/http/service_control/BUILD
+++ b/src/envoy/http/service_control/BUILD
@@ -168,7 +168,7 @@ envoy_cc_library(
     ],
     repository = "@envoy",
     deps = [
-        "@envoy//source/exe:envoy_common_lib",
+        "@envoy//source/exe:all_extensions_lib",
     ],
 )
@@ -188,7 +188,7 @@ envoy_cc_library(
         "//src/envoy/utils:rc_detail_utils_lib",
         "@envoy//source/common/grpc:status_lib",
         "@envoy//source/common/http:headers_lib",
-        "@envoy//source/exe:envoy_common_lib",
+        "@envoy//source/exe:all_extensions_lib",
         "@envoy//source/extensions/filters/http/common:pass_through_filter_lib",
     ],
 )
@@ -203,7 +203,7 @@ envoy_cc_library(
         ":filter_stats_lib",
         ":handler_impl_lib",
         ":service_control_call_impl_lib",
-        "@envoy//source/exe:envoy_common_lib",
+        "@envoy//source/exe:all_extensions_lib",
     ],
 )
@@ -215,7 +215,7 @@ envoy_cc_library(
     deps = [
         ":filter_config_lib",
         ":filter_lib",
-        "@envoy//source/exe:envoy_common_lib",
+        "@envoy//source/exe:all_extensions_lib",
     ],
 )
diff --git a/src/envoy/http/service_control/config_parser.cc b/src/envoy/http/service_control/config_parser.cc
index ad720001e..4c9e2d60b 100644
--- a/src/envoy/http/service_control/config_parser.cc
+++ b/src/envoy/http/service_control/config_parser.cc
@@ -40,18 +40,20 @@ FilterConfigParser::FilterConfigParser(const FilterConfig& config,
     service_map_.emplace(service.service_name(), ServiceContextPtr(srv_ctx));
   }
   if (first_srv_ctx == nullptr) {
-    throw Envoy::ProtoValidationException("Empty services", config_);
+    Envoy::ProtoExceptionUtil::throwProtoValidationException("Empty services",
+                                                             config_);
   }
   if (service_map_.size() < static_cast<size_t>(config_.services_size())) {
-    throw Envoy::ProtoValidationException("Duplicated service names", config_);
+    Envoy::ProtoExceptionUtil::throwProtoValidationException(
+        "Duplicated service names", config_);
   }
   for (const auto& requirement : config_.requirements()) {
     const auto service_it = service_map_.find(requirement.service_name());
     if (service_it == service_map_.end()) {
-      throw Envoy::ProtoValidationException("Invalid service name",
-                                            requirement);
+      Envoy::ProtoExceptionUtil::throwProtoValidationException(
+          "Invalid service name", requirement);
     }
     requirements_map_.emplace(requirement.operation_name(),
                               RequirementContextPtr(new RequirementContext(
@@ -60,8 +62,8 @@ FilterConfigParser::FilterConfigParser(const FilterConfig& config,
   if (requirements_map_.size() <
       static_cast<size_t>(config_.requirements_size())) {
-    throw Envoy::ProtoValidationException("Duplicated operation names",
-                                          config_);
+    Envoy::ProtoExceptionUtil::throwProtoValidationException(
+        "Duplicated operation names", config_);
   }
   // Construct a requirement for non matched requests
diff --git a/src/envoy/http/service_control/config_parser.h b/src/envoy/http/service_control/config_parser.h
index 1af91b8ed..dd208dac5 100644
--- a/src/envoy/http/service_control/config_parser.h
+++ b/src/envoy/http/service_control/config_parser.h
@@ -51,7 +51,7 @@ class ServiceContext {
       min_stream_report_interval_ms_ = kDefaultMinStreamReportIntervalMs;
     }
     if (min_stream_report_interval_ms_ < kLowerBoundMinStreamReportIntervalMs) {
-      throw Envoy::ProtoValidationException(
+      Envoy::ProtoExceptionUtil::throwProtoValidationException(
          absl::StrCat("min_stream_report_interval_ms must be larger than: ",
                       kLowerBoundMinStreamReportIntervalMs),
          config);
diff --git a/src/envoy/http/service_control/filter.cc b/src/envoy/http/service_control/filter.cc
index e673812f5..4a21d7845 100644
--- a/src/envoy/http/service_control/filter.cc
+++ b/src/envoy/http/service_control/filter.cc
@@ -110,7 +110,7 @@ void ServiceControlFilter::rejectRequest(Envoy::Http::Code code,
   decoder_callbacks_->sendLocalReply(code, error_msg, nullptr, absl::nullopt,
                                      rc_detail);
   decoder_callbacks_->streamInfo().setResponseFlag(
-      Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService);
+      Envoy::StreamInfo::UnauthorizedExternalService);
 }
 Envoy::Http::FilterDataStatus ServiceControlFilter::decodeData(
@@ -132,20 +132,17 @@ Envoy::Http::FilterTrailersStatus ServiceControlFilter::decodeTrailers(
 }
 void ServiceControlFilter::log(
-    const Envoy::Http::RequestHeaderMap* request_headers,
-    const Envoy::Http::ResponseHeaderMap* response_headers,
-    const Envoy::Http::ResponseTrailerMap* response_trailers,
-    const Envoy::StreamInfo::StreamInfo&, Envoy::AccessLog::AccessLogType) {
+    const Envoy::Formatter::HttpFormatterContext& context,
+    const Envoy::StreamInfo::StreamInfo&) {
   ENVOY_LOG(debug, "Called ServiceControl Filter : {}", __func__);
   if (!handler_) {
-    if (!request_headers) return;
-    handler_ =
-        factory_.createHandler(*request_headers, decoder_callbacks_, stats_);
+    handler_ = factory_.createHandler(context.requestHeaders(),
+                                      decoder_callbacks_, stats_);
   }
   Envoy::Tracing::Span& parent_span = decoder_callbacks_->activeSpan();
-  handler_->callReport(request_headers, response_headers, response_trailers,
-                       parent_span);
+  handler_->callReport(&context.requestHeaders(), &context.responseHeaders(),
+                       &context.responseTrailers(), parent_span);
 }
 }  // namespace service_control
diff --git a/src/envoy/http/service_control/filter.h b/src/envoy/http/service_control/filter.h
index 2aaeedaa4..937afa477 100644
--- a/src/envoy/http/service_control/filter.h
+++ b/src/envoy/http/service_control/filter.h
@@ -51,11 +51,8 @@ class ServiceControlFilter
       Envoy::Http::RequestTrailerMap&) override;
   // Called when the request is completed.
-  void log(const Envoy::Http::RequestHeaderMap* request_headers,
-           const Envoy::Http::ResponseHeaderMap* response_headers,
-           const Envoy::Http::ResponseTrailerMap* response_trailers,
-           const Envoy::StreamInfo::StreamInfo& stream_info,
-           Envoy::AccessLog::AccessLogType access_log_type) override;
+  void log(const Envoy::Formatter::HttpFormatterContext& context,
+           const Envoy::StreamInfo::StreamInfo& stream_info) override;
   // For Handler::CheckDoneCallback, called when callCheck() is done
   void onCheckDone(const absl::Status& status,
diff --git a/src/envoy/http/service_control/filter_config.h b/src/envoy/http/service_control/filter_config.h
index de381207b..647b46d5d 100644
--- a/src/envoy/http/service_control/filter_config.h
+++ b/src/envoy/http/service_control/filter_config.h
@@ -45,8 +45,9 @@ class ServiceControlFilterConfig
             proto_config)),
         call_factory_(proto_config_, stats_prefix, context),
         config_parser_(*proto_config_, call_factory_),
-        handler_factory_(context.api().randomGenerator(), config_parser_,
-                         context.timeSource()) {}
+        handler_factory_(context.serverFactoryContext().api().randomGenerator(),
+                         config_parser_,
+                         context.serverFactoryContext().timeSource()) {}
   const ServiceControlHandlerFactory& handler_factory() const {
     return handler_factory_;
diff --git a/src/envoy/http/service_control/filter_fuzz_test.cc b/src/envoy/http/service_control/filter_fuzz_test.cc
index a3a835db3..90842d860 100644
--- a/src/envoy/http/service_control/filter_fuzz_test.cc
+++ b/src/envoy/http/service_control/filter_fuzz_test.cc
@@ -72,7 +72,8 @@ DEFINE_PROTO_FUZZER(
   NiceMock context;
   NiceMock thread_local_cluster;
   NiceMock request(
-      &context.cluster_manager_.thread_local_cluster_.async_client_);
+      &context.server_factory_context_.cluster_manager_.thread_local_cluster_
+           .async_client_);
   NiceMock mock_decoder_callbacks;
@@ -82,7 +83,7 @@ DEFINE_PROTO_FUZZER(
   // Callback for token subscriber to start.
   Envoy::Event::TimerCb onReadyCallback;
-  EXPECT_CALL(context.dispatcher_, createTimer_(_))
+  EXPECT_CALL(context.server_factory_context_.dispatcher_, createTimer_(_))
       .WillRepeatedly(
          Invoke([&onReadyCallback](const Envoy::Event::TimerCb& cb) {
            ENVOY_LOG_MISC(trace, "Mocking dispatcher createTimer");
@@ -92,7 +93,8 @@ DEFINE_PROTO_FUZZER(
   // Mock the http async client.
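// [Editor's note] The recurring C++ change in this PR is the Envoy 1.30
// FactoryContext split: server-wide dependencies are now reached through
// serverFactoryContext(), and the test mocks mirror that as
// server_factory_context_. A minimal sketch of the accessor migration,
// using only calls that appear in this diff:
//
//   Envoy::Server::Configuration::FactoryContext& context = ...;
//   auto& server_ctx = context.serverFactoryContext();
//   server_ctx.threadLocal();           // was: context.threadLocal()
//   server_ctx.clusterManager();        // was: context.clusterManager()
//   server_ctx.mainThreadDispatcher();  // was: context.mainThreadDispatcher()
//   server_ctx.api();                   // was: context.api()
//   server_ctx.timeSource();            // was: context.timeSource()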
   int resp_num = 0;
-  EXPECT_CALL(context.cluster_manager_, getThreadLocalCluster(_))
+  EXPECT_CALL(context.server_factory_context_.cluster_manager_,
+              getThreadLocalCluster(_))
       .WillRepeatedly(Return(&thread_local_cluster));
   EXPECT_CALL(thread_local_cluster.async_client_, send_(_, _, _))
       .WillRepeatedly(Invoke([&request, &input, &resp_num](
diff --git a/src/envoy/http/service_control/filter_test.cc b/src/envoy/http/service_control/filter_test.cc
index a88bb35a5..94360773c 100644
--- a/src/envoy/http/service_control/filter_test.cc
+++ b/src/envoy/http/service_control/filter_test.cc
@@ -91,8 +91,7 @@ TEST_F(ServiceControlFilterTest, DecodeHeadersMissingHeaders) {
   // Filter should reject this request
   EXPECT_CALL(mock_decoder_callbacks_.stream_info_,
-              setResponseFlag(
-                  Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService))
+              setResponseFlag(Envoy::StreamInfo::UnauthorizedExternalService))
       .Times(2);
   EXPECT_CALL(mock_decoder_callbacks_,
@@ -146,10 +145,8 @@ TEST_F(ServiceControlFilterTest, DecodeHeadersSyncBadStatus) {
                      "service_control_check_error{API_KEY_INVALID}");
       }));
-  EXPECT_CALL(
-      mock_decoder_callbacks_.stream_info_,
-      setResponseFlag(
-          Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService));
+  EXPECT_CALL(mock_decoder_callbacks_.stream_info_,
+              setResponseFlag(Envoy::StreamInfo::UnauthorizedExternalService));
   EXPECT_CALL(
       mock_decoder_callbacks_,
       sendLocalReply(Envoy::Http::Code::Unauthorized, "UNAUTHENTICATED: test",
@@ -202,10 +199,8 @@ TEST_F(ServiceControlFilterTest, DecodeHeadersAsyncBadStatus) {
             filter_->decodeHeaders(req_headers_, true));
   // Filter should reject this request
-  EXPECT_CALL(
-      mock_decoder_callbacks_.stream_info_,
-      setResponseFlag(
-          Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService));
+  EXPECT_CALL(mock_decoder_callbacks_.stream_info_,
+              setResponseFlag(Envoy::StreamInfo::UnauthorizedExternalService));
   EXPECT_CALL(
       mock_decoder_callbacks_,
@@ -215,22 +210,12 @@ TEST_F(ServiceControlFilterTest, DecodeHeadersAsyncBadStatus) {
       kBadStatus, "service_control_check_error{API_KEY_INVALID}");
 }
-TEST_F(ServiceControlFilterTest, LogWithoutHandlerOrHeaders) {
-  // Test: If no handler and no headers, a handler is not created
-  EXPECT_CALL(mock_handler_factory_, createHandler(_, _, _)).Times(0);
-
-  // Filter has no handler. If it tries to callReport, it will seg fault
-  filter_->log(nullptr, &resp_headers_, &resp_trailer_,
-               mock_decoder_callbacks_.stream_info_,
-               Envoy::AccessLog::AccessLogType::NotSet);
-}
-
 TEST_F(ServiceControlFilterTest, LogWithoutHandler) {
   // Test: When a Filter has no Handler yet, another is created for log()
   EXPECT_CALL(*mock_handler_, callReport(_, _, _, _));
-  filter_->log(&req_headers_, &resp_headers_, &resp_trailer_,
-               mock_decoder_callbacks_.stream_info_,
-               Envoy::AccessLog::AccessLogType::NotSet);
+  Envoy::Formatter::HttpFormatterContext context(&req_headers_, &resp_headers_,
+                                                 &resp_trailer_);
+  filter_->log(context, mock_decoder_callbacks_.stream_info_);
 }
 TEST_F(ServiceControlFilterTest, LogWithHandler) {
@@ -240,9 +225,9 @@ TEST_F(ServiceControlFilterTest, LogWithHandler) {
   EXPECT_CALL(mock_handler_factory_, createHandler(_, _, _)).Times(0);
   EXPECT_CALL(*mock_handler_, callReport(_, _, _, _));
-  filter_->log(&req_headers_, &resp_headers_, &resp_trailer_,
-               mock_decoder_callbacks_.stream_info_,
-               Envoy::AccessLog::AccessLogType::NotSet);
+  Envoy::Formatter::HttpFormatterContext context(&req_headers_, &resp_headers_,
+                                                 &resp_trailer_);
+  filter_->log(context, mock_decoder_callbacks_.stream_info_);
 }
 TEST_F(ServiceControlFilterTest, DecodeHelpersWhileStopped) {
diff --git a/src/envoy/http/service_control/handler_utils.cc b/src/envoy/http/service_control/handler_utils.cc
index 0f05d6126..ffe7c9ec9 100644
--- a/src/envoy/http/service_control/handler_utils.cc
+++ b/src/envoy/http/service_control/handler_utils.cc
@@ -62,22 +62,24 @@ inline int64_t convertNsToMs(std::chrono::nanoseconds ns) {
   return std::chrono::duration_cast<std::chrono::milliseconds>(ns).count();
 }
-bool extractAPIKeyFromQuery(const Envoy::Http::RequestHeaderMap& headers,
-                            const std::string& query, bool& were_params_parsed,
-                            Envoy::Http::Utility::QueryParams& parsed_params,
-                            std::string& api_key) {
+bool extractAPIKeyFromQuery(
+    const Envoy::Http::RequestHeaderMap& headers, const std::string& query,
+    bool& were_params_parsed,
+    Envoy::Http::Utility::QueryParamsMulti& parsed_params,
+    std::string& api_key) {
   if (!were_params_parsed) {
     if (headers.Path() == nullptr) {
       return false;
     }
-    parsed_params = Envoy::Http::Utility::parseQueryString(
+    parsed_params = Envoy::Http::Utility::QueryParamsMulti::parseQueryString(
        headers.Path()->value().getStringView());
     were_params_parsed = true;
   }
-  const auto& it = parsed_params.find(query);
-  if (it != parsed_params.end()) {
-    api_key = it->second;
+  std::optional<std::string> query_param_value =
+      parsed_params.getFirstValue(query);
+  if (query_param_value.has_value()) {
+    api_key = *query_param_value;
     return true;
   }
   return false;
@@ -309,7 +311,7 @@ bool extractAPIKey(
   // If checking multiple headers, cache the parameters so they are only parsed
   // once
   bool were_params_parsed{false};
-  Envoy::Http::Utility::QueryParams parsed_params;
+  Envoy::Http::Utility::QueryParamsMulti parsed_params;
   for (const auto& location : locations) {
     switch (location.key_case()) {
diff --git a/src/envoy/http/service_control/http_call.cc b/src/envoy/http/service_control/http_call.cc
index 70cca8955..4eb6c35ad 100644
--- a/src/envoy/http/service_control/http_call.cc
+++ b/src/envoy/http/service_control/http_call.cc
@@ -207,7 +207,8 @@ class HttpCallImpl : public HttpCall,
     request_span_->setTag(Envoy::Tracing::Tags::get().HttpMethod, "POST");
     Envoy::Http::RequestMessagePtr message = prepareHeaders(token);
-    request_span_->injectContext(message->headers(), nullptr);
+    Envoy::Tracing::HttpTraceContext trace_context(message->headers());
+    request_span_->injectContext(trace_context, nullptr);
     ENVOY_LOG(debug, "http call from [uri = {}]: start", uri_);
     const auto thread_local_cluster =
diff --git a/src/envoy/http/service_control/service_control_call_impl.cc b/src/envoy/http/service_control/service_control_call_impl.cc
index 02d934cfc..505eab846 100644
--- a/src/envoy/http/service_control/service_control_call_impl.cc
+++ b/src/envoy/http/service_control/service_control_call_impl.cc
@@ -107,13 +107,14 @@ ServiceControlCallImpl::ServiceControlCallImpl(
     Envoy::Server::Configuration::FactoryContext& context)
     : filter_config_(*proto_config),
       token_subscriber_factory_(context),
-      tls_(context.threadLocal()) {
+      tls_(context.serverFactoryContext().threadLocal()) {
   // Pass shared_ptr of proto_config to the function capture so that
   // it will not be released when the function is called.
-  tls_.set([proto_config, &config, stats_prefix, &scope = context.scope(),
-            &cm = context.clusterManager(),
-            &time_source =
-                context.timeSource()](Envoy::Event::Dispatcher& dispatcher) {
+  tls_.set([proto_config, &config, stats_prefix,
+            &scope = context.serverFactoryContext().scope(),
+            &cm = context.serverFactoryContext().clusterManager(),
+            &time_source = context.serverFactoryContext().timeSource()](
+               Envoy::Event::Dispatcher& dispatcher) {
     return std::make_shared(config, *proto_config, stats_prefix,
                             scope, cm, time_source, dispatcher);
diff --git a/src/envoy/token/token_subscriber.cc b/src/envoy/token/token_subscriber.cc
index d2ae362c4..fd1d396a6 100644
--- a/src/envoy/token/token_subscriber.cc
+++ b/src/envoy/token/token_subscriber.cc
@@ -57,8 +57,9 @@ TokenSubscriber::TokenSubscriber(
 void TokenSubscriber::init() {
   init_target_ = std::make_unique<Envoy::Init::TargetImpl>(
      debug_name_, [this] { refresh(); });
-  refresh_timer_ = context_.mainThreadDispatcher().createTimer(
-      [this]() -> void { refresh(); });
+  refresh_timer_ =
+      context_.serverFactoryContext().mainThreadDispatcher().createTimer(
+          [this]() -> void { refresh(); });
   context_.initManager().add(*init_target_);
 }
@@ -128,7 +129,8 @@ void TokenSubscriber::refresh() {
       .setSendXff(false);
   const auto thread_local_cluster =
-      context_.clusterManager().getThreadLocalCluster(token_cluster_);
+      context_.serverFactoryContext().clusterManager().getThreadLocalCluster(
+          token_cluster_);
   if (thread_local_cluster) {
     active_request_ = thread_local_cluster->httpAsyncClient().send(
        std::move(message), *this, options);
diff --git a/src/envoy/token/token_subscriber_test.cc b/src/envoy/token/token_subscriber_test.cc
index 0e976cae6..d7481246c 100644
--- a/src/envoy/token/token_subscriber_test.cc
+++ b/src/envoy/token/token_subscriber_test.cc
@@ -69,7 +69,8 @@ class TokenSubscriberTest : public testing::Test {
         }));
     // Setup mock http async client.
-    EXPECT_CALL(context_.cluster_manager_, getThreadLocalCluster(_))
+    EXPECT_CALL(context_.server_factory_context_.cluster_manager_,
+                getThreadLocalCluster(_))
        .WillRepeatedly(Return(&thread_local_cluster_));
     EXPECT_CALL(thread_local_cluster_.async_client_, send_(_, _, _))
        .WillRepeatedly(
@@ -83,7 +84,7 @@ class TokenSubscriberTest : public testing::Test {
        }));
    // Setup mock refresh timer.
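// [Editor's note] On the handler_utils.cc change above: QueryParamsMulti
// replaces the old map-style QueryParams, so lookups return an optional first
// value instead of an iterator. A minimal sketch (the "api_key" key is a
// hypothetical example, not from the patch):
//
//   auto params = Envoy::Http::Utility::QueryParamsMulti::parseQueryString(
//       headers.Path()->value().getStringView());
//   if (std::optional<std::string> v = params.getFirstValue("api_key");
//       v.has_value()) {
//     api_key = *v;
//   }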
- EXPECT_CALL(context_.dispatcher_, createTimer_(_)) + EXPECT_CALL(context_.server_factory_context_.dispatcher_, createTimer_(_)) .WillOnce(Invoke([this](Envoy::Event::TimerCb cb) { timer_cb_ = cb; return mock_timer_; diff --git a/src/envoy/utils/BUILD b/src/envoy/utils/BUILD index 82a9a942c..018e88dac 100644 --- a/src/envoy/utils/BUILD +++ b/src/envoy/utils/BUILD @@ -59,7 +59,7 @@ envoy_cc_library( deps = [ "@envoy//source/common/common:empty_string", "@envoy//source/common/router:string_accessor_lib", - "@envoy//source/exe:envoy_common_lib", + "@envoy//source/exe:all_extensions_lib", ], ) @@ -82,7 +82,7 @@ envoy_cc_library( repository = "@envoy", deps = [ "@envoy//source/common/common:empty_string", - "@envoy//source/exe:envoy_common_lib", + "@envoy//source/exe:all_extensions_lib", ], ) @@ -91,6 +91,7 @@ envoy_cc_library( srcs = ["rc_detail_utils.cc"], hdrs = ["rc_detail_utils.h"], repository = "@envoy", + deps = ["@com_google_absl//absl/strings"], ) envoy_cc_test( diff --git a/tests/e2e/scripts/cloud-run/deploy.sh b/tests/e2e/scripts/cloud-run/deploy.sh index dd476ad53..a1e8ea52a 100755 --- a/tests/e2e/scripts/cloud-run/deploy.sh +++ b/tests/e2e/scripts/cloud-run/deploy.sh @@ -84,15 +84,15 @@ function deployBackend() { "bookstore") backend_image="gcr.io/cloudesf-testing/http-bookstore:3" backend_port=8080 - ;; + ;; "echo") backend_image="gcr.io/cloudesf-testing/grpc-echo-server:latest" backend_port=8081 - ;; + ;; *) echo "No such backend image for backend ${BACKEND}" exit 1 - ;; + ;; esac ${GCLOUD_BETA} run deploy "${BACKEND_SERVICE_NAME}" ${USE_HTTP2} \ @@ -126,19 +126,19 @@ function deployBackend() { BACKEND_HOST=$(gcloud functions describe ${BACKEND_SERVICE_NAME} --format="value(httpsTrigger.url)" --quiet) ;; "app-engine") - cd ${ROOT}/tests/endpoints/bookstore + cd ${ROOT}/tests/endpoints/bookstore - sed "s/SERVICE_NAME/${BACKEND_SERVICE_NAME}/g" app_template.yaml > app.yaml - gcloud app deploy --quiet - sleep_wrapper "1m" "Sleep 1m for App Engine backend setup" + sed "s/SERVICE_NAME/${BACKEND_SERVICE_NAME}/g" app_template.yaml > app.yaml + gcloud app deploy --quiet + sleep_wrapper "1m" "Sleep 1m for App Engine backend setup" - # For how requests are routed in App Engine, refer to - # https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed#example_urls - BACKEND_HOST="https://${BACKEND_SERVICE_NAME}-dot-cloudesf-testing.uc.r.appspot.com" + # For how requests are routed in App Engine, refer to + # https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed#example_urls + BACKEND_HOST="https://${BACKEND_SERVICE_NAME}-dot-cloudesf-testing.uc.r.appspot.com" - cd ${ROOT} - ;; + cd ${ROOT} + ;; *) echo "No such backend platform ${BACKEND_PLATFORM}" @@ -264,49 +264,49 @@ function setup() { fi case "${BACKEND}" in - 'bookstore') - local service_idl_tmpl="${ROOT}/tests/endpoints/bookstore/bookstore_swagger_template.json" - local service_idl="${ROOT}/tests/endpoints/bookstore/bookstore_swagger.json" - local create_service_args=${service_idl} - - # Change the `host` to point to the proxy host (required by validation in service management). - # Change the `title` to identify this test (for readability in cloud console). - # Change the jwt audience to point to the proxy host (required for calling authenticated endpoints). - # Add in the `x-google-backend` to point to the backend URL (required for backend routing). - # Modify one path with `disable_auth`. 
- cat "${service_idl_tmpl}" \ - | jq ".host = \"${ENDPOINTS_SERVICE_NAME}\" \ + 'bookstore') + local service_idl_tmpl="${ROOT}/tests/endpoints/bookstore/bookstore_swagger_template.json" + local service_idl="${ROOT}/tests/endpoints/bookstore/bookstore_swagger.json" + local create_service_args=${service_idl} + + # Change the `host` to point to the proxy host (required by validation in service management). + # Change the `title` to identify this test (for readability in cloud console). + # Change the jwt audience to point to the proxy host (required for calling authenticated endpoints). + # Add in the `x-google-backend` to point to the backend URL (required for backend routing). + # Modify one path with `disable_auth`. + cat "${service_idl_tmpl}" \ + | jq ".host = \"${ENDPOINTS_SERVICE_NAME}\" \ | .\"x-google-endpoints\"[0].name = \"${ENDPOINTS_SERVICE_NAME}\" \ | .schemes = [\"${scheme}\"] \ | .info.title = \"${ENDPOINTS_SERVICE_TITLE}\" \ | .securityDefinitions.auth0_jwk.\"x-google-audiences\" = \"${PROXY_HOST}\" \ | . + { \"x-google-backend\": { \"address\": \"${BACKEND_HOST}\", \"protocol\": \"${backend_protocol}\" } } \ - | .paths.\"/echo_token/disable_auth\".get += { \"x-google-backend\": { \"address\": \"${BACKEND_HOST}\/echo_token\/disable_auth\", \"disable_auth\": true } } "\ - > "${service_idl}" - - if [[ ${BACKEND_PLATFORM} == "app-engine" ]]; then - tmpfile=$(mktemp) - cp "${service_idl}" "${tmpfile}" - cat "${tmpfile}" \ - | jq ".\"x-google-backend\" += { \"jwt_audience\": \"${APP_ENGINE_IAP_CLIENT_ID}\" }" \ + | .paths.\"/echo_token/disable_auth\".get += { \"x-google-backend\": { \"address\": \"${BACKEND_HOST}\/echo_token\/disable_auth\", \"disable_auth\": true } } "\ > "${service_idl}" - fi - ;; - 'echo') - local service_idl_tmpl="${ROOT}/tests/endpoints/grpc_echo/grpc-test-dynamic-routing.tmpl.yaml" - local service_idl="${ROOT}/tests/endpoints/grpc_echo/grpc-test-dynamic-routing.yaml" - local service_descriptor="${ROOT}/tests/endpoints/grpc_echo/proto/api_descriptor.pb" - local create_service_args="${service_idl} ${service_descriptor}" - - # Replace values for dynamic routing. - sed -e "s/ENDPOINTS_SERVICE_NAME/${ENDPOINTS_SERVICE_NAME}/g" \ - -e "s/ENDPOINTS_SERVICE_TITLE/${ENDPOINTS_SERVICE_TITLE}/g" \ - -e "s/BACKEND_ADDRESS/${BACKEND_HOST#https://}/g" \ - "${service_idl_tmpl}" > "${service_idl}" - ;; - *) - echo "Invalid backend ${BACKEND} for creating endpoints service" - return 1 ;; + + if [[ ${BACKEND_PLATFORM} == "app-engine" ]]; then + tmpfile=$(mktemp) + cp "${service_idl}" "${tmpfile}" + cat "${tmpfile}" \ + | jq ".\"x-google-backend\" += { \"jwt_audience\": \"${APP_ENGINE_IAP_CLIENT_ID}\" }" \ + > "${service_idl}" + fi + ;; + 'echo') + local service_idl_tmpl="${ROOT}/tests/endpoints/grpc_echo/grpc-test-dynamic-routing.tmpl.yaml" + local service_idl="${ROOT}/tests/endpoints/grpc_echo/grpc-test-dynamic-routing.yaml" + local service_descriptor="${ROOT}/tests/endpoints/grpc_echo/proto/api_descriptor.pb" + local create_service_args="${service_idl} ${service_descriptor}" + + # Replace values for dynamic routing. 
+ sed -e "s/ENDPOINTS_SERVICE_NAME/${ENDPOINTS_SERVICE_NAME}/g" \ + -e "s/ENDPOINTS_SERVICE_TITLE/${ENDPOINTS_SERVICE_TITLE}/g" \ + -e "s/BACKEND_ADDRESS/${BACKEND_HOST#https://}/g" \ + "${service_idl_tmpl}" > "${service_idl}" + ;; + *) + echo "Invalid backend ${BACKEND} for creating endpoints service" + return 1 ;; esac # Deploy the service config @@ -340,15 +340,15 @@ function setup() { if [[ ${PROXY_PLATFORM} == "cloud-run" ]]; then echo "Redeploying ESPv2 ${BACKEND_SERVICE_NAME} on Cloud Run(Fully managed)" - # - Hops: Allow our fake client IP restriction test (via API keys) to function. - # If we were restricting by our actual client ip, then the default of 0 would work. - # But we are actually testing with a fake xff header, so we need a higher hops count. - # On GKE we default to 2. AppHosting infra adds one more IP to xff, so 3 for serverless. + # - Hops: Allow our fake client IP restriction test (via API keys) to function. + # If we were restricting by our actual client ip, then the default of 0 would work. + # But we are actually testing with a fake xff header, so we need a higher hops count. + # On GKE we default to 2. AppHosting infra adds one more IP to xff, so 3 for serverless. proxy_args="${proxy_args}++--envoy_xff_num_trusted_hops=3" else echo "Deploying ESPv2 ${BACKEND_SERVICE_NAME} on Cloud Run(Anthos)" - # - Hops: Allow our fake client IP restriction test (via API keys) to function. - # Anthos has 2 more proxies than Cloud Run(Fully managed). + # - Hops: Allow our fake client IP restriction test (via API keys) to function. + # Anthos has 2 more proxies than Cloud Run(Fully managed). proxy_args="${proxy_args}++--envoy_xff_num_trusted_hops=5" fi proxy_args="${proxy_args}++--enable_debug" @@ -446,7 +446,7 @@ function tearDown() { "app-engine") gcloud app services delete "${BACKEND_SERVICE_NAME}" --quiet - ;; + ;; *) echo "No such backend platform ${BACKEND_PLATFORM}" exit 1 diff --git a/tests/e2e/scripts/gke/deploy.sh b/tests/e2e/scripts/gke/deploy.sh index a84994744..ba02a1b22 100755 --- a/tests/e2e/scripts/gke/deploy.sh +++ b/tests/e2e/scripts/gke/deploy.sh @@ -87,10 +87,10 @@ case "${BACKEND}" in SERVICE_IDL="${ROOT}/tests/endpoints/bookstore/bookstore_swagger.json" cat "${SERVICE_IDL_TMPL}" \ - | jq ".host = \"${APIPROXY_SERVICE}\" \ + | jq ".host = \"${APIPROXY_SERVICE}\" \ | .\"x-google-endpoints\"[0].name = \"${APIPROXY_SERVICE}\" \ - | .securityDefinitions.auth0_jwk.\"x-google-audiences\" = \"${APIPROXY_SERVICE}\"" \ - > "${SERVICE_IDL}" + | .securityDefinitions.auth0_jwk.\"x-google-audiences\" = \"${APIPROXY_SERVICE}\"" \ + > "${SERVICE_IDL}" CREATE_SERVICE_ARGS="${SERVICE_IDL}" ;; @@ -106,7 +106,7 @@ case "${BACKEND}" in ARGS="$ARGS -g" ;; *) echo "Invalid backend ${BACKEND}" - exit 1;; + exit 1 ;; esac diff --git a/tests/e2e/scripts/linux-test-kb-long-run.sh b/tests/e2e/scripts/linux-test-kb-long-run.sh index 98371aecf..80457f004 100755 --- a/tests/e2e/scripts/linux-test-kb-long-run.sh +++ b/tests/e2e/scripts/linux-test-kb-long-run.sh @@ -99,7 +99,7 @@ while true; do --api_key=${API_KEY} \ --auth_token=${JWT_TOKEN} \ --allow_unverified_cert=true \ - --host_header="${HOST_HEADER}" || ((BOOKSTORE_FAILURES++))) + --host_header="${HOST_HEADER}" || ((BOOKSTORE_FAILURES++))) echo "Starting bookstore API Key restriction test at $(date)." 
 (set -x;
@@ -108,7 +108,7 @@ while true; do
     --allow_unverified_cert=true \
     --key_restriction_tests=${ROOT}/tests/e2e/testdata/bookstore/key_restriction_test.json.template \
     --key_restriction_keys_file=${API_RESTRICTION_KEYS_FILE} \
-    --host_header="${HOST_HEADER}")
+  --host_header="${HOST_HEADER}")
 #TODO(taoxuy): b/148950591 enable stress test for cloud run on anthos
 if [[ -z ${HOST_HEADER} ]]; then
diff --git a/tests/e2e/scripts/prow-utilities.sh b/tests/e2e/scripts/prow-utilities.sh
index 8a8bcb9ee..5287b4987 100755
--- a/tests/e2e/scripts/prow-utilities.sh
+++ b/tests/e2e/scripts/prow-utilities.sh
@@ -182,7 +182,7 @@ function check_http_service() {
   local errors="$(mktemp /tmp/curl.XXXXX)"
   if [[ -n ${host_header} ]]; then
-    local http_response="$(curl -k -m 20 --write-out %{http_code} --silent --output ${errors} ${host} -H "HOST:${host_header}")"
+  local http_response="$(curl -k -m 20 --write-out %{http_code} --silent --output ${errors} ${host} -H "HOST:${host_header}")"
   else
     local http_response="$(curl -k -m 20 --write-out %{http_code} --silent --output ${errors} ${host})"
   fi
@@ -321,4 +321,5 @@ function get_apiproxy_service() {
 function install_e2e_dependencies() {
   pip3 install python-gflags
+  pip3 install six
 }
diff --git a/tests/endpoints/bookstore/app_template.yaml b/tests/endpoints/bookstore/app_template.yaml
index b6f3f1adc..53738001a 100644
--- a/tests/endpoints/bookstore/app_template.yaml
+++ b/tests/endpoints/bookstore/app_template.yaml
@@ -1,4 +1,4 @@
-runtime: nodejs10
+runtime: nodejs18
 service: SERVICE_NAME
 resources:
diff --git a/third_party/tools/coverage/cpp_unit.sh b/third_party/tools/coverage/cpp_unit.sh
index 30ef6bca4..cb6606844 100755
--- a/third_party/tools/coverage/cpp_unit.sh
+++ b/third_party/tools/coverage/cpp_unit.sh
@@ -24,7 +24,7 @@ elif [[ -n "${COVERAGE_TARGET}" ]]; then
 else
   # For fuzz builds, this overrides to just fuzz targets.
   COVERAGE_TARGETS=//src/... && [[ ${FUZZ_COVERAGE} == "true" ]] &&
-    COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzzer", //src/...)')"
+  COVERAGE_TARGETS="$(bazel query 'attr("tags", "fuzzer", //src/...)')"
 fi
 if [[ "${FUZZ_COVERAGE}" == "true" ]]; then
@@ -56,10 +56,10 @@ if [[ "$VALIDATE_COVERAGE" == "true" ]]; then
   fi
   COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc)
   if test ${COVERAGE_FAILED} -eq 1; then
-    echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD}
-    exit 1
+  echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD}
+  exit 1
   else
-    echo Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD}
+  echo Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD}
   fi
 fi
 echo "HTML coverage report is in ${COVERAGE_DIR}/index.html"
\ No newline at end of file
diff --git a/third_party/tools/coverage/gen_coverage.sh b/third_party/tools/coverage/gen_coverage.sh
index 9cfb4bf25..ee49f8500 100755
--- a/third_party/tools/coverage/gen_coverage.sh
+++ b/third_party/tools/coverage/gen_coverage.sh
@@ -4,11 +4,11 @@ set -e
 # Using GTEST_SHUFFLE here to workaround https://github.com/envoyproxy/envoy/issues/10108
 BAZEL_USE_LLVM_NATIVE_COVERAGE=1 GCOV=llvm-profdata-8 CC=clang-8 CXX=clang++-8 \
-    bazel coverage ${BAZEL_BUILD_OPTIONS} \
-    -c fastbuild --copt=-DNDEBUG --instrumentation_filter="//src/..." \
-    --test_timeout=2000 --cxxopt="-DENVOY_CONFIG_COVERAGE=1" --test_output=errors \
-    --test_arg="--log-path /dev/null" --test_arg="-l trace" --test_env=HEAPCHECK= \
-    --test_env=GTEST_SHUFFLE=1 --flaky_test_attempts=5 ${TARGET}
+  bazel coverage ${BAZEL_BUILD_OPTIONS} \
+  -c fastbuild --copt=-DNDEBUG --instrumentation_filter="//src/..." \
+  --test_timeout=2000 --cxxopt="-DENVOY_CONFIG_COVERAGE=1" --test_output=errors \
+  --test_arg="--log-path /dev/null" --test_arg="-l trace" --test_env=HEAPCHECK= \
+  --test_env=GTEST_SHUFFLE=1 --flaky_test_attempts=5 ${TARGET}
 COVERAGE_DIR="${SRCDIR}"/generated/${TARGET_PATH}
 mkdir -p "${COVERAGE_DIR}"
@@ -28,7 +28,7 @@ sed -i -e 's|>bazel-out/[^/]*/bin/\([^/]*\)/[^<]*/_virtual_includes/[^/]*|>\1|g'
 COVERAGE_VALUE=$(llvm-cov-8 export "${COVERAGE_BINARY}" -instr-profile="${COVERAGE_DATA}" \
   -ignore-filename-regex="${COVERAGE_IGNORE_REGEX}" -summary-only | \
-    python3 -c "import sys, json; print(json.load(sys.stdin)['data'][0]['totals']['lines']['percent'])")
+  python3 -c "import sys, json; print(json.load(sys.stdin)['data'][0]['totals']['lines']['percent'])")
 echo "Covered lines percentage: ${COVERAGE_VALUE}"
 echo "HTML coverage report is in ${COVERAGE_DIR}/index.html"
\ No newline at end of file
diff --git a/third_party/tools/gcrgc/gcrgc.sh b/third_party/tools/gcrgc/gcrgc.sh
index 35198d031..4ec78ec47 100755
--- a/third_party/tools/gcrgc/gcrgc.sh
+++ b/third_party/tools/gcrgc/gcrgc.sh
@@ -38,7 +38,7 @@ main(){
   IMAGE="${1}"
   DATE="${2}"
   for digest in $(gcloud container images list-tags ${IMAGE} --limit=999999 --sort-by=TIMESTAMP \
-      --filter="timestamp.datetime < '${DATE}'" --format='get(digest)'); do
+    --filter="timestamp.datetime < '${DATE}'" --format='get(digest)'); do
     (
       set -x
       gcloud container images delete -q --force-delete-tags "${IMAGE}@${digest}"
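# [Editor's note] A condensed sketch of the age-based GC pattern gcrgc.sh
# applies above; the image name and cutoff date are illustrative only:
#
#   IMAGE="gcr.io/my-project/my-image"
#   for digest in $(gcloud container images list-tags "${IMAGE}" \
#       --limit=999999 --sort-by=TIMESTAMP \
#       --filter="timestamp.datetime < '2024-01-01'" --format='get(digest)'); do
#     gcloud container images delete -q --force-delete-tags "${IMAGE}@${digest}"
#   done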