INFRA-809 | Ashvin | Move jsonnet to kutegen (#864)

* INFRA-809 | Ashvin | Replace jsonnet with kutegen

* INFRA-809 | Ashvin | Install kutegen on build and test

* INFRA-809 | Ashvin | Fix 'static' modifier out of order with the JLS suggestions in KutegenClient.java

* INFRA-809 | Ashvin | Remove all templates files

* INFRA-809 | Ashvin | Add kutegen submodule

* INFRA-809 | Ashvin | Use go run kutegen instead of build

This will save developers the extra step of building kutegen each time they make a change in the template files, and thus save time and effort. The go.work file lets us run `go build` and `go run` in the parent directory.

* INFRA-809 | Ashvin | Remove redundant dirs from Dockerfile

* INFRA-809 | Ashvin | Remove go build in maven file.

This was causing various dependency issues in the build process. I will add a Makefile if there is a need in the future. For now, all local development will be done using the `go run` command, so this will not be necessary.

* INFRA-809 | Ashvin | Update kutegen version

* INFRA-809 | Ashvin | Revert YAML support from kutegen

* INFRA-809 | Ashvin | Rebase the commit to kutegen

* INFRA-809 | Ashvin | Fix test

Using changes in the kutegen

* INFRA-809 | Ashvin | Remove the templates dir

* INFRA-809 | Ashvin | Clean generated dirs with mvn clean
This commit is contained in:
Ashvin S
2024-04-05 12:28:32 +05:30
committed by GitHub
parent 8f192f7689
commit 935d9d1012
93 changed files with 168 additions and 7209 deletions

1
.gitignore vendored
View File

@@ -50,3 +50,4 @@ pipelines
pipeline_manifests
user-mapping.yaml
**/vendor/
bin/

3
.gitmodules vendored Normal file
View File

@@ -0,0 +1,3 @@
[submodule "kutegen"]
path = kutegen
url = ../kutegen

View File

@@ -1,13 +1,18 @@
ARG BUILDER_CACHE_TARGET=193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/spring-boot-maven:1.0
FROM 193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/golang:1.21.1 as golang_builder
WORKDIR /app
COPY ./kutegen/go.mod ./kutegen/go.sum ./
RUN go mod download
COPY ./kutegen ./
RUN go build -o kutegen cmd/main.go
FROM ${BUILDER_CACHE_TARGET} as builder
ARG ARTIFACT_VERSION=0.0.1-SNAPSHOT
RUN rm -rf /build && mkdir -p /build
WORKDIR /build
COPY src /build/src
COPY pom.xml /build
COPY templates /build/src/templates
COPY gocd-templates /build/src/gocd-templates
COPY scripts /build/src/scripts
COPY entrypoint.sh /build/entrypoint.sh
RUN wget -O elastic-apm.jar https://repo1.maven.org/maven2/co/elastic/apm/elastic-apm-agent/1.42.0/elastic-apm-agent-1.42.0.jar
RUN mvn -Dhttps.protocols=TLSv1.2 -B dependency:resolve dependency:resolve-plugins
@@ -20,10 +25,8 @@ WORKDIR /usr/local/
COPY --from=builder /build/elastic-apm.jar /usr/local/elastic-apm.jar
COPY --from=builder /build/src/main/resources/elasticapm.properties /usr/local/elasticapm.properties
COPY --from=builder /build/target/deployment-portal-backend-${ARTIFACT_VERSION}.jar /usr/local/deployment-portal-backend.jar
COPY --from=builder /build/src/templates /usr/local/templates
COPY --from=builder /build/src/gocd-templates /usr/local/gocd-templates
COPY --from=builder /build/src/scripts /usr/local/scripts
COPY --from=builder /build/entrypoint.sh /usr/local/entrypoint.sh
COPY --from=golang_builder /app/kutegen /usr/local/bin/kutegen
RUN apt-get update && \
apt-get install telnet curl dnsutils kafkacat -y && \
adduser --system --uid 4000 --disabled-password non-root-user && \

View File

@@ -1,4 +1,5 @@
# Deployment Portal Backend
- Run `git submodule update --init --recursive` to pull the kutegen submodule.
- Use `export ENVIRONMENT=test` to avoid applying changes to the cluster.
- Docker Setup
- To run the application just do `docker-compose up`

3
go.work Normal file
View File

@@ -0,0 +1,3 @@
go 1.21.0
use ./kutegen

19
go.work.sum Normal file
View File

@@ -0,0 +1,19 @@
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc=
github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View File

@@ -1,5 +0,0 @@
// Entry point for the GoCD pipeline templates: maps output file names to
// documents to generate (multi-file output, e.g. `jsonnet -m`).
local pipelines = import 'pipelines.jsonnet';
{
// Single output file: the full generated GoCD pipelines definition.
'pipelines.json': pipelines,
}

View File

@@ -1,156 +0,0 @@
// Builds GoCD "materials" (source-control / upstream-pipeline inputs) for
// each generated pipeline, keyed by the manifest's infraVertical and env.
local pipeline_helper = import 'pipeline_helper.jsonnet';
local pipeline_manifest = import 'pipeline_manifest.json';
local pipelines = pipeline_manifest.pipelines;
local name = pipeline_manifest.name;
local infraVertical = pipeline_manifest.infraVertical;
// infraVertical -> env -> GitHub org clone-URL prefix. Every env falls back
// to the vertical's `default`; lending additionally routes the two
// data-platform envs to the navi-data org.
local githubOrgMap = {
lending:: {
default:: 'git@github.com:navi-medici/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
'data-platform-prod':: 'git@github.com:navi-data/',
'data-platform-nonprod':: 'git@github.com:navi-data/',
},
insurance:: {
default:: 'git@github.com:navi-gi/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
infra:: {
// NOTE(review): single-colon 'default:' (visible field) here and in the
// verticals below, unlike 'default::' (hidden) above — presumably
// unintentional but harmless since the parent field is hidden; confirm.
default: 'git@github.com:navi-infra/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
amc:: {
default: 'git@github.com:navi-amc/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
sa:: {
default: 'git@github.com:navi-sa/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
colending:: {
default: 'git@github.com:navi-co-lending/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
'navi-pay':: {
default: 'git@github.com:navi-pay/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
'navi-saas':: {
default: 'git@github.com:navi-saas/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
'navi-ppl':: {
default: 'git@github.com:navi-ppl/',
prod:: self.default,
qa:: self.default,
dev:: self.default,
uat:: self.default,
perf:: self.default,
test:: self.default,
build:: self.default,
},
};
{
// Material definition for (repo name, env), keyed by pipeline type.
// All fields are hidden (::) so callers index exactly one of them.
getMaterial(name, env):: {
// 'test' pipelines use the GitHub PR plugin material.
test:: {
plugin_configuration: {
id: 'github.pr',
version: '1.4.0-RC2',
},
options: {
url: githubOrgMap[infraVertical][env] + name + '.git',
branch: 'master',
},
destination: 'test',
},
build:: {
git: {
git: githubOrgMap[infraVertical][env] + name + '.git',
shallow_clone: true,
branch: 'master',
},
},
'rds-deploy':: {
mygit: {
git: githubOrgMap[infraVertical][env] + name + '.git',
shallow_clone: true,
branch: 'master',
},
},
// The other AWS resource deploys reuse the rds-deploy material verbatim.
's3-deploy':: self['rds-deploy'],
'iam-deploy':: self['rds-deploy'],
'redis-deploy':: self['rds-deploy'],
'docdb-deploy':: self['rds-deploy'],
'migrate-deploy':: {
git: {
git: githubOrgMap[infraVertical][env] + name + '.git',
shallow_clone: true,
branch: 'master',
},
},
},
// migrate-deploy pipelines additionally depend on their upstream
// pipeline's stage (dependency material named 'code'); other types add {}.
getUpstreamMaterial(name, pipeline)::
(if pipeline.type == 'migrate-deploy' then {
code: {
pipeline: pipeline_helper.getUpstreamPipelineName(pipeline),
stage: pipeline_helper.getUpstreamPipelineStage(pipeline),
},
} else {}),
// Full materials object for a pipeline: SCM material merged with any
// upstream dependency material.
material(name, pipeline):: $.getMaterial(name, pipeline.env)[pipeline.type] + $.getUpstreamMaterial(name, pipeline),
pipelineName(name, pipeline):: $.getPipelineName(name, pipeline.type, pipeline.env),
// Naming scheme: test/build -> '<name>-<type>'; all AWS resource deploys
// share one '<name>-<env>-all-resource-deploy' pipeline; everything else
// is '<name>-<env>-<type>'.
getPipelineName(name, type, env)::
if type == 'test' || type == 'build' then
(name + '-' + type)
else if type == 'rds-deploy' || type == 's3-deploy' || type == 'redis-deploy' || type == 'docdb-deploy' || type == 'iam-deploy' then
(name + '-' + env + '-all-resource-deploy')
else (name + '-' + env + '-' + type),
}

View File

@@ -1,70 +0,0 @@
// Helpers for wiring generated GoCD pipelines together: which pipeline is
// upstream of each deploy env, and where build artifacts are fetched from.
local pipeline_manifest = import 'pipeline_manifest.json';
local pipelines = pipeline_manifest.pipelines;
local name = pipeline_manifest.name;
local buildPipelineName = name + '-build';
local devPipelineName = name + '-dev-migrate-deploy';
local qaPipelineName = name + '-qa-migrate-deploy';
local uatPipelineName = name + '-uat-migrate-deploy';
local prodPipelineName = name + '-prod-migrate-deploy';
// Set of envs present in the manifest (values are just `true`).
local pipelineMap = {
[pipeline.env]: true
for pipeline in pipelines
};
// stage.type -> approvalType lookup built from a stage list.
local approvalTypeMap(stages) = {
[stage.type]: stage.approvalType
for stage in stages
};
local hasDevPipeline = std.objectHas(pipelineMap, 'dev');
local hasQaPipeline = std.objectHas(pipelineMap, 'qa');
local hasUatPipeline = std.objectHas(pipelineMap, 'uat');
local hasProdPipeline = std.objectHas(pipelineMap, 'prod');
{
// Nearest existing upstream pipeline for a deploy env, falling back
// through uat -> qa -> dev -> build. NOTE(review): an `if` with no `else`
// yields null for envs outside dev/qa/uat/prod — confirm callers only
// pass those envs.
getUpstreamPipelineName(pipeline):: (
if pipeline.env == 'dev' then buildPipelineName
else if pipeline.env == 'qa' then (
if hasDevPipeline then devPipelineName
else buildPipelineName
)
else if pipeline.env == 'uat' then (
if hasQaPipeline then qaPipelineName
else if hasDevPipeline then devPipelineName
else buildPipelineName
)
else if pipeline.env == 'prod' then (
if hasUatPipeline then uatPipelineName
else if hasQaPipeline then qaPipelineName
else if hasDevPipeline then devPipelineName
else buildPipelineName
)
),
// Stage of the upstream pipeline to depend on: 'deploy' when the upstream
// is a migrate-deploy pipeline, otherwise 'build'.
getUpstreamPipelineStage(pipeline):: (
if pipeline.env == 'dev' then 'build'
else if pipeline.env == 'qa' then (if hasDevPipeline then 'deploy' else 'build')
else if pipeline.env == 'uat' then (if (hasQaPipeline || hasDevPipeline) then 'deploy' else 'build')
else if pipeline.env == 'prod' then (if (hasQaPipeline || hasDevPipeline || hasUatPipeline) then 'deploy' else 'build')
),
// Job name matches the stage name in the generated pipelines.
getUpstreamPipelineJob(pipeline):: $.getUpstreamPipelineStage(pipeline),
// Set of stage types declared for a pipeline (values are just `true`).
stageMap(pipeline):: {
[stage.type]: true
for stage in pipeline.stages
},
// Slash-separated chain of ancestor pipelines used as the `pipeline` field
// of a GoCD fetch task, so the build artifact can be fetched transitively.
artifactPipeline(pipeline)::
if pipeline.env == 'dev' then buildPipelineName
else if pipeline.env == 'qa' then (
if hasDevPipeline then (buildPipelineName + '/' + devPipelineName)
else buildPipelineName
)
else if pipeline.env == 'uat' then (
buildPipelineName +
(if hasDevPipeline then ('/' + devPipelineName) else '') +
(if hasQaPipeline then ('/' + qaPipelineName) else '')
)
else if pipeline.env == 'prod' then (
buildPipelineName +
(if hasDevPipeline then ('/' + devPipelineName) else '') +
(if hasQaPipeline then ('/' + qaPipelineName) else '') +
(if hasUatPipeline then ('/' + uatPipelineName) else '')
),
// Approval type ('auto'/'manual') declared in the manifest for stageType.
getApprovalType(allStages,stageType):: approvalTypeMap(allStages)[stageType]
}

View File

@@ -1,92 +0,0 @@
// Top-level GoCD config document: format_version 3 plus one pipeline entry
// per pipeline declared in the manifest.
local pipeline_manifest = import 'pipeline_manifest.json';
local pipelines = pipeline_manifest.pipelines;
local name = pipeline_manifest.name;
local util = import 'material.jsonnet';
local stage_util = import 'stages.jsonnet';
local infraVertical = pipeline_manifest.infraVertical;
// infraVertical -> env -> GoCD pipeline group name.
// NOTE(review): 'lending' is a visible field (single colon) while the rest
// are hidden (::) — presumably inconsistent by accident; confirm it does not
// leak into the manifested output.
local groupMap = {
lending: {
test: 'Medici-test',
build: 'Medici',
dev: 'Medici-deploy-dev',
qa: 'Medici-deploy-qa',
uat: 'Medici-deploy-uat',
prod: 'Medici-deploy-prod',
cmd: 'Infrastructure',
'data-platform-prod': 'Data',
'data-platform-nonprod': 'Data',
},
insurance:: {
test:: 'GI-test',
build: 'GI',
dev: 'GI-deploy-dev',
qa: 'GI-deploy-qa',
uat: 'GI-deploy-uat',
prod: 'GI-deploy-prod',
},
amc:: {
test:: 'amc-test',
build: 'amc',
dev: 'amc-deploy-dev',
qa: 'amc-deploy-qa',
prod: 'amc-deploy-prod',
},
sa:: {
test:: 'SA-test',
build: 'SA',
dev: 'SA-deploy-dev',
qa: 'SA-deploy-qa',
uat: 'SA-deploy-uat',
prod: 'SA-deploy-prod',
},
// NOTE(review): empty map — groupMap['infra'][env] would error at eval
// time; presumably infra pipelines are never generated via this template.
infra:: {},
colending::{
test:: 'Co-Lending-test',
build: 'Co-Lending',
dev: 'Co-Lending-deploy-dev',
qa: 'Co-Lending-deploy-qa',
prod: 'Co-Lending-deploy-prod',
},
'navi-pay'::{
// NOTE(review): test group equals the dev deploy group here (and for
// navi-saas / navi-ppl below), unlike the '<Vertical>-test' pattern
// above — looks like copy-paste; confirm intended.
test:: 'Navi-Pay-deploy-dev',
build: 'Navi-Pay',
dev: 'Navi-Pay-deploy-dev',
qa: 'Navi-Pay-deploy-qa',
uat: 'Navi-Pay-deploy-uat',
prod: 'Navi-Pay-deploy-prod',
},
'navi-saas'::{
test:: 'Navi-Saas-deploy-dev',
build: 'Navi-Saas',
dev: 'Navi-Saas-deploy-dev',
qa: 'Navi-Saas-deploy-qa',
prod: 'Navi-Saas-deploy-prod',
},
'navi-ppl'::{
test:: 'Navi-PPL-deploy-dev',
build: 'Navi-PPL',
dev: 'Navi-PPL-deploy-dev',
qa: 'Navi-PPL-deploy-qa',
prod: 'Navi-PPL-deploy-prod',
},
};
{
format_version: 3,
// Object comprehension: one generated pipeline per manifest entry, named
// via material.jsonnet's naming scheme.
pipelines: {
[util.pipelineName(name, pipeline)]: {
group: groupMap[infraVertical][pipeline.env],
// 'test' pipelines wrap their (plugin) material under the pipeline
// name; all other types use the material object directly.
materials: (if pipeline.type == 'test' then {
[util.pipelineName(name, pipeline)]: util.material(name, pipeline),
} else util.material(name, pipeline)),
environment_variables: {
ENVIRONMENT: pipeline.env,
APP_NAME: name,
},
stages: stage_util.getStages(pipeline),
},
for pipeline in pipelines
},
}

View File

@@ -1,66 +0,0 @@
{
"name": "spring-boot-demo",
"pipelines": [
{
"type": "test",
"env": "test",
"stages": [
{
"type": "test"
}
]
},
{
"type": "build",
"env": "build",
"stages": [
{
"type": "build"
}
]
},
{
"type": "migrate-deploy",
"env": "dev",
"stages": [
{
"type": "migrate",
"approvalType":"auto"
},
{
"type": "deploy",
"approvalType":"auto"
}
]
},
{
"type": "migrate-deploy",
"env": "qa",
"stages": [
{
"type": "migrate",
"approvalType":"auto"
},
{
"type": "deploy",
"approvalType":"auto"
}
]
},
{
"type": "migrate-deploy",
"env": "prod",
"stages": [
{
"type": "migrate",
"approvalType":"manual"
},
{
"type": "deploy",
"approvalType":"manual"
}
]
}
],
"infraVertical": "medici"
}

View File

@@ -1,243 +0,0 @@
// Stage definitions for each pipeline type (test, build, migrate, deploy,
// AWS resource deploys), assembled by getStages at the bottom.
// NOTE(review): materialUtil, name and pipelines appear unused in this file —
// TODO confirm before assuming they can be dropped.
local materialUtil = import 'material.jsonnet';
local helpers = import 'pipeline_helper.jsonnet';
local pipeline_manifest = import 'pipeline_manifest.json';
local name = pipeline_manifest.name;
local pipelines = pipeline_manifest.pipelines;
// env -> stage type -> GoCD elastic agent profile id.
local elastic_profile_map = {
build: {
build: 'prod-default',
},
test: {
test: 'prod-default',
},
dev: {
migrate: 'prod-default',
deploy: 'nonprod-infra',
},
qa: {
migrate: 'prod-default',
deploy: 'nonprod-infra',
},
uat: {
migrate: 'prod-default',
deploy: 'nonprod-infra',
},
prod: {
migrate: 'prod-default',
deploy: 'prod-infra',
},
};
// pipeline type -> infra-provisioner resource argument.
// NOTE(review): computed and passed to deployAwsResourcesWithPlan below, but
// that function ignores its `type` parameter and always provisions 'all' —
// confirm this is intentional.
local infra_provisioner_arg = {
'rds-deploy': 'database',
's3-deploy': 's3-buckets',
'iam-deploy': 'iam-roles',
'redis-deploy': 'redis',
'docdb-deploy': 'docdb',
};
{
// Single 'test' stage: refresh submodules, then run docker-compose in the
// 'test' working directory (ECR login first).
test(pipeline):: [
{
test:{
fetch_materials: true,
approval: {
type: 'success',
allow_only_on_success: false,
},
jobs: {
test: {
timeout: 0,
elastic_profile_id: elastic_profile_map[pipeline.env].test,
tasks: [
{
exec: {
command: 'bash',
arguments: [
'-c',
'git submodule update --remote --init',
],
working_directory: 'test',
run_if: 'passed',
},
},
{
exec: {
arguments: [
'-c',
'eval $(aws ecr get-login --no-include-email --region ap-south-1 --registry-id 193044292705) && docker-compose up --abort-on-container-exit',
],
command: 'bash',
run_if: 'passed',
working_directory: 'test',
},
},
],
},
},
}
},
],
// Single 'build' stage: run `docker-build <name>` and publish the
// resulting image_version file as a build artifact.
build(pipeline):: [
{
build: {
fetch_materials: true,
jobs: {
build: {
timeout: 0,
elastic_profile_id: elastic_profile_map[pipeline.env].build,
tasks: [
{
exec: {
arguments: [
'-c',
'docker-build' + ' ' + pipeline_manifest.name,
],
command: 'bash',
run_if: 'passed',
},
},
],
artifacts: [
{
build: {
source: 'image_version',
destination: '',
},
},
],
},
},
},
},
],
// 'migration' stage: fetch image_version from the ancestor build pipeline,
// then run the migration jar inside that image with DB credentials from
// the environment. Approval type comes from the manifest.
migrate(pipeline):: [
{
migration: {
fetch_materials: true,
approval: {
type: helpers.getApprovalType(pipeline.stages,'migrate'),
allow_only_on_success: false,
},
jobs: {
migration: {
elastic_profile_id: elastic_profile_map[pipeline.env].migrate,
tasks: [
{
fetch: {
is_file: true,
source: 'image_version',
destination: 'deployment',
pipeline: helpers.artifactPipeline(pipeline),
stage: 'build',
job: 'build',
run_if: 'passed',
},
},
{
script: ' cd deployment \n . fetch_config_portal \n eval $(aws ecr get-login --no-include-email --region ap-south-1 --registry-id 193044292705)\n docker run -w /usr/local \\ \n -e DATASOURCE_URL=${DATASOURCE_URL} -e DATASOURCE_USERNAME=${DATASOURCE_USERNAME} \\ \n -e DATASOURCE_PASSWORD=${DATASOURCE_PASSWORD} `cat image_version` java -jar database.jar',
},
],
},
},
},
},
],
// 'deploy' stage: fetch image_version from the ancestor build pipeline and
// run `portal_deploy <env> <image>`. Approval type comes from the manifest.
deploy(pipeline):: [
{
deploy: {
fetch_materials: true,
approval: {
type: helpers.getApprovalType(pipeline.stages,'deploy'),
allow_only_on_success: false,
},
jobs: {
deploy: {
timeout: 0,
elastic_profile_id: elastic_profile_map[pipeline.env].deploy,
tasks: [
{
fetch: {
is_file: true,
source: 'image_version',
destination: 'deployment',
pipeline: helpers.artifactPipeline(pipeline),
stage: 'build',
job: 'build',
run_if: 'passed',
},
},
{
exec: {
arguments: [
'-c',
'portal_deploy ${ENVIRONMENT} `cat image_version`',
],
command: 'bash',
run_if: 'passed',
working_directory: 'deployment',
},
},
],
},
},
},
},
],
// Two manual stages for AWS resource pipelines: a dry-run 'plan' stage
// (ADDITIONAL_OPTIONS=--plan) followed by the real 'deploy' stage, both
// invoking infra-provisioner-v2 against the fetched manifest.
deployAwsResourcesWithPlan(pipeline, type):: [
{
plan: {
approval: {
type: "manual",
allow_only_on_success: false
},
environment_variables: {
"ADDITIONAL_OPTIONS": "--plan"
},
jobs: {
"deploy": {
elastic_profile_id: 'prod-infra',
tasks: [
{
script: '. fetch_manifest\n infra-provisioner-v2 -m $MANIFEST ${ADDITIONAL_OPTIONS} all\n'
}
]
}
}
}
},
{
deploy: {
approval: {
type: "manual",
allow_only_on_success: false
},
environment_variables: {
"ADDITIONAL_OPTIONS": ""
},
jobs: {
"deploy": {
elastic_profile_id: 'prod-infra',
tasks: [
{
script: ". fetch_manifest\n infra-provisioner-v2 -m $MANIFEST ${ADDITIONAL_OPTIONS} all\n"
}
]
}
}
}
},
],
// Dispatch on pipeline.type. migrate-deploy concatenates whichever of the
// migrate/deploy stages the manifest declares. NOTE(review): no final
// `else`, so an unknown type yields null — confirm the manifest schema
// restricts types to the ones handled here.
getStages(pipeline)::
if pipeline.type == 'test' then $.test(pipeline)
else if pipeline.type == 'build' then $.build(pipeline)
else if pipeline.type == 'migrate-deploy' then (
(if std.objectHas(helpers.stageMap(pipeline), 'migrate') then $.migrate(pipeline) else []) +
(if std.objectHas(helpers.stageMap(pipeline), 'deploy') then $.deploy(pipeline) else [])
) else if pipeline.type == 'rds-deploy' ||
pipeline.type == 's3-deploy' ||
pipeline.type == 'redis-deploy' ||
pipeline.type == 'docdb-deploy' ||
pipeline.type == 'iam-deploy' then $.deployAwsResourcesWithPlan(pipeline, infra_provisioner_arg[pipeline.type])
}

1
kutegen Submodule

Submodule kutegen added at b23e2dedf6

19
pom.xml
View File

@@ -325,6 +325,25 @@
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-clean-plugin</artifactId>
<configuration>
<filesets>
<fileset>
<directory>${project.basedir}/kubernetes_manifests</directory>
</fileset>
<fileset>
<directory>${project.basedir}/manifests</directory>
</fileset>
<fileset>
<directory>${project.basedir}/pipeline_manifests</directory>
</fileset>
<fileset>
<directory>${project.basedir}/pipelines</directory>
</fileset>
</filesets>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@@ -3,7 +3,7 @@ package com.navi.infra.portal.service.gocd;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.navi.infra.portal.domain.gocd.PipelineManifest;
import com.navi.infra.portal.repository.PipelineManifestRepository;
import com.navi.infra.portal.util.JsonnetUtil;
import com.navi.infra.portal.util.KubernetesManifestGenerator;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileReader;
@@ -30,8 +30,6 @@ import org.springframework.web.server.ResponseStatusException;
public class PipelineManifestService {
private final String PIPELINE_MANIFEST_PATH = "pipeline_manifests";
private final String PIPELINE_TEMPLATES_FOLDER = "gocd-templates";
private final String mainJsonnet = "main.jsonnet";
private final String PIPELINE_YAML_PATH = "pipelines";
private final String PIPELINE_MANIFEST_FILE_NAME = "pipeline_manifest.json";
private final String PIPELINE_YAML_FILE_NAME = "pipelines.json";
@@ -40,7 +38,7 @@ public class PipelineManifestService {
private final PipelineManifestRepository pipelineManifestRepository;
private final JsonnetUtil jsonnetUtil;
private final KubernetesManifestGenerator kubernetesManifestGenerator;
public List<String> pipelineManifestList() {
return pipelineManifestRepository
@@ -88,9 +86,12 @@ public class PipelineManifestService {
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
PrintStream printStream = new PrintStream(byteArrayOutputStream, false,
StandardCharsets.UTF_8);
var exitCode = jsonnetUtil.generateKManifests(getPipelinesManifestPath(pipelineManifest),
getPipelinesPath(pipelineManifest), printStream, new String[]{}, mainJsonnet,
PIPELINE_TEMPLATES_FOLDER);
var exitCode = kubernetesManifestGenerator.generate(
printStream, new String[]{"gocd-pipeline",
"-f", getPipelinesManifestPath(pipelineManifest) + PIPELINE_MANIFEST_FILE_NAME,
"-o", getPipelinesPath(pipelineManifest)
}
);
if (exitCode != 0) {
throw new RuntimeException(String.format("Could not generate manifests %s",

View File

@@ -31,7 +31,7 @@ import com.navi.infra.portal.domain.manifest.StatusMarker;
import com.navi.infra.portal.dto.manifest.SecurityGroup;
import com.navi.infra.portal.exceptions.KubernetesManifestException;
import com.navi.infra.portal.service.manifest.DeploymentService;
import com.navi.infra.portal.util.JsonnetUtil;
import com.navi.infra.portal.util.KubernetesManifestGenerator;
import com.navi.infra.portal.util.MapDiffUtil;
import com.navi.infra.portal.util.kubernetes.KubernetesManifestUtils;
import io.kubernetes.client.openapi.ApiException;
@@ -71,8 +71,6 @@ import org.springframework.stereotype.Service;
@Slf4j
public class KubernetesManifestService {
private static final String JSONNET_TEMPLATES_FOLDER = "templates";
private static final String GENERATED_KUBERNETES_MANIFEST_FOLDER = "kubernetes_manifests";
private static final String MANIFEST_INPUT_FILE_PATH = "manifests";
@@ -93,7 +91,7 @@ public class KubernetesManifestService {
private final String environment;
private final JsonnetUtil jsonnetUtil;
private final KubernetesManifestGenerator kubernetesManifestGenerator;
private final KubeClient kubeClient;
private final long securityGroupIdFetchRetryFixedBackoff;
@@ -104,7 +102,7 @@ public class KubernetesManifestService {
ObjectMapper objectMapper,
Executor executor,
@Value("${ENVIRONMENT:test}") String environment,
JsonnetUtil jsonnetUtil,
KubernetesManifestGenerator kubernetesManifestGenerator,
KubeClient kubeClient,
@Value("${kubernetes.security-group.id.fetch.fixed-backoff.interval}") long securityGroupIdFetchRetryFixedBackoff,
@Value("${kubernetes.security-group.id.fetch.fixed-backoff.max-attempts}") int securityGroupIdFetchRetryMaxAttempts
@@ -113,7 +111,7 @@ public class KubernetesManifestService {
this.objectMapper = objectMapper;
this.executor = executor;
this.environment = environment;
this.jsonnetUtil = jsonnetUtil;
this.kubernetesManifestGenerator = kubernetesManifestGenerator;
this.kubeClient = kubeClient;
this.securityGroupIdFetchRetryFixedBackoff = securityGroupIdFetchRetryFixedBackoff;
this.securityGroupIdFetchRetryMaxAttempts = securityGroupIdFetchRetryMaxAttempts;
@@ -132,7 +130,7 @@ public class KubernetesManifestService {
public void generateManifestsAndApply(Manifest manifest) {
if (manifest.getDeployment() != null) {
final String kManifestPath = generateManifests(manifest, null);
final String kManifestPath = generateManifests(manifest, "null");
log.info("Generated kubernetes manifests at {}", kManifestPath);
if (environment.equals("test")) {
log.info(
@@ -340,8 +338,10 @@ public class KubernetesManifestService {
PrintStream printStream = new PrintStream(byteArrayOutputStream, false,
StandardCharsets.UTF_8);
String writePath = getKubernetesManifestPath(manifest);
int exitCode = generateKManifests(getManifestPath(manifest), writePath, printStream,
new String[]{"--ext-str", "IMAGE=" + image }, mainJsonnet);
int exitCode = generateKManifests(printStream,
new String[]{"generate", "--image", image,
"--file", getManifestPath(manifest) + MANIFEST_INPUT_FILE_NAME,
"--output", writePath});
if (exitCode > 0) {
throw new KubernetesManifestException(
format("Not able to generate kubernetes manifests: %s",
@@ -501,15 +501,8 @@ public class KubernetesManifestService {
return jsonObject;
}
private int generateKManifests(
String readPath,
String writePath,
PrintStream ps,
String[] jsonnetAdditionalOptions,
String mainJsonnet
) {
return jsonnetUtil.generateKManifests(readPath, writePath, ps, jsonnetAdditionalOptions,
mainJsonnet, JSONNET_TEMPLATES_FOLDER);
private int generateKManifests(PrintStream ps, String[] jsonnetAdditionalOptions) {
return kubernetesManifestGenerator.generate(ps, jsonnetAdditionalOptions);
}
/**

View File

@@ -1,19 +0,0 @@
package com.navi.infra.portal.util;
import java.io.PrintStream;
public interface JsonnetUtil {
int generateKManifests(
String readPath, String writePath, PrintStream ps,
String[] jsonnetAdditionalOptions, String mainJsonnet, String jsonnetTemplatesFolder
);
int run(
String writePath,
PrintStream ps,
String[] jsonnetAdditionalOptions,
String mainJsonnet,
String jsonnetTemplatesFolder
);
}

View File

@@ -1,68 +0,0 @@
package com.navi.infra.portal.util;
import static java.lang.System.arraycopy;
import static scala.None$.empty;
import java.io.PrintStream;
import org.springframework.stereotype.Component;
import os.package$;
import sjsonnet.DefaultParseCache;
import sjsonnet.SjsonnetMain;
@Component
public class JsonnetUtilImpl implements JsonnetUtil {
@Override
public int generateKManifests(
String readPath, String writePath, PrintStream ps,
String[] jsonnetAdditionalOptions, String mainJsonnet, String jsonnetTemplatesFolder
) {
String[] jsonnetOptions = new String[]{jsonnetTemplatesFolder + "/" + mainJsonnet, "-J",
readPath, "-c", "-m", writePath};
return run(jsonnetOptions, jsonnetAdditionalOptions, ps);
}
@Override
public int run(
String writePath,
PrintStream ps,
String[] jsonnetAdditionalOptions,
String mainJsonnet,
String jsonnetTemplatesFolder
) {
String[] jsonnetOptions = new String[]{jsonnetTemplatesFolder + "/" + mainJsonnet,
"-c", "-m", writePath};
return run(jsonnetOptions, jsonnetAdditionalOptions, ps);
}
private static int run(
String[] options,
String[] additionalOptions,
PrintStream ps
) {
PrintStream stdErr = System.err;
System.setErr(ps);
try {
var jsonnetCommand = new String[additionalOptions.length + options.length];
arraycopy(options, 0, jsonnetCommand, 0, options.length);
arraycopy(additionalOptions, 0, jsonnetCommand, options.length,
additionalOptions.length);
return SjsonnetMain.main0(jsonnetCommand,
new DefaultParseCache(),
System.in,
System.out,
System.err,
package$.MODULE$.pwd(),
empty(),
empty()
);
} finally {
System.setErr(stdErr);
}
}
}

View File

@@ -0,0 +1,8 @@
package com.navi.infra.portal.util;
import java.io.PrintStream;
public interface KubernetesManifestGenerator {
int generate(PrintStream ps, String[] args);
}

View File

@@ -0,0 +1,52 @@
package com.navi.infra.portal.util;
import static java.lang.System.arraycopy;
import java.io.PrintStream;
import java.util.Arrays;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
@Component
@Slf4j
public class KutegenClient implements KubernetesManifestGenerator {
private final String[] kutegenPath;
public KutegenClient(@Value("${ENVIRONMENT:test}") String environment) {
if (environment.equals("test")) {
kutegenPath = new String[]{"go", "run", "kutegen/cmd/main.go"};
} else {
kutegenPath = new String[]{"bin/kutegen"};
}
}
@Override
public int generate(PrintStream ps, String[] args) {
final var exitCode = run(args, ps);
log.info("Kutegen exit code: {}", exitCode);
return exitCode;
}
private int run(String[] args, PrintStream errorStream) {
final var stdErr = System.err;
System.setErr(errorStream);
try {
final var command = new String[kutegenPath.length + args.length];
arraycopy(kutegenPath, 0, command, 0, kutegenPath.length);
arraycopy(args, 0, command, kutegenPath.length, args.length);
log.info("Running kutegen with options: {}", Arrays.toString(command));
final var processBuilder = new ProcessBuilder(command);
final var process = processBuilder.inheritIO().start();
return process.waitFor();
} catch (Exception e) {
throw new RuntimeException("Error running kutegen", e);
} finally {
System.setErr(stdErr);
}
}
}

View File

@@ -9,7 +9,7 @@ import static java.util.Objects.requireNonNull;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.navi.infra.portal.exceptions.KubernetesManifestException;
import com.navi.infra.portal.util.JsonnetUtil;
import com.navi.infra.portal.util.KubernetesManifestGenerator;
import com.navi.infra.portal.util.kubernetes.KubernetesManifestUtils;
import java.io.ByteArrayOutputStream;
import java.io.File;
@@ -33,7 +33,7 @@ public class IngressGroupApplierImpl implements IngressGroupApplier {
private final String kubernetesManifestsPath;
private final JsonnetUtil jsonnetUtil;
private final KubernetesManifestGenerator kubernetesManifestGenerator;
private final KubernetesManifestUtils kubernetesManifestUtils;
private final ObjectMapper jsonMapper;
@@ -41,13 +41,13 @@ public class IngressGroupApplierImpl implements IngressGroupApplier {
public IngressGroupApplierImpl(
@Value("templates/shared_ingress_config") String jsonnetTemplatesFolder,
@Value("kubernetes_manifests") String kubernetesManifestsPath,
JsonnetUtil jsonnetUtil,
KubernetesManifestGenerator kubernetesManifestGenerator,
KubernetesManifestUtils kubernetesManifestUtils,
@Qualifier("jsonMapper") ObjectMapper jsonMapper
) {
this.jsonnetTemplatesFolder = jsonnetTemplatesFolder;
this.kubernetesManifestsPath = kubernetesManifestsPath;
this.jsonnetUtil = jsonnetUtil;
this.kubernetesManifestGenerator = kubernetesManifestGenerator;
this.kubernetesManifestUtils = kubernetesManifestUtils;
this.jsonMapper = jsonMapper;
}
@@ -72,23 +72,18 @@ public class IngressGroupApplierImpl implements IngressGroupApplier {
}
private String createK8sManifest(IngressGroupCreateRequest request, String writePath) {
final var jsonnetAdditionalOptions = new String[]{
"--tla-str", "cluster=" + request.getCluster(),
"--tla-str", "namespace=" + request.getNamespace(),
"--tla-str", "group_name=" + request.getName(),
"--tla-str", "environment=" + request.getEnvironment(),
"--tla-str", "product=" + request.getProduct()
final var args = new String[]{"shared-alb-config",
"--output", writePath,
"--cluster", request.getCluster(),
"--namespace", request.getNamespace(),
"--group_name", request.getName(),
"--environment", request.getEnvironment(),
"--product", request.getProduct()
};
var byteArrayOutputStream = new ByteArrayOutputStream();
var ps = new PrintStream(byteArrayOutputStream, false, UTF_8);
var exitCode = jsonnetUtil.run(
writePath,
ps,
jsonnetAdditionalOptions,
"main.jsonnet",
jsonnetTemplatesFolder
);
var exitCode = kubernetesManifestGenerator.generate(ps, args);
if (exitCode > 0) {
throw new KubernetesManifestException(

View File

@@ -21,8 +21,8 @@ import com.navi.infra.portal.provider.Common;
import com.navi.infra.portal.service.kubernetes.BashExecute;
import com.navi.infra.portal.service.kubernetes.KubernetesManifestService;
import com.navi.infra.portal.service.manifest.DeploymentService;
import com.navi.infra.portal.util.JsonnetUtil;
import com.navi.infra.portal.util.JsonnetUtilImpl;
import com.navi.infra.portal.util.KubernetesManifestGenerator;
import com.navi.infra.portal.util.KutegenClient;
import com.navi.infra.portal.util.MapDiffUtil;
import io.kubernetes.client.openapi.ApiException;
import java.io.IOException;
@@ -55,7 +55,7 @@ public class KubernetesManifestServiceTest {
private final BashExecute bashExecute;
private final JsonnetUtil jsonnetUtil;
private final KubernetesManifestGenerator kubernetesManifestGenerator;
private final KubeClient kubeClient;
@@ -67,10 +67,10 @@ public class KubernetesManifestServiceTest {
deploymentService = Mockito.mock(DeploymentService.class);
bashExecute = Mockito.mock(BashExecute.class);
kubeClient = Mockito.mock(KubeClient.class);
jsonnetUtil = new JsonnetUtilImpl();
kubernetesManifestGenerator = new KutegenClient("test");
kubernetesManifestService = new KubernetesManifestService(deploymentService, objectMapper,
bashExecute, "dev", jsonnetUtil, kubeClient, 2000L,
bashExecute, "dev", kubernetesManifestGenerator, kubeClient, 2000L,
5);
}

View File

@@ -4,6 +4,7 @@ import com.navi.infra.portal.domain.gocd.PipelineManifest;
import com.navi.infra.portal.provider.ExternalIntegrationProvider;
import java.io.IOException;
import org.apache.commons.io.FileUtils;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;
@@ -19,6 +20,11 @@ public class PipelineManifestServiceTest extends ExternalIntegrationProvider {
@Autowired
private PipelineManifestService pipelineManifestService;
@BeforeAll
static void setUp() {
System.setProperty("ENVIRONMENT", "test");
}
private void assertJsonEqual(String inputFile, String expectedFile) throws IOException {
String pipelineManifestJson = FileUtils
.readFileToString(ResourceUtils.getFile(inputFile), "UTF-8");

View File

@@ -7,7 +7,7 @@ import static org.mockito.Mockito.when;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.navi.infra.portal.exceptions.KubernetesManifestException;
import com.navi.infra.portal.service.kubernetes.BashExecute;
import com.navi.infra.portal.util.JsonnetUtil;
import com.navi.infra.portal.util.KubernetesManifestGenerator;
import com.navi.infra.portal.util.kubernetes.KubernetesManifestUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
@@ -20,7 +20,7 @@ import org.mockito.junit.jupiter.MockitoExtension;
class IngressGroupApplierImplTest {
@Mock
private JsonnetUtil jsonnetUtil;
private KubernetesManifestGenerator kubernetesManifestGenerator;
private IngressGroupApplier ingressGroupApplier;
@@ -28,7 +28,7 @@ class IngressGroupApplierImplTest {
@BeforeEach
void setup() {
ingressGroupApplier = new IngressGroupApplierImpl("", "", jsonnetUtil,
ingressGroupApplier = new IngressGroupApplierImpl("", "", kubernetesManifestGenerator,
new KubernetesManifestUtils(
new BashExecute(), "test"), jsonMapper);
}
@@ -39,7 +39,7 @@ class IngressGroupApplierImplTest {
var createRequest = new IngressGroupCreateRequest("name", "namespace", "cluster",
"environment", "product");
when(jsonnetUtil.run(any(), any(), any(), any(), any())).thenReturn(1);
when(kubernetesManifestGenerator.generate(any(), any())).thenReturn(1);
assertThrows(KubernetesManifestException.class,
() -> ingressGroupApplier.createAndApply(createRequest));

View File

@@ -258,7 +258,7 @@
},
{
"name": "secretMd5",
"value": "ca5855f61008767291e629652da57dc6"
"value": "d74618e323ae5b8a83fa496eb16ef003"
}
],
"ports": [

View File

@@ -242,7 +242,7 @@
},
{
"name": "secretMd5",
"value": "ca5855f61008767291e629652da57dc6"
"value": "d74618e323ae5b8a83fa496eb16ef003"
}
],
"ports": [

View File

@@ -225,7 +225,7 @@
},
{
"name": "secretMd5",
"value": "ca5855f61008767291e629652da57dc6"
"value": "d74618e323ae5b8a83fa496eb16ef003"
}
],
"image": "IMAGE",

View File

@@ -225,7 +225,7 @@
},
{
"name": "secretMd5",
"value": "ca5855f61008767291e629652da57dc6"
"value": "d74618e323ae5b8a83fa496eb16ef003"
}
],
"image": "IMAGE",

View File

@@ -242,7 +242,7 @@
},
{
"name": "secretMd5",
"value": "129cd9ea6fd37de0e07a8ff94467306f"
"value": "8a40bdadb732b9107fbf1eba768a302a"
}
],
"ports": [

View File

@@ -242,7 +242,7 @@
},
{
"name": "secretMd5",
"value": "129cd9ea6fd37de0e07a8ff94467306f"
"value": "8a40bdadb732b9107fbf1eba768a302a"
}
],
"ports": [

View File

@@ -1,14 +0,0 @@
## Steps to test JSONNET changes
1. Create a temporary directory in templates directory:
```mkdir <path_to_cloned_repo>/templates/temp```
2. Change directory to the same
```cd <path_to_cloned_repo>/templates/temp```
3. Run the following command to create json for testing purposes:
```jsonnet main.jsonnet -J <path_to_cloned_repo>/manifests/<manifest_number>/<manifest_id> --ext-str IMAGE='image' -m temp```
4. Inspect the generated JSON manifests in the `temp` folder.

View File

@@ -1,94 +0,0 @@
// chaos_engine.jsonnet
// Renders one LitmusChaos ChaosEngine object per fault declared in
// deployment_manifest.deployment.faults. Each engine targets the pods of the
// app deployment selected by the 'release=<name>' label.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';  // NOTE(review): not referenced below — confirm before removing.
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local chaos_util = import 'chaos_util.jsonnet';
// Per-fault-type env vars consumed by the Litmus experiment container:
// latencyInjection -> NETWORK_LATENCY, packetLoss -> NETWORK_PACKET_LOSS_PERCENTAGE.
// Indexing the literal object with [experiment.type] selects exactly one list.
local env = function(experiment) {
latencyInjection: [
{
name: 'NETWORK_LATENCY',
value: experiment.details.latency,
},
],
packetLoss: [
{
name: 'NETWORK_PACKET_LOSS_PERCENTAGE',
value: experiment.details.packetLoss,
},
],
}[experiment.type];
// One ChaosEngine per fault. All engines share the same name
// ('<deployment>-chaos'); presumably at most one fault is applied per
// namespace at a time — TODO confirm with callers.
std.map(function(experiment) {
apiVersion: 'litmuschaos.io/v1alpha1',
kind: 'ChaosEngine',
metadata: {
name: '%s-chaos' % deployment.name,
namespace: deployment.namespace,
labels: {
resource_id: '%s-chaos' % deployment.name,
},
},
spec: {
jobCleanUpPolicy: 'delete',
annotationCheck: 'true',
engineState: 'active',
auxiliaryAppInfo: '',
monitoring: false,
appinfo: {
appns: deployment.namespace,
applabel: 'release=%s' % deployment.name,
appkind: 'deployment',
},
// Service account is provisioned alongside in chaos.jsonnet ('<experiment>-sa').
chaosServiceAccount: '%s-sa' % chaos_util.experimentName(experiment),
components: {
runner: {
// Istio sidecar injection disabled so the runner pod can exit cleanly.
runnerannotation: {
'sidecar.istio.io/inject': 'false',
},
},
},
experiments: [{
name: chaos_util.experimentName(experiment),
spec: {
components: {
experimentannotation: {
'sidecar.istio.io/inject': 'false',
},
env: [
{
name: 'CHAOS_NAMESPACE',
value: deployment.namespace,
},
{
name: 'APP_NAMESPACE',
value: deployment.namespace,
},
{
name: 'NETWORK_INTERFACE',
value: 'eth0',
},
{
name: 'TARGET_CONTAINER',
value: chart.full_service_name(deployment.name),
},
{
name: 'TOTAL_CHAOS_DURATION',
value: experiment.duration,
},
{
name: 'PODS_AFFECTED_PERC',
value: '100',
},
{
name: 'TARGET_HOSTS',
value: std.join(',', experiment.details.targetHosts),
},
// Fault-type-specific vars appended last (see env() above).
] + env(experiment),
},
},
}],
},
}, deployment.faults)

View File

@@ -1,87 +0,0 @@
// chaos_experiment.jsonnet
// Renders one LitmusChaos ChaosExperiment CRD per fault declared in
// deployment_manifest.deployment.faults. The experiment defines the RBAC
// permissions and the litmus-go runner invocation for each fault type.
local chart = import 'chart.jsonnet';  // NOTE(review): chart/common appear unused below — confirm before removing.
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local chaos_util = import 'chaos_util.jsonnet';
// Human-readable description per supported fault type.
local descriptionMessage = function(experiment) {
latencyInjection: 'Injects network latency on pods belonging to an app deployment\n',
packetLoss: 'Injects network packet loss on pods belonging to an app deployment\n',
}[experiment.type];
// litmus-go sub-experiment binary path per fault type.
local args = function(experiment) {
latencyInjection: './experiments/pod-network-latency',
packetLoss: './experiments/pod-network-loss',
}[experiment.type];
std.map(
function(experiment)
{
apiVersion: 'litmuschaos.io/v1alpha1',
description: {
message: descriptionMessage(experiment),
},
kind: 'ChaosExperiment',
metadata: {
name: chaos_util.experimentName(experiment),
},
spec: {
definition: {
scope: 'Namespaced',
permissions: [
{
apiGroups: [
'',
'batch',
'litmuschaos.io',
],
resources: [
'jobs',
'pods',
'pods/log',
'events',
'chaosengines',
'chaosexperiments',
'chaosresults',
],
verbs: [
'create',
'list',
'get',
'patch',
'update',
'delete',
],
},
],
image: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/litmus-go:1.8.1',
imagePullPolicy: 'Always',
// The 'args' FIELD below refers to the local function 'args' above;
// fields do not shadow locals in jsonnet (field access needs 'self.').
args: [
'-c',
args(experiment),
],
command: [
'/bin/bash',
],
env: [
{
name: 'NETWORK_INTERFACE',
value: 'eth0',
},
{
name: 'LIB_IMAGE',
value: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/litmus-go:1.8.1',
},
{
name: 'TC_IMAGE',
value: 'gaiadocker/iproute2',
},
],
labels: {
name: chaos_util.experimentName(experiment),
},
},
},
},
deployment.faults
)

View File

@@ -1,118 +0,0 @@
// chaos.jsonnet
// Top-level chaos entry point: when the deployment declares faults, emits a
// map of output-file-name -> manifest covering, per fault: the ChaosExperiment,
// its ServiceAccount/Role/RoleBinding, a privileged-PSP RoleBinding, and the
// ChaosEngine. Numeric filename prefixes (1_..6_) order the apply sequence.
local chaos_engines = import 'chaos_engine.jsonnet';
local chaos_experiments = import 'chaos_experiment.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local chaos_util = import 'chaos_util.jsonnet';
// ServiceAccount '<experiment>-sa' referenced by the ChaosEngine.
local chaos_sa = function(experiment) {
apiVersion: 'v1',
kind: 'ServiceAccount',
metadata: {
labels: {
name: '%s-sa' % chaos_util.experimentName(experiment),
},
name: '%s-sa' % chaos_util.experimentName(experiment),
namespace: deployment.namespace,
},
};
// Namespaced Role granting the experiment runner access to chaos CRDs,
// pods, jobs and events.
local chaos_role = function(experiment) {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'Role',
metadata: {
labels: {
name: '%s-sa' % chaos_util.experimentName(experiment),
},
name: '%s-sa' % chaos_util.experimentName(experiment),
namespace: deployment.namespace,
},
rules: [
{
apiGroups: [
'',
'litmuschaos.io',
'batch',
],
resources: [
'pods',
'jobs',
'pods/log',
'events',
'chaosengines',
'chaosexperiments',
'chaosresults',
],
verbs: [
'create',
'list',
'get',
'patch',
'update',
'delete',
],
},
],
};
// Binds the Role above to the experiment's ServiceAccount.
local chaos_rolebinding = function(experiment) {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: {
labels: {
name: '%s-sa' % chaos_util.experimentName(experiment),
},
name: '%s-sa' % chaos_util.experimentName(experiment),
namespace: deployment.namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
name: '%s-sa' % chaos_util.experimentName(experiment),
},
subjects: [
{
kind: 'ServiceAccount',
name: '%s-sa' % chaos_util.experimentName(experiment),
},
],
};
// Grants the privileged PodSecurityPolicy (cluster role 'psp:privileged')
// to the experiment's ServiceAccount; network faults need privileged pods.
local chaos_privileged_rolebinding = function(experiment) {
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: {
name: 'psp:privileged:%s-sa' % chaos_util.experimentName(experiment),
namespace: deployment.namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
name: 'psp:privileged',
},
subjects: [
{
kind: 'ServiceAccount',
name: '%s-sa' % chaos_util.experimentName(experiment),
},
],
};
// Builds { 'prefix_<i>_...json': fn(fault_i) } for every fault.
// NOTE(review): the range is sized by chaos_experiments but indexes
// deployment.faults — safe only because chaos_experiment.jsonnet maps over
// deployment.faults (lengths match); sizing by deployment.faults directly
// would be clearer. Verify before changing.
local getFiles = function(s, fn) {
[s % index]: fn(deployment.faults[index])
for index in std.range(0, std.length(chaos_experiments) - 1)
};
// Emit nothing at all when no faults are declared.
if 'faults' in deployment && std.length(deployment.faults) > 0 then
(if chaos_experiments != null then {
['1_%s_chaos_experiment.json' % index]: chaos_experiments[index]
for index in std.range(0, std.length(chaos_experiments) - 1)
} else {}) +
getFiles('2_%s_chaos_sa.json', chaos_sa) +
getFiles('3_%s_chaos_role.json', chaos_role) +
getFiles('4_%s_chaos_rolebinding.json', chaos_rolebinding) +
getFiles('5_%s_chaos_privileged_rolebinding.json', chaos_privileged_rolebinding) +
(if chaos_engines != null then {
['6_%s_chaos_engine.json' % index]: chaos_engines[index]
for index in std.range(0, std.length(chaos_engines) - 1)
} else {})

View File

@@ -1,6 +0,0 @@
// chaos_util.jsonnet
// Shared helper: maps a fault's `type` to its Litmus experiment name.
local experiment_names = {
  latencyInjection: 'pod-network-latency',
  packetLoss: 'pod-network-loss',
};
{
  // Hidden field (`::`): used by sibling templates, never manifested.
  experimentName:: function(experiment) experiment_names[experiment.type],
}

View File

@@ -1,29 +0,0 @@
// chart.jsonnet
// Chart metadata and service-naming helpers shared by all manifest templates.
local chart_values = {
  apiVersion: 'v1',
  name: 'navi-service',
  description: 'Deploy navi services to kubernetes',
  version: '0.0.1',
  appVersion: '0.1',
  home: 'https://github.cmd.navi-tech.in/navi-infra',
  maintainers: [
    { name: 'Infra', email: 'infra-team@navi.com' },
  ],
};
{
  // Private values, hidden from manifestation via `::`.
  values:: chart_values,

  // Public fields.
  service_name: self.values.name,
  service_chart: '%s-%s' % [self.values.name, self.values.version],

  // '<deployment>-<service>', enforcing Kubernetes name-length limits
  // (63 chars for the deployment part, 253 for the full name).
  full_service_name(deployment_name): (
    assert std.length(deployment_name) <= 63 : 'Service name must be less than 63 characters. name: %s' % deployment_name;
    local full = '%s-%s' % [deployment_name, self.service_name];
    assert std.length(full) <= 253 : 'Full Service name must be less than 253 characters. name: %s' % full;
    full
  ),
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,67 +0,0 @@
// common.jsonnet
// Shared hidden fields (labels, matchLabels, AWS tags, annotations) merged
// into every generated manifest. All fields use `::` so this file manifests
// to nothing on its own.
local chart = import 'chart.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local load_balancer_util = import 'load_balancer_util.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
local util = import 'util.jsonnet';
local metadata = deployment_manifest.metadata;
// Kubernetes label values cannot contain '/', so replace with '_'.
// (The `value` parameter is ignored; the function re-reads metadata[key].)
local remove_slash = function(key, value)
util.replace_character_in_string(metadata[key], '/', '_');
local metadata_without_slash = std.mapWithKey(remove_slash, metadata);
// 'product' is excluded here and handled explicitly in labels.Product below.
local metadata_labels = { [field]: metadata_without_slash[field] for field in std.objectFields(metadata_without_slash) if field != 'product' };
{
// Merge order matters: manifest-supplied labels < sanitized metadata <
// the fixed fields below (later objects win on key conflicts).
labels::
(if 'labels' in deployment_manifest then deployment_manifest.labels else {}) +
(metadata_labels) +
{
app: chart.service_name,
chart: chart.service_chart,
heritage: 'NaviDeploymentManifest',
release: deployment_manifest.name,
Team: deployment_manifest.team.name,
Environment: deployment_manifest.environment,
Name: deployment_manifest.name,
// Fall back to the namespace-level product tag when the manifest has none.
Product: if 'product' in metadata then metadata.product else namespace_values.additionalTags.product,
// Vertical-to-owner aliasing: lending -> 'medici', insurance -> 'gi'.
Owner: if deployment_manifest.infraVertical == 'lending' then 'medici' else if deployment_manifest.infraVertical == 'insurance' then 'gi' else deployment_manifest.infraVertical,
},
// Stable selector labels — must never change for an existing deployment.
matchLabels::
{
app: chart.service_name,
release: deployment_manifest.name,
},
// Tags propagated to AWS resources (e.g. the ALB) for cost attribution.
awsTags:: {
app: deployment_manifest.name,
Environment: $.labels.Environment,
Team: $.labels.Team,
Name: $.labels.Name,
Owner: $.labels.Owner,
Product: $.labels.Product,
Namespace: deployment_manifest.deployment.namespace,
Ingress: load_balancer_util.alb_ingress_name(chart.full_service_name($.labels.Name)),
},
// Label overlays for perf-environment companion servers.
perfMockServerLabels:: $.labels {
release: deployment_manifest.deployment.name + '-mock-server',
Name: deployment_manifest.deployment.name + '-mock-server',
},
perfPostgresServerLabels:: $.labels {
release: deployment_manifest.deployment.name + '-postgres-server',
Name: deployment_manifest.deployment.name + '-postgres-server',
},
// kube-janitor TTL annotation, applied only in ephemeral environments.
janitor_annotation:: {
'janitor/ttl': deployment_manifest.metadata.ttl,
},
annotations_map:: {
perf: $.janitor_annotation,
sandbox: $.janitor_annotation,
},
annotations:: if deployment_manifest.environment in $.annotations_map then $.annotations_map[deployment_manifest.environment] else {},
}

View File

@@ -1,213 +0,0 @@
// common_api_gateway.jsonnet
// For each entry in deployment.commonApiGateways, renders a v1 List holding:
// an external and an internal Kong Ingress per gateway attribute, plus the
// KongPlugins they reference (url-rewrite, ip-restriction, external-auth,
// rate-limiting). Plugin names are attached via the 'konghq.com/plugins'
// annotation on the Ingress.
//Imports
local chart = import 'chart.jsonnet';
local cluster_values = import 'cluster_values.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local load_balancer_util = import 'load_balancer_util.jsonnet';  // NOTE(review): load_balancer_util/namespace_values/util appear unused here — confirm.
local namespace_values = import 'namespace_values.jsonnet';
local port_map = import 'port_map.jsonnet';
local util = import 'util.jsonnet';
local deployment = deployment_manifest.deployment;
// Kong Ingress for one gateway attribute. typeIdentifier is 'external' or
// 'internal' and selects the ingress class and host; plugin references are
// collected into the 'konghq.com/plugins' annotation.
local create_gateway_ingress(environment, servicePrefix, typeIdentifier, gateway, attributeIndex, serviceName) = {
local resourceName = '%s-%s-%s-%s' % [environment, servicePrefix, typeIdentifier, gateway.gatewayAttributes[attributeIndex].pathName],
local commonResourceName = '%s-%s-%s' % [environment, servicePrefix, gateway.gatewayAttributes[attributeIndex].pathName],
local rateLimitIdentifier = '%s-%s-%s' % [environment, servicePrefix, typeIdentifier],
local pathName = if 'pathName' in gateway.gatewayAttributes[attributeIndex] then gateway.gatewayAttributes[attributeIndex].pathName,
// url-rewrite plugin only when source and target paths differ.
local urlRewritePlugin = if (std.objectHas(gateway.gatewayAttributes[attributeIndex], 'sourceGatewayPath'))
&& (gateway.gatewayAttributes[attributeIndex].sourceGatewayPath != gateway.gatewayAttributes[attributeIndex].targetGatewayPath)
then commonResourceName + '-url-rewrite',
// external-auth and ip-restriction apply only on the external ingress.
local externalAuthPlugin = if (std.objectHas(gateway.gatewayAttributes[attributeIndex], 'externalAuth') && typeIdentifier == 'external')
then (if gateway.gatewayAttributes[attributeIndex].externalAuth then resourceName + '-external-auth'),
local ipRestrictedPlugin = if (std.objectHas(gateway.gatewayAttributes[attributeIndex], 'whitelistedGatewayIps') && typeIdentifier == 'external')
then resourceName + '-ip-restriction',
local rateLimitRules = if 'rateLimitRules' in gateway.gatewayAttributes[attributeIndex]
then gateway.gatewayAttributes[attributeIndex].rateLimitRules else [],
// One '-rl' plugin name per rate-limit rule; naming must match
// create_kong_rate_limiter below.
local rateLimitPlugin = std.map(function(rule)
'%s-%s-%s-%s-%s' % [environment, servicePrefix, pathName, rule.name, 'rl'],
rateLimitRules),
local kongPluginsList = [
urlRewritePlugin,
ipRestrictedPlugin,
externalAuthPlugin,
(if typeIdentifier == 'external' then std.join(',', rateLimitPlugin)),
],
// Drop nulls and empty strings before joining into the annotation.
local filteresKongPluginsList = std.filter(function(plugin) plugin != null && std.length(plugin) > 0, kongPluginsList),
apiVersion: 'networking.k8s.io/v1',
kind: 'Ingress',
metadata: {
name: resourceName,
labels: common.labels {
Name: resourceName,
'gateway-resource-identifier': commonResourceName,
},
annotations: common.annotations {
[if filteresKongPluginsList != null && std.length(filteresKongPluginsList) > 0 then 'konghq.com/plugins']: std.join(',', filteresKongPluginsList),
// Gateway hosts are managed outside external-dns.
'external-dns.alpha.kubernetes.io/exclude': 'true',
},
namespace: deployment_manifest.deployment.namespace,
},
spec: {
ingressClassName: 'kong-' + typeIdentifier,
rules: [
{
host: if (typeIdentifier == 'external') then gateway.commonApiGatewayUrl else gateway.internalCommonApiGatewayUrl,
http: {
paths: [
{
path: gateway.gatewayAttributes[attributeIndex].sourceGatewayPath,
pathType: 'ImplementationSpecific',
backend: {
service: {
name: serviceName,
port: {
number: port_map.getPort('serviceport'),
},
},
},
},
],
},
},
],
},
};
// This will be a common resource across internal & external gateways
// KongPlugin (request-transformer) rewriting sourceGatewayPath to
// targetGatewayPath.
local create_gateway_url_plugin(environment, servicePrefix, gateway, attributeIndex, serviceName) = {
local resourceName = '%s-%s-%s' % [environment, servicePrefix, gateway.gatewayAttributes[attributeIndex].pathName],
apiVersion: 'configuration.konghq.com/v1',
kind: 'KongPlugin',
metadata: {
name: resourceName + '-url-rewrite',
labels: common.labels {
Name: resourceName + '-url-rewrite',
'gateway-resource-identifier': resourceName,
},
namespace: deployment_manifest.deployment.namespace,
},
config: {
replace: {
uri: gateway.gatewayAttributes[attributeIndex].targetGatewayPath,
},
},
plugin: 'request-transformer',
};
// KongPlugin (external-auth) whose auth URL comes from cluster_values,
// with a per-namespace override falling back to the cluster default.
local create_external_auth_plugin(environment, servicePrefix, typeIdentifier, gateway, attributeIndex, serviceName) = {
local resourceName = '%s-%s-%s-%s' % [environment, servicePrefix, typeIdentifier, gateway.gatewayAttributes[attributeIndex].pathName],
local commonResourceName = '%s-%s-%s' % [environment, servicePrefix, gateway.gatewayAttributes[attributeIndex].pathName],
local currentCluster = deployment_manifest.cluster,
local currentNamespace = deployment_manifest.deployment.namespace,
local configValues = cluster_values[deployment_manifest.cluster],
local configUrl = if currentNamespace in configValues
then configValues[currentNamespace].commonApiGateway.externalAuth.config.url
else configValues.default.commonApiGateway.externalAuth.config.url,
apiVersion: 'configuration.konghq.com/v1',
kind: 'KongPlugin',
metadata: {
name: resourceName + '-external-auth',
labels: common.labels {
Name: resourceName + '-external-auth',
'gateway-resource-identifier': resourceName,
},
namespace: currentNamespace,
},
config: {
url: configUrl,
},
plugin: 'external-auth',
};
// This will only be required for External Gateway
// KongPlugin (ip-restriction) allowing only the comma-separated
// whitelistedGatewayIps (whitespace stripped).
local create_gateway_ip_plugin(environment, servicePrefix, typeIdentifier, gateway, attributeIndex, serviceName) = {
local resourceName = '%s-%s-%s-%s' % [environment, servicePrefix, typeIdentifier, gateway.gatewayAttributes[attributeIndex].pathName],
local commonResourceName = '%s-%s-%s' % [environment, servicePrefix, gateway.gatewayAttributes[attributeIndex].pathName],
apiVersion: 'configuration.konghq.com/v1',
kind: 'KongPlugin',
metadata: {
name: resourceName + '-ip-restriction',
labels: common.labels {
Name: resourceName + '-ip-restriction',
'gateway-resource-identifier': commonResourceName,
},
namespace: deployment_manifest.deployment.namespace,
},
config: {
allow: std.split(std.strReplace(gateway.gatewayAttributes[attributeIndex].whitelistedGatewayIps, ' ', ''), ','),
},
plugin: 'ip-restriction',
};
// This is only for external api gateways currently.
// One KongPlugin (rate-limiting) per rule, limited per minute and keyed by
// rule.options ('path' or 'header').
local create_kong_rate_limiter(environment, servicePrefix, typeIdentifier, gateway, attributeIndex, serviceName) = {
local resourceName = '%s-%s-%s' % [environment, servicePrefix, gateway.gatewayAttributes[attributeIndex].pathName],
local commonResourceName = '%s-%s-%s' % [environment, servicePrefix, gateway.gatewayAttributes[attributeIndex].pathName],
local forTheGateway = gateway.gatewayAttributes[attributeIndex].sourceGatewayPath,
local rateLimitRules = if 'rateLimitRules' in gateway.gatewayAttributes[attributeIndex]
then gateway.gatewayAttributes[attributeIndex].rateLimitRules else [],
kongrules: [{
apiVersion: 'configuration.konghq.com/v1',
kind: 'KongPlugin',
plugin: 'rate-limiting',
metadata: {
name: resourceName + '-%s' % rule.name + '-rl', // shortened to fit the 63-character name limit
labels: common.labels {
'gateway-resource-identifier': resourceName,
},
},
config: {
minute: rule.limit,
limit_by: '%s' % rule.options,
[if rule.options == 'path' then 'path' else null]: '%s' % forTheGateway,
[if rule.options == 'header' then 'header_name' else null]: '%s' % rule.header,
},
} for rule in rateLimitRules],
};
local gateways = deployment.commonApiGateways;
local gatewaysLen = std.length(deployment.commonApiGateways);
// One 'List' object per configured gateway.
std.map(
function(apiGatewayIndex) {
local gateway = gateways[apiGatewayIndex],
local serviceName = chart.full_service_name(deployment.name),
local servicePrefix = deployment.name,
local environment = deployment_manifest.environment,
local gatewayAttributeLen = std.length(gateway.gatewayAttributes),
local kongRateLimits = [
create_kong_rate_limiter(environment, servicePrefix, 'external', gateway, attributeIndex, serviceName)
for attributeIndex in std.range(0, gatewayAttributeLen - 1)
if (std.objectHas(gateway.gatewayAttributes[attributeIndex], 'rateLimitRules'))
],
apiVersion: 'v1',
kind: 'List',
items: [create_gateway_ingress(environment, servicePrefix, 'external', gateway, attributeIndex, serviceName) for attributeIndex in std.range(0, gatewayAttributeLen - 1)] +
[create_gateway_ingress(environment, servicePrefix, 'internal', gateway, attributeIndex, serviceName) for attributeIndex in std.range(0, gatewayAttributeLen - 1)] +
[
create_gateway_url_plugin(environment, servicePrefix, gateway, attributeIndex, serviceName)
for attributeIndex in std.range(0, gatewayAttributeLen - 1)
if (gateway.gatewayAttributes[attributeIndex].sourceGatewayPath != gateway.gatewayAttributes[attributeIndex].targetGatewayPath)
] +
[
create_external_auth_plugin(environment, servicePrefix, 'external', gateway, attributeIndex, serviceName)
for attributeIndex in std.range(0, gatewayAttributeLen - 1)
if (gateway.gatewayAttributes[attributeIndex].externalAuth)
] +
// NOTE(review): only kongRateLimits[0] is flattened — attributes beyond the
// first with rateLimitRules are dropped. Looks like a latent bug; verify.
if (std.length(kongRateLimits) > 0) then kongRateLimits[0].kongrules else [],
},
std.range(0, gatewaysLen - 1)
)

View File

@@ -1,20 +0,0 @@
// configmap.jsonnet
// Renders a ConfigMap holding the deployment's environment file, or nothing
// when the manifest declares no `environmentFile`.
local chart = import 'chart.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local util = import 'util.jsonnet';
local deployment = deployment_manifest.deployment;
local common = import 'common.jsonnet';

if 'environmentFile' in deployment then
  local env_file = deployment.environmentFile;
  {
    apiVersion: 'v1',
    kind: 'ConfigMap',
    metadata: {
      // '-cm' suffix distinguishes the ConfigMap from the service itself.
      name: chart.full_service_name(deployment.name) + '-cm',
      namespace: deployment.namespace,
      annotations: common.annotations,
    },
    // Single entry keyed by the basename of the configured file path.
    data: {
      [util.file_name(env_file.path)]: env_file.data,
    },
  }

View File

@@ -1,34 +0,0 @@
// cron_hpa.jsonnet
// Emits an Alibaba CronHorizontalPodAutoscaler when the deployment declares
// cron scaling jobs; emits nothing otherwise. When the manifest's HPA type is
// not 'cron', the object is still rendered but under a '-disabled' name.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local util = import 'util.jsonnet';
local vars = import 'vars.jsonnet';

local deployment = deployment_manifest.deployment;
local hpa = deployment.hpa;
// Cron HPA is active only when the manifest selects the cron HPA type.
local cron_enabled = hpa.type == vars.deployment.hpa.type.cron;
local base_name = chart.full_service_name(deployment.name);
local autoscaler_name =
  if cron_enabled
  then base_name + '-cron-hpa-autoscaler'
  else base_name + '-cron-hpa-autoscaler-disabled';

if std.length(hpa.cronJobs) != 0 then {
  apiVersion: 'autoscaling.alibabacloud.com/v1beta1',
  kind: 'CronHorizontalPodAutoscaler',
  metadata: {
    name: autoscaler_name,
    labels: common.labels { 'controller-tools.k8s.io': '1.0' },
    namespace: deployment.namespace,
    annotations: common.annotations,
  },
  spec: {
    scaleTargetRef: util.hpa_scale_target_ref(deployment.name, deployment.controller, !cron_enabled),
    deploymentName: chart.full_service_name(deployment.name),
    // 'ScaleDown' jobs shrink to minReplicas; every other job grows to maxReplicas.
    jobs: [
      job + { targetSize: if job.name == 'ScaleDown' then hpa.minReplicas else hpa.maxReplicas }
      for job in hpa.cronJobs
    ],
  },
}

View File

@@ -1,407 +0,0 @@
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
local app_name = chart.full_service_name(deployment_manifest.deployment.name);
local namespace = deployment_manifest.deployment.namespace;
local load_balancer_util = import 'load_balancer_util.jsonnet';
local alerts = deployment_manifest.deployment.alerts;
local manifest_util = import 'manifest_util.jsonnet';
local deployment = deployment_manifest.deployment;
local vars = import 'vars.jsonnet';
local util = import 'util.jsonnet';
local isVpaEnabled = deployment_manifest.deployment.isVpaEnabled;
local environment = deployment_manifest.environment;
local commonAlertFields = {
appName: common.awsTags.Name,
fullName: chart.full_service_name(deployment.name),
namespace: namespace,
environment: environment,
};
local loadBalancerAlertFields = commonAlertFields {
albIngressName: load_balancer_util.alb_ingress_name(app_name),
};
local databaseAlertFields = commonAlertFields {
dbInstance: deployment_manifest.extraResources.database.instanceName,
};
local baseLabels = function(alert) {
labels: {
severity: alert.severity,
alertTeam: deployment_manifest.team.name,
appName: app_name,
[if manifest_util.is_custom_slack_channel_enabled(alert) then 'slackChannel']: alert.slackChannel,
},
};
local baseAnnotations = function(alert) {
annotations: {
runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/509936863/Runbook',
},
};
local mapAlerts(alertGroup, alerts) = std.filterMap(
function(alert) alert.type in alertGroup,
function(alert) baseAnnotations(alert) + alertGroup[alert.type](alert) + baseLabels(alert),
alerts
);
local targetGroupAlerts = {
http4xx: function(alert) {
alert: 'HighHTTP4xx',
annotations+: {
description: '%(namespace)s/%(appName)s has more than %(threshold)s%% http 4xx errors in last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
summary: 'Service is facing lot of http 4xx errors',
},
expr: '((aws_alb_tg_httpcode_target_4_xx_count_sum{tag_Name=~"%(fullName)s",tag_Namespace="%(namespace)s"}/aws_alb_tg_request_count_sum{tag_Name=~"%(fullName)s",tag_Namespace="%(namespace)s"})*100) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
'for': alert.duration,
},
http5xx: function(alert) {
alert: 'HighHTTP5xx',
annotations+: {
description: '%(namespace)s/%(appName)s has more than %(threshold)s%% http 5xx errors in last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
summary: 'Service is facing lot of http 5xx errors',
},
expr: '((aws_alb_tg_httpcode_target_5_xx_count_sum{tag_Name=~"%(fullName)s",tag_Namespace="%(namespace)s"}/aws_alb_tg_request_count_sum{tag_Name=~"%(fullName)s",tag_Namespace="%(namespace)s"})*100) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
'for': alert.duration,
},
latency: function(alert) {
alert: 'HighHTTPLatency',
annotations+: {
description: '%(namespace)s/%(appName)s has latency higher than %(threshold)sms in last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
summary: 'Service is having high latency',
},
expr: '(aws_alb_tg_target_response_time_average{tag_Name=~"%(fullName)s",tag_Namespace="%(namespace)s"}) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
'for': alert.duration,
},
};
/*
- TargetGroup will take default Name tag based on service name pass in ingress.
*/
local albAlerts = {
elb4xx: function(alert) {
alert: 'HighELB4xx',
annotations+: {
description: '%(namespace)s/%(appName)s has more than %(threshold)s%% elb 4xx errors in last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
summary: 'Service is facing lot of elb 4xx errors',
},
expr: '((sum by (tag_Ingress) (aws_alb_httpcode_elb_4_xx_count_sum{tag_Ingress="%(albIngressName)s"})/(sum by (tag_Ingress) (aws_alb_tg_request_count_sum{tag_Ingress="%(albIngressName)s"})))*100) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
'for': alert.duration,
},
elb5xx: function(alert) {
alert: 'HighELB5xx',
annotations+: {
description: '%(namespace)s/%(appName)s has more than %(threshold)s%% elb 5xx errors in last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
summary: 'Service is facing lot of elb 5xx errors',
},
expr: '((sum by (tag_Ingress) (aws_alb_httpcode_elb_5_xx_count_sum{tag_Ingress="%(albIngressName)s"})/(sum by (tag_Ingress) (aws_alb_tg_request_count_sum{tag_Ingress="%(albIngressName)s"})))*100) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
'for': alert.duration,
},
};
// Database alert
// Database alert
// RDS / connection-pool alerts. Each entry is keyed by the manifest alert
// `type` and returns one PrometheusRule rule object; PromQL placeholders are
// filled from `databaseAlertFields` (defined earlier in this file) merged
// with the per-alert threshold.
local databaseAlerts = {
  // Active connections as a percentage of the instance's max_connections.
  highActiveConnection: function(alert) {
    alert: 'HighActiveConnection',
    annotations+: {
      description: 'rds {{ $labels.server }} have high number of active connection {{ $value }}',
      summary: 'High Active Connections',
    },
    expr: '(sum(pg_stat_database_active_connection{server=~"%(dbInstance)s\\\\..*"}) by(server) / on (server) pg_params_max_connections) * 100 > %(threshold)s' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // Burstable instance is draining CPU credits faster than `threshold` per 10m.
  provisionedCPUNotEnough: function(alert) {
    alert: 'ProvisionedCPUNotEnough',
    annotations+: {
      description: 'rds {{ $labels.server }} have dip in cpu credit balance {{ $value }}',
      summary: 'Fall in CPU credit balance',
    },
    expr: 'delta(aws_rds_cpucredit_balance_minimum{dimension_DBInstanceIdentifier=~"%(dbInstance)s\\\\..*"}[10m]) < %(threshold)s' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // EBS burst balance is draining faster than `threshold` per 10m.
  provisionedDiskNotEnough: function(alert) {
    alert: 'DBProvisionedDiskNotEnough',
    annotations+: {
      description: 'rds {{ $labels.server }} have dip in burst balance {{ $value }}',
      summary: 'Fall in EBS burst balance',
    },
    expr: 'delta(aws_rds_burst_balance_minimum{dimension_DBInstanceIdentifier=~"%(dbInstance)s\\\\..*"}[10m]) < %(threshold)s' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // HikariCP connection acquisition latency; the kube_pod_created clause
  // suppresses noise from pods younger than 10 minutes.
  // BUG FIX: the description previously printed the app name under
  // "Namespace:" and the namespace under "AppName:" (swapped placeholders);
  // the order now matches maxConnectionPoolReached below.
  connectionAcquireTimeIsHigh: function(alert) {
    alert: 'DBConnectionAcquireTimeIsHigh',
    annotations+: {
      description: 'Namespace: %(namespace)s, AppName: %(appName)s; Acquiring a DB connection for pod {{ $labels.pod }} took more than %(threshold)ss' % (databaseAlertFields { threshold: alert.threshold }),
      summary: 'Container is taking too long to connect to database',
    },
    expr: 'hikaricp_connections_acquire_seconds_max{pod=~"%(appName)s-.*",namespace="%(namespace)s"} > %(threshold)s AND on(pod,namespace) ((time() - kube_pod_created) >600)' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // Hikari pool fully utilised (active/max ratio equals `threshold`,
  // expected to be 1 in the manifest).
  maxConnectionPoolReached: function(alert) {
    alert: 'MaxDBConnectionPoolReached',
    annotations+: {
      description: 'Namespace: %(namespace)s, AppName: %(appName)s; All connection in connection pool for pod {{ $labels.pod }} are used since %(duration)s' % (databaseAlertFields { duration: alert.duration }),
      summary: 'All connections in hikari connection pool are used',
    },
    expr: 'hikaricp_connections_active{pod=~"%(appName)s-.*",namespace="%(namespace)s"} / hikaricp_connections_max{pod=~"%(appName)s-.*",namespace="%(namespace)s"} == %(threshold)s' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // Weekly RDS CPU average below benchmark — downscaling candidate.
  rdsCPUUnderUtilised: function(alert) {
    alert: 'RdsCPUUnderUtilised',
    annotations+: {
      description: 'Namespace: %(namespace)s , AppName: %(appName)s; RDS utilised is below benchmark for last one week, consider downscaling. threshold: %(threshold)s percent' % (databaseAlertFields { threshold: alert.threshold }),
      summary: 'RDS utilised is below benchmark for last one week',
    },
    expr: '(weekly_rds_cpu_usage_average:dimension_DBInstanceIdentifier:labels{dimension_DBInstanceIdentifier=~"%(dbInstance)s.*"} < bool %(threshold)s ) >0' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
};
// Cost-efficiency alerts: weekly K8s CPU/memory usage below benchmark.
// NOTE(review): these reuse `databaseAlertFields` even though they are not DB
// alerts — presumably only its namespace/appName fields are needed; confirm
// against the definition of databaseAlertFields earlier in this file.
local underUtilisedResourcesAlerts = {
  // Max container CPU usage over the past week stayed below `threshold`%.
  k8sCpuUnderUtilised: function(alert) {
    alert: 'K8sCpuUnderUtilised',
    annotations+: {
      description: 'Namespace: %(namespace)s , AppName: %(appName)s; K8s utilised is below benchmark for last one week, consider downscaling. threshold: %(threshold)s percent ' % (databaseAlertFields { threshold: alert.threshold }),
      summary: 'K8S utilised is below benchmark for last one week',
    },
    expr: 'max_over_time(container_cpu_usage_percentage:1h:container:namespace{namespace="%(namespace)s", container =~"%(appName)s.*"}[1w]) < %(threshold)s ' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // Weekly memory usage recording rule below `threshold`%.
  k8sMemoryUnderUtilised: function(alert) {
    alert: 'K8sMemoryUnderUtilised',
    annotations+: {
      description: 'Namespace: %(namespace)s , AppName: %(appName)s; K8s utilised is below benchmark for last one week, consider downscaling. threshold: %(threshold)s percent' % (databaseAlertFields { threshold: alert.threshold }),
      summary: 'K8S utilised is below benchmark for last one week',
    },
    expr: '(container_memory_usage_percentage:1w:container:namespace{namespace="%(namespace)s", container =~"%(appName)s.*"} ) < %(threshold)s ' % (databaseAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
};
// Kafka alerts, parameterised per manifest entry with topic/group/threshold.
local kafkaAlerts = {
  // Total consumer-group lag across partitions above `threshold`.
  consumerGroupLag: function(alert) {
    alert: 'HighConsumerGroupLag',
    annotations+: {
      description: '%(group)s has more than %(threshold)s lag in last %(duration)s' % ({ group: alert.group, threshold: alert.threshold, duration: alert.duration }),
      summary: 'High consumergroup lag',
    },
    expr: 'sum(kafka_consumergroup_lag{topic=~"%(topic)s",consumergroup=~"%(group)s"}) > %(threshold)s' % ({ group: alert.group, threshold: alert.threshold, topic: alert.topic }),
    'for': alert.duration,
  },
  // Messages produced to the topic over 10m above `threshold`.
  // NOTE(review): alert name is lower-camel ('kafkaMessageRate') unlike the
  // PascalCase names elsewhere; renaming would change the alert's identity in
  // Alertmanager, so it is deliberately left as-is.
  kafkaMessageRate: function(alert) {
    alert: 'kafkaMessageRate',
    annotations+: {
      description: '%(topic)s has more than %(threshold)s message in last %(duration)s' % ({ topic: alert.topic, threshold: alert.threshold, duration: alert.duration }),
      summary: 'High Message Rate',
    },
    expr: 'sum(increase(kafka_topic_partition_current_offset{topic=~"%(topic)s"}[10m])) > %(threshold)s' % ({ threshold: alert.threshold, topic: alert.topic }),
    'for': alert.duration,
  },
};
//Custom Alerts
//Custom Alerts
// Pass-through for user-defined alerts: the manifest supplies name, PromQL
// expression, summary and an optional duration; the namespace/app context is
// prefixed onto the description.
local customAlerts = {
  custom: function(a) {
    alert: a.name,
    expr: a.expression,
    // Only emit a 'for' clause when the manifest provided a duration.
    [if a.duration != null then 'for']: a.duration,
    annotations+: {
      description: std.format('Namespace:%s; App:%s; ', [namespace, app_name]) + a.description,
      summary: a.summary,
    },
  },
};
//Custom RecordingRules
//Custom RecordingRules
// Shapes a manifest entry into a PrometheusRule recording-rule group:
// a named group with an evaluation interval and a single record/expr pair.
local recordingRulesForm = {
  prometheusRecordingRule: function(rule) {
    name: std.format('%s', [rule.name]),
    interval: std.format('%s', [rule.duration]),
    rules: [
      {
        record: std.format('%s', [rule.record]),
        expr: std.format('%s', [rule.expression]),
      },
    ],
  },
};
// Kong API-gateway alerts, matched per exported_service containing the app
// name. Placeholders come from `loadBalancerAlertFields` (defined earlier).
local kongAlerts = {
  // Per-route 4xx percentage over a 1m window.
  kong4xx: function(alert) {
    alert: 'Kong4xx',
    annotations+: {
      description: '{{ $labels.exported_service }} URI path has more than %(threshold)s%% http 4xx errors per minute for last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
      summary: 'One of the URI path in Kong API gateway is facing lot of http 4xx errors',
    },
    expr: '((sum by (exported_service) (increase(kong_http_requests_total{exported_service=~".*%(appName)s.*", code=~"4.*"}[1m])) / sum by (exported_service) (increase(kong_http_requests_total{exported_service=~".*%(appName)s.*"}[1m]))) * 100) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // Per-route 5xx percentage over a 1m window.
  kong5xx: function(alert) {
    alert: 'Kong5xx',
    annotations+: {
      description: '{{ $labels.exported_service }} URI path has more than %(threshold)s%% http 5xx errors per minute for last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
      summary: 'One of the URI path in Kong API gateway is facing lot of http 5xx errors',
    },
    expr: '((sum by (exported_service) (increase(kong_http_requests_total{exported_service=~".*%(appName)s.*", code=~"5.*"}[1m])) / sum by (exported_service) (increase(kong_http_requests_total{exported_service=~".*%(appName)s.*"}[1m]))) * 100) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
  // Per-route 5-minute average latency (ms) above `threshold`.
  kongLatency: function(alert) {
    alert: 'KongLatency',
    annotations+: {
      description: '{{ $labels.exported_service }} URI path has a 5 minute average latency higher than %(threshold)sms for last %(duration)s' % (loadBalancerAlertFields { threshold: alert.threshold, duration: alert.duration }),
      summary: 'One of the URI path in Kong API gateway has 5 minute average high latency',
    },
    expr: '(sum by (exported_service) (rate(kong_kong_latency_ms_sum{exported_service=~".*%(appName)s.*"}[5m]) / rate(kong_kong_latency_ms_count{exported_service=~".*%(appName)s.*"}[5m]))) > %(threshold)s' % (loadBalancerAlertFields { threshold: alert.threshold }),
    'for': alert.duration,
  },
};
// Pod lifecycle alerts. Note: several entries embed `alert.duration` inside
// the PromQL range selector instead of a 'for' clause, so they fire on the
// first evaluation that matches.
local podAlerts = {
  // Restart count over the manifest duration exceeds the threshold.
  // No 'for': the duration is the increase() window itself.
  HighPodRestarts: function(alert) {
    alert: 'HighPodRestarts',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pod restarted multiple times' % [namespace, app_name],
      summary: 'High Pod Restarts',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'sum(increase(kube_pod_container_status_restarts_total{namespace="%s", pod=~"%s.*"}[%s])) > %s' % [namespace, app_name, alert.duration, alert.threshold],
  },
  // Terminations grouped by reason, excluding expected reasons
  // (Completed/Evicted) and OOMKilled, which has its own alerts below.
  HighPodFailures: function(alert) {
    alert: 'HighPodFailures',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pods were last terminated due to reason {{ $labels.reason }}' % [namespace, app_name],
      summary: 'High Pod Failures',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'sum(increase(kube_pod_container_status_last_terminated_reason{namespace="%s", container=~"%s.*",reason !~ "Completed|Evicted|OOMKilled"}[%s])) by (reason,pod) > %s' % [namespace, app_name, alert.duration, alert.threshold],
  },
  // Repeated restarts whose last termination reason was OOMKilled.
  FrequentPodOOMKilled: function(alert) {
    alert: 'FrequentPodOOMKilled',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pod: {{ $labels.pod }} is restarting multiple times because of OOMKilled' % [namespace, app_name],
      summary: 'High Pod Failures',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'increase(kube_pod_container_status_restarts_total{namespace="%s", container="%s"}[%s]) >= %s AND ignoring(reason) kube_pod_container_status_last_terminated_reason{namespace="%s", container="%s", reason="OOMKilled"} > 0' % [namespace, app_name, alert.duration, alert.threshold, namespace, app_name],
  },
  // Single OOMKill detection via counter delta against an offset sample.
  PodOOMKilled: function(alert) {
    alert: 'PodOOMKilled',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pod: {{ $labels.pod }} killed because of OOMKilled' % [namespace, app_name],
      summary: 'Pod OOMKilled',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'kube_pod_container_status_restarts_total{namespace="%s", container="%s"} - kube_pod_container_status_restarts_total{namespace="%s", container="%s"} offset %s >= %s AND ignoring(reason) kube_pod_container_status_last_terminated_reason{namespace="%s", container="%s", reason="OOMKilled"} > 0' % [namespace, app_name, namespace, app_name, alert.duration, alert.threshold, namespace, app_name],
  },
  // Container stuck in a waiting state for the manifest duration.
  KubeContainerWaiting: function(alert) {
    alert: 'KubeContainerWaiting',
    annotations: {
      description: 'Namespace: %s, AppName: %s; container in waiting state for one hour' % [namespace, app_name],
      summary: 'container is waiting for too long',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'sum by (namespace, pod, container) (kube_pod_container_status_waiting_reason{container="%s", namespace="%s"}) > %s' % [app_name, namespace, alert.threshold],
    'for': alert.duration,
  },
};
// Builds rule objects for every alert whose `type` has a builder in
// `alertGroup`; entries with unknown types are silently dropped.
local mapRecordingRule(alertGroup, alerts) = std.map(
  function(alert) alertGroup[alert.type](alert),
  std.filter(function(alert) alert.type in alertGroup, alerts)
);
// VPA sanity alert: fires when the VPA's uncapped recommendation exceeds the
// capped target, i.e. the workload wants more resources than the configured
// bounds allow. Emitted only when VPA is enabled (file-level flag).
// FIXES: (1) the summary text was garbled ("your service is requires lot more
// resources"); (2) the description/labels used the file globals instead of the
// function's own parameters — identical at the single call site below, but
// now consistent with the expr, which already used `appName`.
local vpaAlerts(appName, namespace, teamName) =
  (if isVpaEnabled then [
     {
       alert: 'VPAUncappedTargetGreaterThanCappedTarget',
       annotations: {
         description: 'Uncapped target is more than bounds Namespace:%s; App:%s; ' % [namespace, appName],
         summary: 'Uncapped target is more than bounds, this means your service requires a lot more resources than what a node may have',
       },
       labels: {
         severity: 'warning',
         alertTeam: teamName,
         appName: appName,
       },
       'for': '1m',
       // Ratio > 1 means the uncapped recommendation exceeds the capped target.
       expr: 'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_uncappedtarget{container="%s"} / kube_verticalpodautoscaler_status_recommendation_containerrecommendations_target{container="%s"} > 1' % [appName, appName],
     },
   ] else []);
// File output: a PrometheusRule manifest with one "<app>-basic" alert group
// plus any recording-rule groups. Nothing is emitted for sandbox clusters.
if !util.is_sandbox(environment) then {
  apiVersion: 'monitoring.coreos.com/v1',
  kind: 'PrometheusRule',
  metadata: {
    labels: common.labels {
      prometheus: 'kube-prometheus',
      role: 'alert-rules',
    },
    name: app_name,
    namespace: namespace,
    annotations: common.annotations,
  },
  spec: {
    groups: [
      {
        name: '%s-basic' % [app_name],
        // Pod alerts are always present; the remaining groups are appended
        // conditionally based on what the deployment manifest declares.
        rules: (mapAlerts(podAlerts, alerts.pod))
               // Hard-coded critical disk alert for any service with a DB:
               // < 10% free AND storage autoscaling already near its cap.
               + (if manifest_util.is_database_present(deployment_manifest) then [
                    {
                      alert: 'CriticalFreeDiskSpace',
                      annotations: {
                        description: 'rds {{ $labels.identifier }} have disk space less than {{ $value }}% and disk space autoscaling have reached the allowed limit.',
                        summary: 'Critical free disk space',
                        runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/269844543/Act+on+DB+alert#CriticalFreeDiskSpace',
                      },
                      expr: '(aws_rds_free_storage_space_average{dimension_DBInstanceIdentifier=~"%(dbInstance)s"}/(1024*1024*1024)/ on () rds_config_AllocatedStorage{identifier=~"%(dbInstance)s"})*100 < 10 And on() (rds_config_AllocatedStorage{identifier=~"%(dbInstance)s"} / rds_config_MaxAllocatedStorage{identifier=~"%(dbInstance)s"}) > 0.9 ' % (databaseAlertFields),
                      'for': '5m',
                      labels: {
                        severity: 'critical',
                        alertTeam: deployment_manifest.team.name,
                        appName: app_name,
                      },
                    },
                  ] else []) + (if (deployment.controller == vars.defaultController) then [
                    // Available replicas fell below the PDB's desired-healthy
                    // count. DataScience gets a longer grace period.
                    {
                      alert: 'ReplicaUnavailableAlert',
                      annotations: {
                        description: 'Namespace: %s, AppName: %s; Not enough instances available since past 15m' % [namespace, app_name],
                        summary: 'Low desired replica count',
                        runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
                      },
                      expr: '(kube_deployment_status_replicas_available{deployment="%s", namespace="%s"}) - ignoring(poddisruptionbudget, deployment) (kube_poddisruptionbudget_status_desired_healthy{poddisruptionbudget="%s-pdb",namespace="%s"}) < 0' % [app_name, namespace, app_name, namespace],
                      'for': if deployment_manifest.team.name == 'DataScience' then '30m' else '15m',
                      labels: {
                        severity: 'critical',
                        alertTeam: deployment_manifest.team.name,
                        appName: app_name,
                      },
                    },
                  ] else [])
               // Load-balancer / gateway alert groups, keyed on LB type.
               + (if load_balancer_util.is_using_tg(deployment_manifest.deployment.loadBalancers) then
                    mapAlerts(targetGroupAlerts, alerts.loadBalancer) else [])
               + (if load_balancer_util.is_using_lb(deployment_manifest.deployment.loadBalancers, 'alb') then
                    mapAlerts(albAlerts, alerts.loadBalancer) else [])
               + (if load_balancer_util.is_using_lb(deployment_manifest.deployment.loadBalancers, 'commonApiGateway') then
                    mapAlerts(kongAlerts, alerts.kong) else [])
               + (if manifest_util.is_database_present(deployment_manifest) then
                    mapAlerts(databaseAlerts, alerts.database) else [])
               + mapAlerts(kafkaAlerts, alerts.kafka)
               + mapAlerts(customAlerts, alerts.custom)
               + mapAlerts(underUtilisedResourcesAlerts, alerts.underUtilisedResources)
               + vpaAlerts(app_name, namespace, deployment_manifest.team.name),
      },
      // Recording rules form their own groups, one per manifest entry.
    ] + mapRecordingRule(recordingRulesForm, alerts.prometheusRecordingRule),
  },
}

View File

@@ -1,29 +0,0 @@
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment_util = import 'deployment_util.jsonnet';
local pod_template = import 'pod_template.jsonnet';
local vars = import 'vars.jsonnet';
local deployment = deployment_manifest.deployment;
// Emits a standard apps/v1 Deployment only when the manifest uses the default
// controller; other controllers (e.g. Argo Rollouts) are rendered elsewhere.
if (deployment.controller == vars.defaultController) then {
  apiVersion: 'apps/v1',
  kind: 'Deployment',
  metadata: {
    name: chart.full_service_name(deployment.name),
    labels: common.labels {
      // Kubernetes labels must be strings, so the boolean is stringified.
      linkConfig: std.toString(deployment_manifest.deployment.isLinkConfig),
    },
    annotations: common.annotations,
    namespace: deployment_manifest.deployment.namespace,
  },
  spec: {
    progressDeadlineSeconds: deployment.progressDeadlineSeconds,
    selector: {
      matchLabels: common.matchLabels,
    },
    // Strategy shape depends on the controller; see deployment_util.
    strategy: deployment_util.strategy.rollingUpdate(),
    template: pod_template,
  },
}

View File

@@ -1,209 +0,0 @@
local deployment_manifest_json = import 'deployment_manifest.json';
local health_check_values = import 'health_check_values.jsonnet';
local manifest_util = import 'manifest_util.jsonnet';
local port_map = import 'port_map.jsonnet';
local probe_values = import 'probe_values.jsonnet';
// Fallback port added whenever the manifest does not expose 'serviceport'.
local default_service_port = [{ name: 'serviceport', port: 8080, enableGrpc: false }];
local namespace_values = import 'namespace_values.jsonnet';
local vars = import 'vars.jsonnet';
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
// Extract the `type` field from each alert entry.
local alertTypes(alerts) = std.map(function(alert) alert.type, alerts);
// True when any alert in `alerts` has the given type. std.find returns the
// list of matching indexes, so a non-empty result means present.
// (Simplified from the redundant `if cond then true else false` form.)
local containsAlertType(alerts, type) = std.find(type, alertTypes(alerts)) != [];
// Manifest-provided pod alerts win; defaults are appended only for alert
// types the manifest does not already define.
local mergePodAlerts(defaultAlerts, manifestAlerts) = manifestAlerts + std.filter(function(alert) !containsAlertType(manifestAlerts, alert.type), defaultAlerts);
// Convenience aliases into the raw manifest and the probe defaults.
local deployment = deployment_manifest_json.deployment;
local defaultStartupProbe = health_check_values.getDefaultStartupProbe;
local defaultLivenessCheck = health_check_values.getDefaultLivenessCheck;
local defaultReadinessCheck = health_check_values.getDefaultReadinessCheck;
// Supported Elasticsearch/Kibana versions mapped to pinned images; any other
// esVersion value will fail the lookup at render time.
local esImageMapping = {
  '7.17.0': vars.esImage_7_17_0,
  '8.12.2': vars.esImage_8_12_2,
};
local kibanaImageMapping = {
  '7.17.0': vars.kibanaImage_7_17_0,
  '8.12.2': vars.kibanaImage_8_12_2,
};
// Normalises a single loadBalancer entry from the manifest: `type` is
// mandatory, every other field gets an explicit default so downstream
// templates never need 'in' checks.
local lbFunction = function(lbObject) {
  type: if 'type' in lbObject then lbObject.type else error 'Missing loadbalancer type',
  endpoint: if 'endpoint' in lbObject then lbObject.endpoint else null,
  name: if 'name' in lbObject then lbObject.name else null,
  groupName: if 'groupName' in lbObject then lbObject.groupName else null,
  stickiness: if 'stickiness' in lbObject then lbObject.stickiness else false,
  // Quoted because the key contains dashes.
  'tls-1-1': if 'tls-1-1' in lbObject then lbObject['tls-1-1'] else false,
  enableGrpc: if 'enableGrpc' in lbObject then lbObject.enableGrpc else false,
  stickinessCookieDuration: if 'stickinessCookieDuration' in lbObject then lbObject.stickinessCookieDuration else 86400,
  idleTimeout: if 'idleTimeout' in lbObject then lbObject.idleTimeout else 60,
  slowStartDuration: if 'slowStartDuration' in lbObject then lbObject.slowStartDuration else 0,
  accessPolicies: if 'accessPolicies' in lbObject then lbObject.accessPolicies else [],
  extraSecurityGroups: if 'extraSecurityGroups' in lbObject then lbObject.extraSecurityGroups else [],
  // Namespace-level settings provide the fallback for these two annotations.
  accessLog: if 'accessLog' in lbObject then lbObject.accessLog else namespace_values.loadBalancer.annotations.accessLog,
  webAcl: if 'webAcl' in lbObject then lbObject.webAcl else namespace_values.loadBalancer.annotations.webAcl,
  groupOrder: if 'groupOrder' in lbObject then lbObject.groupOrder else '100',
  additionalEndpoints: if 'additionalEndpoints' in lbObject then lbObject.additionalEndpoints else [],
  redirects: if 'redirects' in lbObject then lbObject.redirects else [],
  // Always reset; callers opt in explicitly elsewhere.
  exposeToLoadBalancer: false,
};
// A mixin that carefully overrides values. It should resemble deployment_manifest.json
// Applied as `deployment_manifest_json + manifest_defaults` below, so `super`
// refers to the raw JSON manifest and `$` to the merged result.
local manifest_defaults = {
  environment: if 'environment' in super then super.environment else null,
  securityGroup: if 'securityGroup' in super then super.securityGroup else null,
  metadata: if 'metadata' in super then super.metadata else {},
  sandboxParams: if 'sandboxParams' in super then super.sandboxParams else null,
  // Flink load balancers are normalised only when a flink section exists
  // (a null computed field name drops the field entirely).
  [if 'flink' in deployment_manifest_json then 'flink' else null]+: {
    loadBalancers: std.map(lbFunction,
      if ('loadBalancers' in deployment_manifest_json.flink && deployment_manifest_json.flink.loadBalancers != []) then deployment_manifest_json.flink.loadBalancers else [{ type: 'none' }]),
  },
  deployment+: {
    //TODO: Just support $.name instead of $.deployment.name once all apps have migrated
    name: if 'name' in super then super.name else $.name,
    image: if 'image' in super then deployment.image else null,
    imagePullPolicy: if 'imagePullPolicy' in super then deployment.imagePullPolicy else 'IfNotPresent',
    maxSurge: if 'maxSurge' in super then deployment.maxSurge else null,
    controller: if 'controller' in super then deployment.controller else vars.defaultController,
    strategy: if 'strategy' in super then deployment.strategy else null,
    strategyConfig: if 'strategyConfig' in super then deployment.strategyConfig else {},
    // Guarantee 'serviceport' is always exposed.
    exposedPorts: if 'exposedPorts' in super then (if port_map.hasPort(super.exposedPorts, 'serviceport') then super.exposedPorts else super.exposedPorts + default_service_port) else default_service_port,
    healthChecks+: {
      startupProbeEnabled: if 'startupProbeEnabled' in super then deployment.healthChecks.startupProbeEnabled else false,
      // Startup probe reuses the liveness endpoint with dedicated thresholds.
      startupProbe+: {
        type: $.deployment.healthChecks.livenessCheck.type,
        port: $.deployment.healthChecks.livenessCheck.port,
        path: $.deployment.healthChecks.livenessCheck.path,
        successThreshold: defaultStartupProbe.successThreshold,
        initialDelaySeconds: defaultStartupProbe.initialDelaySeconds,
        periodSeconds: defaultStartupProbe.periodSeconds,
        failureThreshold: defaultStartupProbe.failureThreshold,
        httpHeaders+: $.deployment.healthChecks.livenessCheck.httpHeaders,
      },
      // When the startup probe is enabled, liveness/readiness delays drop to 0
      // since the startup probe already covers boot time.
      livenessCheck+: {
        type: if 'type' in super then super.type else defaultLivenessCheck.type,
        port: if 'port' in super then super.port else defaultLivenessCheck.port,
        path: if 'path' in super then super.path else defaultLivenessCheck.path,
        successThreshold: if 'successThreshold' in super then super.successThreshold else defaultLivenessCheck.successThreshold,
        initialDelaySeconds: if $.deployment.healthChecks.startupProbeEnabled then 0 else (if 'initialDelaySeconds' in super then super.initialDelaySeconds else defaultLivenessCheck.initialDelaySeconds),
        periodSeconds: if 'periodSeconds' in super then super.periodSeconds else defaultLivenessCheck.periodSeconds,
        failureThreshold: if 'failureThreshold' in super then super.failureThreshold else defaultLivenessCheck.failureThreshold,
        httpHeaders+: if 'httpHeaders' in super then super.httpHeaders else defaultLivenessCheck.httpHeaders,
      },
      readinessCheck+: {
        type: if 'type' in super then super.type else defaultReadinessCheck.type,
        port: if 'port' in super then super.port else defaultReadinessCheck.port,
        path: if 'path' in super then super.path else defaultReadinessCheck.path,
        successThreshold: if 'successThreshold' in super then super.successThreshold else defaultReadinessCheck.successThreshold,
        initialDelaySeconds: if $.deployment.healthChecks.startupProbeEnabled then 0 else (if 'initialDelaySeconds' in super then super.initialDelaySeconds else defaultReadinessCheck.initialDelaySeconds),
        periodSeconds: if 'periodSeconds' in super then super.periodSeconds else defaultReadinessCheck.periodSeconds,
        failureThreshold: if 'failureThreshold' in super then super.failureThreshold else defaultReadinessCheck.failureThreshold,
        httpHeaders+: if 'httpHeaders' in super then super.httpHeaders else defaultReadinessCheck.httpHeaders,
      },
    },
    // Non-prod gets looser deadlines; prod gets a longer grace period.
    progressDeadlineSeconds: if 'timeout' in super then super.timeout else (if $.environment != vars.environments.prod then 720 else 540),
    terminationGracePeriodSeconds: if 'terminationGracePeriodSeconds' in super then super.terminationGracePeriodSeconds else (if $.environment != vars.environments.prod then 60 else 90),
    instance+: {
      count: if 'count' in super then super.count else 2,
      cpu: if 'cpu' in super then super.cpu else '0.25',
      memory: if 'memory' in super then super.memory else '300Mi',
      // min* fields only exist when VPA is enabled for the app.
      [if $.deployment.isVpaEnabled then 'minCPU']: if 'minCPU' in super then super.minCPU else 0.5,
      [if $.deployment.isVpaEnabled then 'minMemory']: if 'minMemory' in super then super.minMemory else '512Mi',
      gpu: if 'gpu' in super then super.gpu else 0,
      gpuNodeSelector: if 'gpuNodeSelector' in super then super.gpuNodeSelector else { 'nvidia.com/gpu': 'true' },
      gpuTolerations: if 'gpuTolerations' in super then super.gpuTolerations else [{ effect: 'NoSchedule', key: 'nvidia.com/gpu', operator: 'Exists' }],
    },
    environmentVariables+: [],
    mountSecrets+: [],
    namespace: if 'namespace' in super then super.namespace else 'default',
    loadBalancers: std.map(lbFunction,
      if ( 'loadBalancers' in super && super.loadBalancers != []) then super.loadBalancers else [{ type: 'none' }]),
    commonApiGateways: std.map(function(apiGateways) {
      commonApiGatewayUrl: if 'commonApiGatewayUrl' in apiGateways then apiGateways.commonApiGatewayUrl else null,
      internalCommonApiGatewayUrl: if 'internalCommonApiGatewayUrl' in apiGateways then apiGateways.internalCommonApiGatewayUrl else null,
      gatewayAttributes: if 'gatewayAttributes' in apiGateways then apiGateways.gatewayAttributes else [],
    }, if ( 'commonApiGateways' in super && super.commonApiGateways != []) then super.commonApiGateways else [{ type: 'none' }]),
    serviceMonitor+: {
      enabled: if 'enabled' in super then super.enabled else false,
      port: if 'port' in super then super.port else 'serviceport',
      path: if 'path' in super then super.path else '/actuator/prometheus',
      namespace: if 'namespace' in super then super.namespace else 'monitoring',
      interval: if 'interval' in super then super.interval else '30s',
      metricRelabelings: if 'metricRelabelings' in super then super.metricRelabelings else [],
      scrapeTimeout: if 'scrapeTimeout' in super then super.scrapeTimeout else '10s',
    },
    elasticSearch+: {
      local defaultLabelPrefix = deployment.elasticSearch.instance.instanceName,
      local elasticsearchVersion = if 'esVersion' in deployment.elasticSearch.instance then deployment.elasticSearch.instance.esVersion else '7.17.0',
      enabled: if 'enabled' in super then super.enabled else false,
      esLabels: if 'esLabels' in super then super.esLabels else { app: chart.service_name, chart: chart.service_chart, heritage: 'NaviDeploymentManifest', release: defaultLabelPrefix + '-elasticsearch', Team: deployment_manifest_json.team.name, Environment: deployment_manifest_json.environment, Name: defaultLabelPrefix + '-elasticsearch', Product: namespace_values.additionalTags.product, Owner: if deployment_manifest_json.infraVertical == 'lending' then 'medici' else if deployment_manifest_json.infraVertical == 'insurance' then 'gi' else deployment_manifest_json.infraVertical },
      instanceName: if 'instanceName' in super then super.instanceName else 'default-elasticsearch',
      cpu: if 'cpu' in super then super.cpu else '1',
      memory: if 'memory' in super then super.memory else '1Gi',
      diskSpace: if 'diskSpace' in super then super.diskSpace else '30Gi',
      esVersion: elasticsearchVersion,
      // Lookup fails at render time for unsupported versions — intentional.
      esImage: esImageMapping[elasticsearchVersion],
      esCount: if 'esCount' in super then super.esCount else 3,
      esNodeSelector: if 'esNodeSelector' in super then super.esNodeSelector else { 'kops.k8s.io/instancegroup': 'datastore-nodes-1' },
      esTolerations: if 'esTolerations' in super then super.esTolerations else [{ effect: 'NoSchedule', key: 'node', operator: 'Equal', value: 'datastore' }],
      kibana: if 'kibana' in super then super.kibana else null,
      kibanaLabels: if 'kibanaLabels' in super then super.kibanaLabels else { app: chart.service_name, chart: chart.service_chart, heritage: 'NaviDeploymentManifest', release: defaultLabelPrefix + '-kibana', Team: deployment_manifest_json.team.name, Environment: deployment_manifest_json.environment, Name: defaultLabelPrefix + '-kibana', Product: namespace_values.additionalTags.product, Owner: if deployment_manifest_json.infraVertical == 'lending' then 'medici' else if deployment_manifest_json.infraVertical == 'insurance' then 'gi' else deployment_manifest_json.infraVertical },
      kibanaVersion: elasticsearchVersion,
      kibanaImage: kibanaImageMapping[elasticsearchVersion],
    },
    perfUtility+: {
      mockServerEnabled: if 'mockServer' in super then super.mockServer else false,
      mockServerImage: if 'mockServerImage' in super then super.mockServerImage else '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/perf-mock-server:latest',
      postgresServerEnabled: if 'postgresServer' in super then super.postgresServer else false,
      postgresServerImage: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/postgres:13',
    },
    hpa+: {
      type: if 'type' in super then super.type else vars.deployment.hpa.type.metrics,
      // Both bounds default to the static instance count (no autoscaling).
      maxReplicas: if 'maxReplicas' in super then super.maxReplicas else $.deployment.instance.count,
      minReplicas: if 'minReplicas' in super then super.minReplicas else $.deployment.instance.count,
      metrics: if 'metrics' in super then super.metrics else [],
      custom_metrics: if 'custom_metrics' in super then super.custom_metrics else [],
      cronJobs: if 'cronJobs' in super then super.cronJobs else [],
    },
    isVpaEnabled: if 'isVpaEnabled' in super then super.isVpaEnabled else false,
    isLinkConfig: if 'linkConfig' in super && super.linkConfig != null then super.linkConfig else false,
    vpa+: {
      maxAllowed: {
        cpu: if 'cpu' in super then super.cpu else vars.vpa.maxAllowedCPU,
        memory: if 'memory' in super then super.memory else vars.vpa.maxAllowedMemory,
      },
    },
    allowEgress: if 'allowEgress' in super then super.allowEgress else [],
    alerts+: {
      // Pod alerts merge with defaults; every other category defaults empty.
      pod: mergePodAlerts(vars.deployment.alerts.pod, if 'pod' in super then super.pod else []),
      loadBalancer+: [],
      database+: [],
      kafka+: [],
      custom+: [],
      kong+: [],
      prometheusRecordingRule+: [],
      underUtilisedResources+: [],
    },
    disableIstio: if 'disableIstio' in super then super.disableIstio else false,
  },
  team+: {
    name: if 'name' in super then super.name else 'Infra',
  },
  // Flink apps skip the micrometer label entirely; otherwise it is derived
  // from serviceMonitor state and the presence of a 'metrics' port.
  [if 'flink' in deployment_manifest_json then null else 'labels']+: {
    'micrometer-prometheus': if 'micrometer-prometheus' in super then super['micrometer-prometheus']
    else if ($.deployment.serviceMonitor.enabled == false && port_map.hasPort($.deployment.exposedPorts, 'metrics')) then 'enabled' else 'disabled',
  },
  isSwApmEnabled: if 'isSwApmEnabled' in super then super.isSwApmEnabled else namespace_values.isSwApmEnabled,
  extraResources: if 'extraResources' in super then super.extraResources else null,
};
// Merge defaults over the raw manifest, then validate before exporting.
local deployment_manifest = deployment_manifest_json + manifest_defaults;
//For Validation
// Per gateway-attribute count of configured rate-limit rules (0 when absent).
local rateLimitRulesLength(commonApiGateways) = [
  if 'rateLimitRules' in attribute then std.length(attribute.rateLimitRules) else 0
  for gateway in commonApiGateways
  for attribute in gateway.gatewayAttributes
];
local commonApiGateways = if 'flink' in deployment_manifest then [] else deployment_manifest.deployment.commonApiGateways;
assert std.length([value for value in rateLimitRulesLength(commonApiGateways) if value > 1]) == 0 : 'Apigateway has more than one rateLimiting rule configured in at least one of the gateway attributes';
// FIX: the error message always promised a NON-EMPTY string but the check
// only verified the type, silently accepting cluster: ''. Enforce both.
assert std.isString(deployment_manifest.cluster) && deployment_manifest.cluster != '' : 'ValidationError: cluster must be a non empty string';
deployment_manifest

View File

@@ -1,108 +0,0 @@
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local vars = import 'vars.jsonnet';
local rolloutController = vars.rolloutController;
local deploymentController = deployment.controller;
// Canary traffic routing only ever targets the first declared load balancer.
local ingress = deployment_manifest.deployment.loadBalancers[0];
local load_balancer_util = import 'load_balancer_util.jsonnet';
local chart = import 'chart.jsonnet';
local port_map = import 'port_map.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
// True when `field` is absent from `parent` or holds an "empty" value
// ({}, [] or ''). Simplified from the redundant `if … then false else true`.
local empty(parent, field) = !(field in parent && parent[field] != {} && parent[field] != [] && parent[field] != '');
// Exported helpers for building Deployment/Rollout update strategies.
{
  // Maps a manifest canary step to its Argo Rollouts representation.
  stepsValueMap(step):: {
    manualPromotion: { pause: {} },
    setWeight: { setWeight: step.value },
    pause: { pause: { duration: step.value } },
  }[step.name],
  getSteps(steps):: [
    $.stepsValueMap(step)
    for step in steps
  ],
  stickinessConfig(stickinessDuration):: {
    enabled: true,
    durationSeconds: stickinessDuration,
  },
  // Default surge: 51% for small fleets (<=5 replicas), 20% otherwise.
  // NOTE(review): the explicit branch appends '%' to maxSurge — assumes the
  // manifest supplies it as a string; a numeric value would fail at render
  // time. Confirm against manifest schema.
  getMaxSurge(deployment)::
    if deployment.maxSurge == null || deployment.maxSurge == '' then
      if deployment.hpa.minReplicas <= 5 then '51%' else '20%'
    else
      deployment.maxSurge + '%',
  strategy:: {
    // Rollout controller expresses rolling update as a stepless canary;
    // the default controller uses the native RollingUpdate strategy.
    rollingUpdate()::
      if (deploymentController == rolloutController) then {
        canary: {
          maxSurge: $.getMaxSurge(deployment),
          maxUnavailable: 0,
        },
      } else {
        assert deployment.strategy != 'canary' : '%s controller does not support canary' % deploymentController,
        type: 'RollingUpdate',
        rollingUpdate: {
          maxSurge: $.getMaxSurge(deployment),
          maxUnavailable: 0,
        },
      }
    ,
    // Full canary strategy with ALB traffic routing and optional analysis.
    canary(config={}):: {
      assert deploymentController == rolloutController : '%s controller is not supported for canary' % deployment.controller,
      assert std.find(ingress.type, ['alb', 'sharedAlbAcrossNamespace']) != [] : '%s is not supported for canary' % ingress.type,
      local ingressFullName = load_balancer_util.ingress_name(chart.full_service_name(deployment.name), ingress),
      local fullName = chart.full_service_name(deployment.name),
      local analysisConfig = if !empty(config, 'analysis') then config.analysis else {},
      canary: {
        maxSurge: '51%',
        maxUnavailable: 0,
        [if analysisConfig != {} then 'analysis']: {
          templates: [{
            templateName: chart.full_service_name(deployment.name),
          }],
          // NOTE(review): when this condition holds, the computed 'templates'
          // field duplicates the literal one above, which jsonnet rejects as
          // a duplicate-field error — verify the intended override mechanism.
          [if !empty(analysisConfig, 'templates') && deployment.analysisTemplate != null then 'templates']: analysisConfig.templates,
          [if !empty(analysisConfig, 'args') then 'args']: analysisConfig.args,
          [if !empty(analysisConfig, 'startingStep') then 'startingStep']: analysisConfig.startingStep,
        },
        steps: if empty(config, 'steps') then vars.defaultCanarySteps else $.getSteps(config.steps),
        stableService: '%s-stable' % fullName,
        canaryService: '%s-canary' % fullName,
        trafficRouting: {
          alb: {
            ingress: ingressFullName,
            rootService: fullName,
            servicePort: port_map.getPort('serviceport'),
            [if 'stickinessDuration' in config && config.stickinessDuration > 0 then 'stickinessConfig']: $.stickinessConfig(config.stickinessDuration),
          },
        },
      },
    },
    // Rolling update expressed via the canary machinery, used when migrating
    // an app between strategies without recreating services.
    rollingUpdateWithCanaryMixIn(config={}):: {
      assert deploymentController == rolloutController : '%s controller is not supported for canary' % deployment.controller,
      assert std.find(ingress.type, ['alb', 'sharedAlbAcrossNamespace']) != [] : '%s is not supported for canary' % ingress.type,
      local ingressFullName = load_balancer_util.ingress_name(chart.full_service_name(deployment.name), ingress),
      local fullName = chart.full_service_name(deployment.name),
      canary: {
        maxSurge: '51%',
        maxUnavailable: 0,
        stableService: '%s-stable' % fullName,
        canaryService: '%s-canary' % fullName,
        trafficRouting: {
          alb: {
            ingress: ingressFullName,
            rootService: fullName,
            servicePort: port_map.getPort('serviceport'),
            [if 'stickinessDuration' in config && config.stickinessDuration > 0 then 'stickinessConfig']: $.stickinessConfig(config.stickinessDuration),
          },
        },
        // NOTE(review): reads config.currentStrategy without an 'in' guard —
        // errors if callers omit it; confirm all call sites supply the field.
        [if config.currentStrategy == 'canary' then 'steps']: [{ pause: {} }],
      },
    },
  },
  // Volume features are gated on both namespace support and manifest opt-in.
  isEfsNeeded(deployment):: namespace_values.isEfsSupported && 'efs' in deployment,
  isFsxNeeded(deployment):: namespace_values.isFsxSupported && 'fsx' in deployment,
}

View File

@@ -1,22 +0,0 @@
// Renders an Opaque Secret carrying the service's dynamic configuration
// files (one Secret key per configured file), or null when the manifest
// declares no dynamic configuration at all.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local manifest_util = import 'manifest_util.jsonnet';

local has_dynamic_config = manifest_util.is_dynamic_config_present(deployment_manifest);

if has_dynamic_config then {
  apiVersion: 'v1',
  kind: 'Secret',
  type: 'Opaque',
  metadata: {
    name: chart.full_service_name(deployment_manifest.deployment.name) + '-dynamic-secret',
    namespace: deployment_manifest.deployment.namespace,
    labels: common.labels,
    annotations: common.annotations,
  },
  // One stringData entry per dynamic-config file: fileName -> raw contents.
  stringData: {
    [entry.fileName]: entry.data
    for entry in deployment_manifest.dynamicConfiguration
  },
}

View File

@@ -1,34 +0,0 @@
// Emits a List of PersistentVolumeClaims, one per EFS volume declared on the
// deployment, when the namespace supports EFS; otherwise evaluates to null.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment_util = import 'deployment_util.jsonnet';
local deployment = deployment_manifest.deployment;
local namespace_values = import 'namespace_values.jsonnet';

// Build a single PVC bound to the storage class named after the EFS entry.
local make_pvc(efs) = {
  apiVersion: 'v1',
  kind: 'PersistentVolumeClaim',
  metadata: {
    name: chart.full_service_name(deployment.name) + '-' + efs.name,
    labels: common.labels,
    annotations: common.annotations,
    namespace: deployment.namespace,
  },
  spec: {
    accessModes: ['ReadWriteMany'],
    storageClassName: efs.name,
    resources: {
      requests: {
        // EFS is elastic; the requested size is a nominal placeholder.
        storage: '1Mi',
      },
    },
  },
};

// `&&` is lazy, so `deployment.efs` is only read when EFS is actually needed.
if deployment_util.isEfsNeeded(deployment) && std.length(deployment.efs) > 0 then {
  apiVersion: 'v1',
  kind: 'List',
  items: [make_pvc(efs) for efs in deployment.efs],
}

View File

@@ -1,90 +0,0 @@
// Renders an ECK Elasticsearch custom resource (single 'node' nodeSet acting
// as master+data+ingest) in the '<environment>-datastores' namespace, or null
// when elasticSearch is not enabled in the manifest.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
// JSON string consumed by k8s-pvc-tagger to tag the EBS volumes behind the PVCs.
local PVCAnnotations = '{ "Team": "%s", "Environment": "%s", "Product": "%s" }' % [deployment_manifest.team.name, deployment_manifest.environment, deployment_manifest.metadata.product];
if deployment_manifest.deployment.elasticSearch.enabled == true then {
  apiVersion: 'elasticsearch.k8s.elastic.co/v1',
  kind: 'Elasticsearch',
  metadata: {
    name: deployment.elasticSearch.instance.instanceName + '-elasticsearch',
    namespace: deployment_manifest.environment + '-datastores',
    labels: deployment.elasticSearch.esLabels,
    annotations: common.annotations,
  },
  spec: {
    version: deployment.elasticSearch.esVersion,
    image: deployment.elasticSearch.esImage,
    // AWS credentials injected into the ES keystore for S3 snapshot backups.
    secureSettings: [
      {
        secretName: 'aws-credentials-es-backup',
      },
    ],
    http: {
      tls: {
        // Plain HTTP inside the cluster; ECK's self-signed cert is disabled.
        selfSignedCertificate: {
          disabled: true,
        },
      },
    },
    nodeSets: [
      {
        name: 'node',
        config: {
          'node.roles': [
            'master',
            'data',
            'ingest',
          ],
          // mmap disabled so no vm.max_map_count host tuning is required.
          'node.store.allow_mmap': false,
        },
        podTemplate: {
          metadata: {
            labels: deployment.elasticSearch.esLabels,
          },
          spec: {
            nodeSelector: deployment.elasticSearch.esNodeSelector,
            tolerations: deployment.elasticSearch.esTolerations,
            containers: [
              {
                name: 'elasticsearch',
                // requests == limits: Guaranteed QoS for the ES container.
                resources: {
                  requests: {
                    memory: deployment.elasticSearch.instance.memory,
                    cpu: deployment.elasticSearch.instance.cpu,
                  },
                  limits: {
                    memory: deployment.elasticSearch.instance.memory,
                    cpu: deployment.elasticSearch.instance.cpu,
                  },
                },
              },
            ],
          },
        },
        count: deployment.elasticSearch.esCount,
        volumeClaimTemplates: [
          {
            metadata: {
              name: 'elasticsearch-data',
              annotations: {
                'k8s-pvc-tagger/tags': PVCAnnotations
              }
            },
            spec: {
              accessModes: [
                'ReadWriteOnce',
              ],
              resources: {
                requests: {
                  storage: deployment.elasticSearch.instance.diskSpace,
                },
              },
              // Retain-policy class so data survives PVC deletion.
              storageClassName: 'gp3-retain-policy',
            },
          },
        ],
      },
    ],
  },
}

View File

@@ -1,186 +0,0 @@
// PrometheusRule with built-in health alerts (heap, disk, cluster status,
// shards, pending tasks) for the per-service Elasticsearch cluster.
// Evaluates to null when elasticSearch is not enabled.
// All exprs scope metrics to this cluster via es_cluster="<clusterName>".
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
local app_name = chart.full_service_name(deployment_manifest.deployment.name);
local namespace = deployment_manifest.deployment.namespace;
local deployment = deployment_manifest.deployment;
local clusterName = deployment.elasticSearch.instance.instanceName + '-elasticsearch';
if deployment_manifest.deployment.elasticSearch.enabled == true then {
  apiVersion: 'monitoring.coreos.com/v1',
  kind: 'PrometheusRule',
  metadata: {
    labels: deployment.elasticSearch.esLabels {
      prometheus: 'kube-prometheus',
      role: 'alert-rules',
    },
    annotations: common.annotations,
    name: clusterName + '-alerts',
    namespace: deployment_manifest.environment + '-datastores',
  },
  spec: {
    groups: [
      {
        name: clusterName + '-alerts',
        // NOTE: 'for' is quoted throughout because `for` is a jsonnet keyword.
        rules: [
          {
            alert: 'ElasticsearchHeapUsageTooHigh',
            expr: '(es_jvm_mem_heap_used_bytes{job=~".*http",es_cluster="%(clustername)s"} / es_jvm_mem_heap_max_bytes{job=~".*http",es_cluster="%(clustername)s"}) * 100 > 90' % ({ clustername: clusterName }),
            'for': '20m',
            labels: {
              severity: 'critical',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch Heap Usage Too High (node `{{ $labels.node }}`)',
              description: 'The heap usage is over 90% for 5m VALUE = `{{ $value }}`\n NAME: `{{ $labels.node }}`',
            },
          },
          {
            alert: 'ElasticsearchHeapUsageWarning',
            expr: '(es_jvm_mem_heap_used_bytes{job=~".*http",es_cluster="%(clustername)s"} / es_jvm_mem_heap_max_bytes{job=~".*http",es_cluster="%(clustername)s"}) * 100 > 80' % ({ clustername: clusterName }),
            'for': '15m',
            labels: {
              severity: 'warning',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch Heap Usage warning (node `{{ $labels.node }}`)',
              description: 'The heap usage is over 80% for 15m\n VALUE = `{{ $value }}`\n NAME: `{{ $labels.node }}`',
            },
          },
          {
            alert: 'ElasticsearchAvgDiskOutOfSpace_Warning',
            expr: '(es_fs_total_free_bytes{job=~".*http",es_cluster="%(clustername)s"}/es_fs_total_total_bytes{job=~".*http",es_cluster="%(clustername)s"}) * 100 < 15' % ({ clustername: clusterName }),
            'for': '20m',
            labels: {
              severity: 'warning',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch average disk out of space (node - `{{ $labels.node }}`). No new shards will be allocated at this node',
              description: 'The disk usage is over 85%\n VALUE = `{{ $value }}`',
            },
          },
          {
            alert: 'ElasticsearchDiskOutOfSpace',
            expr: '(es_fs_total_free_bytes{job=~".*http",es_cluster="%(clustername)s"}/es_fs_total_total_bytes{job=~".*http",es_cluster="%(clustername)s"}) * 100 < 10' % ({ clustername: clusterName }),
            'for': '10m',
            labels: {
              severity: 'critical',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch disk out of space (node `{{ $labels.node }}`). No new shards will be allocated at this node',
              description: 'The disk usage is over 90%\n VALUE = `{{ $value }}`\n NAME: `{{ $labels.node }}`',
            },
          },
          {
            // es_cluster_status: 0=green, 1=yellow, 2=red.
            alert: 'ElasticsearchClusterRed',
            expr: 'max(es_cluster_status{job=~".*http",es_cluster="%(clustername)s"}) by (es_cluster) == 2' % ({ clustername: clusterName }),
            'for': '5m',
            labels: {
              severity: 'critical',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch Cluster Red (cluster - `{{ $labels.es_cluster }}`)',
              description: 'Elastic Cluster Red',
            },
          },
          {
            alert: 'ElasticsearchClusterYellow',
            expr: 'max(es_cluster_status{job=~".*http",es_cluster="%(clustername)s"}) by (es_cluster) == 1' % ({ clustername: clusterName }),
            'for': '15m',
            labels: {
              severity: 'warning',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch Cluster Yellow (cluster - `{{ $labels.es_cluster }}`)',
              description: 'Elastic Cluster Yellow for 15 minutes',
            },
          },
          {
            // index!~"^[.].*" excludes hidden/system indices from the check.
            alert: 'ElasticsearchClusterIndexReplicaUnavailable',
            expr: 'min(es_index_replicas_number{job=~".*http",es_cluster="%(clustername)s",index!~"^[.].*"}) by (es_cluster,index) < 1' % ({ clustername: clusterName }),
            'for': '15m',
            labels: {
              severity: 'warning',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch Cluster Index Replica less than 1 (cluster - `{{ $labels.es_cluster }}`)',
              description: 'Elastic Cluster Index Replica less than 1 for 15 minutes\n VALUE = `{{ $value }}`',
            },
          },
          {
            alert: 'ElasticsearchInitializingShards',
            expr: 'max(es_cluster_shards_number{type="initializing",job=~".*http",es_cluster="%(clustername)s"}) by (es_cluster) > 0' % ({ clustername: clusterName }),
            'for': '10m',
            labels: {
              severity: 'warning',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch initializing shards (cluster `{{ $labels.es_cluster }}`)',
              description: 'Number of initializing shards for 10 min\n VALUE = `{{ $value }}`',
            },
          },
          // NOTE(review): 'ElasticsearchUnassignedShards' appears twice on
          // purpose-looking tiers (critical@30m below, warning@15m after it);
          // confirm the duplicate alert name is intended.
          {
            alert: 'ElasticsearchUnassignedShards',
            expr: 'max(es_cluster_shards_number{type="unassigned",job=~".*http",es_cluster="%(clustername)s"}) by (es_cluster) > 0' % ({ clustername: clusterName }),
            'for': '30m',
            labels: {
              severity: 'critical',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch unassigned shards (cluster `{{ $labels.es_cluster }}`)',
              description: 'Number of unassigned shards for 30 min\n VALUE = `{{ $value }}`',
            },
          },
          {
            alert: 'ElasticsearchUnassignedShards',
            expr: 'max(es_cluster_shards_number{type="unassigned",job=~".*http",es_cluster="%(clustername)s"}) by (es_cluster) > 0' % ({ clustername: clusterName }),
            'for': '15m',
            labels: {
              severity: 'warning',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch unassigned shards (cluster `{{ $labels.es_cluster }}`)',
              description: 'Number of unassigned shards for 15 min\n VALUE = `{{ $value }}`',
            },
          },
          {
            alert: 'ElasticsearchPendingTasks',
            expr: 'max(es_cluster_pending_tasks_number{job=~".*http",es_cluster="%(clustername)s"}) by (es_cluster) > 0' % ({ clustername: clusterName }),
            'for': '15m',
            labels: {
              severity: 'warning',
              alertTeam: deployment_manifest.team.name,
              appName: clusterName,
            },
            annotations: {
              summary: 'Elasticsearch pending tasks (cluster `{{ $labels.es_cluster }}`)',
              description: 'Number of pending tasks for 15 min. Cluster works slowly.\n VALUE = `{{ $value }}`',
            },
          },
        ],
      },
    ],
  },
}

View File

@@ -1,18 +0,0 @@
// Renders the '<cluster>-es-elastic-user' Secret that ECK expects, seeding
// the 'elastic' superuser password from the manifest's environment variables.
// Evaluates to null when elasticSearch is not enabled.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;

local es = deployment.elasticSearch;
local cluster_name(instance) = instance.instanceName + '-elasticsearch';

if es.enabled == true then {
  apiVersion: 'v1',
  kind: 'Secret',
  type: 'Opaque',
  metadata: {
    name: cluster_name(es.instance) + '-es-elastic-user',
    namespace: deployment_manifest.environment + '-datastores',
    labels: es.esLabels,
    annotations: common.annotations,
  },
  // Pick the env var literally named 'elastic'; its value becomes the
  // base64-encoded password under the key 'elastic'.
  data: {
    [e.name]: std.base64(e.value)
    for e in deployment_manifest.environmentVariables
    if std.toString(e.name) == 'elastic'
  },
}

View File

@@ -1,75 +0,0 @@
// ServiceMonitor scraping the ES prometheus-exporter plugin endpoint
// (/_prometheus/metrics) for this cluster; null when ES is not enabled.
// Relabeling order matters: high-cardinality series are dropped first, then
// the exporter's 'cluster' label is copied to 'es_cluster' and removed.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
if deployment_manifest.deployment.elasticSearch.enabled == true then {
  apiVersion: 'monitoring.coreos.com/v1',
  kind: 'ServiceMonitor',
  metadata: {
    labels: deployment.elasticSearch.esLabels,
    name: deployment.elasticSearch.instance.instanceName + '-elasticsearch-monitor',
    namespace: deployment_manifest.environment + '-datastores',
    annotations: common.annotations,
  },
  spec: {
    endpoints: [
      {
        metricRelabelings: [
          {
            // Drop per-segment memory series (high cardinality).
            action: 'drop',
            regex: 'es_index_segments_memory_bytes',
            sourceLabels: ['__name__'],
          },
          {
            // Drop series scraped via legacy es-node jobs.
            action: 'drop',
            regex: '.*es-node',
            sourceLabels: ['job'],
          },
          {
            // Drop cluster-level series reported by non-primary node pods.
            action: 'drop',
            regex: 'es_cluster.*;.*es-node-[1-9]+',
            sourceLabels: ['__name__', 'pod'],
          },
          {
            // Copy exporter's 'cluster' label into 'es_cluster'
            // (default action is 'replace').
            sourceLabels: ['cluster'],
            targetLabel: 'es_cluster',
            replacement: '$1'
          },
          {
            // ...then remove the original 'cluster' label.
            action: 'labeldrop',
            regex: '^cluster$',
          }
        ],
        interval: '30s',
        path: '/_prometheus/metrics',
        port: 'http',
        scheme: 'http',
        tlsConfig: {
          insecureSkipVerify: true,
        },
        // Credentials come from the '<cluster>-sm-secret' Secret rendered
        // by the sibling template.
        basicAuth: {
          password: {
            name: deployment.elasticSearch.instance.instanceName + '-elasticsearch' + '-sm-secret',
            key: 'password',
          },
          username: {
            name: deployment.elasticSearch.instance.instanceName + '-elasticsearch' + '-sm-secret',
            key: 'username',
          },
        },
      },
    ],
    namespaceSelector: {
      matchNames: [
        deployment_manifest.environment + '-datastores',
      ],
    },
    selector: {
      matchLabels: {
        'common.k8s.elastic.co/type': 'elasticsearch',
        'elasticsearch.k8s.elastic.co/cluster-name': deployment.elasticSearch.instance.instanceName + '-elasticsearch',
      },
    },
  },
}

View File

@@ -1,18 +0,0 @@
// Basic-auth Secret for the ServiceMonitor scrape: derives 'username' and
// 'password' keys from the manifest env var named 'elastic' (the env var's
// name becomes the username, its value the password). Null when ES disabled.
// NOTE(review): each comprehension emits the same key per matching env var —
// if more than one env var is named 'elastic', jsonnet fails with a
// duplicate-field error. Confirm exactly one such variable exists.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
if deployment_manifest.deployment.elasticSearch.enabled == true then {
  apiVersion: 'v1',
  kind: 'Secret',
  metadata: {
    name: deployment.elasticSearch.instance.instanceName + '-elasticsearch' + '-sm-secret',
    namespace: deployment_manifest.environment + '-datastores',
    labels: deployment.elasticSearch.esLabels,
    annotations: common.annotations,
  },
  data: { ['username']: std.base64(e.name) for e in deployment_manifest.environmentVariables if std.toString(e.name) == 'elastic' } + { ['password']: std.base64(e.value) for e in deployment_manifest.environmentVariables if std.toString(e.name) == 'elastic' },
  type: 'Opaque',
}

View File

@@ -1,122 +0,0 @@
// One-shot Job that prepares S3 snapshotting for the ES cluster:
//   init container -> waits for ES, registers the 's3' snapshot repository;
//   main container -> installs a daily SLM snapshot policy (7-14 kept, 7d TTL).
// Both authenticate as 'elastic' using the es-elastic-user Secret mounted as
// a file. Evaluates to null when elasticSearch is not enabled.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
if deployment_manifest.deployment.elasticSearch.enabled == true then {
  apiVersion: 'batch/v1',
  kind: 'Job',
  metadata: {
    name: deployment.elasticSearch.instance.instanceName + '-elasticsearch' + '-init-snapshots',
    namespace: deployment_manifest.environment + '-datastores',
    labels: deployment.elasticSearch.esLabels,
    annotations: common.annotations,
  },
  spec: {
    template: {
      spec: {
        // Step 1: register the S3 snapshot repository once ES is reachable
        // (dockerize blocks on the TCP port, up to 600s).
        initContainers: [
          {
            name: 'elasticsearch-s3-repository',
            image: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/centos:7-custom',
            imagePullPolicy: 'IfNotPresent',
            volumeMounts: [
              {
                name: 'es-basic-auth',
                mountPath: '/mnt/elastic/es-basic-auth',
              },
            ],
            env: [
              {
                // In-cluster HTTP service created by ECK for this cluster.
                name: 'ES_HOST',
                value: deployment.elasticSearch.instance.instanceName + '-elasticsearch-es-http.' + deployment_manifest.environment + '-datastores.' + 'svc.cluster.local',
              },
              {
                name: 'ES_PORT',
                value: '9200',
              },
              {
                name: 'ES_REPOSITORY',
                value: 'snapshots',
              },
              {
                name: 'S3_REGION',
                value: 'ap-south-1',
              },
              {
                // Backup bucket name is resolved from a pre-provisioned Secret.
                name: 'S3_BUCKET',
                valueFrom: {
                  secretKeyRef: {
                    name: 's3-bucket-es-backup',
                    key: 'bucket',
                  },
                },
              },
              {
                name: 'S3_BASE_PATH',
                value: deployment.elasticSearch.instance.instanceName,
              },
              {
                name: 'S3_COMPRESS',
                value: 'true',
              },
              {
                name: 'S3_STORAGE_CLASS',
                value: 'standard',
              },
            ],
            command: [
              '/bin/sh',
              '-c',
            ],
            args: [
              "dockerize -wait tcp://${ES_HOST}:${ES_PORT} -timeout 600s && curl -s -i -k -u \"elastic:$(</mnt/elastic/es-basic-auth/elastic)\" -XPUT -H \"Content-Type: application/json\" http://${ES_HOST}:${ES_PORT}/_snapshot/${ES_REPOSITORY} -d '{\n \"type\": \"s3\",\n \"settings\": {\n \"bucket\": \"'\"${S3_BUCKET}\"'\",\n \"base_path\": \"'\"${S3_BASE_PATH}\"'\",\n \"region\": \"'\"${S3_REGION}\"'\",\n \"compress\": \"'\"${S3_COMPRESS}\"'\",\n \"storage_class\": \"'\"${S3_STORAGE_CLASS}\"'\"\n }\n}'\n",
            ],
          },
        ],
        // Step 2: create the 'daily-snapshots' SLM policy (23:59 daily).
        containers: [
          {
            name: 'slm',
            image: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/centos:7-custom',
            volumeMounts: [
              {
                name: 'es-basic-auth',
                mountPath: '/mnt/elastic/es-basic-auth',
              },
            ],
            env: [
              {
                name: 'ES_HOST',
                value: deployment.elasticSearch.instance.instanceName + '-elasticsearch-es-http.' + deployment_manifest.environment + '-datastores.' + 'svc.cluster.local',
              },
              {
                name: 'ES_PORT',
                value: '9200',
              },
              {
                name: 'ES_REPOSITORY',
                value: 'snapshots',
              },
            ],
            command: [
              '/bin/bash',
              '-c',
            ],
            args: [
              "curl -s -i -k -u \"elastic:$(</mnt/elastic/es-basic-auth/elastic)\" -XPUT \"http://${ES_HOST}:${ES_PORT}/_slm/policy/daily-snapshots?pretty\" -H 'Content-Type: application/json' -d '{\n \"schedule\": \"'\"0 59 23 * * ?\"'\",\n \"name\": \"'\"<daily-snap-{now/d}>\"'\",\n \"repository\": \"'\"${ES_REPOSITORY}\"'\",\n \"config\": {\n \"indices\": \"'\"*\"'\",\n \"include_global_state\": \"'\"true\"'\"\n },\n \"retention\": {\n \"expire_after\": \"7d\",\n \"min_count\": 7,\n \"max_count\": 14\n }\n}'\n",
            ],
          },
        ],
        restartPolicy: 'Never',
        volumes: [
          {
            // 'elastic' superuser password, mounted as a file and read by curl.
            name: 'es-basic-auth',
            secret: {
              secretName: deployment.elasticSearch.instance.instanceName + '-elasticsearch' + '-es-elastic-user',
            },
          },
        ],
      },
    },
  },
}

View File

@@ -1,238 +0,0 @@
// Flink PrometheusRule template — shared locals and alert-assembly helpers.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
local app_name = chart.full_service_name(deployment_manifest.name);
local namespace = deployment_manifest.flink.namespace;
local load_balancer_util = import 'load_balancer_util.jsonnet';
local manifest_util = import 'manifest_util.jsonnet';
local flink = deployment_manifest.flink;
local vars = import 'vars.jsonnet';
local util = import 'util.jsonnet';
local environment = deployment_manifest.environment;
// NOTE(review): commonAlertFields is not referenced anywhere in this file —
// confirm whether it is dead code before relying on it.
local commonAlertFields = {
  appName: common.awsTags.Name,
  fullName: chart.full_service_name(deployment_manifest.name),
  namespace: namespace,
  environment: environment,
};
// Labels merged into every generated rule; a per-alert Slack channel override
// is attached only when the manifest enables one.
local baseLabels = function(alert) {
  labels: {
    severity: alert.severity,
    alertTeam: deployment_manifest.team.name,
    appName: app_name,
    [if manifest_util.is_custom_slack_channel_enabled(alert) then 'slackChannel']: alert.slackChannel,
  },
};
// Shared runbook annotation; merged first, so per-alert annotations
// (which come later in mapAlerts) can override it.
local baseAnnotations = function(alert) {
  annotations: {
    runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/509936863/Runbook',
  },
};
// Keep only descriptors whose `type` has a builder in `alertGroup`, then
// assemble each rule as annotations + builder output + labels.
local mapAlerts(alertGroup, alerts) = std.filterMap(
  function(alert) alert.type in alertGroup,
  function(alert) baseAnnotations(alert) + alertGroup[alert.type](alert) + baseLabels(alert),
  alerts
);
// Static alert catalogue consumed by mapAlerts (defined earlier in this
// file): each descriptor names a builder (`type`), how long the condition
// must hold (`duration`), its severity, and the numeric trigger threshold.
local alerts = {
  pod: [
    { type: 'HighPodRestarts', duration: '30m', severity: 'critical', threshold: 3 },
    { type: 'HighPodFailures', duration: '3h', severity: 'warning', threshold: 2 },
    { type: 'FrequentPodOOMKilled', duration: '10m', severity: 'critical', threshold: 2 },
    { type: 'PodOOMKilled', duration: '5m', severity: 'warning', threshold: 1 },
    { type: 'KubeContainerWaiting', duration: '1h', severity: 'critical', threshold: 0 },
  ],
  flink: [
    { type: 'JobManagerJvmMemoryUsageHigh', duration: '10m', severity: 'critical', threshold: 85 },
    { type: 'JobManagerCpuLoadHigh', duration: '10m', severity: 'critical', threshold: 75 },
    { type: 'TaskManagerJvmCpuLoadHigh', duration: '10m', severity: 'critical', threshold: 75 },
    { type: 'TaskManagerJvmMemoryUsageHigh', duration: '10m', severity: 'critical', threshold: 85 },
    { type: 'JobManagerFailedCheckpointIncreased', duration: '5m', severity: 'critical', threshold: 0 },
    { type: 'FlinkTaskFailed', duration: '5m', severity: 'critical', threshold: 0 },
  ],
  // No user-defined custom alerts by default.
  custom: [],
};
// Builders for generic pod-health rules, keyed by alert `type`. Each takes
// one descriptor from `alerts.pod` and returns the rule's alert/expr fields.
// NOTE(review): only KubeContainerWaiting sets a 'for' clause; the others
// fire as soon as their expr holds on an evaluation — confirm intentional.
local podAlerts = {
  // More than `threshold` container restarts within `duration`.
  HighPodRestarts: function(alert) ({
    alert: 'HighPodRestarts',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pod restarted multiple times' % [namespace, app_name],
      summary: 'High Pod Restarts',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'sum(increase(kube_pod_container_status_restarts_total{namespace="%s", pod=~"%s.*"}[%s])) > %s' % [namespace, app_name, alert.duration, alert.threshold],
  }),
  // Terminations for reasons other than Completed/Evicted/OOMKilled.
  HighPodFailures: function(alert) ({
    alert: 'HighPodFailures',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pods were last terminated due to reason {{ $labels.reason }}' % [namespace, app_name],
      summary: 'High Pod Failures',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'sum(increase(kube_pod_container_status_last_terminated_reason{namespace="%s", container=~"%s.*",reason !~ "Completed|Evicted|OOMKilled"}[%s])) by (reason,pod) > %s' % [namespace, app_name, alert.duration, alert.threshold],
  }),
  // Repeated restarts whose last termination reason was OOMKilled.
  FrequentPodOOMKilled: function(alert) ({
    alert: 'FrequentPodOOMKilled',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pod: {{ $labels.pod }} is restarting multiple times because of OOMKilled' % [namespace, app_name],
      summary: 'High Pod Failures',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'increase(kube_pod_container_status_restarts_total{namespace="%s", container="%s"}[%s]) >= %s AND ignoring(reason) kube_pod_container_status_last_terminated_reason{namespace="%s", container="%s", reason="OOMKilled"} > 0' % [namespace, app_name, alert.duration, alert.threshold, namespace, app_name],
  }),
  // Single OOMKill detected via restart-counter delta over `duration`.
  PodOOMKilled: function(alert) ({
    alert: 'PodOOMKilled',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Pod: {{ $labels.pod }} killed because of OOMKilled' % [namespace, app_name],
      summary: 'Pod OOMKilled',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'kube_pod_container_status_restarts_total{namespace="%s", container="%s"} - kube_pod_container_status_restarts_total{namespace="%s", container="%s"} offset %s >= %s AND ignoring(reason) kube_pod_container_status_last_terminated_reason{namespace="%s", container="%s", reason="OOMKilled"} > 0' % [namespace, app_name, namespace, app_name, alert.duration, alert.threshold, namespace, app_name],
  }),
  // Container stuck in a waiting state for `duration` (e.g. image pull issues).
  KubeContainerWaiting: function(alert) ({
    alert: 'KubeContainerWaiting',
    annotations: {
      description: 'Namespace: %s, AppName: %s; container in waiting state for one hour' % [namespace, app_name],
      summary: 'container is waiting for too long',
      runbook: 'https://navihq.atlassian.net/wiki/spaces/IN/pages/279937094/Act+On+Pod+Alert',
    },
    expr: 'sum by (namespace, pod, container) (kube_pod_container_status_waiting_reason{container="%s", namespace="%s"}) > %s' % [app_name, namespace, alert.threshold],
    'for': alert.duration,
  }),
};
// Builders for Flink JobManager/TaskManager health rules, keyed by alert
// `type`. Each builder takes one descriptor from `alerts.flink` (duration,
// severity, threshold) and returns the rule's alert/expr/for/annotations;
// labels and the shared runbook annotation are merged in by mapAlerts.
local flinkAlerts = {
  // JobManager heap usage above `threshold` percent for `duration`.
  JobManagerJvmMemoryUsageHigh: function(alert) ({
    alert: 'JobManagerJvmMemoryUsageHigh',
    annotations: {
      description: 'Namespace: %s, AppName: %s; JVM Memory usage more than 80 percent for flink job {{ $labels.job }} since last %s ' % [namespace, app_name, alert.duration],
      summary: 'Job Manager JVM Memory Usage High',
    },
    expr: '( flink_jobmanager_Status_JVM_Memory_Heap_Used / flink_jobmanager_Status_JVM_Memory_Heap_Max ) * 100 > %s ' % [alert.threshold],
    'for': alert.duration,
  }),
  // JobManager JVM CPU load above `threshold` for `duration`.
  JobManagerCpuLoadHigh: function(alert) ({
    alert: 'JobManagerCpuLoadHigh',
    annotations: {
      description: 'Namespace: %s, AppName: %s; JVM CPU Load more than %s for flink job {{ $labels.job }} since last %s.' % [namespace, app_name,alert.threshold, alert.duration],
      summary: 'Job Manager CPU Load High',
    },
    expr: 'flink_jobmanager_Status_JVM_CPU_Load > %s' % alert.threshold,
    'for': alert.duration,
  }),
  // TaskManager JVM CPU load above `threshold` for `duration`.
  TaskManagerJvmCpuLoadHigh: function(alert) ({
    alert: 'TaskManagerJvmCpuLoadHigh',
    annotations: {
      description: 'Namespace: %s, AppName: %s; JVM CPU Load more than %s for flink taskmanager {{ $labels.tm_id }} for job {{ $labels.job }} since last %s.' % [namespace, app_name, alert.threshold, alert.duration],
      summary: 'Task Manager JVM CPU Load High',
    },
    expr: 'flink_taskmanager_Status_JVM_CPU_Load > %s' % alert.threshold,
    'for': alert.duration,
  }),
  // TaskManager heap usage above `threshold` percent for `duration`.
  TaskManagerJvmMemoryUsageHigh: function(alert) ({
    alert: 'TaskManagerJvmMemoryUsageHigh',
    annotations: {
      description: 'Namespace: %s, AppName: %s; JVM Memory usage more than 80 percent for TaskManager {{ $labels.tm_id }} for job {{ $labels.job }} since last %s.' % [namespace, app_name, alert.duration],
      summary: 'Task Manager JVM Memory Usage High',
    },
    expr: '(flink_taskmanager_Status_JVM_Memory_Heap_Used / flink_taskmanager_Status_JVM_Memory_Heap_Max) * 100 > %s' % alert.threshold,
    'for': alert.duration,
  }),
  // Any increase in failed checkpoints within the last `duration` window.
  JobManagerFailedCheckpointIncreased: function(alert) ({
    alert: 'JobManagerFailedCheckpointIncreased',
    annotations: {
      description: 'Namespace: %s, AppName: %s; Number of failed checkpoints increased in last %s for job {{ $labels.job }}' % [namespace, app_name, alert.duration],
      summary: 'Job Manager Failed Checkpoint Increased',
    },
    expr: 'increase(flink_jobmanager_job_numberOfFailedCheckpoints[%s]) > 0' % alert.duration,
  }),
  // A Flink task entered the failed state recently.
  FlinkTaskFailed: function(alert) ({
    alert: 'FlinkTaskFailed',
    annotations: {
      description: 'Namespace: %s, AppName: %s; The Flink job {{ $labels.job }} has tasks that failed.' % [namespace, app_name],
      summary: 'Flink Task Failed',
    },
    // FIX: Prometheus expands {{ $labels.* }} templates only in rule
    // annotations/labels, never inside `expr`. The previous selector
    // job="{{ $labels.job }}" was matched literally, selected no series,
    // and the alert could never fire. Rate over all failed-task series
    // instead; the offending job still reaches notifications via $labels.
    expr: 'rate(flink_taskmanager_job_task_failed[%s]) > 0' % alert.duration,
    'for': alert.duration,
  }),
};
// Final PrometheusRule object: a single rule group ('<app>-basic') combining
// the generic pod alerts with the Flink-specific alerts from this file.
{
  apiVersion: 'monitoring.coreos.com/v1',
  kind: 'PrometheusRule',
  metadata: {
    // prometheus/kube-prometheus + role labels make the Operator pick this up.
    labels: common.labels {
      prometheus: 'kube-prometheus',
      role: 'alert-rules',
    },
    name: app_name,
    namespace: namespace,
    annotations: common.annotations,
  },
  spec: {
    groups: [
      {
        name: '%s-basic' % [app_name],
        rules: (mapAlerts(podAlerts, alerts.pod) + mapAlerts(flinkAlerts, alerts.flink)),
      },
    ],
  },
}

View File

@@ -1,172 +0,0 @@
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local flink = deployment_manifest.flink;
local namespace_values = import 'namespace_values.jsonnet';
local flinkBucket = namespace_values.flinkBucket;
local flinkBucketBaseDir = 's3://' + flinkBucket + '/jobs/' + deployment_manifest.environment + '/' + deployment_manifest.name;
local util = import 'util.jsonnet';
local needsAWSAccess = if util.is_field_present(deployment_manifest.extraResources, 'aws_access')
&& util.is_field_present(deployment_manifest.extraResources.aws_access, 'policies')
&& std.length(deployment_manifest.extraResources.aws_access.policies) > 0 then true else false;
local roleName = chart.full_service_name(deployment_manifest.name) + '-' + deployment_manifest.environment;
local awsAccess = {
volumeName:: 'aws-iam-credentials',
volumeMountPath:: '/meta/aws-iam',
volume: if (needsAWSAccess && namespace_values.zalandoEnabled) then [
{ name: $.volumeName, secret: { secretName: roleName, defaultMode: 420 } },
] else [],
mount: if (needsAWSAccess && namespace_values.zalandoEnabled) then [
{ name: $.volumeName, mountPath: $.volumeMountPath },
] else [],
env: if (needsAWSAccess && namespace_values.zalandoEnabled) then [
{ name: 'AWS_DEFAULT_REGION', value: 'ap-south-1' },
{ name: 'AWS_SHARED_CREDENTIALS_FILE', value: $.volumeMountPath + '/credentials.process' },
{ name: 'AWS_CREDENTIAL_PROFILES_FILE', value: $.volumeMountPath + '/credentials' },
] else [],
};
local rocksDbSupport = {
name:: 'rocksdb-storage',
storageClassName:: 'gp2',
mountPath:: '/opt/flink/rocksdb',
accessModes:: ['ReadWriteOnce'],
volume: {
name: $.name,
ephemeral: {
volumeClaimTemplate: {
metadata: {
labels: common.labels,
},
spec: {
accessModes: $.accessModes,
storageClassName: $.storageClassName,
resources: {
requests: {
storage: flink.flinkDeployment.taskManager.volumeSize,
},
},
},
},
},
},
mount: {
name: $.name,
mountPath: $.mountPath,
},
};
{
mainContainerName:: 'flink-main-container',
image:: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/flink:1.17.2-s3-hadoop',
flinkVersion:: 'v1_17',
defaultStateBackendType:: 'filesystem',
isRocksDbSupportEnabled:: ('volumeSize' in flink.flinkDeployment.taskManager),
isCustomTaskManagerPodTemplateRequired:: ($.isRocksDbSupportEnabled),
stateBackendType:: (
if $.isRocksDbSupportEnabled then
'rocksdb'
else
$.defaultStateBackendType
),
apiVersion: 'flink.apache.org/v1beta1',
kind: 'FlinkDeployment',
metadata: {
name: deployment_manifest.name,
namespace: flink.namespace,
},
spec: {
image: $.image,
imagePullPolicy: 'IfNotPresent',
flinkVersion: $.flinkVersion,
restartNonce: flink.flinkDeployment.restartNonce,
flinkConfiguration: {
'taskmanager.numberOfTaskSlots': std.toString(flink.flinkDeployment.flinkConfiguration.taskManagerSlots),
'high-availability': 'KUBERNETES',
'high-availability.storageDir': flinkBucketBaseDir + '/recovery',
'state.backend.type': $.stateBackendType,
[if $.isRocksDbSupportEnabled then 'state.backend.rocksdb.localdir']: rocksDbSupport.mountPath,
'state.checkpoints.dir': flinkBucketBaseDir + '/checkpoints',
'state.savepoints.dir': flinkBucketBaseDir + '/savepoints',
'kubernetes.operator.periodic.savepoint.interval': flink.flinkDeployment.flinkConfiguration.savepointFrequency,
'kubernetes.operator.savepoint.history.max.count': '24',
'kubernetes.operator.pod-template.merge-arrays-by-name': 'true',
'restart-strategy': 'exponentialdelay',
'execution.checkpointing.interval': '30s',
'restart-strategy.exponential-delay.initial-backoff': '10s',
'restart-strategy.exponential-delay.max-backoff': '2min',
'restart-strategy.exponential-delay.backoff-multiplier': '2.0',
'restart-strategy.exponential-delay.reset-backoff-threshold': '10min',
'restart-strategy.exponential-delay.jitter-factor': '0.1',
'metrics.reporter.promgateway.jobName': deployment_manifest.name,
'metrics.reporter.promgateway.groupingKey': 'tag_team=' + deployment_manifest.team.name,
},
serviceAccount: roleName,
podTemplate: {
apiVersion: 'v1',
kind: 'Pod',
metadata: {
name: deployment_manifest.name,
labels: common.labels,
},
spec: {
containers: [
{
name: $.mainContainerName,
env: [
{
name: e.name,
valueFrom: {
secretKeyRef: {
name: chart.full_service_name(deployment_manifest.name) + '-secret',
key: e.name,
},
},
}
for e in deployment_manifest.environmentVariables
] +
// Adding md5 to make sure deployment is retrigerred if just values are changed
([{ name: 'secretMd5', value: std.md5(std.toString(deployment_manifest.environmentVariables)) }]) +
awsAccess.env,
volumeMounts: awsAccess.mount,
},
],
volumes: awsAccess.volume,
serviceAccountName: roleName,
},
},
jobManager: {
replicas: flink.flinkDeployment.jobManager.replicas,
resource: {
memory: flink.flinkDeployment.jobManager.resources.memory,
cpu: flink.flinkDeployment.jobManager.resources.cpu,
},
},
taskManager: {
[if $.isCustomTaskManagerPodTemplateRequired then 'podTemplate']: {
spec: {
securityContext: {
fsGroup: 9999,
},
containers: [
{
name: $.mainContainerName,
volumeMounts: [rocksDbSupport.mount],
},
],
volumes: [rocksDbSupport.volume],
},
},
replicas: flink.flinkDeployment.taskManager.replicas,
resource: {
memory: flink.flinkDeployment.taskManager.resources.memory,
cpu: flink.flinkDeployment.taskManager.resources.cpu,
},
},
},
}

View File

@@ -1,27 +0,0 @@
// flink_role_binding.jsonnet
// RoleBinding that grants the pre-existing 'flink' Role in the Flink
// namespace to this deployment's dedicated service account.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local namespace = deployment_manifest.flink.namespace;
// Service-account naming convention: <full-service-name>-<environment>.
local serviceAccountName = chart.full_service_name(deployment_manifest.name) + '-' + deployment_manifest.environment;
{
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: {
// Prefix with 'flink-' so each service account gets a uniquely named binding.
name: 'flink' + '-' + serviceAccountName,
namespace: namespace,
labels: common.labels,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Role',
// NOTE(review): assumes a Role named 'flink' already exists in this
// namespace — confirm it is provisioned elsewhere (not in this template set).
name: 'flink',
},
subjects: [
{
kind: 'ServiceAccount',
name: serviceAccountName,
namespace: namespace,
},
],
}

View File

@@ -1,27 +0,0 @@
// flink_service_account.jsonnet
// ServiceAccount for a Flink deployment. On non-Zalando clusters, IRSA
// annotations bind the account to an IAM role of the same name so pods can
// assume AWS credentials via the EKS OIDC provider.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local namespace = deployment_manifest.flink.namespace;
local namespace_values = import 'namespace_values.jsonnet';
local util = import 'util.jsonnet';
// Role/account naming convention: <full-service-name>-<environment>.
// (An unused `needsAWSAccess` local that duplicated aws_access policy checks
// was removed — it was computed but never referenced.)
local roleName = chart.full_service_name(deployment_manifest.name) + '-' + deployment_manifest.environment;
{
  apiVersion: 'v1',
  kind: 'ServiceAccount',
  metadata: {
    name: roleName,
    namespace: namespace,
    labels: common.labels,
    // IRSA annotations only apply on non-Zalando clusters; a null computed
    // field name makes jsonnet omit the field entirely.
    [if !namespace_values.zalandoEnabled then 'annotations' else null]: {
      'eks.amazonaws.com/role-arn': 'arn:aws:iam::' + namespace_values.awsAccountId + ':role/' + roleName,
      'eks.amazonaws.com/sts-regional-endpoints': 'true',
      'eks.amazonaws.com/token-expiration': '10800',
    },
  },
}

View File

@@ -1,24 +0,0 @@
// flink_session_job.jsonnet
// FlinkSessionJob that submits the job jar into the FlinkDeployment of the
// same name (spec.deploymentName matches the deployment manifest name).
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local flink = deployment_manifest.flink;
local namespace_values = import 'namespace_values.jsonnet';
{
apiVersion: 'flink.apache.org/v1beta1',
kind: 'FlinkSessionJob',
metadata: {
name: deployment_manifest.name,
namespace: flink.namespace,
},
spec: {
deploymentName: deployment_manifest.name,
job: {
// The jar location is injected at render time via the IMAGE external variable.
jarURI: std.extVar('IMAGE'),
parallelism: flink.flinkJob.parallelism,
allowNonRestoredState: true,
// Upgrades take a savepoint first so state can be restored after redeploys.
upgradeMode: 'savepoint',
// entryClass is only emitted when the manifest declares one; a null
// computed field name makes jsonnet drop the field.
[if 'entryClass' in flink.flinkJob then 'entryClass' else null]: flink.flinkJob.entryClass,
// NOTE(review): jobArguments is wrapped as a single-element array, i.e. it
// is passed as one argument string — confirm that is intended.
args: [flink.flinkJob.jobArguments],
},
},
}

View File

@@ -1,65 +0,0 @@
// Probe defaults and a generator that turns a health-check spec into a
// Kubernetes probe body (httpGet or tcpSocket form).
local deployment_manifest = import 'deployment_manifest.jsonnet';
local port_map = import 'port_map.jsonnet';
local exposedPorts = deployment_manifest.deployment.exposedPorts;
local isMicrometerPrometheusEnabled = deployment_manifest.labels['micrometer-prometheus'] == 'enabled';
local error_message = 'Metrics port not specified with micrometer-prometheus enabled';
// Defaults carry both tcp and http fields (path/httpHeaders); which subset is
// used depends on the `type` chosen by the generator below.
local defaultReadinessCheck = {
type: 'tcp',
port: 'serviceport',
path: '/actuator/health',
successThreshold: 1,
initialDelaySeconds: 60,
periodSeconds: 30,
failureThreshold: 5,
httpHeaders: [],
};
// When micrometer-prometheus is enabled, liveness switches to an HTTP check
// on the 'metrics' port (object merge overrides port/type).
local defaultLivenessCheck = {
type: 'tcp',
port: 'serviceport',
path: '/actuator/health',
successThreshold: 1,
initialDelaySeconds: 60,
periodSeconds: 30,
failureThreshold: 5,
httpHeaders: [],
} + if isMicrometerPrometheusEnabled then { port: 'metrics', type: 'http' } else {};
local defaultStartupProbe = {
successThreshold: 1,
initialDelaySeconds: 0,
periodSeconds: 10,
failureThreshold: 30,
httpHeaders: [],
};
{
// generator(healthCheck) exposes hidden fields `http` and `tcp`; the caller
// selects one based on healthCheck.type.
generator(healthCheck): {
http:: {
httpGet: {
port: port_map.getPort(healthCheck.port),
path: healthCheck.path,
httpHeaders: healthCheck.httpHeaders,
},
successThreshold: healthCheck.successThreshold,
initialDelaySeconds: healthCheck.initialDelaySeconds,
periodSeconds: healthCheck.periodSeconds,
failureThreshold: healthCheck.failureThreshold,
},
tcp:: {
tcpSocket: {
port: port_map.getPort(healthCheck.port),
},
successThreshold: healthCheck.successThreshold,
initialDelaySeconds: healthCheck.initialDelaySeconds,
periodSeconds: healthCheck.periodSeconds,
failureThreshold: healthCheck.failureThreshold,
},
},
getDefaultReadinessCheck:: defaultReadinessCheck,
getDefaultStartupProbe:: defaultStartupProbe,
// Fails rendering when micrometer-prometheus is enabled but no 'metrics'
// port is exposed — the liveness probe would target a missing port.
getDefaultLivenessCheck:: if (isMicrometerPrometheusEnabled && !port_map.hasPort(exposedPorts, 'metrics')) then error error_message else defaultLivenessCheck,
}

View File

@@ -1,70 +0,0 @@
// hpa.jsonnet
// HorizontalPodAutoscaler combining resource-utilization metrics and external
// Prometheus-query metrics (served via annotation-driven metric adapters).
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local hpa = deployment.hpa;
local vars = import 'vars.jsonnet';
local util = import 'util.jsonnet';
// Suffix each custom metric name with deployment name + environment so
// queries registered via annotations are unique across deployments.
local hpa_custom_metrics = [
item { name: super.name + '_' + deployment.name + '_' + deployment_manifest.environment }
for item in hpa.custom_metrics
];
local basename = chart.full_service_name(deployment.name);
local isHpaEnabled = hpa.type == vars.deployment.hpa.type.metrics;
// A disabled HPA is still rendered, under a '-disabled' name, so the old
// object can be garbage-collected / distinguished instead of silently kept.
local name = if isHpaEnabled then
basename
else
basename + '-disabled';
{
// NOTE(review): autoscaling/v2beta2 is removed in Kubernetes 1.26+; migrate
// to autoscaling/v2 when the target clusters support it.
apiVersion: 'autoscaling/v2beta2',
kind: 'HorizontalPodAutoscaler',
metadata: {
name: name,
labels: common.labels,
namespace: deployment_manifest.deployment.namespace,
// One 'metric-config...' annotation per custom metric carries its
// Prometheus query for the external metrics adapter.
annotations: common.annotations + {
[std.format('metric-config.external.prometheus-query.prometheus/%s', metric.name)]: metric.query
for metric in hpa_custom_metrics
},
},
spec: {
maxReplicas: hpa.maxReplicas,
// Kubernetes requires minReplicas >= 1; treat a configured 0 as 1.
minReplicas: if hpa.minReplicas == 0 then 1 else hpa.minReplicas,
metrics: [
{
resource: {
name: metric.name,
target: {
averageUtilization: metric.threshold,
type: 'Utilization',
},
},
type: 'Resource',
}
for metric in hpa.metrics
] + [
{
external: {
metric: {
name: 'prometheus-query',
selector: {
matchLabels: {
'query-name': metric.name,
},
},
},
target: {
type: 'Value',
value: metric.threshold,
},
},
type: 'External',
}
for metric in hpa_custom_metrics
],
scaleTargetRef: util.hpa_scale_target_ref(deployment.name, deployment.controller, !isHpaEnabled),
},
}

View File

@@ -1,189 +0,0 @@
// ingress.jsonnet
// Renders one Ingress object per (load balancer x exposed port) combination,
// supporting nginx, dedicated ALB, and shared-ALB-across-namespace types.
//Imports
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local load_balancer_util = import 'load_balancer_util.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
local port_map = import 'port_map.jsonnet';
local util = import 'util.jsonnet';
local flink = deployment_manifest.flink;
local sandbox = import './sandbox/main.jsonnet';
local isSandbox = util.is_sandbox(deployment_manifest.environment);
local vars = import 'vars.jsonnet';
local isflinkJob = std.objectHas(deployment_manifest, 'flink');
// Flink jobs define load balancers / namespace under `flink`, regular
// services under `deployment`.
local loadBalancers = if isflinkJob then deployment_manifest.flink.loadBalancers else deployment_manifest.deployment.loadBalancers;
local namespace = if isflinkJob then deployment_manifest.flink.namespace else deployment_manifest.deployment.namespace;
local exposedPorts = deployment_manifest.deployment.exposedPorts;
// Maps the manifest's subnet scheme to the ALB 'scheme' annotation value.
local albScheme = {
internetFacing: 'internet-facing',
internal: 'internal',
cdn: 'internet-facing',
internetFacingRestricted: 'internet-facing',
};
// In sandbox environments, tag the ALB with the source environment/namespace.
local albTags = common.awsTags + if isSandbox then {
Environment: deployment_manifest.sandboxParams.source.environment,
Namespace: deployment_manifest.sandboxParams.source.namespace,
} else {};
local nginxClass(environment, serviceName) = '%s-%s-nginx' % [environment, serviceName];
// Builds the annotation map for an Ingress, keyed by lbObject.type at the
// bottom (`}[lbObject.type]`). Each branch composes base annotations with
// scheme/group/redirect-specific additions.
local ingress_annotations(lbObject, clusterAnnotationValues, exposePortToLb=false, enableGrpc=false) = {
local subnetScheme = load_balancer_util.subnet_scheme(lbObject.accessPolicies),
local groupName = load_balancer_util.group_name(lbObject),
local ingressName = load_balancer_util.ingress_name(chart.full_service_name(deployment_manifest.name), lbObject),
local sslCerts = clusterAnnotationValues.sslCert,
// De-duplicated cert ARNs covering the main endpoint, redirect hostnames
// and any additional endpoints.
local certificateArns = std.join(',', std.set(
[sslCerts[util.get_certs(std.objectFieldsAll(sslCerts), lbObject.endpoint)]] +
[sslCerts[util.get_certs(std.objectFieldsAll(sslCerts), host.hostname)] for host in lbObject.redirects] +
[sslCerts[util.get_certs(std.objectFieldsAll(sslCerts), host)] for host in lbObject.additionalEndpoints]
)),
// One ALB 'actions.redirect-<i>' annotation per configured redirect.
local redirect_annotations(destinationHost) = { ['alb.ingress.kubernetes.io/actions.redirect-%s' % i]: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301", "Host": "%(destinationHost)s","Path": "%(destinationPath)s"}}' % (lbObject.redirects[i] { destinationHost: destinationHost }) for i in std.range(0, std.length(lbObject.redirects) - 1) },
nginxLb: namespace_values.loadBalancer.annotations {
'kubernetes.io/ingress.class': nginxClass(deployment_manifest.environment, deployment_manifest.name),
'nginx.ingress.kubernetes.io/rewrite-target': '/',
},
sharedAlbAcrossNamespace: common.annotations + namespace_values.loadBalancer.annotations {
local sortedPolicies = std.sort(lbObject.accessPolicies),
local sgs = [if accessPolicy in clusterAnnotationValues.securityGroups then clusterAnnotationValues.securityGroups[accessPolicy] for accessPolicy in sortedPolicies],
'kubernetes.io/ingress.class': 'alb',
'alb.ingress.kubernetes.io/target-type': 'ip',
'alb.ingress.kubernetes.io/listen-ports': load_balancer_util.listener_ports(lbObject),
'alb.ingress.kubernetes.io/certificate-arn': certificateArns,
'alb.ingress.kubernetes.io/actions.ssl-redirect': '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}',
}
+ (
if load_balancer_util.target_group_attribute_list(lbObject) != null then {
'alb.ingress.kubernetes.io/target-group-attributes': load_balancer_util.target_group_attribute_list(lbObject),
} else {}
)
+ (if subnetScheme == 'internetFacing' then {
'alb.ingress.kubernetes.io/ip-address-type': 'dualstack', // exclusive
} else {})
+ (if groupName == null then {} else {
'alb.ingress.kubernetes.io/group.name': '%s' % groupName,
'alb.ingress.kubernetes.io/group.order': lbObject.groupOrder,
})
+ redirect_annotations(lbObject.endpoint),
alb: common.annotations + namespace_values.loadBalancer.annotations {
'kubernetes.io/ingress.class': 'alb',
'alb.ingress.kubernetes.io/target-type': 'ip',
[if enableGrpc then 'alb.ingress.kubernetes.io/backend-protocol-version']: 'GRPC',
[if !lbObject['tls-1-1'] then 'alb.ingress.kubernetes.io/ssl-policy']: 'ELBSecurityPolicy-TLS-1-2-2017-01',
'alb.ingress.kubernetes.io/listen-ports': load_balancer_util.listener_ports(lbObject, exposePortToLb),
'alb.ingress.kubernetes.io/certificate-arn': certificateArns,
'alb.ingress.kubernetes.io/scheme': albScheme[subnetScheme],
// Internet-facing schemes additionally attach the shared 'http' SG.
'alb.ingress.kubernetes.io/security-groups': (load_balancer_util.security_group_list(lbObject.accessPolicies, clusterAnnotationValues.securityGroups, lbObject.extraSecurityGroups))
+ (if (subnetScheme == 'internetFacing' || subnetScheme == 'internetFacingRestricted') then (',' + clusterAnnotationValues.securityGroups.http) else ''),
'alb.ingress.kubernetes.io/load-balancer-attributes': load_balancer_util.load_balancer_attribute_list(lbObject, namespace_values.loadBalancer.annotations, deployment_manifest.name),
'alb.ingress.kubernetes.io/tags': 'Environment=%(Environment)s,Owner=%(Owner)s,Name=%(Name)s,Team=%(Team)s,Namespace=%(Namespace)s,Ingress=%(ingressName)s,Product=%(Product)s' % (albTags { ingressName: ingressName }),
[if !exposePortToLb then 'alb.ingress.kubernetes.io/actions.ssl-redirect']: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}',
} + redirect_annotations(lbObject.endpoint)
+ (
if load_balancer_util.target_group_attribute_list(lbObject) != null then {
'alb.ingress.kubernetes.io/target-group-attributes': load_balancer_util.target_group_attribute_list(lbObject),
} else {}
)
+ (if subnetScheme in (clusterAnnotationValues.subnets) then {
'alb.ingress.kubernetes.io/subnets': clusterAnnotationValues.subnets[subnetScheme],
} else {})
+ (if subnetScheme == 'internetFacing' then {
'alb.ingress.kubernetes.io/ip-address-type': 'dualstack',
[if lbObject.webAcl != 'false' then 'alb.ingress.kubernetes.io/wafv2-acl-arn']: lbObject.webAcl,
} else {})
+ (if subnetScheme == 'internetFacingRestricted' then {
'alb.ingress.kubernetes.io/ip-address-type': 'dualstack',
} else {})
+ (if subnetScheme == 'cdn' then {
'alb.ingress.kubernetes.io/ip-address-type': 'dualstack',
'external-dns.alpha.kubernetes.io/exclude': 'true',
} else {})
// Non-prod environments always share one ALB group per ingress+namespace.
+ (if deployment_manifest.environment != vars.environments.prod then {
'alb.ingress.kubernetes.io/group.name': std.join('-', [ingressName, namespace]),
'alb.ingress.kubernetes.io/group.order': lbObject.groupOrder,
} else {})
+ (if lbObject.groupName != '' then {
'alb.ingress.kubernetes.io/group.name': std.join('-', [ingressName, namespace]),
} else {}),
}[lbObject.type];
//Filter alb & sharedAlbAcrossNamespace type of loadbalancer configurations
local filteredLbs = std.filter(function(lbObject) std.find(lbObject.type, ['alb', 'sharedAlbAcrossNamespace', 'nginxLb']) != [], loadBalancers);
// Candidate port entries; null ports (not exposed) are dropped below.
local ports = [
{ port: port_map.getPort('serviceport'), exposeToLoadBalancer: false, portFieldKey: 'number', enableGrpc: port_map.isGrpcEnabled('serviceport') },
{ port: port_map.getPort('secondary-service-port'), exposeToLoadBalancer: true, portFieldKey: 'number', enableGrpc: port_map.isGrpcEnabled('secondary-service-port') },
{ port: port_map.getPort(chart.full_service_name(deployment_manifest.name) + '-rest'), exposeToLoadBalancer: false, portFieldKey: 'name' },
];
// Cartesian product of load balancers and available ports.
local loadbalancerWithAllPorts = [
lb + port
for lb in filteredLbs
for port in ports
if port.port != null
];
// this is to ensure only in case of new load balancers,( which will not have groupName as empty string ), exposed ingress is created
local isOldALB(lbObject) = lbObject.groupName == '';
local filteredLoadBalancerWithAllPorts = [
lbObject
for lbObject in loadbalancerWithAllPorts
if !isOldALB(lbObject) || (isOldALB(lbObject) && !lbObject.exposeToLoadBalancer)
];
std.map(
//Generate ingress objects based on above filtered configurations
function(lbIndex) {
// Hidden `config` gathers per-ingress derived values so the visible
// fields below stay declarative.
config:: {
lbObject: filteredLoadBalancerWithAllPorts[lbIndex],
subnetScheme: load_balancer_util.subnet_scheme($.config.lbObject.accessPolicies),
serviceName: if isflinkJob then (deployment_manifest.name + '-rest') else chart.full_service_name(deployment_manifest.name),
servicePort: $.config.lbObject.port,
exposePortToLoadBalancer: $.config.lbObject.exposeToLoadBalancer,
enableGrpc: $.config.lbObject.enableGrpc,
portFieldKey: $.config.lbObject.portFieldKey,
name: load_balancer_util.ingress_name(chart.full_service_name(deployment_manifest.name), $.config.lbObject, $.config.exposePortToLoadBalancer),
},
assert std.length($.config.name) <= 253 : 'Ingress name must be less than 253 characters. name: %s' % $.config.name,
apiVersion: 'networking.k8s.io/v1',
kind: 'Ingress',
metadata: {
name: $.config.name,
labels: common.labels,
annotations: ingress_annotations($.config.lbObject, namespace_values.loadBalancer.annotations, $.config.exposePortToLoadBalancer, $.config.enableGrpc) + if isSandbox then sandbox.sandbox($.config).albIngress.annotations else {},
namespace: namespace,
},
spec: {
rules: [
{
host: if $.config.lbObject.endpoint != null && $.config.lbObject.endpoint != '' then $.config.lbObject.endpoint else namespace_values.loadBalancer.fixedHostNames[deployment_manifest.name],
http: {
paths: (if $.config.exposePortToLoadBalancer then [] else load_balancer_util.http_redirect_config)
+ load_balancer_util.weighted_path_config($.config.serviceName)
+ (if isSandbox then sandbox.sandbox($.config).albIngress.host.paths else
(load_balancer_util.path_config($.config.serviceName, $.config.servicePort, $.config.portFieldKey))),
},
},
] + [
{
host: endpoint,
http: {
paths: if $.config.subnetScheme == 'internetFacing' ||
$.config.subnetScheme == 'internetFacingRestricted' then
load_balancer_util.http_redirect_config
else
load_balancer_util.create_sandbox_or_standard_paths($.config, isSandbox, sandbox),
},
}
for endpoint in $.config.lbObject.additionalEndpoints
] + [load_balancer_util.redirect_config($.config.lbObject.redirects[i], 'redirect-%s' % i) for i in std.range(0, std.length($.config.lbObject.redirects) - 1)],
},
},
std.range(0, std.length(filteredLoadBalancerWithAllPorts) - 1)
)

View File

@@ -1,60 +0,0 @@
// kibana.jsonnet
// ECK Kibana resource attached (via elasticsearchRef) to the deployment's
// Elasticsearch instance; rendered only when a kibana endpoint is configured.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
// Evaluates to null (no object emitted) when kibana is not configured.
if deployment_manifest.deployment.elasticSearch.kibana != null then {
apiVersion: 'kibana.k8s.elastic.co/v1',
kind: 'Kibana',
metadata: {
name: deployment.elasticSearch.instance.instanceName + '-kibana',
// Datastores live in a dedicated '<environment>-datastores' namespace.
namespace: deployment_manifest.environment + '-datastores',
labels: deployment.elasticSearch.kibanaLabels,
annotations: common.annotations,
},
spec: {
version: deployment.elasticSearch.kibanaVersion,
image: deployment.elasticSearch.kibanaImage,
config: {
'xpack.monitoring.enabled': true,
'xpack.monitoring.ui.enabled': true,
'xpack.monitoring.kibana.collection.enabled': true,
'server.publicBaseUrl': 'https://' + deployment.elasticSearch.kibana,
},
http: {
// TLS is terminated at the ALB (see kibana_ingress_endpoint), so the
// self-signed certificate ECK would otherwise generate is disabled.
tls: {
selfSignedCertificate: {
disabled: true,
},
},
},
count: 2,
elasticsearchRef: {
name: deployment.elasticSearch.instance.instanceName + '-elasticsearch',
},
podTemplate: {
metadata: {
labels: deployment.elasticSearch.kibanaLabels,
},
spec: {
// Kibana pods share the Elasticsearch node selector/tolerations.
nodeSelector: deployment.elasticSearch.esNodeSelector,
tolerations: deployment.elasticSearch.esTolerations,
containers: [
{
name: 'kibana',
resources: {
requests: {
memory: '1Gi',
cpu: 0.5,
},
limits: {
memory: '4Gi',
cpu: 2,
},
},
},
],
},
},
},
}

View File

@@ -1,83 +0,0 @@
// kibana_ingress_endpoint.jsonnet
// Internal ALB Ingress exposing the ECK-managed Kibana service; rendered only
// when a kibana endpoint is configured. HTTP is 301-redirected to HTTPS via
// the 'ssl-redirect' annotation action.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local namespace_values = import 'namespace_values.jsonnet';
local load_balancer_util = import 'load_balancer_util.jsonnet';
local util = import 'util.jsonnet';
local albTags = common.awsTags;
local ingressName = deployment.elasticSearch.instance.instanceName + '-kibana';
// All datastore UIs in an environment share one ALB group.
local groupName = '%s-datastores-services-alb' % deployment_manifest.environment;
local annotations = namespace_values.loadBalancer.annotations;
// Internal + office-IP security groups: reachable from inside the VPC/VPN only.
local securityGroups = std.join(',', [
annotations.securityGroups.internal,
annotations.securityGroups.officeIp,
]);
// Minimal lbObject stub with just the fields the load_balancer_util helpers read.
local lbObject = {
idleTimeout: 60,
accessPolicies: ['internal'],
accessLog: true,
};
local subnetScheme = load_balancer_util.subnet_scheme(lbObject.accessPolicies);
if deployment_manifest.deployment.elasticSearch.kibana != null then {
apiVersion: 'networking.k8s.io/v1',
kind: 'Ingress',
metadata: {
name: deployment.elasticSearch.instance.instanceName + '-kibana',
namespace: deployment_manifest.environment + '-datastores',
annotations: common.annotations {
'kubernetes.io/ingress.class': 'alb',
'alb.ingress.kubernetes.io/target-type': 'ip',
'alb.ingress.kubernetes.io/ssl-policy': 'ELBSecurityPolicy-TLS-1-2-2017-01',
'alb.ingress.kubernetes.io/listen-ports': load_balancer_util.listener_ports(lbObject),
'alb.ingress.kubernetes.io/certificate-arn': annotations.sslCert[util.get_certs(std.objectFieldsAll(annotations.sslCert), deployment_manifest.deployment.elasticSearch.kibana)],
'alb.ingress.kubernetes.io/scheme': 'internal',
'alb.ingress.kubernetes.io/security-groups': securityGroups,
'alb.ingress.kubernetes.io/load-balancer-attributes': load_balancer_util.load_balancer_attribute_list(lbObject, namespace_values.loadBalancer.annotations, groupName),
'alb.ingress.kubernetes.io/tags': 'Name=shared-alb-%(name)s,Ingress=shared-alb-%(name)s,Owner=shared,Team=Shared,Product=%(Product)s,Environment=%(Environment)s' % (albTags { name: groupName }),
'alb.ingress.kubernetes.io/actions.ssl-redirect': '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}',
'alb.ingress.kubernetes.io/group.name': '%s' % groupName,
}
+ (
if load_balancer_util.subnet_scheme(lbObject.accessPolicies) in (annotations.subnets) then {
'alb.ingress.kubernetes.io/subnets': annotations.subnets[subnetScheme],
} else {}
),
},
spec: {
rules: [
{
host: deployment_manifest.deployment.elasticSearch.kibana,
http: {
paths: [
// First path triggers the HTTP->HTTPS redirect action defined above.
{
backend: {
service: {
name: 'ssl-redirect',
port: {
name: 'use-annotation',
},
},
},
pathType: 'ImplementationSpecific',
path: '/*',
},
// Second path forwards to the ECK-created '<name>-kb-http' service.
{
pathType: 'ImplementationSpecific',
backend: {
service: {
name: deployment.elasticSearch.instance.instanceName + '-kibana' + '-kb-http',
port: {
number: 5601,
},
},
},
},
],
},
},
],
},
}

View File

@@ -1,142 +0,0 @@
// load_balancer_util.jsonnet
// Shared helpers for building ALB/nginx ingress annotations and path configs.
local deployment_manifest = import 'deployment_manifest.jsonnet';
local util = import 'util.jsonnet';
local vars = import 'vars.jsonnet';
local isSandbox = util.is_sandbox(deployment_manifest.environment);
// 'sharedAlbAcrossNamespace' is shortened in ingress names.
local alias(type) = if type == 'sharedalbacrossnamespace' then 'sharedalb' else type;
{
// Creates a comma separated list of security groups
// Note: the comprehension yields null for unknown policies; std.join skips
// null elements, so unknown access policies are silently dropped.
security_group_list(accessPolicies, securityGroups, extraSecurityGroups)::
local accessPolicySecurityGroups = [if accessPolicy in securityGroups then securityGroups[accessPolicy] for accessPolicy in accessPolicies];
local extraSGs = if std.objectHas(deployment_manifest.deployment, 'securityGroup') then std
.flattenArrays([if std.objectHas(sg, 'ids') then sg.ids for sg in deployment_manifest
.deployment.securityGroup]) else [];
std.join(',', accessPolicySecurityGroups + extraSGs),
// Determines kind of subnet(internal or internetFacing or cdn) to use based on access policy.
// Fails rendering unless exactly one scheme-bearing policy is present.
subnet_scheme(accessPolicies)::
local scheme = std.setInter(std.set(accessPolicies), ['internal', 'internetFacing', 'internetFacingRestricted', 'cdn']);
assert std.length(scheme) == 1 : 'ValidationError: accessPolicies can only contain one out of internal, internetFacing, interetFacingRestricted & cdn';
{ internal: 'internal', internetFacing: 'internetFacing', internetFacingRestricted: 'internetFacingRestricted', cdn: 'cdn' }[scheme[0]],
// Returns true if application is using aws application load balancer
is_using_lb(lbObjects, lbName)::
std.length(std.filter(function(lbObject) lbObject.type == lbName, lbObjects)) > 0,
// Returns group name for sharedAlbAcrossNamespace if any
group_name(lbObject)::
if lbObject.groupName != null && lbObject.groupName != '' then
lbObject.groupName
else null,
// Returns true if application is using aws target groups
is_using_tg(lbObjects)::
std.length(std.filter(function(lbObject) std.find(lbObject.type, ['alb', 'sharedAlbAcrossNamespace']) != [], lbObjects)) > 0,
// Builds '<service>-<type-alias>[-<lb-name>][-exposed]'.
ingress_name(full_service_name, lbObject, expose=false)::
local name = if lbObject.name != null && lbObject.name != '' then
full_service_name + '-' + alias(std.asciiLower(lbObject.type)) + '-' + std.asciiLower(lbObject.name)
else
full_service_name + '-' + alias(std.asciiLower(lbObject.type));
local finalName = if expose then name + '-exposed' else name;
finalName,
alb_ingress_name(full_service_name)::
self.ingress_name(full_service_name, { type: 'alb', name: null }),
// Comma-joined 'alb.ingress.kubernetes.io/load-balancer-attributes' value:
// idle timeout, optional deletion protection, optional S3 access logs.
load_balancer_attribute_list(lbObject, namespace_annotations, s3_key_prefix)::
local idleTimeout = 'idle_timeout.timeout_seconds=%s' % lbObject.idleTimeout;
local baseAttributes = if namespace_annotations.deletionProtection then idleTimeout + ',deletion_protection.enabled=true' else idleTimeout;
local accessLogAttributes = 'access_logs.s3.enabled=true,access_logs.s3.bucket=%s,access_logs.s3.prefix=%s' % [namespace_annotations.accessLogBucket, s3_key_prefix];
std.join(',', [
baseAttributes,
if lbObject.accessLog then accessLogAttributes,
],),
// Target-group attributes: slow-start and/or cookie stickiness, when enabled.
target_group_attribute_list(lbObject)::
local slowStartDurationAttribute = 'slow_start.duration_seconds=%s' % lbObject.slowStartDuration;
local sticknessAttribute = 'stickiness.enabled=true,stickiness.lb_cookie.duration_seconds=%s' % lbObject.stickinessCookieDuration;
local tg_annotation = [
if lbObject.slowStartDuration > 0 then slowStartDurationAttribute,
if lbObject.stickiness then sticknessAttribute,
];
std.join(',', std.prune(tg_annotation)),
//Determines listener-ports to be added to the load-balaner
// Note: subnetScheme is computed but unused here (likely a leftover).
listener_ports(lbObject, exposeToLoadBalancer=false)::
local subnetScheme = $.subnet_scheme(lbObject.accessPolicies);
if exposeToLoadBalancer then
if lbObject.type == 'alb' then '[{"HTTPS": %s}]' % lbObject.port
else error 'ValidationError: secondary port can only be used with alb. Please change the loadbalancer type'
else
'[{ "HTTPS": 443 },{"HTTP": 80}]',
//Returns path to be added to alb to enable HTTP to HTTPS redirection
http_redirect_config:: [{
path: '/*',
pathType: 'ImplementationSpecific',
backend: {
service: {
name: 'ssl-redirect',
port: {
name: 'use-annotation',
},
},
},
}],
// Host rule that routes a redirect hostname to an annotation-defined action.
// NOTE(review): parameter 'actionNaem' is a typo for 'actionName'; renaming
// could break named-argument callers, so it is only flagged here.
redirect_config(host, actionNaem):: {
host: host.hostname,
http: {
paths: [{
path: host.path,
pathType: 'ImplementationSpecific',
backend: {
service: {
name: actionNaem,
port: {
name: 'use-annotation',
},
},
},
}],
},
},
// Weighted-routing path used only by non-sandbox Rollout deployments with a
// non-default strategy; Flink jobs never get one.
weighted_path_config(serviceName):: if 'flink' in deployment_manifest then []
else (if (deployment_manifest.deployment.controller == vars.rolloutController && deployment_manifest.deployment.strategy != vars.defaultDeploymentStrategy && !isSandbox) then [{
path: '/*',
pathType: 'ImplementationSpecific',
backend: {
service: {
name: serviceName,
port: {
name: 'use-annotation',
},
},
},
}] else []),
// Standard path entry; portFieldKey selects 'number' vs 'name' port form.
path_config(serviceName, servicePort, portFieldKey='number')::
[
{
pathType: 'ImplementationSpecific',
backend: {
service: {
name: serviceName,
port: {
[portFieldKey]: servicePort,
},
},
},
},
],
create_sandbox_or_standard_paths(config, isSandboxEnabled=false, sandbox={}):: (
if isSandboxEnabled then
sandbox.sandbox(config).albIngress.host.paths
else
$.path_config(config.serviceName, config.servicePort)
),
}

View File

@@ -1,97 +0,0 @@
// main.jsonnet
// Entry point: maps output file names to the rendered manifest of each
// Kubernetes resource. Flink jobs get a reduced resource set; regular
// deployments get the full set plus optional sandbox / ingress /
// security-group / API-gateway extras.
// (Duplicate import lines from the original were removed; jsonnet allowed
// them only because successive `local`s shadow each other.)
local common_api_gateways = import 'common_api_gateway.jsonnet';
local configmap = import 'configmap.jsonnet';
local cron_hpa_autoscaler = import 'cron_hpa_autoscaler.jsonnet';
local default_alerts = import 'default_alerts.jsonnet';
local deployment = import 'deployment.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local dynamic_configuration = import 'dynamic_configuration.jsonnet';
local efs_pvc = import 'efs_persistent_volume_claim.jsonnet';
local elastic_search = import 'elastic_search.jsonnet';
local elastic_search_secrets = import 'elasticsearch_secrets.jsonnet';
local elasticsearch_alerts_default = import 'elasticsearch_alerts_default.jsonnet';
local elasticsearch_servicemonitor = import 'elasticsearch_servicemonitor.jsonnet';
local elasticsearch_sm_secrets = import 'elasticsearch_sm_secrets.jsonnet';
local elasticsearch_snapshots = import 'elasticsearch_snapshots.jsonnet';
local flink_default_alerts = import 'flink_default_alerts.jsonnet';
local flink_deployment = import 'flink_deployment.jsonnet';
local flink_role_binding = import 'flink_role_binding.jsonnet';
local flink_service_account = import 'flink_service_account.jsonnet';
local flink_session_job = import 'flink_session_job.jsonnet';
local hpa = import 'hpa.jsonnet';
local ingresses = import 'ingress.jsonnet';
local kibana = import 'kibana.jsonnet';
local kibana_ingress_endpoint = import 'kibana_ingress_endpoint.jsonnet';
local pdb = import 'pdb.jsonnet';
local perf_utility = import 'perf_utility.jsonnet';
local rollout = import 'rollout.jsonnet';
local rollout_analysis_template = import 'rollout_analysis_template.jsonnet';
local sandbox = import 'sandbox/main.jsonnet';
local secret = import 'secret.jsonnet';
local security_group = import 'security_group.jsonnet';
local service = import 'service.jsonnet';
local service_monitor = import 'service_monitor.jsonnet';
local sidecar = import 'sidecar.jsonnet';
local util = import 'util.jsonnet';
local vpa = import 'vpa.jsonnet';

local isSandbox = util.is_sandbox(deployment_manifest.environment);
local isflinkJob = std.objectHas(deployment_manifest, 'flink');

// Keys are numbered so the generated files apply in a deterministic order.
if isflinkJob then
  ({
     '0_secret.json': secret,
     '0_0_flink_deployment.json': flink_deployment,
     '0_1_flink_session_job.json': flink_session_job,
     '0_2_flink_service_account.json': flink_service_account,
     '0_3_flink_role_binding.json': flink_role_binding,
     '0_4_flink_default_alerts.json': flink_default_alerts,
   } + { ['5_%s_ingress.json' % index]: ingresses[index] for index in std.range(0, std.length(ingresses) - 1) })
else
  ({
     '0_secret.json': secret,
     '1_configmap.json': configmap,
     '2_sidecar.json': sidecar,
     '3_service.json': service,
     '4_deployment.json': deployment,
     '4_rollout.json': rollout,
     '4_0_rollout_analysis_template.json': rollout_analysis_template,
     '6_pdb.json': pdb,
     '7_service_monitor.json': service_monitor,
     '8_default_alerts.json': default_alerts,
     '9_hpa.json': hpa,
     '11_cron_hpa_autoscaler.json': cron_hpa_autoscaler,
     '12_elastic_search_secrets.json': elastic_search_secrets,
     '13_elastic_search.json': elastic_search,
     '14_kibana.json': kibana,
     '15_kibana_ingress_endpoint.json': kibana_ingress_endpoint,
     '16_elasticsearch_sm_secrets.json': elasticsearch_sm_secrets,
     '17_elasticsearch_servicemonitor.json': elasticsearch_servicemonitor,
     '18_elasticsearch_alerts_default.json': elasticsearch_alerts_default,
     '19_elasticsearch_snapshots.json': elasticsearch_snapshots,
     '20_dynamic_configuration.json': dynamic_configuration,
     '21_perf_utility.json': perf_utility,
     '22_vpa.json': vpa,
     '23_efs_pvc.json': efs_pvc,
   })
  +
  (if isSandbox then {
     '0_0_namespace.json': sandbox.sandbox().namespace,
     '0_1_iam_role.json': sandbox.sandbox().iamRole,
     '30_role_binding.json': sandbox.sandbox().roleBinding,
     '31_access_role_binding.json': sandbox.sandbox().accessRoleBinding,
     '32_access_role.json': sandbox.sandbox().accessRole,
   } else {})
  +
  (if ingresses != null then
     { ['5_%s_ingress.json' % index]: ingresses[index] for index in std.range(0, std.length(ingresses) - 1) }
     +
     (if security_group != null then
        { ['10_%s_security_group.json' % index]: security_group[index] for index in std.range(0, std.length(security_group) - 1) }
      else {})
   // BUGFIX: original had no else here — a null `ingresses` would make this
   // branch null and `object + null` is a jsonnet runtime error.
   else {})
  +
  (if common_api_gateways != null then
     { ['23_%s_common_api_gateways.json' % index]: common_api_gateways[0].items[index] for index in std.range(0, std.length(common_api_gateways[0].items) - 1) }
   // BUGFIX: same latent '+ null' error as above.
   else {})

View File

@@ -1,17 +0,0 @@
// Small predicate helpers shared by the alerting templates.
// (Redundant `if X then true else false` wrappers from the original were
// collapsed to the boolean expression itself — behavior is identical.)
{
  // True when the deployment declares an alert with the given name.
  is_alert_defined(deployment, alertName)::
    'alerts' in deployment && alertName in deployment.alerts,

  // True when extraResources.database exists and carries a non-empty instanceName.
  is_database_present(deploymentManifest)::
    if 'extraResources' in deploymentManifest && deploymentManifest.extraResources != null then
      if 'database' in deploymentManifest.extraResources then
        local database = deploymentManifest.extraResources.database;
        'instanceName' in database && database.instanceName != ''
      else false
    else false,

  // True when dynamicConfiguration is present, non-null and non-empty.
  is_dynamic_config_present(deploymentManifest)::
    'dynamicConfiguration' in deploymentManifest && deploymentManifest.dynamicConfiguration != null && deploymentManifest.dynamicConfiguration != [],

  // True when the alert overrides the default Slack channel.
  is_custom_slack_channel_enabled(alert)::
    'slackChannel' in alert && alert.slackChannel != null && alert.slackChannel != '',
}

View File

@@ -1,19 +0,0 @@
// Resolves the per-namespace configuration block for the cluster named in
// the deployment manifest. The file evaluates to the selected values object.
local cluster_values = import 'cluster_values.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
// Fail fast with a readable message on an unknown cluster name.
assert deployment_manifest.cluster in cluster_values :
       'ValidationError: Unrecognized cluster - %s' % deployment_manifest.cluster;
local cluster_config = cluster_values[deployment_manifest.cluster];
// Flink jobs keep their namespace under `flink`; regular services under `deployment`.
local manifest_namespace = if 'flink' in deployment_manifest then deployment_manifest.flink.namespace else deployment_manifest.deployment.namespace;
// Use default namespace values for a cluster if specific namespace values not present
local namespace_values =
  if manifest_namespace in cluster_config
  then
    cluster_config[manifest_namespace]
  else
    cluster_config.default;
namespace_values

View File

@@ -1,20 +0,0 @@
// PodDisruptionBudget: emitted only for multi-replica deployments, where it
// caps voluntary disruption at 15% of pods. Single-replica services evaluate
// to null (no budget makes sense for one pod).
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';

local deployment = deployment_manifest.deployment;

if deployment.instance.count > 1 then {
  apiVersion: 'policy/v1beta1',
  kind: 'PodDisruptionBudget',
  metadata: {
    annotations: common.annotations,
    labels: common.labels,
    name: chart.full_service_name(deployment.name) + '-pdb',
    namespace: deployment.namespace,
  },
  spec: {
    // Never take down more than 15% of replicas during voluntary disruptions.
    maxUnavailable: '15%',
    selector: { matchLabels: common.matchLabels },
  },
}

View File

@@ -1,289 +0,0 @@
// Perf-environment utility workloads rendered as a single v1 List:
//   * an optional HTTP mock server (Deployment + ClusterIP Service + internal
//     ALB Ingress), and
//   * an optional Postgres server (Secret + PVC + Deployment + LoadBalancer
//     Service).
// Evaluates to null outside the perf environment or when neither utility is
// enabled. Fix: removed the unused local `ingressName` (only the *field key*
// `ingressName` in the postgres service tags was ever used, not the local).
local chart = import 'chart.jsonnet';
local cluster_values = import 'cluster_values.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local namespace_values = import 'namespace_values.jsonnet';
local util = import 'util.jsonnet';
local load_balancer_util = import 'load_balancer_util.jsonnet';
local albTags = common.awsTags;
// Derived service / DNS names.
local postgresServiceName = deployment.name + '-postgres-server';
local mockEndpointName = deployment.name + '-perf-mock';
local postgresEndpointName = deployment.name + '-perf-postgres';
local domainEndpoint = cluster_values.perfDomainEndpoint[deployment_manifest.infraVertical];
// ALB settings for the mock-server ingress (internal-only, access logs on).
local lbObject = {
  idleTimeout: 60,
  accessPolicies: ['internal'],
  accessLog: true,
};
local subnetScheme = load_balancer_util.subnet_scheme(lbObject.accessPolicies);
local annotations = namespace_values.loadBalancer.annotations;
local groupName = 'perf-internal';
// Internal traffic plus office IPs may reach the shared perf ALB.
local securityGroups = std.join(',', [
  annotations.securityGroups.internal,
  annotations.securityGroups.officeIp,
]);
if deployment_manifest.environment == 'perf' && (deployment_manifest.deployment.perfUtility.mockServerEnabled || deployment_manifest.deployment.perfUtility.postgresServerEnabled) == true then {
  apiVersion: 'v1',
  kind: 'List',
  items:
    // ---- Mock server: Deployment + Service + Ingress ----
    (if deployment_manifest.deployment.perfUtility.mockServerEnabled then [
       {
         apiVersion: 'apps/v1',
         kind: 'Deployment',
         metadata: {
           name: deployment.name + '-mock-server',
           labels: common.perfMockServerLabels,
           namespace: deployment_manifest.deployment.namespace,
           annotations: common.annotations,
         },
         spec: {
           progressDeadlineSeconds: 1500,
           selector: {
             matchLabels: {
               app: chart.service_name,
               release: deployment.name + '-mock-server',
             },
           },
           template: {
             metadata: {
               labels: common.perfMockServerLabels,
             },
             spec: {
               containers: [{
                 name: 'mock-server',
                 image: deployment_manifest.deployment.perfUtility.mockServerImage,
                 imagePullPolicy: 'IfNotPresent',
                 // Fixed-size utility pod: requests == limits.
                 resources: {
                   requests: {
                     memory: '2Gi',
                     cpu: '1',
                   },
                   limits: {
                     memory: '2Gi',
                     cpu: '1',
                   },
                 },
               }],
             },
           },
         },
       },
       {
         apiVersion: 'v1',
         kind: 'Service',
         metadata: {
           name: deployment.name + '-mock-server',
           labels: common.perfMockServerLabels,
           namespace: deployment_manifest.deployment.namespace,
         },
         spec: {
           selector: {
             app: chart.service_name,
             release: deployment.name + '-mock-server',
           },
           type: 'ClusterIP',
           ports: [
             {
               name: 'service-port',
               port: 1080,
               protocol: 'TCP',
               targetPort: 1080,
             },
           ],
         },
       },
       {
         apiVersion: 'networking.k8s.io/v1',
         kind: 'Ingress',
         metadata: {
           name: deployment.name + '-mock-server',
           annotations: {
             'kubernetes.io/ingress.class': 'alb',
             'alb.ingress.kubernetes.io/target-type': 'ip',
             'alb.ingress.kubernetes.io/ssl-policy': 'ELBSecurityPolicy-TLS-1-2-2017-01',
             'alb.ingress.kubernetes.io/listen-ports': load_balancer_util.listener_ports(lbObject),
             // Pick the certificate whose domain matches the mock endpoint.
             'alb.ingress.kubernetes.io/certificate-arn': annotations.sslCert[util.get_certs(std.objectFieldsAll(annotations.sslCert), mockEndpointName + domainEndpoint)],
             'alb.ingress.kubernetes.io/scheme': 'internal',
             'alb.ingress.kubernetes.io/security-groups': securityGroups,
             'alb.ingress.kubernetes.io/load-balancer-attributes': load_balancer_util.load_balancer_attribute_list(lbObject, namespace_values.loadBalancer.annotations, groupName),
             'alb.ingress.kubernetes.io/tags': 'Name=shared-alb-%(name)s,Ingress=shared-alb-%(name)s,Owner=shared,Team=Shared,Product=%(Product)s,Environment=%(Environment)s' % (albTags { name: groupName }),
             'alb.ingress.kubernetes.io/actions.ssl-redirect': '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}',
             'alb.ingress.kubernetes.io/group.name': '%s' % groupName,
           }
           + (
             // Pin subnets only when the scheme has an explicit subnet list.
             if load_balancer_util.subnet_scheme(lbObject.accessPolicies) in (annotations.subnets) then {
               'alb.ingress.kubernetes.io/subnets': annotations.subnets[subnetScheme],
             } else {}
           ),
           namespace: deployment_manifest.deployment.namespace,
         },
         spec: {
           rules: [
             {
               host: mockEndpointName + domainEndpoint,
               http: {
                 paths: [
                   // HTTP -> HTTPS redirect action declared via annotation.
                   {
                     pathType: 'ImplementationSpecific',
                     backend: {
                       service: {
                         name: 'ssl-redirect',
                         port: {
                           name: 'use-annotation',
                         },
                       },
                     },
                     path: '/*',
                   },
                   // Default backend: the mock server service.
                   {
                     pathType: 'ImplementationSpecific',
                     backend: {
                       service: {
                         name: deployment.name + '-mock-server',
                         port: {
                           number: 1080,
                         },
                       },
                     },
                   },
                 ],
               },
             },
           ],
         },
       },
     ] else []) +
    // ---- Postgres server: Secret + PVC + Deployment + Service ----
    (if deployment_manifest.deployment.perfUtility.postgresServerEnabled then [
       {
         apiVersion: 'v1',
         kind: 'Secret',
         metadata: {
           name: deployment.name + '-postgres-secret',
           labels: common.perfPostgresServerLabels,
           namespace: deployment_manifest.deployment.namespace,
         },
         type: 'Opaque',
         // Shared perf-only password, pre-encoded in base64.
         data: { password: 'cG9zdGdyZXNwZXJmcGFzc3dvcmQK' },
       },
       {
         apiVersion: 'v1',
         kind: 'PersistentVolumeClaim',
         metadata: {
           name: deployment.name + '-postgres-storage',
           labels: common.perfPostgresServerLabels,
           namespace: deployment_manifest.deployment.namespace,
         },
         spec: {
           accessModes: ['ReadWriteOnce'],
           resources: {
             requests: {
               storage: deployment_manifest.deployment.perfUtility.postgresDbConfig.storage,
             },
           },
           storageClassName: 'gp2',
         },
       },
       {
         apiVersion: 'apps/v1',
         kind: 'Deployment',
         metadata: {
           name: deployment.name + '-postgres-server',
           labels: common.perfPostgresServerLabels,
           namespace: deployment_manifest.deployment.namespace,
         },
         spec: {
           progressDeadlineSeconds: 1500,
           selector: {
             matchLabels: {
               app: chart.service_name,
               release: deployment.name + '-postgres-server',
             },
           },
           template: {
             metadata: {
               labels: common.perfPostgresServerLabels,
             },
             spec: {
               containers: [{
                 name: 'postgres',
                 image: deployment_manifest.deployment.perfUtility.postgresServerImage,
                 imagePullPolicy: 'IfNotPresent',
                 env: [
                   {
                     name: 'POSTGRES_PASSWORD',
                     valueFrom: {
                       secretKeyRef: {
                         name: deployment.name + '-postgres-secret',
                         key: 'password',
                       },
                     },
                   },
                   // Keep data in a sub-directory so the PVC mount point
                   // itself does not need to be empty.
                   {
                     name: 'PGDATA',
                     value: '/var/lib/postgresql/data/pgdata',
                   },
                 ],
                 resources: {
                   requests: {
                     memory: deployment_manifest.deployment.perfUtility.postgresDbConfig.memory,
                     cpu: deployment_manifest.deployment.perfUtility.postgresDbConfig.cpu,
                   },
                   limits: {
                     memory: deployment_manifest.deployment.perfUtility.postgresDbConfig.memory,
                     cpu: deployment_manifest.deployment.perfUtility.postgresDbConfig.cpu,
                   },
                 },
                 volumeMounts: [
                   {
                     name: deployment.name + '-postgres-storage',
                     mountPath: '/var/lib/postgresql/data',
                   },
                 ],
               }],
               volumes: [
                 {
                   name: deployment.name + '-postgres-storage',
                   persistentVolumeClaim: {
                     claimName: deployment.name + '-postgres-storage',
                   },
                 },
               ],
             },
           },
         },
       },
       {
         apiVersion: 'v1',
         kind: 'Service',
         metadata: {
           name: deployment.name + '-postgres-server',
           labels: common.perfPostgresServerLabels,
           annotations: {
             // external-dns publishes the internal NLB hostname.
             'external-dns.alpha.kubernetes.io/hostname': postgresEndpointName + domainEndpoint,
             'service.beta.kubernetes.io/aws-load-balancer-internal': '0.0.0.0/0',
             'service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags': 'Environment=%(Environment)s,Owner=%(Owner)s,Name=%(Name)s,Team=%(Team)s,Product=%(Product)s' % (albTags { ingressName: postgresServiceName }),
           },
           namespace: deployment_manifest.deployment.namespace,
         },
         spec: {
           selector: {
             app: chart.service_name,
             release: deployment.name + '-postgres-server',
           },
           type: 'LoadBalancer',
           ports: [
             {
               name: 'service-port',
               port: 5432,
               protocol: 'TCP',
               targetPort: 5432,
             },
           ],
         },
       },
     ] else []),
}

View File

@@ -1,363 +0,0 @@
// Pod template shared by the Deployment and Rollout manifests. Assembles the
// main application container plus optional sidecars/init-containers
// (heap-dump uploader, SkyWalking agent), volumes, probes and scheduling
// constraints from the deployment manifest.
// Fix: removed the duplicate `local vars = import 'vars.jsonnet';` binding
// (it was imported twice, shadowing the earlier identical import).
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment_util = import 'deployment_util.jsonnet';
local health_check_values = import 'health_check_values.jsonnet';
local port_map = import 'port_map.jsonnet';
local util = import 'util.jsonnet';
local vars = import 'vars.jsonnet';
local environments = vars.environments;
local deployment = deployment_manifest.deployment;
local environment = deployment_manifest.environment;
local readinessCheck = deployment.healthChecks.readinessCheck;
local livenessCheck = deployment.healthChecks.livenessCheck;
local startupProbe = deployment.healthChecks.startupProbe;
local exposedPorts = deployment_manifest.deployment.exposedPorts;
local manifest_util = import 'manifest_util.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
local image = util.get_image(deployment.image, environment);
assert image != 'null' : '[IMAGE or deployment.image] cannot be null';
local isSandbox = util.is_sandbox(environment);
local sandbox = import 'sandbox/main.jsonnet';
local sandboxConfig = sandbox.sandbox();
// Conditions to check if heap-dump sidecar has to be enabled or not
local isEfsNeeded = deployment_util.isEfsNeeded(deployment);
local isFsxNeeded = deployment_util.isFsxNeeded(deployment);
local mandatoryHeapDumpString = '-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/dumps';
local errorLogFileString = '-XX:ErrorFile=/dumps/hs_err_pid%p.log';
local envKeys = [e.name for e in deployment_manifest.environmentVariables];
// Number of env vars named JVM_OPTS (std.find returns the index list).
local jvmOptionsExists = std.length(std.find('JVM_OPTS', envKeys));
local isSwApmEnabled = deployment_manifest.isSwApmEnabled;
// NOTE(review): indexing [0] would fail when JVM_OPTS is absent, but jsonnet
// locals are lazy and `heapDumpEnabled` only forces this after checking
// `jvmOptionsExists > 0` — do not reorder that guard.
local jvmParameter = [e for e in deployment_manifest.environmentVariables if std.toString(e.name) == 'JVM_OPTS'][0];
// Sidecar is enabled when JVM_OPTS requests heap dumps or an error-log file
// under /dumps (never in sandboxes).
local heapDumpEnabled = if !isSandbox && (jvmOptionsExists > 0 && (std.length(std.findSubstr(mandatoryHeapDumpString, std.toString(jvmParameter.value))) > 0
                                                                  || std.length(std.findSubstr(errorLogFileString, std.toString(jvmParameter.value))) > 0)) then true else false;
//# Sandbox
// GPU
local isGPUEnabled = if deployment.instance.gpu == 0 then false else true;
// Required to form S3 bucket name for heap-dumps
local bucketEnvironment = if deployment_manifest.environment == environments.prod then environments.prod else 'nonprod';
local bucketName = 'java-heap-dumps-' + deployment_manifest.infraVertical + '-' + bucketEnvironment;
local hasEnvironmentFile = if 'environmentFile' in deployment then true else false;
local needsAWSAccess = if util.is_field_present(deployment_manifest.extraResources, 'aws_access')
                          && util.is_field_present(deployment_manifest.extraResources.aws_access, 'policies')
                          && std.length(deployment_manifest.extraResources.aws_access.policies) > 0 then true else false;
// Lazy: only forced when needsAWSAccess is true, so the aws_access
// dereference is safe even when extraResources is absent.
local roleName = (if ('roleName' in deployment_manifest.extraResources.aws_access && deployment_manifest.extraResources.aws_access.roleName != '') then deployment_manifest.extraResources.aws_access.roleName else chart.full_service_name(deployment.name)) + '-' + deployment_manifest.environment;
// Either disable istio injection entirely, or exclude the app's own inbound
// ports from the sidecar's iptables capture.
local istioInboundPortsAnnotation = if deployment.disableIstio then
  { 'sidecar.istio.io/inject': 'false' }
else
  {
    'traffic.sidecar.istio.io/excludeInboundPorts': std.join(',', std.map(function(exposedPort) std.toString(exposedPort.port), exposedPorts)),
    'traffic.sidecar.istio.io/includeInboundPorts': '*',
  };
// Init container that copies the SkyWalking java agent (plus the kotlin
// coroutine plugin) into a shared emptyDir; null when APM is off.
local injectSwAgent(isSwApmEnabled) = (
  if isSwApmEnabled then [
    {
      name: 'agent-container',
      image: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/skywalking-java-agent:8.7.0-alpine',
      volumeMounts: [
        {
          name: 'skywalking-agent',
          mountPath: '/agent',
        },
      ],
      command: [
        '/bin/sh',
      ],
      args: [
        '-c',
        'cp -R /skywalking/agent /agent/ && cp /skywalking/agent/optional-plugins/apm-kotlin-coroutine-plugin-8.7.0.jar /agent/agent/plugins',
      ],
      securityContext: {
        runAsUser: 4000,
      },
    },
  ]
  else null
);
// Spread replicas evenly across availability zones (prod only, below).
local topologicalSpreadConstraints = [
  {
    maxSkew: 1,
    topologyKey: 'topology.kubernetes.io/zone',
    whenUnsatisfiable: 'DoNotSchedule',
    labelSelector: {
      matchLabels: common.matchLabels,
    },
  },
];
{
  metadata: {
    labels: common.labels,
    annotations: common.annotations + istioInboundPortsAnnotation,
  },
  spec: {
    [if isSandbox then 'securityContext']: sandboxConfig.securityContext,
    initContainers: injectSwAgent(isSwApmEnabled),
    [if deployment_manifest.environment == environments.prod then 'topologySpreadConstraints' else null]: topologicalSpreadConstraints,
    [if isGPUEnabled then 'nodeSelector']: deployment.instance.gpuNodeSelector,
    [if isGPUEnabled then 'tolerations']: deployment.instance.gpuTolerations,
    containers:
      // Sidecar that ships heap dumps from /dumps to S3.
      (if heapDumpEnabled then [{
         name: 'push-heap-dump',
         image: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/java-heap-dump-manager:v7d6dad2b5a2431412b8183c9707f93b5dcb05287',
         resources: {
           limits: {
             memory: '128Mi',
             cpu: '100m',
           },
           requests: {
             memory: '128Mi',
             cpu: '100m',
           },
         },
         env: [
           {
             name: 'AWS_DEFAULT_REGION',
             value: 'ap-south-1',
           },
           {
             name: 'AWS_SHARED_CREDENTIALS_FILE',
             value: '/meta/aws-iam/credentials.process',
           },
           {
             name: 'AWS_CREDENTIAL_PROFILES_FILE',
             value: '/meta/aws-iam/credentials',
           },
           {
             name: 'SERVICE_NAME',
             value: chart.full_service_name(deployment.name),
           },
           {
             name: 'S3_BUCKET',
             value: if 'heapDumpBucket' in namespace_values then namespace_values.heapDumpBucket else bucketName,
           },
           {
             name: 'ENVIRONMENT',
             value: deployment_manifest.environment,
           },
         ],
         volumeMounts: [
           {
             name: 'aws-iam-credentials-heap-dump',
             mountPath: '/meta/aws-iam',
             readOnly: true,
           },
           {
             name: 'heap-dumps',
             mountPath: '/dumps',
           },
         ],
       }] else []) +
      // Main application container.
      [
        {
          env: [
            // All manifest env vars are delivered via the generated Secret.
            {
              name: e.name,
              valueFrom: {
                secretKeyRef: {
                  name: chart.full_service_name(deployment.name) + '-secret',
                  key: e.name,
                },
              },
            }
            for e in deployment_manifest.environmentVariables
          ] + (if needsAWSAccess && namespace_values.zalandoEnabled then [
                 {
                   name: 'AWS_SHARED_CREDENTIALS_FILE',
                   value: '/meta/aws-iam/credentials.process',
                 },
                 {
                   name: 'AWS_CREDENTIAL_PROFILES_FILE',
                   value: '/meta/aws-iam/credentials',
                 },
               ] else [])
               // Adding md5 to make sure deployment is retrigerred if just values are changed
               + [{ name: 'secretMd5', value: std.md5(std.toString(deployment_manifest.environmentVariables)) }]
               + (if 'environmentFile' in deployment then
                    [{ name: 'environmentFileMd5', value: std.md5(std.toString(deployment.environmentFile)) }]
                  else [])
               + (
                 if isSwApmEnabled then
                   [
                     {
                       name: 'JAVA_TOOL_OPTIONS',
                       value: '-javaagent:/skywalking/agent/skywalking-agent.jar',
                     },
                     {
                       name: 'SW_AGENT_COLLECTOR_BACKEND_SERVICES',
                       value: vars.swBackend + ':' + vars.swPort,
                     },
                     {
                       name: 'SW_AGENT_NAMESPACE',
                       value: deployment_manifest.deployment.namespace,
                     },
                     {
                       name: 'SW_AGENT_NAME',
                       value: deployment.name,
                     },
                     {
                       name: 'SW_LOGGING_OUTPUT',
                       value: 'CONSOLE',
                     },
                     {
                       name: 'ELASTIC_APM_ENABLED',
                       value: 'false',
                     },
                     {
                       name: 'ELASTIC_APM_ACTIVE',
                       value: 'false',
                     },
                   ] else []
               ),
          image: image,  //Directly passed to jssonnet via --ext-str command
          imagePullPolicy: deployment.imagePullPolicy,
          lifecycle: {
            preStop: {
              // Sleep a fraction of the grace period so in-flight requests
              // drain before SIGTERM takes effect (80% in prod, 50% elsewhere).
              exec: {
                command: ['sleep', if deployment_manifest.environment == 'prod' then std.toString(0.8 * $.spec.terminationGracePeriodSeconds) else std.toString(0.5 * $.spec.terminationGracePeriodSeconds)],
              },
            },
          },
          resources: {
            // VPA-managed pods start from the minimum sizes; the CPU limit
            // headroom factor differs between prod and non-prod.
            limits: {
              memory: if deployment.isVpaEnabled then deployment.instance.minMemory else deployment.instance.memory,
              cpu: (
                if deployment.isVpaEnabled then
                  (if environment == environments.prod then deployment.instance.minCPU * 1.75 else deployment.instance.minCPU * 1.5)
                else deployment.instance.cpu
              ),
            } + (if isGPUEnabled then { 'nvidia.com/gpu': deployment.instance.gpu } else {}),
            requests: {
              memory: if deployment.isVpaEnabled then deployment.instance.minMemory else deployment.instance.memory,
              cpu: if deployment.isVpaEnabled then deployment.instance.minCPU else deployment.instance.cpu,
            } + (if isGPUEnabled then { 'nvidia.com/gpu': deployment.instance.gpu } else {}),
          },
          name: chart.full_service_name(deployment.name),
          ports: port_map.getContainerPorts,
          volumeMounts:
            (if (isFsxNeeded) then
               std.map(function(fsx) {
                 name: fsx.name,
                 mountPath: fsx.mountPath,
               }, deployment.fsx)
             else []) +
            (if (isEfsNeeded) then
               std.map(function(efs) {
                 name: efs.name,
                 mountPath: efs.mountPath,
               }, deployment.efs)
             else []) +
            (if needsAWSAccess && namespace_values.zalandoEnabled then
               [{
                 name: 'aws-iam-credentials',
                 mountPath: '/meta/aws-iam',
                 readOnly: true,
               }] else []) +
            (if hasEnvironmentFile then
               [{
                 mountPath: util.parent_dir(deployment.environmentFile.path),
                 name: 'environment-file-volume',
               }] else []) +
            (if manifest_util.is_dynamic_config_present(deployment_manifest) then
               [{
                 mountPath: '/var/navi-app/dynamic_configuration',
                 name: 'dynamic-config-volume',
               }] else []) +
            (if heapDumpEnabled then
               [{
                 mountPath: '/dumps',
                 name: 'heap-dumps',
               }] else []) +
            (if isSwApmEnabled then
               [{
                 name: 'skywalking-agent',
                 mountPath: '/skywalking',
               }] else []) +
            [{
              mountPath: secret.path,
              name: secret.name,
            } for secret in deployment.mountSecrets],
          [if util.is_readiness_probe_enabled(deployment.image, environment) then 'readinessProbe']: health_check_values.generator(readinessCheck)[readinessCheck.type],
          [if util.is_liveness_probe_enabled(deployment.image, environment) then 'livenessProbe']: health_check_values.generator(livenessCheck)[livenessCheck.type],
          [if util.is_startup_probe_enabled(deployment.healthChecks.startupProbeEnabled, deployment.image, environment) then 'startupProbe']: health_check_values.generator(startupProbe)[startupProbe.type],
        },
      ],
    terminationGracePeriodSeconds: deployment.terminationGracePeriodSeconds,
    dnsConfig: {
      options: [
        {
          name: 'ndots',
          value: '2',
        },
      ],
    },
    // Volumes mirror the mount sections above, one source per feature.
    volumes:
      (if (isFsxNeeded) then
         std.map(function(fsx) {
           name: fsx.name,
           persistentVolumeClaim: {
             claimName: fsx.name,
           },
         }, deployment.fsx)
       else []) +
      (if (isEfsNeeded) then
         std.map(function(efs) {
           name: efs.name,
           persistentVolumeClaim: {
             claimName: chart.full_service_name(deployment.name) + '-' + efs.name,
           },
         }, deployment.efs)
       else []) +
      (if hasEnvironmentFile then
         [{
           configMap: {
             name: chart.full_service_name(deployment.name) + '-cm',
           },
           name: 'environment-file-volume',
         }] else []) +
      (if manifest_util.is_dynamic_config_present(deployment_manifest) then
         [{
           name: 'dynamic-config-volume',
           secret: {
             secretName: chart.full_service_name(deployment_manifest.deployment.name) + '-dynamic-secret',
           },
         }] else []) +
      (if needsAWSAccess && namespace_values.zalandoEnabled then
         [{
           name: 'aws-iam-credentials',
           secret: {
             secretName: roleName,
           },
         }] else []) +
      (if heapDumpEnabled then
         [{
           name: 'heap-dumps',
           emptyDir: {},
         }] else []) +
      (if heapDumpEnabled then
         [{
           name: 'aws-iam-credentials-heap-dump',
           secret: {
             secretName: 'java-heap-dump-bucket-role',
           },
         }] else []) +
      (if isSwApmEnabled then
         [{
           name: 'skywalking-agent',
           emptyDir: {},
         }] else []) +
      [{ name: secret.name, secret: { secretName: secret.name } } for secret in deployment.mountSecrets],
  } + (if (needsAWSAccess && !namespace_values.zalandoEnabled) then { serviceAccountName: roleName } else {}),
}

View File

@@ -1,50 +0,0 @@
// Port lookup helpers derived from the manifest's exposed ports.
local chart = import 'chart.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
// Flink jobs expose a single synthetic REST port entry; everything else
// uses the ports declared on the deployment.
local exposedPorts = if std.objectHas(deployment_manifest, 'flink') then
  [{ name: chart.full_service_name(deployment_manifest.name) + '-rest', port: 'rest' }]
else
  deployment_manifest.deployment.exposedPorts;
{
  // True when `ports` contains an entry named `portName`.
  // (Redundant `if … then true else false` dropped.)
  hasPort(ports, portName):: portName in self.parsePorts(ports),
  // Index a port list by name: { <name>: <port>, ... }.
  parsePorts(ports):: {
    [port.name]: port.port
    for port in ports
  },
  // Ports in Kubernetes Service form.
  getServicePorts:: [
    {
      name: port.name,
      port: port.port,
      protocol: 'TCP',
      targetPort: port.port,
    }
    for port in exposedPorts
  ],
  // As getServicePorts but with nodePort pinned to null (ClusterIP services).
  getPortsforClusterIPService:: [
    {
      name: port.name,
      port: port.port,
      protocol: 'TCP',
      nodePort: null,
      targetPort: port.port,
    }
    for port in exposedPorts
  ],
  // Ports in container (pod spec) form.
  getContainerPorts:: [
    {
      containerPort: port.port,
      protocol: 'TCP',
    }
    for port in exposedPorts
  ],
  // Port number for `portName`, or null when not exposed.
  // The parsed map is built once instead of twice.
  getPort(portName)::
    local parsed = self.parsePorts(exposedPorts);
    if portName in parsed then parsed[portName] else null,
  // gRPC flag of the named port; false when the port is absent or unset.
  isGrpcEnabled(name):: (
    local result = std.filter(function(obj) obj.name == name, exposedPorts);
    if std.length(result) == 0 then
      false
    else if 'enableGrpc' in result[0] then
      result[0].enableGrpc
    else
      false
  ),
}

View File

@@ -1,31 +0,0 @@
// Argo Rollouts manifest: emitted only when the manifest selects the
// rollout controller; otherwise evaluates to null.
// Fix: removed the unused local `strategyConfig = deployment.strategy.config`
// — it was never referenced, and it dereferenced `.config` on a value the
// rest of this file treats as a plain string (`deployment.strategy`); the
// per-strategy options actually live under `deployment.strategyConfig`.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment_util = import 'deployment_util.jsonnet';
local pod_template = import 'pod_template.jsonnet';
local deployment = deployment_manifest.deployment;
local vars = import 'vars.jsonnet';
if (deployment.controller == vars.rolloutController) then {
  apiVersion: 'argoproj.io/v1alpha1',
  kind: 'Rollout',
  metadata: {
    name: chart.full_service_name(deployment.name),
    labels: common.labels {
      // Surfaced so tooling can tell link-config services apart.
      linkConfig: std.toString(deployment_manifest.deployment.isLinkConfig),
    },
    namespace: deployment_manifest.deployment.namespace,
    annotations: common.annotations,
  },
  spec: {
    progressDeadlineSeconds: deployment.progressDeadlineSeconds,
    selector: {
      matchLabels: common.matchLabels,
    },
    template: pod_template,
    // Strategy dispatch on the strategy name; options come from strategyConfig.
    strategy: if deployment.strategy == 'canary' then deployment_util.strategy.canary(if 'canaryConfig' in deployment.strategyConfig then deployment.strategyConfig.canaryConfig else {})
    else if deployment.strategy == 'rollingUpdateWithCanaryMixIn' then deployment_util.strategy.rollingUpdateWithCanaryMixIn(deployment.strategyConfig.rollingUpdateWithCanaryMixInConfig)
    else deployment_util.strategy.rollingUpdate(),
  },
}

View File

@@ -1,33 +0,0 @@
// AnalysisTemplate for Argo Rollouts: emitted only for rollout-controlled
// deployments that configure `strategyConfig.analysisTemplate`; otherwise
// evaluates to null.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local vars = import 'vars.jsonnet';
// Plain membership test (dropped redundant `if … then true else false`).
local templateEnabled = 'analysisTemplate' in deployment.strategyConfig;
if (deployment.controller == vars.rolloutController && templateEnabled) then {
  apiVersion: 'argoproj.io/v1alpha1',
  kind: 'AnalysisTemplate',
  metadata: {
    name: chart.full_service_name(deployment.name),
    labels: common.labels,
    namespace: deployment_manifest.deployment.namespace,
    annotations: common.annotations,
  },
  spec: {
    metrics: [
      {
        name: 'degrade-rollout',
        interval: deployment.strategyConfig.analysisTemplate.interval,
        // Concatenation yields e.g. 'result<0.95'.
        successCondition: 'result' + deployment.strategyConfig.analysisTemplate.operator + deployment.strategyConfig.analysisTemplate.threshold,
        failureLimit: deployment.strategyConfig.analysisTemplate.failureLimit,
        provider: {
          prometheus: {
            address: 'http://prometheus-kube-prometheus.monitoring.svc.cluster.local:9090',
            query: deployment.strategyConfig.analysisTemplate.query,
          },
        },
      },
    ],
  },
}

View File

@@ -1,80 +0,0 @@
// Namespace-scoped Role granting sandbox users broad read access plus a
// narrow set of write verbs (port-forward/exec, deployment management).
local common = import '../common.jsonnet';
local deployment_manifest = import '../deployment_manifest.jsonnet';
local namespace = deployment_manifest.deployment.namespace;
{
  apiVersion: 'rbac.authorization.k8s.io/v1',
  kind: 'Role',
  metadata: {
    name: namespace + '-full-access',
    namespace: namespace,
    labels: common.labels
  },
  rules: [
    // Read-only access to core (v1) resources.
    {
      apiGroups: [""],
      resources: [
        "configmaps",
        "endpoints",
        "persistentvolumeclaims",
        "pods",
        "replicationcontrollers",
        "replicationcontrollers/scale",
        "serviceaccounts",
        "services",
        "events",
        "limitranges",
        "pods/log",
        "pods/status",
        "replicationcontrollers/status",
        "resourcequotas",
        "resourcequotas/status",
      ],
      verbs: ["get", "list", "watch"],
    },
    // Read-only access to workload controllers.
    {
      apiGroups: ["apps"],
      resources: [
        "controllerrevisions",
        "daemonsets",
        "deployments",
        "deployments/scale",
        "replicasets",
        "replicasets/scale",
        "statefulsets",
        "statefulsets/scale",
      ],
      verbs: ["get", "list", "watch"],
    },
    {
      apiGroups: ["autoscaling"],
      resources: ["horizontalpodautoscalers"],
      verbs: ["get", "list", "watch"],
    },
    {
      apiGroups: ["batch"],
      resources: ["cronjobs", "jobs"],
      verbs: ["get", "list", "watch"],
    },
    {
      apiGroups: ["policy"],
      resources: ["poddisruptionbudgets"],
      verbs: ["get", "list", "watch"],
    },
    {
      apiGroups: ["networking.k8s.io"],
      resources: ["ingresses", "networkpolicies"],
      verbs: ["get", "list", "watch"],
    },
    // Write access: edit configmaps and open port-forward/exec sessions.
    {
      apiGroups: [""],
      resources: ["configmaps", "pods/portforward", "pods/exec"],
      verbs: ["get", "update", "create"],
    },
    // Write access: full lifecycle on deployments.
    {
      apiGroups: ["apps"],
      resources: ["deployments"],
      verbs: ["create", "update", "patch", "delete"],
    },
  ],
}

View File

@@ -1,26 +0,0 @@
// Binds the sandbox namespace's full-access Role to the sandbox owner's
// Teleport identity (derived from their e-mail address in sandboxParams).
local common = import '../common.jsonnet';
local deployment_manifest = import '../deployment_manifest.jsonnet';

local sandboxParams = deployment_manifest.sandboxParams;
local namespace = deployment_manifest.deployment.namespace;
local roleName = namespace + '-full-access';
// Teleport maps remote users to `remote-<email>-teleport.cmd.navi-tech.in`.
local teleportUser = 'remote-' + sandboxParams.email + '-teleport.cmd.navi-tech.in';

{
  apiVersion: 'rbac.authorization.k8s.io/v1',
  kind: 'RoleBinding',
  metadata: {
    labels: common.labels,
    name: roleName,
    namespace: namespace,
  },
  roleRef: {
    apiGroup: 'rbac.authorization.k8s.io',
    kind: 'Role',
    name: roleName,
  },
  subjects: [{
    apiGroup: 'rbac.authorization.k8s.io',
    kind: 'User',
    name: teleportUser,
  }],
}

View File

@@ -1,36 +0,0 @@
// AWS access for sandbox pods. When the source deployment requests
// `aws_access`, emit either a zalando AWSIAMRole (credential-injection
// clusters) or an IRSA-annotated ServiceAccount (everything else), both
// referencing the role of the *source* environment the sandbox was cloned
// from. Evaluates to null when no aws_access is requested.
local chart = import '../chart.jsonnet';
local common = import '../common.jsonnet';
local deployment_manifest = import '../deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
// The sandbox reuses the IAM role of the environment it was cloned from.
local sourceEnvironment = deployment_manifest.sandboxParams.source.environment;
local environment = deployment_manifest.environment;
local full_name = chart.full_service_name(deployment.name);
local namespace_values = import '../namespace_values.jsonnet';
if (deployment_manifest.extraResources != null
    && 'aws_access' in deployment_manifest.extraResources) then
  if (namespace_values.zalandoEnabled) then {
    apiVersion: 'zalando.org/v1',
    kind: 'AWSIAMRole',
    metadata: {
      name: '%s-%s' % [full_name, environment],
      namespace: deployment_manifest.deployment.namespace,
      annotations: common.annotations,
    },
    spec: {
      // Points at the source environment's role, not the sandbox's own.
      roleReference: '%s-%s' % [full_name, sourceEnvironment],
    },
  } else {
    apiVersion: 'v1',
    kind: 'ServiceAccount',
    metadata: {
      annotations: {
        // IRSA: assume the source environment's role via the account's OIDC provider.
        'eks.amazonaws.com/role-arn': 'arn:aws:iam::%s:role/%s-%s' % [namespace_values.awsAccountId, full_name, sourceEnvironment],
        'eks.amazonaws.com/sts-regional-endpoints': 'true',
        'eks.amazonaws.com/token-expiration': '10800',
      },
      name: '%s-%s' % [full_name, environment],
      namespace: deployment_manifest.deployment.namespace,
    },
  }
else null

View File

@@ -1,53 +0,0 @@
// Entry point for sandbox resources. `sandbox(config)` returns the manifests
// needed to provision a sandbox namespace plus the ALB-ingress routing
// metadata for header-based sandbox routing.
// Fix: `namespace` and `roleBinding` were each imported and bound twice
// (shadowing identical bindings); the duplicates were removed.
local deployment_manifest = import '../deployment_manifest.jsonnet';
local accessRole = import 'access_role.jsonnet';
local accessRoleBinding = import 'access_role_binding.jsonnet';
local awsIamRole = import 'aws_iam_role.jsonnet';
local namespace = import 'namespace.jsonnet';
local roleBinding = import 'role_binding.jsonnet';
local sandboxParams = deployment_manifest.sandboxParams;
// ALB listener-rule ordering: sandbox rules are evaluated after defaults.
local groupOrder = '20';
{
  sandbox: function(config={}) {
    // Effective configuration; callers may override any of these keys.
    local _config = {
      routingKey: if sandboxParams != null then sandboxParams.routingKey,
      serviceName: null,
      servicePort: null,
    } + config,
    namespace: namespace,
    roleBinding: roleBinding,
    // Sandbox pods run as root.
    securityContext: {
      runAsUser: 0,
    },
    albIngress: {
      annotations: {
        // All three routing fields are mandatory once the ingress renders.
        assert _config.serviceName != null : 'serviceName is required',
        assert _config.servicePort != null : 'servicePort is required',
        assert _config.routingKey != null : 'routingKey is required',
        'alb.ingress.kubernetes.io/actions.sandbox': '{"Type":"forward","ForwardConfig":{"TargetGroups":[{"ServiceName":"%s","ServicePort":"%s","Weight":100}]}}' % [_config.serviceName, _config.servicePort],
        'alb.ingress.kubernetes.io/conditions.sandbox': '[{"field":"http-header","httpHeaderConfig":{"httpHeaderName": "routing_key", "values":["%s"]}}]' % _config.routingKey,
        'alb.ingress.kubernetes.io/group.order': groupOrder,
      },
      host: {
        paths: [
          {
            pathType: 'ImplementationSpecific',
            backend: {
              service: {
                name: 'sandbox',
                port: {
                  name: 'use-annotation',
                },
              },
            },
          },
        ],
      },
    },
    // NOTE(review): both `rolebinding` and `roleBinding` are exposed;
    // the lowercase alias is kept for backward compatibility with callers.
    rolebinding: roleBinding,
    accessRoleBinding: accessRoleBinding,
    accessRole: accessRole,
    iamRole: awsIamRole,
  },
}

View File

@@ -1,17 +0,0 @@
// Namespace manifest for the sandbox, labelled so that the privileged PSP
// binding applies and kube-prometheus scrapes it.
local common = import '../common.jsonnet';
local deployment_manifest = import '../deployment_manifest.jsonnet';

{
  apiVersion: 'v1',
  kind: 'Namespace',
  metadata: {
    name: deployment_manifest.deployment.namespace,
    labels: {
      privilege: 'true',
      prometheus: 'kube-prometheus',
    },
  },
}

View File

@@ -1,25 +0,0 @@
local common = import '../common.jsonnet';
local deployment_manifest = import '../deployment_manifest.jsonnet';
local namespace = deployment_manifest.deployment.namespace;
{
apiVersion: 'rbac.authorization.k8s.io/v1',
kind: 'RoleBinding',
metadata: {
name: 'psp:privileged:' + namespace,
labels: common.labels,
namespace: namespace,
},
roleRef: {
apiGroup: 'rbac.authorization.k8s.io',
kind: 'ClusterRole',
name: 'psp:privileged',
},
subjects: [
{
apiGroup: 'rbac.authorization.k8s.io',
kind: 'Group',
name: 'system:serviceaccounts:' + namespace,
},
],
}

View File

@@ -1,18 +0,0 @@
// Opaque Secret holding every environment variable from the manifest,
// base64-encoded as Kubernetes requires. The main container maps these back
// into env vars via secretKeyRef.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
// Flink jobs keep their namespace under `flink`; regular services under `deployment`.
local namespace = if 'flink' in deployment_manifest then deployment_manifest.flink.namespace else deployment_manifest.deployment.namespace;
{
  apiVersion: 'v1',
  kind: 'Secret',
  metadata: {
    name: chart.full_service_name(deployment_manifest.name) + '-secret',
    labels: common.labels,
    namespace: namespace,
    annotations: common.annotations,
  },
  // One entry per environment variable; Secret values must be base64.
  data: { [e.name]: std.base64(e.value) for e in deployment_manifest.environmentVariables },
  type: 'Opaque',
}

View File

@@ -1,32 +0,0 @@
// Custom SecurityGroup resources (aws.navi.com/v1): one per entry in
// `deployment.securityGroup`. Evaluates to null when the field is absent.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
if 'securityGroup' in deployment then
  local security_group = deployment.securityGroup;
  [{
    apiVersion: 'aws.navi.com/v1',
    kind: 'SecurityGroup',
    metadata: {
      name: '%s-%s' % [chart.full_service_name(deployment_manifest.deployment.name), sg.name],
      labels: common.labels,
      namespace: deployment_manifest.deployment.namespace,
      annotations: common.annotations,
    },
    spec: {
      rules: [
        {
          // Split the CIDR list: anything containing ':' is treated as IPv6.
          local ipv4_cidrs = [cidr for cidr in rule.ingressCidr if std.findSubstr(':', cidr) == []],
          local ipv6_cidrs = [cidr for cidr in rule.ingressCidr if std.findSubstr(':', cidr) != []],
          // Each field is emitted only when the manifest rule sets it.
          [if 'fromPort' in rule then 'fromPort']: rule.fromPort,
          [if 'toPort' in rule then 'toPort']: rule.toPort,
          [if 'protocol' in rule then 'protocol']: rule.protocol,
          [if 'description' in rule then 'description']: rule.description,
          [if 'ingressCidr' in rule then 'ingressCidr']: ipv4_cidrs,
          [if 'ingressCidr' in rule then 'ipv6ingressCidr']: ipv6_cidrs,
        }
        for rule in sg.rules
      ],
      [if 'vpcId' in sg then 'vpcId']: sg.vpcId,
    },
  } for sg in security_group]

View File

@@ -1,119 +0,0 @@
// Renders the Kubernetes Service object(s) for a deployment. With the Argo
// rollout controller three Services are emitted (base, -canary, -stable),
// wrapped in a v1 List; otherwise a single Service is returned. Load-balancer
// specific spec and annotations are selected from the first entry of
// deployment.loadBalancers.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local health_check_values = import 'health_check_values.jsonnet';
local load_balancer_util = import 'load_balancer_util.jsonnet';
local namespace_values = import 'namespace_values.jsonnet';
local port_map = import 'port_map.jsonnet';
local util = import 'util.jsonnet';
local vars = import 'vars.jsonnet';
local deployment = deployment_manifest.deployment;
local livenessCheck = deployment.healthChecks.livenessCheck;
// String values for the aws-load-balancer-internal annotation, keyed by scheme.
local elbScheme = {
internetFacing: 'false',
internal: 'true',
};
local name = chart.full_service_name(deployment.name);
// Argo rollouts need dedicated canary/stable Services alongside the base one.
local services = [name] + if (deployment.controller == vars.rolloutController) then ['%s-canary' % name, '%s-stable' % name] else [];
local albTags = common.awsTags;
// Per-loadBalancer-type Service spec fragments, merged into the Service below.
local load_balancer_spec = {
alb: {
type: 'ClusterIP',
ports: port_map.getServicePorts,
},
//If shared Alb is used all accessPolicies are ignored for now
sharedAlbAcrossNamespace: self.alb,
nodePort: self.alb,
commonApiGateway: self.alb,
elb: {
type: 'LoadBalancer',
loadBalancerSourceRanges: namespace_values.loadBalancer.sourceRanges,
ports: [{
port: 443,
targetPort: port_map.getPort('serviceport'),
protocol: 'TCP',
name: 'https',
}] + port_map.getServicePorts,
},
// If elb or alb is being created, a clusterIP is created by default
kubeLb: {
type: 'ClusterIP',
ports: port_map.getServicePorts,
},
nginxLb: self.kubeLb,
// Creates a kubernetes headless service
none: {
type: 'ClusterIP',
ports: port_map.getServicePorts,
},
};
// Builds the ELB/ALB annotation mixin. NOTE(review): 'super' inside these
// mixins resolves against the annotation object this result is added to
// (common.annotations + namespace_values.loadBalancer.annotations) — confirm
// securityGroups/sslCert are provided there.
local lb_annotations_mixin(albTags) =
local elbObjects = std.filter(function(lbObject) lbObject.type == 'elb', deployment.loadBalancers);
// Only first elb loadbalancer configuration is considered
local elb_annotations_mixin =
if elbObjects != [] then
{
'service.beta.kubernetes.io/aws-load-balancer-ssl-ports': 'https',
'service.beta.kubernetes.io/aws-load-balancer-backend-protocol': 'http',
'service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout': '3600',
'service.beta.kubernetes.io/aws-load-balancer-extra-security-groups':
load_balancer_util.security_group_list(elbObjects[0].accessPolicies, super.securityGroups, elbObjects[0].extraSecurityGroups),
'service.beta.kubernetes.io/aws-load-balancer-ssl-cert': super.sslCert[util.get_certs(std
.objectFieldsAll(super.sslCert), elbObjects[0].endpoint)],
'service.beta.kubernetes.io/aws-load-balancer-internal': elbScheme[load_balancer_util.subnet_scheme(elbObjects[0].accessPolicies)],
'external-dns.alpha.kubernetes.io/hostname': elbObjects[0].endpoint,
'external-dns.alpha.kubernetes.io/ttl': '60',
}
else {};
// NOTE(review): this filter's predicate ignores its lbObject argument — the
// inner expression is the same for every element, so albObjects is either
// all of deployment.loadBalancers or empty. Confirm whether the intent was
// lbObject.type in ['alb', 'sharedAlbAcrossNamespace'].
local albObjects = std.filter(function(lbObject) std.prune([std.find(loadBalancers.type, ['alb', 'sharedAlbAcrossNamespace']) for loadBalancers in deployment.loadBalancers]) != [], deployment.loadBalancers);
// Only first alb/sharedAlbAcrossNamespace loadbalancer configuration is considered
local alb_annotations_mixin =
if albObjects != [] then
{
'alb.ingress.kubernetes.io/healthcheck-path': livenessCheck.path,
'alb.ingress.kubernetes.io/healthcheck-port': std.toString(port_map.getPort(livenessCheck.port)),
'alb.ingress.kubernetes.io/tags': 'Environment=%(Environment)s,Owner=%(Owner)s,Name=%(Name)s,Team=%(Team)s,Namespace=%(Namespace)s' % (albTags),
}
else {};
elb_annotations_mixin + alb_annotations_mixin;
//Kubernetes Service Object
local create_service(name) = {
local tags = albTags { Name: name },
apiVersion: 'v1',
kind: 'Service',
metadata: {
labels: common.labels,
name: name,
annotations: common.annotations + namespace_values.loadBalancer.annotations + lb_annotations_mixin(tags),
namespace: deployment_manifest.deployment.namespace,
},
spec: {
selector: {
app: chart.service_name,
release: deployment.name,
},
} + load_balancer_spec[deployment.loadBalancers[0].type],
};
// this if condition is only added so older test get passed. we need to update older tests fixture and than remove this
if (std.length(services) == 1) then create_service(name) else {
apiVersion: 'v1',
kind: 'List',
items: [create_service(service) for service in services],
}

View File

@@ -1,37 +0,0 @@
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';

// Emits a Prometheus-operator ServiceMonitor for this deployment when the
// manifest opts in via serviceMonitor.enabled; evaluates to null otherwise.
local deployment = deployment_manifest.deployment;
local monitor = deployment.serviceMonitor;

if monitor.enabled == true then {
  apiVersion: 'monitoring.coreos.com/v1',
  kind: 'ServiceMonitor',
  metadata: {
    labels: common.labels,
    name: chart.full_service_name(deployment.name) + '-monitor',
    namespace: deployment.namespace,
    annotations: common.annotations,
  },
  spec: {
    endpoints: [
      {
        honorLabels: false,
        interval: monitor.interval,
        path: monitor.path,
        port: monitor.port,
        metricRelabelings: monitor.metricRelabelings,
        scrapeTimeout: monitor.scrapeTimeout,
      },
    ],
    namespaceSelector: {
      matchNames: [
        deployment.namespace,
      ],
    },
    // Hard limit on scrape samples per target.
    sampleLimit: 20000,
    selector: {
      matchLabels: common.matchLabels,
    },
  },
}

View File

@@ -1,5 +0,0 @@
local shared_ingress = import 'shared_ingress.libsonnet';
// Entry point: maps the output filename to the generated shared-ALB Ingress
// manifest for the given cluster/namespace/group; product defaults to 'shared'.
function(cluster, namespace, group_name, environment, product="shared") {
'10_ingress.json': shared_ingress.create(cluster, namespace, group_name, environment, product)
}

View File

@@ -1,75 +0,0 @@
// Library for generating a shared-ALB Ingress (one ALB per group.name,
// shared across workloads via alb.ingress.kubernetes.io/group.name).
local cluster_values = import '../cluster_values.jsonnet';
// Fixed policy values applied to every shared ALB.
local defaults = {
idle_timeout_seconds: 60,
access_logs_enable: true,
ssl_policy: 'ELBSecurityPolicy-TLS-1-2-2017-01',
team_name: 'Shared',
labels: {
product: 'shared',
owner: 'shared',
heritage: 'NaviDeploymentManifest',
},
};
{
// Per-namespace config for a cluster, falling back to the cluster default.
namespace_values(cluster, namespace)::
local cluster_value = cluster_values[cluster];
if namespace in cluster_value
then cluster_value[namespace]
else cluster_value.default,
// ALB ingress-controller annotations (security groups, access logs,
// subnets, tags) derived from the cluster/namespace config.
annotations(cluster, namespace, group_name, environment, product)::
local namespace_values = $.namespace_values(cluster, namespace);
local cluster_annotations = namespace_values.loadBalancer.annotations;
local security_groups = cluster_annotations.securityGroups;
local ingress_sg = std.join(',', [security_groups.officeIp, security_groups.internal]);
local loadbalancer_attributes = std.join(',', [
'idle_timeout.timeout_seconds=%s' % defaults.idle_timeout_seconds,
'access_logs.s3.enabled=%s' % defaults.access_logs_enable,
'access_logs.s3.bucket=%s' % cluster_annotations.accessLogBucket,
'access_logs.s3.prefix=%s' % group_name,
]);
{
'alb.ingress.kubernetes.io/ssl-policy': defaults.ssl_policy,
'alb.ingress.kubernetes.io/scheme': 'internal',
'alb.ingress.kubernetes.io/security-groups': ingress_sg,
'alb.ingress.kubernetes.io/load-balancer-attributes': loadbalancer_attributes,
// The spike cluster relies on auto-discovered subnets; all others pin them.
[if cluster != 'spike.np.navi-tech.in' then 'alb.ingress.kubernetes.io/subnets']: cluster_annotations.subnets.internal,
'alb.ingress.kubernetes.io/group.name': group_name,
'alb.ingress.kubernetes.io/tags': 'Name=shared-alb-%(name)s,Ingress=shared-alb-%(name)s,Owner=shared,Team=Shared,Product=%(product)s,Environment=%(environment)s' % { name: group_name, product: product, environment: environment },
'kubernetes.io/ingress.class': 'alb',
},
// Standard label set for the Ingress object.
labels(name, environment, product)::
{
app: name,
chart: name,
heritage: defaults.labels.heritage,
release: name,
Team: defaults.team_name,
Environment: environment,
Name: name,
Product: product,
Owner: defaults.labels.owner,
},
// Object name convention: "<group>-shared-alb-config".
name(group_name):: '%s-shared-alb-config' % [group_name],
// Builds the Ingress. spec.rules is a single empty rule: the object exists
// only to provision the shared ALB; backends are attached by other Ingresses
// in the same group.
create(cluster, namespace, group_name, environment, product=defaults.labels.product)::
local name = $.name(group_name);
{
apiVersion: 'networking.k8s.io/v1',
kind: 'Ingress',
metadata: {
name: name,
labels: $.labels(name, environment, product),
namespace: namespace,
annotations: $.annotations(cluster, namespace, group_name, environment, product),
},
spec: {
rules: [{}],
},
},
}

View File

@@ -1,34 +0,0 @@
{
"apiVersion": "networking.k8s.io/v1",
"kind": "Ingress",
"metadata": {
"annotations": {
"alb.ingress.kubernetes.io/group.name": "custom-group-name",
"alb.ingress.kubernetes.io/load-balancer-attributes": "idle_timeout.timeout_seconds=60,access_logs.s3.enabled=true,access_logs.s3.bucket=navi-nonprod-lb-access-logs,access_logs.s3.prefix=custom-group-name",
"alb.ingress.kubernetes.io/scheme": "internal",
"alb.ingress.kubernetes.io/security-groups": "sg-01a64c085bfdb2cbb,sg-0bc07e856d000a5f4",
"alb.ingress.kubernetes.io/ssl-policy": "ELBSecurityPolicy-TLS-1-2-2017-01",
"alb.ingress.kubernetes.io/subnets": "internal-lb-ap-south-1a.nonprod.np.navi-tech.in,internal-lb-ap-south-1b.nonprod.np.navi-tech.in",
"alb.ingress.kubernetes.io/tags": "Name=shared-alb-custom-group-name,Ingress=shared-alb-custom-group-name,Owner=shared,Team=Shared,Product=shared,Environment=dev",
"kubernetes.io/ingress.class": "alb"
},
"labels": {
"Environment": "dev",
"Name": "custom-group-name-shared-alb-config",
"Owner": "shared",
"Product": "shared",
"Team": "Shared",
"app": "custom-group-name-shared-alb-config",
"chart": "custom-group-name-shared-alb-config",
"heritage": "NaviDeploymentManifest",
"release": "custom-group-name-shared-alb-config"
},
"name": "custom-group-name-shared-alb-config",
"namespace": "dev-internal"
},
"spec": {
"rules": [
{}
]
}
}

View File

@@ -1,15 +0,0 @@
{
"version": 1,
"dependencies": [
{
"source": {
"git": {
"remote": "https://github.com/yugui/jsonnetunit.git",
"subdir": "jsonnetunit"
}
},
"version": "master"
}
],
"legacyImports": true
}

View File

@@ -1,16 +0,0 @@
{
"version": 1,
"dependencies": [
{
"source": {
"git": {
"remote": "https://github.com/yugui/jsonnetunit.git",
"subdir": "jsonnetunit"
}
},
"version": "6927c58cae7624a00f368b977ccc477d4f74071f",
"sum": "9FFqqln65hooRF0l6rjICDtnTxUlmDj34+sKMh4sjPI="
}
],
"legacyImports": false
}

View File

@@ -1,49 +0,0 @@
// jsonnetunit suite for shared_ingress.libsonnet: exercises the name,
// annotations, labels helpers and the full Ingress against a JSON fixture.
local shared_ingress = import '../shared_ingress.libsonnet';
local test = import './vendor/jsonnetunit/test.libsonnet';
test.suite({
// Naming convention: "<group>-shared-alb-config".
testName: {
actual: shared_ingress.name('group-name'),
expect: 'group-name-shared-alb-config',
},
// Annotation set for the nonprod cluster / dev namespace.
testAnnotations: {
actual: shared_ingress.annotations('nonprod.np.navi-tech.in', 'dev', 'group_name', 'dev', product='shared'),
expect: {
'alb.ingress.kubernetes.io/group.name': 'group_name',
'alb.ingress.kubernetes.io/load-balancer-attributes': 'idle_timeout.timeout_seconds=60,access_logs.s3.enabled=true,access_logs.s3.bucket=navi-nonprod-lb-access-logs,access_logs.s3.prefix=group_name',
'alb.ingress.kubernetes.io/scheme': 'internal',
'alb.ingress.kubernetes.io/security-groups': 'sg-01a64c085bfdb2cbb,sg-0bc07e856d000a5f4',
'alb.ingress.kubernetes.io/ssl-policy': 'ELBSecurityPolicy-TLS-1-2-2017-01',
'alb.ingress.kubernetes.io/subnets': 'internal-lb-ap-south-1a.nonprod.np.navi-tech.in,internal-lb-ap-south-1b.nonprod.np.navi-tech.in',
'alb.ingress.kubernetes.io/tags': 'Name=shared-alb-group_name,Ingress=shared-alb-group_name,Owner=shared,Team=Shared,Product=shared,Environment=dev',
'kubernetes.io/ingress.class': 'alb',
},
},
// Label map for the Ingress metadata.
testLabels: {
local name = 'group-name-shared-alb-config',
local env = 'dev',
local product = 'shared',
actual: shared_ingress.labels(name, env, product),
expect: {
app: name,
chart: name,
heritage: 'NaviDeploymentManifest',
release: name,
Team: 'Shared',
Environment: env,
Name: name,
Product: 'shared',
Owner: 'shared',
},
},
// Full object compared against a stored golden fixture (default product).
testIngress: {
local cluster = 'nonprod.np.navi-tech.in',
local namespace = 'dev-internal',
local environment = 'dev',
local group_name = 'custom-group-name',
actual: shared_ingress.create(cluster, namespace, group_name, environment),
expect: import './expected/ingress/nonprod.np.navi-tech.in:dev-internal:custom-group-name:dev.json',
},
})

View File

@@ -1,87 +0,0 @@
// Renders an Istio Sidecar resource limiting a workload's egress to the
// hosts listed in deployment.allowEgress (plus istio-system and the
// skywalking backend). Evaluates to null when Istio is not applicable.
local deployment_manifest = import 'deployment_manifest.jsonnet';
local deployment = deployment_manifest.deployment;
local namespace_values = import 'namespace_values.jsonnet';
local vars = import 'vars.jsonnet';
local util = import 'util.jsonnet';
local cluster = deployment_manifest.cluster;
local namespace = deployment.namespace;
// Per-cluster/namespace outbound policy overrides; anything not listed here
// falls back to {} (i.e. the mesh default is used).
local outboundTrafficPolicy = {
"nonprod.np.navi-tech.in": {
"dev": { mode: 'ALLOW_ANY' },
"qa": { mode: 'ALLOW_ANY' },
},
};
local getOutboundTrafficPolicy(cluster, namespace) = (
local envConf = util.get(outboundTrafficPolicy, cluster, {});
local policy = util.get(envConf, namespace, {});
policy
);
// Istio sidecar need not be deployed for Infra team or applications in command cluster
if (deployment_manifest.team.name != 'Infra' && namespace_values.sidecarEnabled
&& !deployment.disableIstio) then {
local chart = import 'chart.jsonnet',
local common = import 'common.jsonnet',
local util = import 'util.jsonnet',
local default_egress_list = [
'istio-system/*',
'*/' + vars.swBackend,
],
// Applies namespace prefix as required by sidecar configuration
// (IPv4 addresses are left untouched — Istio only namespaces hostnames)
// FROM [ "dev-payment.np.navi-tech.in",
// "dev-camunda.np.navi-tech.in",
// "192.168.1.1" ]
//
// TO [ "*/dev-payment.np.navi-tech.in",
// "*/dev-camunda.np.navi-tech.in",
// "192.168.1.1" ]
local sidecar_egress_list(egressEndpoints) =
std.map(function(egressEndpoint) if util.is_ipv4_address(egressEndpoint) then egressEndpoint else '*/' + egressEndpoint, egressEndpoints),
// Converts a array of endpoint urls to flat array of hostnames
// FROM [ "https://dev-payment.np.navi-tech.in",
// "https://dev-camunda.np.navi-tech.in",
// "192.168.1.1",
// "kafka-0.np.navi-tech.in:19092,kafka-1.np.navi-tech.in:19092,kafka-2.np.navi-tech.in:19092" ]
//
// TO [ "dev-payment.np.navi-tech.in",
// "dev-camunda.np.navi-tech.in",
// "192.168.1.1",
// "kafka-0.np.navi-tech.in",
// "kafka-1.np.navi-tech.in",
// "kafka-2.np.navi-tech.in" ]
local host_list(egressEndpoints) =
std.flattenArrays([
if std.findSubstr(',', egressEndpoint) != [] then std.map(util.host_name, std.split(egressEndpoint, ','))
else [util.host_name(egressEndpoint)]
for egressEndpoint in egressEndpoints
]),
apiVersion: 'networking.istio.io/v1alpha3',
kind: 'Sidecar',
metadata: {
name: chart.full_service_name(deployment.name) + '-sidecar',
labels: common.labels,
namespace: deployment.namespace,
annotations: common.annotations,
},
spec: {
// Scope the Sidecar to this release's pods only.
workloadSelector: {
labels: {
app: chart.service_name,
release: deployment.name,
},
},
outboundTrafficPolicy: getOutboundTrafficPolicy(cluster, namespace),
egress: [
{
hosts: sidecar_egress_list(host_list(deployment.allowEgress)) + default_egress_list,
},
],
},
}

View File

@@ -1,96 +0,0 @@
local chart = import 'chart.jsonnet';
local vars = import 'vars.jsonnet';
// Pure helper functions shared across the manifest templates.
{
  // First path segment: 'a/b/c' => 'a'.
  parent_dir(filePath)::
    std.splitLimit(filePath, '/', 1)[0],
  // Last path segment: 'a/b/c' => 'c'.
  file_name(filePath)::
    local words = std.split(filePath, '/');
    words[std.length(words) - 1],
  // Returns the root domain for given domain
  // dev-camunda.np.navi-tech.in => navi-tech.in
  // dev-camunda.np.navi-ext.com => navi-ext.com
  root_domain(domain)::
    local words = std.split(domain, '.');
    words[std.length(words) - 2] + '.' + words[std.length(words) - 1],
  // Picks the last (by sort order) configured cert whose name is a
  // substring of the domain; errors when nothing matches.
  get_certs(ssls, domain)::
    local qualified_certificates = std.prune([if std.findSubstr(ssl, domain) != [] then ssl for ssl in std.sort(ssls)]);
    if std.length(qualified_certificates) == 0 then error 'No cert found for domain: %s' % domain
    else qualified_certificates[std.length(qualified_certificates) - 1],
  // Returns hostname for given full endpoint urls like following
  // https://dev-camunda.np.navi-tech.in => dev-camuna.np.navi-tech.in
  // https://dev-camunda.np.navi-tech.in/camunda => dev-camuna.np.navi-tech.in
  // dev-camunda.np.navi-tech.in:3131 => dev-camuna.np.navi-tech.in
  // 192.168.1.1 => 192.168.1.1
  host_name(endpoint)::
    // 9999 simply means "the rest of the string" for std.substr.
    if std.findSubstr('://', endpoint) != [] then local hostNameStart = std.findSubstr('://', endpoint); self.host_name(std.substr(endpoint, hostNameStart[0] + 3, 9999))
    else if std.findSubstr(':', endpoint) != [] then self.host_name(std.split(endpoint, ':')[0])
    else if std.findSubstr('/', endpoint) != [] then self.host_name(std.split(endpoint, '/')[0])
    else endpoint,
  // True only for dotted-quad IPv4 shapes: exactly four dot-separated parts,
  // each 1-3 characters long and all digits. The previous check skipped the
  // digit test, so hostnames such as 'a.bb.cc.dd' were misclassified as IPs
  // (which, e.g., suppressed the '*/' namespace prefix in sidecar egress).
  is_ipv4_address(endpoint)::
    local parts = std.split(endpoint, '.');
    local is_octet(part) =
      std.length(part) >= 1 && std.length(part) <= 3
      && std.length(std.filter(function(c) c < '0' || c > '9', std.stringChars(part))) == 0;
    std.length(parts) == 4 && std.length(std.filter(is_octet, parts)) == 4,
  // objectHas that tolerates a null object.
  is_field_present(object, field)::
    if object == null then false
    else std.objectHas(object, field),
  // '512Mi' => 512, '2Gi' => 2048. Only Mi/Gi suffixes are supported;
  // any other suffix fails the unitMap lookup.
  memory_in_mb(memory)::
    local unitMap = {
      Mi: 1,
      Gi: 1024,
    };
    local length = std.length(memory);
    local value = std.parseInt(std.substr(memory, 0, length - 2));
    local unit = std.substr(memory, length - 2, 2);
    value * unitMap[unit],
  // 0.5 => '500m'; values already in milli-core form ('250m') pass through.
  cpu_in_milli_core(cpu)::
    local cpuStr = cpu + '';
    if std.substr(cpuStr, std.length(cpuStr) - 1, 1) == 'm' then cpu else '%dm' % (cpu * 1000),
  // Replaces every occurrence of single character a with string b.
  replace_character_in_string(str, a, b):: (
    assert std.length(a) == 1;
    std.join(b, std.split(str, a))
  ),
  is_sandbox(env):: env == 'sandbox',
  // Local sandbox run: no IMAGE extVar supplied and the manifest has no image.
  is_local_sandbox(image, env):: std.extVar('IMAGE') == 'null' && $.is_sandbox(env) && (image == null || image == 'null'),
  // IMAGE extVar wins; otherwise the manifest image, or the busybox sandbox
  // image for local sandbox runs.
  get_image(image, env)::
    if std.extVar('IMAGE') == 'null' then
      if $.is_local_sandbox(image, env) then
        vars.sandboxImage
      else
        image
    else
      std.extVar('IMAGE'),
  // Probes are disabled for local sandbox runs (busybox has no app to probe).
  is_readiness_probe_enabled(image, environment):: !$.is_local_sandbox(image, environment),
  is_liveness_probe_enabled(image, environment):: !$.is_local_sandbox(image, environment),
  is_startup_probe_enabled(is_enabled, image, environment):: is_enabled && !$.is_local_sandbox(image, environment),
  // HPA scaleTargetRef for Deployment or Argo Rollout controllers; the
  // 'disabled' sentinel keeps the HPA object valid but pointing nowhere.
  hpa_scale_target_ref(name, controller, isDisabled):: if isDisabled then {
    apiVersion: 'apps/v1',
    kind: 'Deployment',
    name: 'disabled',
  } else if (controller == vars.rolloutController) then {
    apiVersion: 'argoproj.io/v1alpha1',
    kind: 'Rollout',
    name: chart.full_service_name(name),
  } else {
    apiVersion: 'apps/v1',
    kind: 'Deployment',
    name: chart.full_service_name(name),
  },
  // Safe object lookup with a default.
  get( object, key, defaultValue ):: if std.objectHas(object, key) then object[key] else defaultValue,
}

View File

@@ -1,44 +0,0 @@
// Global constants shared by the manifest templates. Fields declared with
// '::' are hidden (not serialized when this object is rendered).
{
esImage_7_17_0:: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/elastic-search:7.17.0-withplugins',
esImage_8_12_2:: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/elastic-search:8.12.2-withplugins',
kibanaImage_7_17_0:: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/kibana:7.17.0',
kibanaImage_8_12_2:: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/kibana:8.12.2',
// Busybox image used when running a local sandbox without a built image.
sandboxImage:: '193044292705.dkr.ecr.ap-south-1.amazonaws.com/common/busybox:navicli',
// SkyWalking OAP backend host/port used in sidecar egress and agent config.
swBackend:: 'skywalking-skywalking-helm-oap.skywalking.svc.cluster.local',
swPort:: '11800',
// Controller identifiers recognized by the templates.
rolloutController:: 'argo',
defaultController:: 'default',
defaultDeploymentStrategy:: 'rollingUpdate',
// NOTE(review): 'defaultCanarySteps' and 'environments' use ':' (visible)
// unlike the '::' siblings, so they would appear if this object is ever
// serialized — confirm that is intentional.
defaultCanarySteps: [
{ setWeight: 20 },
{ pause: {} },
],
environments: {
prod: 'prod',
dev: 'dev',
qa: 'qa',
perf: 'perf',
cmd: 'cmd',
},
// Upper resource bounds applied by the VPA template.
vpa:: {
maxAllowedCPU: '7200m',
maxAllowedMemory: '16Gi',
},
deployment:: {
hpa:: {
type:: {
metrics:: 'metrics',
cron:: 'cron',
},
},
// Default pod alert rules attached to every deployment.
alerts:: {
pod:: [
{ type: 'HighPodRestarts', threshold: 3, duration: '30m', severity: 'critical' },
{ type: 'HighPodFailures', threshold: 2, duration: '3h', severity: 'warning' },
{ type: 'FrequentPodOOMKilled', threshold: 2, duration: '10m', severity: 'critical' },
{ type: 'PodOOMKilled', threshold: 1, duration: '5m', severity: 'warning' },
{ type: 'KubeContainerWaiting', threshold: 0, duration: '1h', severity: 'critical' },
],
},
},
}

View File

@@ -1,60 +0,0 @@
// Emits a VerticalPodAutoscaler when the namespace has VPA deployed
// (namespace_values.isVpaDeployed); evaluates to null otherwise.
// When the deployment opts out (isVpaEnabled false) the updater is forced
// Off — recommendation-only mode. When it opts in, resource bounds are
// applied to the app container and all other containers are excluded.
local chart = import 'chart.jsonnet';
local common = import 'common.jsonnet';
local deployment_manifest = import 'deployment_manifest.jsonnet';
local vars = import 'vars.jsonnet';
local deployment = deployment_manifest.deployment;
local vpaEnabled = deployment.isVpaEnabled;
local namespace_values = import 'namespace_values.jsonnet';
local util = import 'util.jsonnet';
local name = chart.full_service_name(deployment.name);
local vpaAllowed = namespace_values.isVpaDeployed;
// Lower bound mirrors the instance's minimum resource requests.
local minAllowed = {
cpu: util.cpu_in_milli_core(deployment.instance.minCPU),
memory: deployment.instance.minMemory,
};
local maxAllowed = {
cpu: util.cpu_in_milli_core(deployment.vpa.maxAllowed.cpu),
memory: deployment.vpa.maxAllowed.memory,
};
if vpaAllowed then {
apiVersion: 'autoscaling.k8s.io/v1',
kind: 'VerticalPodAutoscaler',
metadata: {
name: name,
labels: common.labels,
namespace: deployment_manifest.deployment.namespace,
annotations: common.annotations,
},
spec: {
// Target the Argo Rollout when that controller is in use, else the Deployment.
targetRef: if (deployment.controller == vars.rolloutController) then {
apiVersion: 'argoproj.io/v1alpha1',
kind: 'Rollout',
name: name,
} else {
apiVersion: 'apps/v1',
kind: 'Deployment',
name: name,
},
[if !vpaEnabled then 'updatePolicy']: {
updateMode: 'Off',
},
[if vpaEnabled then 'resourcePolicy']: {
containerPolicies: [
{
containerName: name,
minAllowed: minAllowed,
maxAllowed: maxAllowed,
controlledResources: ['cpu', 'memory'],
},
{
containerName: '*',
mode: 'Off',
},
],
},
},
}