* INFRA-2231 | Ashvin | Create shared alb config json Currently this jsonnet only works for internal shared albs. I will expand it to be useful for internet facing load balancers in subsequent commits. * INFRA-2231 | Ashvin | Add jsonnet tests * INFRA-2231 | Ashvin | Add jsonnet tests * INFRA-2231 | Ashvin | Change team of shared config ingress to Shared * INFRA-2231 | Ashvin | Create and apply shared ingress config * INFRA-2231 | Ashvin | Remove subnet section for spike cluster * INFRA-2231 | Ashvin | Remove exclusive annotations from shared alb * INFRA-2231 | Ashvin | Create ingress groups and store them in db * INFRA-2231 | Ashvin | Fixed tests * INFRA-2602 | Ashvin | Create service account for sandbox in EKS clusters (#760) * INFRA-2602 | Ashvin | Add controller tests for IngressGroupController * INFRA-2602 | Ashvin | Add service test for IngressGroupServiceImpl * INFRA-2602 | Ashvin | Add service test for IngressGroupServiceImpl * INFRA-2602 | Ashvin | Add test for IngressGroupApplier * INFRA-2602 | Ashvin | Add test for IngressGroupApplier * INFRA-2602 | Ashvin | Add tags annotation in shared alb config * INFRA-2231 | Ashvin | Remove tags from shared alb of test files
143 lines
5.9 KiB
Jsonnet
143 lines
5.9 KiB
Jsonnet
local deployment_manifest = import 'deployment_manifest.jsonnet';
|
|
local util = import 'util.jsonnet';
|
|
local vars = import 'vars.jsonnet';
|
|
local isSandbox = util.is_sandbox(deployment_manifest.environment);
|
|
|
|
// Collapses the long shared-ALB type identifier to its short form;
// every other load-balancer type passes through unchanged.
local alias(lbType) =
  if lbType == 'sharedalbacrossnamespace'
  then 'sharedalb'
  else lbType;
|
|
|
|
{
|
|
// Creates a comma separated list of security groups.
// Access policies with no entry in securityGroups are skipped, and
// deployment securityGroup entries without an 'ids' field are skipped.
// (The previous `[if cond then expr for ...]` form yielded nulls for
// non-matching elements; std.flattenArrays fails on a null element.)
// NOTE(review): extraSecurityGroups is accepted but unused — kept for
// caller compatibility.
security_group_list(accessPolicies, securityGroups, extraSecurityGroups)::
  local accessPolicySecurityGroups = [
    securityGroups[accessPolicy]
    for accessPolicy in accessPolicies
    if accessPolicy in securityGroups
  ];
  local extraSGs =
    if std.objectHas(deployment_manifest.deployment, 'securityGroup')
    then std.flattenArrays([
      sg.ids
      for sg in deployment_manifest.deployment.securityGroup
      if std.objectHas(sg, 'ids')
    ])
    else [];
  std.join(',', accessPolicySecurityGroups + extraSGs),
|
|
|
|
// Determines kind of subnet (internal, internetFacing, internetFacingRestricted
// or cdn) to use based on access policy. Raises if accessPolicies does not
// contain exactly one of the four scheme names.
subnet_scheme(accessPolicies)::
  local schemes = ['internal', 'internetFacing', 'internetFacingRestricted', 'cdn'];
  // std.setInter requires BOTH arguments to be sets (sorted, duplicate-free);
  // the schemes literal is unsorted, so it must be normalised with std.set —
  // otherwise e.g. ['cdn'] fails to intersect and wrongly trips the assert.
  local scheme = std.setInter(std.set(accessPolicies), std.set(schemes));
  assert std.length(scheme) == 1 : 'ValidationError: accessPolicies can only contain one out of internal, internetFacing, internetFacingRestricted & cdn';
  // The intersection element is already the canonical scheme name.
  scheme[0],
|
|
|
|
// Returns true if any of the load-balancer objects has the given type.
is_using_lb(lbObjects, lbName)::
  std.foldl(function(found, lbObject) found || lbObject.type == lbName, lbObjects, false),
|
|
|
|
// Returns the ingress group name for sharedAlbAcrossNamespace, or null
// when groupName is absent (null) or empty.
group_name(lbObject)::
  local name = lbObject.groupName;
  if name == null || name == '' then null else name,
|
|
|
|
// Returns true if the application uses AWS target groups, i.e. at least
// one load balancer of type 'alb' or 'sharedAlbAcrossNamespace'.
is_using_tg(lbObjects)::
  local usesTargetGroup(lbObject) =
    lbObject.type == 'alb' || lbObject.type == 'sharedAlbAcrossNamespace';
  std.length(std.filter(usesTargetGroup, lbObjects)) > 0,
|
|
|
|
// Builds the ingress resource name:
//   <service>-<type-alias>[-<lb-name>][-exposed]
// The type and optional lb name are lowercased; the service name is used as given.
ingress_name(full_service_name, lbObject, expose=false)::
  local typeSegment = alias(std.asciiLower(lbObject.type));
  local segments =
    if lbObject.name != null && lbObject.name != ''
    then [full_service_name, typeSegment, std.asciiLower(lbObject.name)]
    else [full_service_name, typeSegment];
  local baseName = std.join('-', segments);
  if expose then baseName + '-exposed' else baseName,
|
|
|
|
// Convenience wrapper: ingress name of the plain (non-shared) alb with no
// secondary name. Uses $ instead of self for consistency with the other
// helpers in this object ($ and self are equivalent at the top level).
alb_ingress_name(full_service_name)::
  $.ingress_name(full_service_name, { type: 'alb', name: null }),
|
|
|
|
// Builds the comma separated load-balancer attribute annotation value:
// idle timeout, optional deletion protection (from namespace annotations)
// and optional S3 access logging (bucket from namespace annotations,
// prefix from s3_key_prefix).
load_balancer_attribute_list(lbObject, namespace_annotations, s3_key_prefix)::
  local idleTimeout = 'idle_timeout.timeout_seconds=%s' % lbObject.idleTimeout;
  local baseAttributes = if namespace_annotations.deletionProtection then idleTimeout + ',deletion_protection.enabled=true' else idleTimeout;
  local accessLogAttributes = 'access_logs.s3.enabled=true,access_logs.s3.bucket=%s,access_logs.s3.prefix=%s' % [namespace_annotations.accessLogBucket, s3_key_prefix];
  // std.prune drops the null produced when accessLog is false — same idiom
  // as target_group_attribute_list (std.join would also skip nulls, but
  // pruning makes the intent explicit and keeps the two helpers consistent).
  std.join(',', std.prune([
    baseAttributes,
    if lbObject.accessLog then accessLogAttributes,
  ])),
|
|
|
|
// Builds the comma separated target-group attribute annotation value:
// optional slow-start duration and optional cookie-based stickiness.
// Returns an empty string when neither feature is enabled.
target_group_attribute_list(lbObject)::
  local slowStartAttrs =
    if lbObject.slowStartDuration > 0
    then ['slow_start.duration_seconds=%s' % lbObject.slowStartDuration]
    else [];
  local stickinessAttrs =
    if lbObject.stickiness
    then ['stickiness.enabled=true,stickiness.lb_cookie.duration_seconds=%s' % lbObject.stickinessCookieDuration]
    else [];
  std.join(',', slowStartAttrs + stickinessAttrs),
|
|
|
|
// Determines listener-ports to be added to the load-balancer.
// When exposeToLoadBalancer is set, only an alb may expose its own HTTPS
// port; any other type is a validation error. Otherwise the standard
// HTTPS/HTTP pair is used.
// (An unused `local subnetScheme = $.subnet_scheme(...)` was removed;
// Jsonnet locals are lazy, so it was never evaluated and removal is
// behavior-identical.)
listener_ports(lbObject, exposeToLoadBalancer=false)::
  if exposeToLoadBalancer then
    if lbObject.type == 'alb' then '[{"HTTPS": %s}]' % lbObject.port
    else error 'ValidationError: secondary port can only be used with alb. Please change the loadbalancer type'
  else
    '[{ "HTTPS": 443 },{"HTTP": 80}]',
|
|
|
|
// Returns the catch-all ingress path that routes every request to the
// 'ssl-redirect' annotation action, enabling HTTP -> HTTPS redirection.
http_redirect_config:: [
  {
    path: '/*',
    pathType: 'ImplementationSpecific',
    backend: {
      service: { name: 'ssl-redirect', port: { name: 'use-annotation' } },
    },
  },
],
|
|
|
|
// Builds an ingress rule that routes host.path on host.hostname to the
// annotation-defined action whose name is given by `actionNaem`.
// NOTE(review): the parameter name `actionNaem` looks like a typo for
// `actionName`; it is kept as-is because Jsonnet callers may bind it by name.
redirect_config(host, actionNaem):: {
  host: host.hostname,
  http: {
    paths: [
      {
        path: host.path,
        pathType: 'ImplementationSpecific',
        backend: {
          service: { name: actionNaem, port: { name: 'use-annotation' } },
        },
      },
    ],
  },
},
|
|
|
|
// Returns the catch-all weighted-routing path for progressive rollouts.
// Empty for flink deployments, sandboxes, and deployments that do not use
// the rollout controller with a non-default strategy.
weighted_path_config(serviceName)::
  local isFlink = 'flink' in deployment_manifest;
  // Lazily evaluated: only consulted when isFlink is false, preserving the
  // original short-circuit on flink manifests.
  local usesWeightedRollout =
    deployment_manifest.deployment.controller == vars.rolloutController
    && deployment_manifest.deployment.strategy != vars.defaultDeploymentStrategy
    && !isSandbox;
  if isFlink || !usesWeightedRollout then []
  else [
    {
      path: '/*',
      pathType: 'ImplementationSpecific',
      backend: {
        service: { name: serviceName, port: { name: 'use-annotation' } },
      },
    },
  ],
|
|
|
|
// Builds the standard single-entry ingress path list for a service.
// portFieldKey selects how the port is addressed ('number' by default,
// or 'name' for named ports).
path_config(serviceName, servicePort, portFieldKey='number')::
  local portObject = { [portFieldKey]: servicePort };
  [
    {
      pathType: 'ImplementationSpecific',
      backend: {
        service: { name: serviceName, port: portObject },
      },
    },
  ],
|
|
|
|
// Chooses the ingress paths for a service: the sandbox-generated paths when
// sandboxing is enabled, otherwise the standard path_config entry.
create_sandbox_or_standard_paths(config, isSandboxEnabled=false, sandbox={})::
  if isSandboxEnabled
  then sandbox.sandbox(config).albIngress.host.paths
  else $.path_config(config.serviceName, config.servicePort),
|
|
}
|