INFRA-2296 | Harinder | Adding scripts to add resources in AWS and importing them in automated manner to terraform state
This commit is contained in:
50
scripts/add-resource-and-update-tfstate/README.md
Normal file
50
scripts/add-resource-and-update-tfstate/README.md
Normal file
@@ -0,0 +1,50 @@
|
||||
### NOTE: Terraform init and workspace selection are taken care of automatically
|
||||
|
||||
### Stages:
|
||||
1. preTerraformSetup()
|
||||
1. This function is used to execute custom scripts before terraform operations(if required)
|
||||
2. User can add custom scripts in user-scripts-before-terraform-setup.txt
|
||||
3. Function can be moved as per convenience and is optional.
|
||||
4. migrationRequiredFor.json is where the output goes in this stage in the format {"a": true, "b": true} where a and b are the resource for which migration is required
|
||||
2. Manifests are fetched from deployment portal using xAuthToken and portal url
|
||||
3. postTerraformSetup()
|
||||
1. This function is used to execute custom scripts after terraform operations(if required)
|
||||
2. User can add custom scripts in user-scripts-after-terraform-setup.txt
|
||||
3. User can specify actions such as terraform import.
|
||||
|
||||
### Caution:
|
||||
1. Currently tfStateUpdate() is written to work on database related operations in a more optimised manner. Modify and update the code as per requirement
|
||||
2. User can search for "# NOTE" in the code and find all the place where logic can be changed as per resource type and type of operation
|
||||
3. It is user's responsibility to update shell commands in user-scripts-before-terraform-setup.txt and user-scripts-after-terraform-setup.txt and provide the list of resources as specified in 1.4 format in migrationRequiredFor.json file.
|
||||
|
||||
### Usage:
|
||||
```
|
||||
python driver.py -h
|
||||
Usage: python3 driver.py <vertical> <xAuthToken> <operateOnEnv> <resourceType>
|
||||
vertical(optional): default(lending), navi-pay, sa, gi
|
||||
resourceType: database, elasticCache, docdb, aws_access, s3_buckets, dynamodb, aurora-db
|
||||
operateOnEnv: all, dev, prod, perf
|
||||
|
||||
$ python driver.py kjf-adfkjf-12e dev dynamodb
|
||||
- vertical -> lending
|
||||
- kjf-adfkjf-12e -> xAuthToken
|
||||
- dev -> environment for which the script will run on
|
||||
- dynamodb -> resource which the script will get and setup terraform for
|
||||
$ python driver.py navi-pay kjf-adfkjf-12e all database
|
||||
- the script will run for all the databases(rds-postgres) in all the environments for navi-pay vertical
|
||||
```
|
||||
|
||||
### Example use case:
|
||||
|
||||
preTerraformSetup()/user-scripts-before-terraform-setup.txt
|
||||
- was used to create cloudwatch alarms for the required RDS instances and update list of DBs to json file
|
||||
getMigrationResourceList() reads the list of DBs for which migration is required from migrationRequiredFor.json
|
||||
|
||||
driver.py
|
||||
- gets list of manifests from deployment-portal-backend and iterates over them as per resource type and environment.
|
||||
- for each manifest, the environment variables are updated in the required keys. Ex: ${DATASOURCE_USERNAME} is updated with the username required to access DB
|
||||
- some of the variables like TEAM_NAME, DB_NAME, DB_REPLICA are set in env.json file and used in user-scripts-after-terraform-setup.txt
|
||||
- tfStateUpdate() is used to execute terraform init and terraform import for the required RDS instances
|
||||
|
||||
postTerraformSetup()/user-scripts-after-terraform-setup
|
||||
- was used to execute terraform import for the required RDS instances
|
||||
@@ -0,0 +1,83 @@
|
||||
import boto3
|
||||
import os
|
||||
import json
|
||||
from pprint import pprint
|
||||
|
||||
# Inventory of RDS DB instance identifiers found in the current AWS account,
# keyed by identifier -> True. Filled by getAWSDBList(), consumed/pruned by
# updateAlarms().
awsDBMap = {}


def getAWSDBList():
    """Populate ``awsDBMap`` with every RDS DB instance identifier in the account."""
    rds = boto3.client('rds')
    pages = rds.get_paginator('describe_db_instances').paginate()
    for page in pages:
        for instance in page['DBInstances']:
            awsDBMap[instance['DBInstanceIdentifier']] = True
|
||||
|
||||
|
||||
def updateAlarms():
    """Reconcile CloudWatch EBS alarms for the RDS instances in ``awsDBMap``.

    Two passes over CloudWatch:
      1. Remove from ``awsDBMap`` every instance that already has a
         "RDS Low EBS byte Balance" alarm, then persist the remainder to
         migrationRequiredFor.json (read later by driver.py).
      2. For each remaining instance, clone its existing "Low ebs burst
         Balance" alarm into new EBS IO / EBS byte balance alarms
         (threshold 70) and create them with alarm actions disabled.
    """
    cloudwatch_client = boto3.client('cloudwatch')

    def listAlarms(prefix):
        # Collect every metric alarm whose name starts with `prefix`.
        found = []
        paginator = cloudwatch_client.get_paginator('describe_alarms')
        for page in paginator.paginate(AlarmNamePrefix=prefix):
            found.extend(page['MetricAlarms'])
        return found

    byteBalancePrefix = 'DBMachineClassNeedsUpgrade: RDS Low EBS byte Balance'
    for alarm in listAlarms(byteBalancePrefix):
        # Remove the DBs for which the target alarms already exist.
        if byteBalancePrefix in alarm['AlarmName']:
            # Alarm names look like
            # "...: RDS Low EBS byte Balance <db-name> Team: <team>", so token 6
            # of the space-split name is the DB identifier.
            dbName = alarm['AlarmName'].split(" ")[6]
            if dbName in awsDBMap:
                print(f"Removing {dbName} from awsDBMap as the required alarm already exists for it")
                del awsDBMap[dbName]

    print("List of resources for which alarms are not created:" + str(awsDBMap))
    # driver.py's preTerraformSetup() stage reads this file to learn which
    # resources still need migration.
    with open(os.path.join(os.getcwd(), 'migrationRequiredFor.json'), 'w') as fp:
        json.dump(awsDBMap, fp)

    burstBalancePrefix = 'DBDiskNeedsUpgrade: RDS Low ebs burst Balance'
    for alarm in listAlarms(burstBalancePrefix):
        if burstBalancePrefix in alarm['AlarmName'] and alarm['AlarmName'].split(" ")[6] in awsDBMap:
            print(alarm['AlarmName'] + " will be used for copying and adding EBS Byte and IO alarm")

            # Strip the read-only/state fields so the remaining dict can be fed
            # straight back into put_metric_alarm().
            for key in ('AlarmArn', 'AlarmConfigurationUpdatedTimestamp',
                        'StateReason', 'StateReasonData',
                        'StateTransitionedTimestamp', 'StateUpdatedTimestamp',
                        'StateValue'):
                del alarm[key]

            # Clone the burst-balance alarm twice: once for EBSIOBalance and
            # once for EBSByteBalance, both with threshold 70.
            for label, metricName, newPrefix, newDescription in (
                ('EBS IO Balance', 'EBSIOBalance',
                 'DBMachineClassNeedsUpgrade: RDS Low EBS IO Balance',
                 'EBS IO balance is lower than 70'),
                ('EBS Byte Balance', 'EBSByteBalance',
                 'DBMachineClassNeedsUpgrade: RDS Low EBS byte Balance',
                 'EBS Byte balance is lower than 70'),
            ):
                newAlarm = alarm.copy()
                newAlarm['MetricName'] = metricName
                newAlarm['Threshold'] = 70
                newAlarm['AlarmName'] = newAlarm['AlarmName'].replace(burstBalancePrefix, newPrefix)
                newAlarm['AlarmDescription'] = newAlarm['AlarmDescription'].replace(
                    'EBS Burst balance is lower than 120', newDescription)
                print(f'Adding {label} Alarm: {newAlarm["AlarmName"]}')
                cloudwatch_client.put_metric_alarm(**newAlarm)
                # Actions stay disabled until the alarms are vetted.
                cloudwatch_client.disable_alarm_actions(AlarmNames=[newAlarm['AlarmName']])
|
||||
|
||||
def _main():
    """Entry point: inventory RDS instances, then reconcile their alarms."""
    getAWSDBList()  # must run first: fills awsDBMap
    updateAlarms()


if __name__ == "__main__":
    _main()
|
||||
267
scripts/add-resource-and-update-tfstate/driver.py
Normal file
267
scripts/add-resource-and-update-tfstate/driver.py
Normal file
@@ -0,0 +1,267 @@
|
||||
from math import e
|
||||
import os
|
||||
import requests
|
||||
from pprint import pprint
|
||||
import boto3
|
||||
import json
|
||||
import sys
|
||||
|
||||
# Maps the CLI resourceType to the terraform module directory that
# `infra-provisioner --template-only <resourceType>` renders (tfStateUpdate()
# chdirs into this directory before running terraform).
infraProvisionerDict = {
    "database": "rds-tf",
    "elasticCache": "elastic-cache-tf",
    "docdb": "document-db-tf",
    "aws_access": "aws-roles-tf",
    "s3_buckets": "aws-s3-bucket-tf",
    "dynamodb": "dynamo-db-tf",
    "aurora-db": "aurora-db-tf"
}

vertical = ""  # NOTE: to be specified by user (or via argv). Options: lending (default), navi-pay, sa, gi
portalUrl = "https://deployment-portal-backend.cmd.navi-tech.in"  # NOTE: to be specified by user as per vertical; driver() prefixes the host with "<vertical>-"
xAuthToken = ""  # NOTE: to be specified by user. Token can be generated using navicli
operateOnEnv = "dev"  # NOTE: to be specified by user. Options: all, dev, prod, perf
resourceType = ""  # NOTE: to be specified by user. Options: database, elasticCache, docdb, aws_access, s3_buckets, dynamodb, aurora-db
parentWorkingDirectory = os.getcwd()  # directory the script was launched from; scratch dirs are created under it
migrationRequiredFor = {}  # List of resources for which migration is required; loaded by getMigrationResourceList()
beforeTerraformOperations = []  # NOTE(review): shadowed by a local of the same name in preTerraformSetup() — effectively unused
afterTerraformOperations = []  # NOTE(review): shadowed by a local of the same name in postTerraformSetup() — effectively unused
|
||||
|
||||
def setupWorkingDirectory():
    """Create (if needed) and enter the scratch directory used for terraform runs."""
    workDir = os.path.join(parentWorkingDirectory, "updateTerraformState")
    os.makedirs(workDir, exist_ok=True)
    os.chdir(workDir)
|
||||
|
||||
def cleanupWorkingDirectory():
    """Return to the launch directory and delete the terraform scratch dir.

    Replaces the former `os.system("rm -rf updateTerraformState")` with
    shutil.rmtree: no shell spawn, portable, and errors are still ignored
    (matching `rm -rf` best-effort semantics).
    """
    print("Cleaning up working directory\n-------------------------------------------------------------------------------------------------------")
    os.chdir(parentWorkingDirectory)
    import shutil  # local import: keeps the file-level import block untouched
    shutil.rmtree("updateTerraformState", ignore_errors=True)
|
||||
|
||||
def preTerraformSetup():
    """Run the user-supplied shell commands before any terraform work.

    Each line of user-scripts-before-terraform-setup.txt is executed as an
    independent shell command via os.system().
    """
    # NOTE: This function can be called from anyplace as per convenience/requirement
    scriptPath = parentWorkingDirectory + "/user-scripts-before-terraform-setup.txt"
    with open(scriptPath) as scriptFile:
        commands = scriptFile.readlines()
    # -----------Execute custom scripts before terraform operations(if required)-----------
    for command in commands:
        print(f"Executing script: {command}")
        os.system(command)
|
||||
|
||||
def postTerraformSetup(envVars):
    """Run the user-supplied shell commands after terraform setup.

    Commands containing "rds_instance_replica" are executed only when the
    manifest declared a read replica (envVars['DB_REPLICA'] is True); every
    other command runs unconditionally.
    """
    # NOTE: This function can be called from anyplace as per convenience/requirement
    scriptPath = parentWorkingDirectory + "/user-scripts-after-terraform-setup.txt"
    with open(scriptPath) as scriptFile:
        commands = scriptFile.readlines()
    # -----------Execute custom scripts after terraform operations(if required)
    for command in commands:
        # NOTE: The following logic should be changed per resource type
        replicaOnly = "rds_instance_replica" in command
        if replicaOnly and envVars.get('DB_REPLICA') != True:
            continue
        print(f"Executing script: {command}")
        os.system(command)
|
||||
|
||||
def getMigrationResourceList():
    """Load migrationRequiredFor.json (produced during preTerraformSetup) into
    the module-level ``migrationRequiredFor`` map."""
    global migrationRequiredFor
    jsonPath = os.path.join(os.getcwd(), 'migrationRequiredFor.json')
    with open(jsonPath) as fp:
        migrationRequiredFor = json.load(fp)
    print("List of resources for which migration is required:" + str(migrationRequiredFor))
|
||||
|
||||
def _resolveResourceVars(config, resources, resourceKey):
    """Substitute ${VAR} / $VAR placeholders in resources[resourceKey] in place.

    Values whose placeholder resolves via `config` are replaced; unresolvable
    placeholders containing "READONLY" are blanked out (read-only credentials
    are optional). Returns False on any other unresolvable placeholder.
    """
    resource = resources[resourceKey]
    for key, value in resource.items():
        text = str(value)
        if "${" not in text and "$" not in text:
            continue
        braced = text[2:-1]  # "${NAME}" -> "NAME"
        bare = text[1:]      # "$NAME"  -> "NAME"
        if braced in config:
            resource[key] = config[braced]
        elif bare in config:
            resource[key] = config[bare]
        elif "READONLY" in braced:
            # BUG FIX: the docdb branch used to write this blank into
            # extraResources['database'] instead of its own resource dict.
            resource[key] = ""
        else:
            print(f"Key {braced} or {bare} not found in config")
            return False
    return True


def loadEnvVariables(manifest):
    """Resolve env-var placeholders inside a manifest and export it to manifest.json.

    Builds a name->value map from manifest['environmentVariables'], substitutes
    ${VAR}/$VAR placeholders in the database/docdb/elasticSearch sections of
    manifest['extraResources'] (previously the docdb/elasticSearch branches only
    matched the "${" form — now all three are handled consistently), then dumps
    the updated manifest to ./manifest.json for infra-provisioner.

    Returns True on success, False when env vars / extra resources are missing
    or a non-READONLY placeholder cannot be resolved.
    """
    config = {}
    if "environmentVariables" in manifest:
        for envVar in manifest["environmentVariables"]:
            config[envVar["name"]] = envVar["value"]
    else:
        print("Environment variables not found in manifest: " + str(manifest['id']))
        return False

    if 'extraResources' in manifest:
        extraResources = manifest['extraResources']
        # NOTE: The following logic should be changed per resource type
        for resourceKey in ('database', 'docdb', 'elasticSearch'):
            if resourceKey not in extraResources:
                continue
            if resourceKey != 'database':
                print(f"Updating {resourceKey} variables")
            if not _resolveResourceVars(config, extraResources, resourceKey):
                return False
    else:
        print("No extra resources found in manifest: " + str(manifest['id']))
        return False

    # Export updated manifest to file (read back by infra-provisioner)
    with open("manifest.json", "w") as f:
        json.dump(manifest, f)

    return True
|
||||
|
||||
def getWorkspace(manifest):
    """Return the terraform workspace name for a manifest.

    Preference order: extraResources.workspace, then manifest.cluster, then
    deployment.cluster; "" when none is present.
    """
    extra = manifest['extraResources']
    if 'workspace' in extra:
        return extra['workspace']
    if 'cluster' in manifest:
        return manifest['cluster']
    if 'deployment' in manifest and 'cluster' in manifest['deployment']:
        return manifest['deployment']['cluster']
    return ""
|
||||
|
||||
def databaseExists(manifest, envVars):
    """Check that the manifest names an RDS instance that still needs migration.

    Side effects on envVars: sets DB_NAME, DB_REPLICA (only when a read replica
    is declared) and DB_PRESENT_IN_AWS (only when the instance is listed in the
    module-level migrationRequiredFor map). Returns (envVars, ok).
    """
    database = manifest['extraResources'].get('database', {})
    instanceName = database.get('instanceName', '')
    # Identifiers shorter than 3 chars are treated as missing/placeholder.
    if len(instanceName) <= 2:
        return envVars, False

    envVars['DB_NAME'] = instanceName
    if 'readReplica' in database:
        envVars['DB_REPLICA'] = True

    if instanceName not in migrationRequiredFor:
        print(f"Migration not required for {instanceName}. Aborting operation for manifest: " + str(manifest['id']))
        return envVars, False

    envVars['DB_PRESENT_IN_AWS'] = True
    return envVars, True
|
||||
|
||||
def tfStateUpdate(manifest, resourceType):
    """Template terraform for one manifest's resource and run the post steps.

    Pipeline per manifest: validate manifest shape and DB engine type, create a
    scratch working dir, resolve env-var placeholders (loadEnvVariables), dump
    helper variables to env.json, render the terraform module with
    infra-provisioner, run `terraform init` + workspace select, then execute the
    user's post-terraform commands (typically `terraform import`). The scratch
    dir is removed on every exit path after setup has begun.

    NOTE: currently optimised for database-related resources; adapt the
    engine-type filters below for other resource types.
    """
    # Check if manifest contains the valid data and format
    if 'id' not in manifest or 'extraResources' not in manifest:
        return
    print("Manifest: " + str(manifest['id']) + ". Resource type: " + resourceType + " which has " + str(manifest['extraResources'].keys()))

    # Aurora manifests also live under extraResources['database'].
    isAurora = resourceType == "aurora-db" and "database" in manifest['extraResources']
    if not (isAurora or resourceType in manifest['extraResources']):
        return
    # Filter on the declared engine so each run handles exactly one engine type.
    if resourceType == "database" and manifest['extraResources']['database']['dbEngineType'] != "rds-postgres":
        return
    if resourceType == "aurora-db" and manifest['extraResources']['database']['dbEngineType'] != "rds-aurora-postgres":
        return

    print("Operating on manifest: " + str(manifest['id']) + " - " + manifest['environment'] + "/" + manifest['name'] + " for " + resourceType + " and setting up working directory")

    setupWorkingDirectory()

    if loadEnvVariables(manifest):
        print("Loaded environment variables")
    else:
        print("Issues with finding environment variables and exporting to required variables in manifest. Aborting operation for manifest: " + str(manifest['id']))
        cleanupWorkingDirectory()
        return

    envVars = {'TEAM_NAME': manifest['team']['name']}

    if resourceType == "database":
        # Optimisation: skip manifests whose DB is absent from the migration list.
        envVars, databasePresence = databaseExists(manifest, envVars)
        if not databasePresence:
            print("Manifest issue or DB not present in AWS. Aborting operation for manifest: " + str(manifest['id']))
            cleanupWorkingDirectory()
            return

    # env.json is consumed by the commands in user-scripts-after-terraform-setup.txt
    with open('env.json', 'w') as fp:
        json.dump(envVars, fp)

    manifestFilePath = os.path.join(os.getcwd(), "manifest.json")

    # BUG FIX: `os.system("export AWS_PROFILE=cmd")` only set the variable in a
    # throwaway child shell. Set it in this process so every following
    # os.system() call (infra-provisioner / terraform) actually inherits it.
    os.environ["AWS_PROFILE"] = "cmd"

    print("Setting up terraform for " + resourceType + " and importing resource")
    os.system(f"infra-provisioner --manifest {manifestFilePath} --template-only {resourceType}")
    os.chdir(infraProvisionerDict[resourceType])
    os.system("terraform init")

    workspace = getWorkspace(manifest)
    if workspace == "":
        print("Workspace not found. Aborting operation for manifest: " + str(manifest['id']))
        cleanupWorkingDirectory()
        return
    os.system(f"terraform workspace select {workspace}")

    postTerraformSetup(envVars)

    cleanupWorkingDirectory()
|
||||
|
||||
def driver():
    """Fetch every manifest from the deployment portal and update tf state.

    When a vertical is set, the portal host is prefixed with it
    (https://<vertical>-deployment-portal-backend...). Each manifest matching
    operateOnEnv is fetched individually and handed to tfStateUpdate().
    """
    global portalUrl
    # BUG FIX: the condition was `if vertical != "" or None:` — `or None` is a
    # no-op, so it reduced to `vertical != ""` and never guarded against None.
    # Plain truthiness covers both "" and None.
    if vertical:
        scheme, _, host = portalUrl.partition("//")
        portalUrl = scheme + "//" + vertical + "-" + host

    s = requests.Session()
    s.headers.update({'X_AUTH_TOKEN': xAuthToken})

    listManifestPath = "/api/manifest/list"
    individualManifestPath = "/api/manifest/"

    manifestList = s.get(portalUrl + listManifestPath).json()

    for manifest in manifestList:
        # Filter before fetching: the full manifest of a skipped environment
        # was previously downloaded and thrown away.
        if operateOnEnv != "all" and manifest['environment'] != operateOnEnv:
            continue
        r = s.get(portalUrl + individualManifestPath + str(manifest['id']))
        tfStateUpdate(r.json(), resourceType)
|
||||
|
||||
if __name__ == "__main__":
    # CLI: python driver.py [<vertical>] <xAuthToken> <operateOnEnv> <resourceType>
    if len(sys.argv) > 1:
        if sys.argv[1] in ["help", "-h", "--help"]:
            print("Usage: python3 driver.py <vertical> <xAuthToken> <operateOnEnv> <resourceType>")
            print("vertical(optional): default(lending), navi-pay, sa, gi")
            # BUG FIX: help used to advertise "auroradb", but the code
            # (infraProvisionerDict / tfStateUpdate) only recognises "aurora-db".
            print("resourceType: database, elasticCache, docdb, aws_access, s3_buckets, dynamodb, aurora-db")
            print("operateOnEnv: all, dev, prod, perf")
            sys.exit(0)
        elif len(sys.argv) == 4:
            # 3 args: vertical omitted, keep the default.
            xAuthToken = sys.argv[1]
            operateOnEnv = sys.argv[2]
            resourceType = sys.argv[3]
        elif len(sys.argv) == 5:
            vertical = sys.argv[1]
            xAuthToken = sys.argv[2]
            operateOnEnv = sys.argv[3]
            resourceType = sys.argv[4]
        else:
            # BUG FIX: invalid input used to exit with status 0; exit non-zero
            # so shell scripts / CI can detect the failure.
            print("Invalid input. Use -h for help")
            sys.exit(1)
    else:
        print("Invalid input. Use -h for help")
        sys.exit(1)

    preTerraformSetup()
    getMigrationResourceList()  # NOTE: This has to be prepared from preTerraformSetup(). Refer README.md
    driver()
|
||||
@@ -0,0 +1,7 @@
|
||||
# Executed line-by-line by postTerraformSetup() in driver.py; the working
# directory is the rendered terraform module, so env.json sits at ../env.json.
# Pin the AWS profile: replace the rds module's local.aws_profile reference
# with the literal "sa-prod" (in-place edit, .bak backup kept).
sed -i.bak "s/local.aws_profile/\"sa-prod\"/g" .terraform/modules/rds/main.tf
# Comment out lines 165-171 of the rds module (prefix each with '#').
# NOTE(review): line numbers are tied to a specific module version — verify before reuse.
sed -i.bak "165,171 s/^/#/" .terraform/modules/rds/main.tf
# Comment out line 3 of the rds_instance_events submodule.
sed -i.bak "3,3 s/^/#/" .terraform/modules/rds/modules/rds_instance_events/main.tf
# Import the existing EBS IO / byte balance alarms for the primary instance;
# alarm names are rebuilt from DB_NAME / TEAM_NAME stored in ../env.json.
terraform import 'module.rds.module.rds_instance.aws_cloudwatch_metric_alarm.ebs_io_balance[0]' "DBMachineClassNeedsUpgrade: RDS Low EBS IO Balance $(cat ../env.json | jq -r '.DB_NAME') Team: $(cat ../env.json | jq -r '.TEAM_NAME')"
terraform import 'module.rds.module.rds_instance.aws_cloudwatch_metric_alarm.ebs_byte_balance[0]' "DBMachineClassNeedsUpgrade: RDS Low EBS byte Balance $(cat ../env.json | jq -r '.DB_NAME') Team: $(cat ../env.json | jq -r '.TEAM_NAME')"
# The two replica imports below run only when the manifest declares a read
# replica (driver.py gates replica lines on DB_REPLICA).
terraform import 'module.rds.module.rds_instance_replica[0].aws_cloudwatch_metric_alarm.ebs_byte_balance[0]' "DBMachineClassNeedsUpgrade: RDS Low EBS byte Balance $(cat ../env.json | jq -r '.DB_NAME')-read-replica Team: $(cat ../env.json | jq -r '.TEAM_NAME')"
terraform import 'module.rds.module.rds_instance_replica[0].aws_cloudwatch_metric_alarm.ebs_io_balance[0]' "DBMachineClassNeedsUpgrade: RDS Low EBS IO Balance $(cat ../env.json | jq -r '.DB_NAME')-read-replica Team: $(cat ../env.json | jq -r '.TEAM_NAME')"
|
||||
@@ -0,0 +1,3 @@
|
||||
# Executed line-by-line by preTerraformSetup() in driver.py: every line gets
# its own os.system() shell, so `export` on one line never reaches the next.
# BUG FIX: the original `export AWS_PROFILE=gi-prod` / `export AWS_PROFILE=cmd`
# pair therefore had no effect on the python invocation — set the profile
# inline on the command that needs it instead.
# NOTE(review): hard-coded absolute path to the author's checkout — update per machine.
AWS_PROFILE=gi-prod python /Users/harindersingh/InfraWork/devops/cicd/infra-scripts/add-resource-and-update-tfstate/addCloudWatchAlarm.py
|
||||
Reference in New Issue
Block a user