def validate_subnet_ids(self):
    """Verify that the configured subnets belong to the VPC and span zones.

    Returns:
        str: K.VALID when the subnet configuration is acceptable (or when
        no VPC/subnet/zone check is configured), otherwise K.NOT_VALID.
    """
    self.error_message = None
    # Nothing to validate unless a VPC, subnets, and the zone rule are all set.
    if not Settings.get('VPC', None):
        return K.VALID
    if not Settings.VPC.get('SUBNETS', None):
        return K.VALID
    if not Settings.get('REQUIRE_SUBNETS_ON_DIFFERENT_ZONE', False):
        return K.VALID
    subnet_ids = Settings.VPC['SUBNETS']
    try:
        valid_subnets = vpc.get_vpc_subnets(
            Settings.AWS_ACCESS_KEY,
            Settings.AWS_SECRET_KEY,
            Settings.AWS_REGION,
            [Settings.VPC['ID']])
    except Exception as e:
        self.error_message = str(e) + "\n\t" + K.CORRECT_VPC_MSG
        return K.NOT_VALID
    matched = [item for item in valid_subnets if item['SubnetId'] in subnet_ids]
    # Every configured subnet must actually exist under the VPC.
    if len(matched) != len(subnet_ids):
        self.error_message = K.INVALID_SUBNETS
        return K.NOT_VALID
    # At least two distinct availability zones are required.
    zones = {item['AvailabilityZone'] for item in matched}
    if len(zones) < 2:
        self.error_message = K.INVALID_SUBNET_ZONES
        return K.NOT_VALID
    return K.VALID
def get_pacbot_domain_url(cls):
    """Return the absolute PacBot URL: configured domain (or ALB DNS name) with protocol."""
    domain = Settings.get('PACBOT_DOMAIN', None)
    if not domain:
        # Fall back to the load balancer's DNS name when no domain is configured.
        domain = cls.get_output_attr('dns_name')
    protocol = Settings.get('ALB_PROTOCOL', "HTTP").lower()
    return "%s://%s" % (protocol, domain)
class InfraSecurityGroupResource(SecurityGroupResource):
    """Security group shared by the PacBot infra resources.

    Allows all inbound traffic originating from the VPC's own CIDR blocks
    and all outbound traffic to anywhere.
    """
    name = ""
    vpc_id = Settings.get('VPC')['ID']
    # Inbound: every port (0-0) and every protocol ("-1"), but only from the
    # configured VPC CIDR ranges.
    ingress = [{
        'from_port': 0,
        'to_port': 0,
        'protocol': "-1",
        'cidr_blocks': Settings.get('VPC')['CIDR_BLOCKS'],
        'ipv6_cidr_blocks': [],
        'prefix_list_ids': [],
        'description': "",
        'self': False,
        'security_groups': []
    }]
    # Outbound: unrestricted (all ports/protocols to 0.0.0.0/0).
    egress = [{
        'from_port': 0,
        'to_port': 0,
        'protocol': "-1",
        'cidr_blocks': ["0.0.0.0/0"],
        'ipv6_cidr_blocks': [],
        'prefix_list_ids': [],
        'description': "",
        'self': False,
        'security_groups': []
    }]
class ApplicationLoadBalancer(LoadBalancerResource):
    """Application Load Balancer fronting the PacBot services."""
    name = ""
    # Internal-only ALB unless explicitly overridden in settings.
    internal = Settings.get('MAKE_ALB_INTERNAL', True)
    load_balancer_type = "application"
    security_groups = [InfraSecurityGroupResource.get_output_attr('id')]
    subnets = Settings.get('VPC')['SUBNETS']
    OUTPUT_LIST = ['dns_name']

    @classmethod
    def get_http_url(cls):
        """Return the plain HTTP URL of the ALB DNS name.

        NOTE(review): hardcodes "http" even when ALB_PROTOCOL is HTTPS, while
        other URL helpers in this codebase honour ALB_PROTOCOL -- confirm intended.
        """
        return "http://%s" % cls.get_output_attr('dns_name')

    @classmethod
    def get_api_base_url(cls):
        """Return the API base URL ("http://<dns_name>/api")."""
        return "http://%s/api" % cls.get_output_attr('dns_name')

    @classmethod
    def get_api_version_url(cls, service):
        """Return the versioned API URL for a service; "auth" is unversioned."""
        version_url = cls.get_api_server_url(service)
        return version_url if service == "auth" else version_url + "/v1"

    @classmethod
    def get_api_server_url(cls, service):
        """Return "<api base url>/<service>"."""
        return "%s/%s" % (cls.get_api_base_url(), service)

    def render_output(self, outputs):
        """Return the user-facing install summary (domain + login credentials).

        Returns None implicitly when the ALB is absent from the terraform output.
        """
        if self.resource_in_tf_output(outputs):
            return {
                'Pacbot Domain': outputs[self.get_resource_id()]['dns_name'],
                'Admin': Settings.PACBOT_LOGIN_CREDENTIALS['Admin'],
                'User': Settings.PACBOT_LOGIN_CREDENTIALS['User']
            }
def validate(self):
    """Run all pre-install settings validations, stopping at the first failure.

    Returns:
        bool/str: False as soon as a check fails; otherwise the status of the
        last executed validation (K.VALID on full success).
    """
    self.show_step_heading(K.SETTINGS_CHECK_STARTED)
    status = self.validate_vpc_and_cidr_blocks()
    self.show_step_inner_messaage(K.VPC_CHECK_STARTED, status, self.error_message)
    if status != K.VALID:
        return False
    status = self.validate_subnet_ids()
    self.show_step_inner_messaage(K.SUBNETS_CHECK_STARTED, status, self.error_message)
    if status != K.VALID:
        return False
    status = self.validate_policies()
    # NOTE(review): the result of validate_policies() is not checked here and is
    # overwritten by the RAM/storage checks below when those are configured, so a
    # policy failure can be silently lost -- confirm whether it should abort here.
    if Settings.get('MINIMUM_RAM', None):
        status = self.validate_system_ram_config()
        self.show_step_inner_messaage(K.SYSTEM_RAM_CONFIG_CHECK_STARTED, status, self.error_message)
        if status != K.VALID:
            return False
    if Settings.get('MINIMUM_STORAGE', None):
        status = self.validate_system_storage_config()
        self.show_step_inner_messaage(K.SYSTEM_STORAGE_CONFIG_CHECK_STARTED, status, self.error_message)
        if status != K.VALID:
            return False
    return status
def validate_subnet_ids(self):
    """Verify that the configured subnets exist in the VPC and span availability zones.

    Returns:
        str: K.VALID when the subnet configuration is acceptable (or when no
        VPC/subnet/zone requirement is configured), otherwise K.NOT_VALID.
    """
    self.error_message = None
    # Skip entirely unless a VPC, subnets, and the zone requirement are all set.
    if not Settings.get('VPC', None):
        return K.VALID
    if not Settings.VPC.get('SUBNETS', None):
        return K.VALID
    if not Settings.get('REQUIRE_SUBNETS_ON_DIFFERENT_ZONE', False):
        return K.VALID
    subnet_ids = Settings.VPC['SUBNETS']
    try:
        valid_subnets = vpc.get_vpc_subnets([Settings.VPC['ID']], Settings.AWS_AUTH_CRED)
    except Exception as e:
        self.error_message = str(e) + "\n\t" + K.CORRECT_VPC_MSG
        return K.NOT_VALID
    matched = [item for item in valid_subnets if item['SubnetId'] in subnet_ids]
    # Every configured subnet must exist under the given VPC.
    if len(matched) != len(subnet_ids):
        self.error_message = K.INVALID_SUBNETS
        return K.NOT_VALID
    # The subnets must cover at least two distinct availability zones.
    if len({item['AvailabilityZone'] for item in matched}) < 2:
        self.error_message = K.INVALID_SUBNET_ZONES
        return K.NOT_VALID
    return K.VALID
class RuleEngineBatchJobEnv(BatchComputeEnvironmentResource):
    """Managed AWS Batch compute environment used to run the rule-engine jobs."""
    compute_environment_name = ""
    instance_role = ECSRoleInstanceProfile.get_output_attr('arn')
    instance_type = [Settings.get('BATCH_INSTANCE_TYPE', "m4.xlarge")]
    max_vcpus = 256
    min_vcpus = 0
    desired_vcpus = 0
    ec2_key_pair = ""
    resource_type = "EC2"
    security_group_ids = [InfraSecurityGroupResource.get_output_attr('id')]
    subnets = Settings.get('VPC')['SUBNETS']
    env_type = "MANAGED"
    service_role = BatchRole.get_output_attr('arn')
    compute_resources_tags = get_all_resource_tags()
    # Required: otherwise the policy would be detached from BatchRole.
    DEPENDS_ON = [
        BatchIAMRolePolicyAttach
    ]

    def pre_terraform_apply(self):
        """Create the EC2 key pair for the batch instances and save the .pem file.

        Best-effort: any failure (presumably a key pair that already exists) is
        silently ignored -- TODO confirm that is intentional.
        """
        ec2_client = get_ec2_client(self.input.AWS_AUTH_CRED)
        ec2_key_pair = self.get_input_attr('ec2_key_pair')
        try:
            key_obj = ec2_client.create_key_pair(KeyName=ec2_key_pair)
            # Persist the private key into the installer output directory.
            with open(os.path.join(Settings.OUTPUT_DIR, ec2_key_pair + ".pem"), "w") as keyfile:
                keyfile.write(key_obj['KeyMaterial'])
        except Exception as e:
            pass

    def check_batch_jobs_running(self):
        """Return True when the environment reports more desired vCPUs than
        configured (i.e. jobs are running); returns None (falsy) otherwise.
        """
        envs = get_compute_environments(
            [self.get_input_attr('compute_environment_name')], self.input.AWS_AUTH_CRED)
        if not len(envs):
            return
        if envs[0]['computeResources']['desiredvCpus'] > int(
                self.get_input_attr('desired_vcpus')):
            return True

    def pre_generate_terraform(self):
        """Abort the whole run if batch jobs are still running."""
        warn_msg = "Batch Jobs are running, please try after it gets completed and desired CPUs comes to 0."
        if self.check_batch_jobs_running():
            message = "\n\t ** %s **\n" % warn_msg
            print(MsgMixin.BERROR_ANSI + message + MsgMixin.RESET_ANSI)
            sys.exit()

    def post_terraform_destroy(self):
        """Delete the EC2 key pair that was created for the batch instances."""
        ec2_client = get_ec2_client(self.input.AWS_AUTH_CRED)
        ec2_key_pair = self.get_input_attr('ec2_key_pair')
        try:
            key_obj = ec2_client.delete_key_pair(KeyName=ec2_key_pair)
        except Exception as e:
            print(ec2_key_pair + " Not able to delete Key Pair. Error: %s" % str(e))
def validate_system_storage_config(self):
    """Check the current filesystem has at least MINIMUM_STORAGE GiB available.

    Returns:
        str: K.VALID when enough free disk space exists, else K.NOT_VALID.
    """
    self.error_message = None
    minimum_storage = Settings.get('MINIMUM_STORAGE', None)
    stats = os.statvfs(os.getcwd())
    # Free space available to unprivileged users, converted to GiB.
    free_gib = (stats.f_frsize * stats.f_bavail) / (2 ** 30)
    if free_gib < minimum_storage:
        self.error_message = K.INVALID_SYSTEM_STORAGE_CONFIG + str(minimum_storage)
        return K.NOT_VALID
    return K.VALID
def _get_printable_abs_url(self, dns_name):
    """Build the absolute PacBot URL (protocol prefix + domain).

    Args:
        dns_name (str): Load balancer DNS name used as a fallback domain.

    Returns:
        str: Absolute URL of PacBot.
    """
    # Prefer the configured domain; fall back to the ALB DNS name.
    domain = Settings.get('PACBOT_DOMAIN', None) or dns_name
    return "%s://%s" % (Settings.get('ALB_PROTOCOL', "HTTP").lower(), domain)
def validate_system_ram_config(self):
    """Validate that the host has at least MINIMUM_RAM GiB of physical memory.

    Returns:
        str: K.VALID when the system meets the minimum RAM requirement,
        otherwise K.NOT_VALID (with self.error_message set).
    """
    self.error_message = None
    minimum_ram = Settings.get('MINIMUM_RAM', None)
    # Total physical memory = page size * number of physical pages.
    system_mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
    system_mem_gib = system_mem_bytes / (1024. ** 3)
    # Fix: the previous strict `>` comparison rejected a machine with exactly
    # MINIMUM_RAM GiB; `>=` (i.e. fail only when strictly below) matches the
    # storage validation's boundary behavior.
    if system_mem_gib < minimum_ram:
        self.error_message = K.INVALID_SYSTEM_RAM_CONFIG + str(minimum_ram)
        return K.NOT_VALID
    return K.VALID
def validate_resource_existence(self, resources):
    """Check whether any terraform resource to be created already exists in AWS.

    Args:
        resources (list): Resource instances planned for installation.

    Returns:
        bool: True when installation can proceed (no conflicts or check skipped).
    """
    if Settings.get('SKIP_RESOURCE_EXISTENCE_CHECK', False):
        return True
    self.show_step_heading(K.RESOURCE_EXISTS_CHECK_STARTED)
    can_continue = True
    for resource in resources:
        klass = resource.__class__
        # Variables and data sources are not real TF resources; skip them.
        if TerraformResource not in inspect.getmro(klass):
            continue
        self.show_progress_start_message(
            "Checking resource existence for %s" % klass.__name__)
        exists, details = resource.check_exists_before(self.input, self.tf_outputs)
        self.erase_printed_line()
        if not exists:
            continue
        can_continue = False
        pretty_name = resource.resource_instance_name.replace("_", " ").title()
        self.show_step_inner_messaage(
            "Resource: %s, %s: `%s`" % (pretty_name, details['attr'], details['value']),
            K.EXISTS)
    if can_continue:
        self.show_step_finish(K.RESOURCE_EXISTS_CHECK_COMPLETED, color=self.GREEN_ANSI)
    else:
        self.show_step_finish(K.RESOURCE_EXISTS_CHECK_FAILED, color=self.ERROR_ANSI)
    self.stdout_flush()
    return can_continue
class RedshiftSubnetGroup(redshift.RedshiftSubnetGroupResource):
    """Subnet group placing the Redshift cluster in the configured VPC subnets."""
    name = ""
    subnet_ids = Settings.get('VPC')['SUBNETS']
    # Tags as a list of single-entry dicts -- presumably the format expected by
    # the base resource; TODO confirm.
    tags = [
        {'environment': Settings.RESOURCE_NAME_PREFIX + "redshift"},
        {'Name': Settings.RESOURCE_NAME_PREFIX}
    ]
def terraform_destroy(self, resources=None):
    """Run `terraform destroy`, recording status and raising on any error.

    Args:
        resources (list): Resources to target; None destroys everything.

    Returns:
        The response returned by terraform destroy (indexable: code/out/err).

    Raises:
        Exception: When another process holds the terraform lock, or the
        destroy command reports an error.
    """
    if exists_teraform_lock():
        raise Exception(K.ANOTHER_PROCESS_RUNNING)
    command_name = Settings.get('running_command', "Terraform Destroy")
    runner = Terraform(
        working_dir=Settings.TERRAFORM_DIR,
        targets=self.get_target_resources(resources),
        stdout_log_file=self.log_obj.get_terraform_destroy_log_file())
    self.log_obj.write_terraform_destroy_log_header()
    response = runner.destroy(auto_approve=True)
    if response[0] == 1:
        self.log_obj.write_debug_log(K.TERRAFORM_DESTROY_ERROR)
        self.write_current_status(command_name, K.DESTROY_STATUS_ERROR, response[2])
        raise Exception(response[2])
    self.write_current_status(
        command_name, K.DESTROY_STATUS_COMPLETED, K.TERRAFORM_DESTROY_COMPLETED)
    return response
def terraform_apply(self, resources=None):
    """Run `terraform apply`, recording status and raising on any error.

    Args:
        resources (list): Resources to target; None applies everything.

    Returns:
        The response returned by terraform apply (indexable: code/out/err).

    Raises:
        Exception: When another process holds the terraform lock, or the
        apply command reports an error.
    """
    if exists_teraform_lock():
        raise Exception(K.ANOTHER_PROCESS_RUNNING)
    command_name = Settings.get('running_command', "Terraform Apply")
    runner = Terraform(
        working_dir=Settings.TERRAFORM_DIR,
        targets=self.get_target_resources(resources),
        stdout_log_file=self.log_obj.get_terraform_install_log_file())
    self.log_obj.write_terraform_apply_log_header()
    # skip_plan=True is how python-terraform expresses -auto-approve on python3.
    response = runner.apply(skip_plan=True)
    if response[0] == 1:
        self.log_obj.write_debug_log(K.TERRAFORM_APPLY_ERROR)
        self.write_current_status(command_name, K.APPLY_STATUS_ERROR, response[2])
        raise Exception(response[2])
    self.write_current_status(
        command_name, K.APPLY_STATUS_COMPLETED, K.TERRAFORM_APPLY_COMPLETED)
    return response
def validate_vpc_and_cidr_blocks(self):
    """Verify the configured VPC exists and the CIDR blocks belong to it.

    Returns:
        str: K.VALID when the VPC/CIDR configuration is correct (or no VPC is
        configured at all), otherwise K.NOT_VALID.
    """
    self.error_message = None
    if not Settings.get('VPC', None):
        return K.VALID
    vpc_ids = [Settings.VPC['ID']]
    cidr_blocks = Settings.VPC['CIDR_BLOCKS']
    try:
        vpcs = vpc.get_vpc_details(vpc_ids, Settings.AWS_AUTH_CRED)
    except Exception as e:
        self.error_message = str(e) + "\n\t" + K.CORRECT_VPC_MSG
        return K.NOT_VALID
    # Renamed loop variable: the original shadowed the `vpc` helper module.
    valid_cidrs = {details['CidrBlock'] for details in vpcs}
    if not set(cidr_blocks).issubset(valid_cidrs):
        self.error_message = K.INVALID_CIDR + "\n\t" + K.CORRECT_VPC_MSG
        return K.NOT_VALID
    return K.VALID
def show_loading_messsage(self):
    """Display the banner/title message when execution starts.

    Prints the loader file contents verbatim if one exists; otherwise draws a
    '#'-framed box around the setup title and optional description.
    """
    print(self.BMAGENTA)
    if os.path.exists(Settings.LOADER_FILE_PATH):
        with open(Settings.LOADER_FILE_PATH, "r") as banner:
            for line in banner.readlines():
                sys.stdout.write(line)
                sys.stdout.flush()
    else:
        border = "#"
        # Preserves original ternary precedence: full width when the title
        # fits, otherwise a fixed width of 20.
        if len(Settings.SETUP_TITLE) < self.column_length:
            width = self.column_length + 0
        else:
            width = 20
        print(border * width)
        pre, post = self._get_pre_and_post_char_length(Settings.SETUP_TITLE, width)
        print("%s %s %s" % (border * pre, Settings.SETUP_TITLE, border * post))
        if Settings.get('SETUP_DESCRIPTION', None):
            pre, post = self._get_pre_and_post_char_length(
                Settings.SETUP_DESCRIPTION, width)
            print("%s %s %s" % (border * pre, Settings.SETUP_DESCRIPTION, border * post))
        print(border * width)
    print(self.RESET_ANSI)
class PacBotHttpsListener(ALBListenerResource):
    """HTTPS (443) listener on the application load balancer.

    Uses the configured SSL certificate and forwards all traffic to the
    Nginx target group.
    """
    load_balancer_arn = ApplicationLoadBalancer.get_output_attr('arn')
    port = 443
    protocol = "HTTPS"
    ssl_policy = "ELBSecurityPolicy-2016-08"
    certificate_arn = Settings.get('SSL_CERTIFICATE_ARN')
    default_action_target_group_arn = tg.NginxALBTargetGroup.get_output_attr('arn')
    default_action_type = "forward"
def get_azure_tenants():
    """Return the configured Azure tenant IDs as a comma-separated string.

    Returns an empty string when Azure support is not enabled.
    """
    if not need_to_enable_azure():
        return ""
    tenants = Settings.get('AZURE_TENANTS', [])
    return ",".join(tenant['tenantId'] for tenant in tenants)
def show_step_heading(self, heading, write_log=True):
    """Print a numbered step heading and advance the shared step counter.

    Args:
        heading (str): Heading text to display.
        write_log (bool): Also write the heading to the debug log.
    """
    if write_log:
        SysLog().write_debug_log(heading)
    count = Settings.get('step_count_num', 1)
    message = "\nStep %s: %s" % (str(count), heading)
    print(self._get_heading_message_in_color(message, self.BCYAN_ANSI))
    # Persist the incremented counter for the next step heading.
    Settings.set('step_count_num', count + 1)
class BaseTG:
    """Common attribute defaults shared by the ALB target groups."""
    port = 80
    protocol = "HTTP"
    target_type = "ip"
    create_before_destroy = True
    vpc_id = Settings.get('VPC')['ID']
    # Health-check settings come from module-level constants.
    interval = HEALTH_CHECK_INTERVAL
    timeout = HEALTH_CHECK_TIMEOUT
    matcher = HEALTH_CHECK_MATCHING_LIST
class BaseEcsService:
    """Common attribute defaults shared by the Fargate ECS services."""
    desired_count = 1
    launch_type = "FARGATE"
    cluster = ApplicationECSCluster.get_output_attr('id')
    network_configuration_security_groups = [InfraSecurityGroupResource.get_output_attr('id')]
    network_configuration_subnets = Settings.get('VPC')['SUBNETS']
    # Public IP assignment enabled -- presumably so tasks can reach external
    # endpoints/registries; TODO confirm.
    network_configuration_assign_public_ip = True
    load_balancer_container_port = 80
    tags = None
class ESDomain(ElasticsearchDomainResource):
    """Single-node Elasticsearch 5.5 domain storing PacBot data."""
    domain_name = "data"
    elasticsearch_version = "5.5"
    instance_type = Settings.get('ES_INSTANCE_TYPE', "m4.large.elasticsearch")
    instance_count = 1
    dedicated_master_enabled = False
    zone_awareness_enabled = False
    ebs_enabled = True
    volume_type = "gp2"
    volume_size = 20
    automated_snapshot_start_hour = 23
    security_group_ids = [InfraSecurityGroupResource.get_output_attr('id')]
    # Only the first configured subnet is used (zone awareness is disabled).
    subnet_ids = [Settings.get('VPC')['SUBNETS'][0]]
    cloudwatch_log_group_arn = ESCloudWatchLogGroup.get_output_attr('arn')
    log_type = "ES_APPLICATION_LOGS"

    @classmethod
    def get_http_url_with_port(cls):
        """Return the HTTP endpoint URL suffixed with port 80."""
        return "%s:%s" % (cls.get_http_url(), "80")

    @classmethod
    def get_http_url(cls):
        """Return the plain HTTP URL of the ES endpoint."""
        return "http://%s" % cls.get_output_attr('endpoint')

    @classmethod
    def get_es_port(cls):
        """Return the port used to reach Elasticsearch (80)."""
        return 80

    def pre_terraform_apply(self):
        """Create the ES service-linked IAM role before the domain is created.

        NOTE(review): passes raw AWS keys while other resources in this codebase
        use Settings.AWS_AUTH_CRED -- confirm both credential styles are supported.
        """
        status, msg = create_iam_service_linked_role(
            Settings.AWS_ACCESS_KEY,
            Settings.AWS_SECRET_KEY,
            "es.amazonaws.com",
            Settings.RESOURCE_DESCRIPTION)
        SysLog().write_debug_log("ElasticSearch IAM Service Linked role creation: Status:%s, Message: %s" % (str(status), msg))

    def render_output(self, outputs):
        """Return the ES/Kibana endpoints for display; None when absent from TF output."""
        if self.resource_in_tf_output(outputs):
            resource_id = self.get_resource_id()
            return {
                'ES Host': outputs[resource_id]['endpoint'],
                'Kibana Host': outputs[resource_id]['kibana_endpoint']
            }
def process_destroy_result(self, p):
    """Record the outcome of a terraform destroy process; raise on failure.

    Args:
        p (process obj): Handle of the completed terraform destroy process.

    Raises:
        Exception: When the destroy process reported an error.
    """
    result = Terraform().return_process_result(p)
    command_name = Settings.get('running_command', "Terraform Destroy")
    if result[0] == 1:
        self.log_obj.write_debug_log(K.TERRAFORM_DESTROY_ERROR)
        self.write_current_status(command_name, K.DESTROY_STATUS_ERROR, result[2])
        raise Exception(result[2])
    self.write_current_status(
        command_name, K.DESTROY_STATUS_COMPLETED, K.TERRAFORM_DESTROY_COMPLETED)
def __init__(self, config_path):
    """Initialise the Kernel: load settings, resolve the provider, validate the system.

    Args:
        config_path (str): Path to the main configuration/settings file.
    """
    self.load_settings(config_path)
    self.provider = Provider(Settings.get('PROVIDER', None))
    self.do_system_validation()
    super().__init__()
class InfraSecurityGroupResource(SecurityGroupResource):
    """Security group allowing all traffic from the VPC CIDR blocks and all egress."""
    name = ""
    vpc_id = Settings.get('VPC')['ID']
    # Inbound: every port (0-0) / protocol ("-1"), only from the VPC's CIDR ranges.
    ingress = [
        {
            'from_port': 0,
            'to_port': 0,
            'protocol': "-1",
            'cidr_blocks': Settings.get('VPC')['CIDR_BLOCKS']
        }
    ]
    # Outbound: unrestricted.
    egress = [
        {
            'from_port': 0,
            'to_port': 0,
            'protocol': "-1",
            'cidr_blocks': ["0.0.0.0/0"]
        }
    ]
class BaseTG:
    """Common attribute defaults shared by the ALB target groups.

    Target groups are fixed to HTTP/80 regardless of ALB_PROTOCOL; the
    previously considered protocol-dependent values are kept below for reference.
    """
    # port = 80 if Settings.get('ALB_PROTOCOL', "HTTP") != "HTTPS" else 443
    # protocol = Settings.get('ALB_PROTOCOL', "HTTP")
    port = 80
    protocol = "HTTP"
    target_type = "ip"
    create_before_destroy = True
    vpc_id = Settings.get('VPC')['ID']
    # Health-check settings come from module-level constants.
    interval = HEALTH_CHECK_INTERVAL
    timeout = HEALTH_CHECK_TIMEOUT
    matcher = HEALTH_CHECK_MATCHING_LIST
def validate_input_args(self):
    """Validate listener input arguments.

    When the protocol is HTTPS, a certificate ARN must be configured;
    otherwise (or when one is present) validation falls back to the parent
    implementation.

    Returns:
        tuple: (success (bool), msg_list (list)) -- validation result and messages.
    """
    if self.protocol == "HTTPS":
        if not Settings.get('SSL_CERTIFICATE_ARN', None):
            # Fix: corrected typo in the user-facing error message
            # ("Certifcate" -> "Certificate").
            return False, ["Certificate ARN is not found for ELB SSL Policy"]
    return super().validate_input_args()
def prepare_azure_tenants_credentias(self):
    """Build the '##'-separated credential string for all configured Azure tenants.

    Each entry has the form "tenant:<id>,clientId:<id>,secretId:<id>".
    Returns an empty string when Azure support is not enabled.
    """
    if not need_to_enable_azure():
        return ""
    entries = []
    for tenant in Settings.get('AZURE_TENANTS', []):
        entries.append("tenant:%s,clientId:%s,secretId:%s" % (
            tenant['tenantId'], tenant['clientId'], tenant['secretId']))
    return "##".join(entries)
class MySQLDatabase(RDSResource):
    """RDS MySQL 5.6 instance holding the PacBot application data."""
    name = "pacmandata"
    instance_class = Settings.get('RDS_INSTANCE_TYPE', "db.t2.medium")
    identifier = "data"
    storage_type = "gp2"
    engine = "mysql"
    engine_version = "5.6.40"
    allocated_storage = 10
    # Masked placeholder credentials -- presumably substituted elsewhere in the
    # setup; TODO confirm.
    username = "******"
    password = "******"
    parameter_group_name = DBParameterGroup.get_input_attr('name')
    option_group_name = DBOptionGroup.get_input_attr('name')
    db_subnet_group_name = DBSubnetGroup.get_input_attr('name')
    vpc_security_group_ids = [InfraSecurityGroupResource.get_output_attr('id')]
    skip_final_snapshot = True
    apply_immediately = True
    # Ensure the option/parameter/subnet groups exist before this instance.
    DEPENDS_ON = [DBOptionGroup, DBParameterGroup, DBSubnetGroup]

    @classmethod
    def get_rds_info(cls):
        """Return base64("username:password") for consumers needing DB credentials."""
        info = "%s:%s" % (cls.get_input_attr('username'), cls.get_input_attr('password'))
        return base64.b64encode(info.encode()).decode()

    @classmethod
    def get_rds_db_url(cls):
        """Return the JDBC connection URL for this database."""
        rds_endpoint = cls.get_output_attr('endpoint')
        db_name = cls.get_input_attr('name')
        return "jdbc:mysql://%s/%s" % (rds_endpoint, db_name)

    def render_output(self, outputs):
        """Return host/db-name for display; None when absent from TF output."""
        if self.resource_in_tf_output(outputs):
            return {
                'MySQL Host': outputs[self.get_resource_id()]['endpoint'],
                'MySQL DB': self.get_input_attr('name')
            }

    def pre_terraform_apply(self):
        """Create the RDS service-linked IAM role before the instance is created.

        NOTE(review): passes raw AWS keys while other resources in this codebase
        use Settings.AWS_AUTH_CRED -- confirm both credential styles are supported.
        """
        status, msg = create_iam_service_linked_role(
            Settings.AWS_ACCESS_KEY,
            Settings.AWS_SECRET_KEY,
            "rds.amazonaws.com",
            Settings.RESOURCE_DESCRIPTION)
        SysLog().write_debug_log(
            "RDS IAM Service Linked role creation: Status:%s, Message: %s" % (str(status), msg))
def process_destroy_result(self, p):
    """Persist the terraform destroy result and raise if the process failed.

    Args:
        p (process obj): Process object of the terraform destroy run.

    Raises:
        Exception: When the destroy process reported an error.
    """
    outcome = Terraform().return_process_result(p)
    running_cmd = Settings.get('running_command', "Terraform Destroy")
    if outcome[0] != 1:
        self.write_current_status(
            running_cmd, K.DESTROY_STATUS_COMPLETED, K.TERRAFORM_DESTROY_COMPLETED)
        return
    self.log_obj.write_debug_log(K.TERRAFORM_DESTROY_ERROR)
    self.write_current_status(running_cmd, K.DESTROY_STATUS_ERROR, outcome[2])
    raise Exception(outcome[2])