def test_env_insert(self):
    """Inserting a well-formed environment row should yield a non-None id."""
    env_record = {
        'name': self._get_env_name(),
        'location': 'abc',
        'dep_target': 'local',
        'env_definition': 'env_definition',
        'env_version_stamp': 'version',
    }
    inserted_id = environment.Environment().insert(env_record)
    self.assertIsNotNone(inserted_id, "Env not inserted properly")
    # Clean up so repeated runs do not accumulate rows.
    environment.Environment().delete(inserted_id)
def get_deployment_details(self, env_id):
    """Return (user_account, project, zone) for the given environment.

    The project/zone come from the environment definition — either its
    'app_deployment' section or, failing that, the first gcloud resource
    entry. The gcloud user account is read from
    ~/.config/gcloud/configurations/config_default.

    Raises:
        Exception: if the gcloud default configuration file is missing.
    """
    env_obj = env_db.Environment().get(env_id)
    env_details = ast.literal_eval(env_obj.env_definition)

    if 'app_deployment' in env_details['environment']:
        project = env_details['environment']['app_deployment']['project']
        zone = env_details['environment']['app_deployment']['zone']
    else:
        resource = env_details['environment']['resources']['gcloud'][0][
            'resource']
        project = resource['project']
        zone = resource['zone']

    user_account = ''
    config_path = home_dir + "/.config/gcloud/configurations/config_default"
    if not os.path.exists(config_path):
        fmlogger.error(
            "gcloud sdk installation not proper. Did not find ~/.config/gcloud/configurations/config_default file"
        )
        raise Exception()
    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(config_path, "r") as fp:
        for line in fp.readlines():
            if line.find("account") >= 0:
                parts = line.split("=")
                user_account = parts[1].strip()
                break
    return user_account, project, zone
def get_coe_type(env_id):
    """Return the container-orchestration type from the environment's
    'app_deployment' section, or '' when that section is absent."""
    env_record = env_db.Environment().get(env_id)
    definition = ast.literal_eval(env_record.env_definition)
    deployment = definition['environment']
    if 'app_deployment' not in deployment:
        return ''
    return deployment['app_deployment']['type']
def create(self, env_id, resource_details):
    """Provision a MySQL container for the environment and record it in the
    resource table; returns the final resource status string."""
    fmlogger.debug("MySQL container create")
    env_obj = env_db.Environment().get(env_id)
    env_output_config = ast.literal_eval(env_obj.output_config)
    container_name = env_obj.name + "-" + env_output_config['env_version_stamp']

    # Register the resource up front so its progress is visible.
    res_data = {
        'status': 'creating',
        'env_id': env_id,
        'cloud_resource_id': container_name,
        'type': resource_details['type'],
    }
    res_id = res_db.Resource().insert(res_data)

    mysql_env = {
        "MYSQL_ROOT_PASSWORD": constants.DEFAULT_DB_PASSWORD,
        "MYSQL_DATABASE": constants.DEFAULT_DB_NAME,
        "MYSQL_USER": constants.DEFAULT_DB_USER,
        "MYSQL_PASSWORD": constants.DEFAULT_DB_PASSWORD
    }
    self.docker_client.import_image(image=DEFAULT_MYSQL_VERSION)
    serv_cont = self.docker_client.create_container(
        DEFAULT_MYSQL_VERSION, detach=True, environment=mysql_env,
        name=container_name)
    self.docker_client.start(serv_cont)

    cont_data = self.docker_client.inspect_container(serv_cont)
    service_ip_addr = cont_data['NetworkSettings']['IPAddress']
    fmlogger.debug("MySQL Service IP Address:%s" % service_ip_addr)

    filtered_description = {
        'Username': constants.DEFAULT_DB_USER,
        'Password': constants.DEFAULT_DB_PASSWORD,
        'Root_Password': constants.DEFAULT_DB_PASSWORD,
        'DBName': constants.DEFAULT_DB_NAME,
        'DBHOST': service_ip_addr,
    }
    detailed_description = {
        'container_id': cont_data['Id'],
        'container_name': container_name,
    }
    res_data['detailed_description'] = str(detailed_description)
    res_data['filtered_description'] = str(filtered_description)
    res_data['status'] = 'available'
    res_db.Resource().update(res_id, res_data)
    return res_data['status']
def run_command(self, env_id, env_name, resource_obj, command):
    """Run `command` inside a throwaway container built with AWS creds.

    Builds an image whose context is the environment's directory (copying
    aws-creds into ~/.aws), runs `command` via a generated run_command.sh,
    captures the container's logs, and removes the container and image.

    Returns:
        str: the container's log output.

    Raises:
        Exception: if the image build or container run reports an error.
    """
    env_obj = env_db.Environment().get(env_id)
    df_dir = env_obj.location
    # Fix: create the directory with os.makedirs instead of shelling out
    # via os.system("mkdir ...") — no shell, no injection risk.
    if not os.path.exists(df_dir):
        os.makedirs(df_dir)

    df = self.docker_handler.get_dockerfile_snippet("aws")
    df = df + ("COPY . /src \n"
               "WORKDIR /src \n"
               "RUN cp -r aws-creds $HOME/.aws \n"
               "CMD [\"sh\", \"/src/run_command.sh\"] ")
    df_name = df_dir + "/Dockerfile.run_command"
    # Fix: context managers guarantee both files are closed even on error.
    with open(df_name, "w") as fp:
        fp.write(df)
    with open(df_dir + "/run_command.sh", "w") as fp1:
        fp1.write("#!/bin/bash \n")
        fp1.write(command)

    resource_name = resource_obj.cloud_resource_id
    cont_name = resource_name + "_run_command"
    err, output = self.docker_handler.build_container_image(
        cont_name, df_name, df_context=df_dir)
    if err:
        error_msg = ("Error encountered in running command {e}").format(
            e=err)
        fmlogger.error(error_msg)
        raise Exception(error_msg)
    err, output = self.docker_handler.run_container(cont_name)
    if err:
        error_msg = ("Error encountered in running command {e}").format(
            e=err)
        fmlogger.error(error_msg)
        raise Exception(error_msg)
    cont_id = output.strip()
    err, command_output = self.docker_handler.get_logs(cont_id)
    self.docker_handler.remove_container(cont_id)
    self.docker_handler.remove_container_image(cont_name)
    return command_output
def _create_network(self, env_id, project, cluster_name):
    """Create a GCP auto-subnet network named after the cluster and poll
    until it becomes visible; returns the network object.

    On insert failure the error is recorded in the env's output_config and
    re-raised; polling past the timeout raises AppDeploymentFailure.
    """
    network_name = cluster_name
    try:
        self.compute_service.networks().insert(
            project=project,
            body={
                "autoCreateSubnetworks": True,
                "routingConfig": {"routingMode": "GLOBAL"},
                "name": network_name
            }).execute()
    except Exception as e:
        fmlogger.error(e)
        env_db.Environment().update(
            env_id, {'output_config': str({'error': str(e)})})
        raise e

    network_obj = ''
    attempts = 0
    # Poll every 2s until the network shows up or we give up.
    while not network_obj:
        try:
            network_obj = self.compute_service.networks().get(
                project=project, network=network_name).execute()
        except Exception as e:
            fmlogger.error(e)
        if network_obj:
            break
        time.sleep(2)
        attempts = attempts + 1
        if attempts >= GCLOUD_ACTION_TIMEOUT:
            raise exceptions.AppDeploymentFailure()
    return network_obj
def run_command(self, env_id, env_name, resource_obj, base_command,
                command, base_image):
    """Run `command` inside a throwaway container built with gcloud creds.

    Copies the user's gcloud config into the build context, builds an image
    from `base_image` (configured for the env's account/project, plus
    `base_command`), runs `command` via run_command.sh, captures the logs,
    and removes the container and image. Per-phase timings are logged.

    Returns:
        str: the container's log output.

    Raises:
        Exception: if the image build or container run reports an error.
    """
    env_obj = env_db.Environment().get(env_id)
    df_dir = env_obj.location
    # Fix: os.makedirs instead of os.system("mkdir ...") — no shell involved.
    if not os.path.exists(df_dir):
        os.makedirs(df_dir)
    if not os.path.exists(df_dir + "/google-creds"):
        shutil.copytree(home_dir + "/.config/gcloud",
                        df_dir + "/google-creds/gcloud")

    user_account, project_name, zone_name = self.get_deployment_details(
        env_id)

    df = self.docker_handler.get_dockerfile_snippet(base_image)
    df = df + (
        "RUN /google-cloud-sdk/bin/gcloud config set account {account} \ \n"
        " && /google-cloud-sdk/bin/gcloud config set project {project} \n"
        "{base_command}"
        "WORKDIR /src \n"
        "CMD [\"sh\", \"/src/run_command.sh\"] ").format(
            account=user_account,
            project=project_name,
            base_command=base_command)
    df_name = df_dir + "/Dockerfile.run_command"
    # Fix: context managers guarantee both files are closed even on error.
    with open(df_name, "w") as fp:
        fp.write(df)
    with open(df_dir + "/run_command.sh", "w") as fp1:
        fp1.write("#!/bin/bash \n")
        fp1.write(command)

    time1 = int(round(time.time() * 1000))
    resource_name = resource_obj.cloud_resource_id
    cont_name = resource_name + "_run_command"
    err, output = self.docker_handler.build_container_image(
        cont_name, df_name, df_context=df_dir)
    time2 = int(round(time.time() * 1000))
    if err:
        error_msg = ("Error encountered in running command {e}").format(
            e=err)
        fmlogger.error(error_msg)
        raise Exception(error_msg)
    err, output = self.docker_handler.run_container(cont_name)
    time3 = int(round(time.time() * 1000))
    if err:
        error_msg = ("Error encountered in running command {e}").format(
            e=err)
        fmlogger.error(error_msg)
        raise Exception(error_msg)
    cont_id = output.strip()
    err, command_output = self.docker_handler.get_logs(cont_id)
    time4 = int(round(time.time() * 1000))
    self.docker_handler.remove_container(cont_id)
    time5 = int(round(time.time() * 1000))
    self.docker_handler.remove_container_image(cont_name)
    time6 = int(round(time.time() * 1000))

    build_time = time2 - time1
    run_time = time3 - time2
    logs_time = time4 - time3
    remove_cont_time = time5 - time4
    remove_image_time = time6 - time5
    timings1 = (
        "Build time:{build_time}, Run time:{run_time}, logs time:{logs_time}"
    ).format(build_time=build_time, run_time=run_time, logs_time=logs_time)
    timings2 = (
        " Cont remove time:{remove_cont_time}, Cont image remove time:{remove_image_time}"
    ).format(remove_cont_time=remove_cont_time,
             remove_image_time=remove_image_time)
    # NOTE(review): these timings are informational; logging them at error
    # level matches the original behavior but debug level would fit better.
    fmlogger.error("Build time: %s %s" % (timings1, timings2))
    return command_output
def create(self, env_id, resource_details):
    """Create an AWS RDS instance for the environment.

    Sets up a VPC security group, requests the RDS instance, then polls
    describe_db_instances until the instance is available, recording
    progress in the resource table.

    Returns:
        str: lowercase final status ('available' or an error/status string).
    """
    env_obj = env_db.Environment().get(env_id)
    res_type = resource_details['type']
    env_output_config = ast.literal_eval(env_obj.output_config)
    env_version_stamp = env_output_config['env_version_stamp']
    instance_id = env_obj.name + "-" + env_version_stamp
    db_name = constants.DEFAULT_DB_NAME

    # Prefer VPC details already recorded on the environment; otherwise
    # look them up from AWS.
    vpc_id = ''
    vpc_traffic_block = []
    if 'vpc_id' in env_output_config and 'cidr_block' in env_output_config:
        vpc_id = env_output_config['vpc_id']
        vpc_traffic_block.append(env_output_config['cidr_block'])
    else:
        vpc_details = RDSResourceHandler.awshelper.get_vpc_details()
        vpc_id = vpc_details['vpc_id']
        vpc_traffic_block.append(vpc_details['cidr_block'])

    sec_group_name = instance_id + "-sql"
    sec_group_id = ''
    try:
        sec_group_id = RDSResourceHandler.awshelper.create_security_group_for_vpc(
            vpc_id, sec_group_name)
    except Exception as e:
        status = str(e)
        return status

    port_list = [3306]
    engine = DEFAULT_RDS_ENGINE
    instance_class = DEFAULT_RDS_INSTANCE_CLASS
    if 'configuration' in resource_details:
        if 'engine' in resource_details['configuration']:
            engine = resource_details['configuration']['engine']
        if 'flavor' in resource_details['configuration']:
            instance_class = resource_details['configuration']['flavor']

    publicly_accessible = False
    if 'policy' in resource_details:
        if resource_details['policy']['access'] == 'open':
            publicly_accessible = True
            vpc_traffic_block.append('0.0.0.0/0')

    try:
        RDSResourceHandler.awshelper.setup_security_group(
            vpc_id, vpc_traffic_block, sec_group_id, sec_group_name,
            port_list)
    except Exception as e:
        status = str(e)
        # Roll back the security group we just created.
        try:
            RDSResourceHandler.awshelper.delete_security_group_for_vpc(
                vpc_id, sec_group_id, sec_group_name)
        except Exception as e1:
            fmlogger.error(e1)
            status = status + " + " + str(e1)
        return status

    try:
        self.client.create_db_instance(
            DBName=db_name,
            DBInstanceIdentifier=instance_id,
            DBInstanceClass=instance_class,
            Engine=engine,
            MasterUsername=constants.DEFAULT_DB_USER,
            MasterUserPassword=constants.DEFAULT_DB_PASSWORD,
            PubliclyAccessible=publicly_accessible,
            AllocatedStorage=5,
            VpcSecurityGroupIds=[sec_group_id],
            Tags=[{
                "Key": "Tag1",
                "Value": "Value1"
            }])
    except Exception as e:
        fmlogger.error(
            "Exception encountered in creating rds instance %s" % e)
        status = str(e)
        # Roll back the security group we just created.
        try:
            RDSResourceHandler.awshelper.delete_security_group_for_vpc(
                vpc_id, sec_group_id, sec_group_name)
        except Exception as e1:
            fmlogger.error(e1)
            status = status + " + " + str(e1)
        return status

    status = constants.CREATION_REQUEST_RECEIVED
    count = 1
    instance_description = ''
    filtered_description = dict()
    res_data = {}
    res_data['env_id'] = env_id
    res_data['cloud_resource_id'] = instance_id
    res_data['type'] = res_type
    res_data['status'] = status
    res_id = res_db.Resource().insert(res_data)

    # Fix: the original used `status.lower() is not 'available'`, which is
    # an identity (not equality) comparison on strings; use != so the loop
    # condition is meaningful.
    while status.lower() != 'available':
        try:
            instance_description = self.client.describe_db_instances(
                DBInstanceIdentifier=instance_id)
            status = instance_description['DBInstances'][0][
                'DBInstanceStatus']
            if status.lower() == 'available':
                break
            res_data['status'] = status
            res_data['filtered_description'] = str(filtered_description)
            res_data['detailed_description'] = str(instance_description)
            res_db.Resource().update(res_id, res_data)
            count = count + 1
            time.sleep(2)
        except Exception as e:
            fmlogger.error(
                "Exception encountered in describing rds instance %s" % e)

    if status.lower() == 'available':
        # Saving vpc_id here for convenience as when we delete RDS instance
        # we can directly read it from the resource table than querying the
        # env table.
        filtered_description['vpc_id'] = vpc_id
        filtered_description['sql-security-group-name'] = sec_group_name
        filtered_description['sql-security-group-id'] = sec_group_id
        filtered_description['DBInstanceIdentifier'] = instance_id
        # Fix: record the engine/class actually used — the original always
        # reported the defaults even when resource_details overrode them.
        filtered_description['DBInstanceClass'] = instance_class
        filtered_description['Engine'] = engine
        filtered_description['MasterUsername'] = constants.DEFAULT_DB_USER
        filtered_description[
            'MasterUserPassword'] = constants.DEFAULT_DB_PASSWORD
        filtered_description['DBName'] = constants.DEFAULT_DB_NAME
        endpoint_address = instance_description['DBInstances'][0][
            'Endpoint']['Address']
        filtered_description['Address'] = endpoint_address
    else:
        status = 'create-failure: ' + status

    res_data['status'] = status
    res_data['filtered_description'] = str(filtered_description)
    res_data['detailed_description'] = str(instance_description)
    res_db.Resource().update(res_id, res_data)
    return status.lower()
def create(self, env_id, resource_details):
    """Create a Google CloudSQL instance for the environment.

    Builds the instance request (optionally restricting access to the
    cluster IPs recorded on the environment, or opening it when the policy
    is 'open'), polls until the instance is RUNNABLE, then creates the DB
    user and database. Progress is recorded in the resource table.

    Returns 'available' on success, 'unavailable' on failure/timeout.
    """
    fmlogger.debug("CloudSQL create")
    cloudsql_status = 'unavailable'
    env_obj = env_db.Environment().get(env_id)
    res_type = resource_details['type']
    env_details = ast.literal_eval(env_obj.env_definition)
    project_name = env_details['environment']['app_deployment']['project']
    env_output_config = ast.literal_eval(env_obj.output_config)
    env_version_stamp = env_output_config['env_version_stamp']
    instance_id = env_obj.name + "-" + env_version_stamp

    # Decide which networks may reach the instance: open to the world if
    # the policy says so, otherwise the environment's cluster IPs (if any).
    authorizedNetworks = ''
    if 'policy' in resource_details:
        if resource_details['policy']['access'] == 'open':
            authorizedNetworks = '0.0.0.0/0'
    if 'cluster_ips' in env_output_config:
        cluster_ip_list = env_output_config['cluster_ips']
        authorizedNetworks = ','.join(cluster_ip_list)

    database_instance_body = {}
    if authorizedNetworks:
        database_instance_body = {
            'name': instance_id,
            'settings': {
                'tier': DEFAULT_TIER,
                'ipConfiguration': {
                    'authorizedNetworks': [{
                        'value': authorizedNetworks
                    }]
                }
            },
        }
    else:
        database_instance_body = {
            'name': instance_id,
            'settings': {
                'tier': DEFAULT_TIER
            },
        }
    create_request = CloudSQLResourceHandler.service.instances().insert(
        project=project_name, body=database_instance_body)

    # Register the resource row up front so progress is visible.
    res_id = ''
    res_data = {}
    res_data['env_id'] = env_id
    res_data['cloud_resource_id'] = instance_id
    res_data['type'] = res_type
    res_data['status'] = 'creating'
    res_id = res_db.Resource().insert(res_data)

    try:
        create_response = create_request.execute()
        detailed_description = {}
        detailed_description['action_response'] = create_response
        detailed_description['name'] = instance_id
        detailed_description['project'] = project_name
        res_data['detailed_description'] = str(detailed_description)
        res_db.Resource().update(res_id, res_data)
    except Exception as e:
        fmlogger.error(
            "Exception encountered in creating CloudSQL instance %s" % e)
        res_data['status'] = 'Error: ' + str(e)
        res_db.Resource().update(res_id, res_data)
        return cloudsql_status

    # Poll the instance state until it is RUNNABLE, recording each observed
    # state (and etag) in the resource table; give up after TIMEOUT_COUNT
    # iterations of ~3s each.
    available = False
    i = 0
    etag = ''
    filtered_description = {}
    get_response = ''
    while not available:
        get_request = CloudSQLResourceHandler.service.instances().get(
            project=project_name, instance=instance_id)
        get_response = ''
        try:
            get_response = get_request.execute()
        except Exception as e:
            fmlogger.error(e)
            res_data['status'] = str(e)
            res_db.Resource().update(res_id, res_data)
            return cloudsql_status
        status = get_response['state']
        etag = get_response['etag']
        res_data['status'] = status
        detailed_description['etag'] = etag
        res_data['detailed_description'] = str(detailed_description)
        res_db.Resource().update(res_id, res_data)
        if status == 'RUNNABLE':
            cloudsql_status = 'available'
            available = True
        else:
            i = i + 1
            time.sleep(3)
            if i == constants.TIMEOUT_COUNT:
                res_data['status'] = 'creation-timed-out'
                res_db.Resource().update(res_id, res_data)
                return cloudsql_status

    detailed_description['action_response'] = get_response
    filtered_description['Address'] = get_response['ipAddresses'][0][
        'ipAddress']

    # Credentials default to the project constants unless overridden.
    username = constants.DEFAULT_DB_USER
    if 'username' in resource_details:
        username = resource_details['username']
    password = constants.DEFAULT_DB_PASSWORD
    if 'password' in resource_details:
        password = resource_details['password']
    user_body = {
        'name': username,
        'project': project_name,
        'instance': instance_id,
        'password': password,
        'etag': etag
    }
    insert_user_req = CloudSQLResourceHandler.service.users().insert(
        project=project_name, instance=instance_id, body=user_body)
    insert_user_req.execute()

    # Give some time for Google to create the username/password
    time.sleep(10)

    dbname = ''
    try:
        dbname = self._create_database(resource_details, project_name,
                                       instance_id, etag)
    except Exception as e:
        # Database creation failed: record the error but fall through so the
        # resource row still gets the filtered/detailed descriptions.
        fmlogger.error(e)
        res_data['status'] = str(e)
        cloudsql_status = 'unavailable'

    filtered_description['Username'] = username
    filtered_description['Password'] = password
    filtered_description['DBName'] = dbname
    res_data['filtered_description'] = str(filtered_description)
    res_data['detailed_description'] = str(detailed_description)
    res_db.Resource().update(res_id, res_data)
    fmlogger.debug("Exiting CloudSQL create call.")
    return cloudsql_status
def create_cluster(self, env_id, env_info):
    """Provision a GKE cluster (plus its network and firewall rule) for an
    environment and track progress in the resource table.

    Returns:
        str: the final cluster status, or None when network/firewall setup
        or the cluster-create request itself fails (matching the original
        bare-return behavior).
    """
    fmlogger.debug("Creating GKE cluster.")
    cluster_status = 'unavailable'
    env_obj = env_db.Environment().get(env_id)
    env_name = env_obj.name
    env_details = ast.literal_eval(env_obj.env_definition)
    env_output_config = ast.literal_eval(env_obj.output_config)
    env_version_stamp = env_output_config['env_version_stamp']
    cluster_name = env_name + "-" + env_version_stamp

    filtered_description = {}
    res_data = {}
    res_data['env_id'] = env_id
    res_data['cloud_resource_id'] = cluster_name
    res_data['type'] = 'gke-cluster'
    res_data['status'] = 'provisioning'
    res_id = res_db.Resource().insert(res_data)

    app_deployment = env_details['environment']['app_deployment']
    cluster_size = 1
    if 'cluster_size' in app_deployment:
        cluster_size = app_deployment['cluster_size']

    if 'project' in app_deployment:
        project = app_deployment['project']
    else:
        # Fix: the original called fmlogger(...) as if the logger object
        # were a function, which raises TypeError; log via .error instead.
        fmlogger.error(
            "Project ID required. Not continuing with the request.")
        status = 'not-continuing-with-provisioning:missing parameter project id'
        res_data['status'] = status
        res_db.Resource().update(res_id, res_data)
        return status

    if 'zone' in app_deployment:
        zone = app_deployment['zone']
    else:
        # Fix: same fmlogger misuse as above.
        fmlogger.error("Zone required. Not continuing with the request.")
        status = 'not-continuing-with-provisioning:missing parameter zone'
        res_data['status'] = status
        res_db.Resource().update(res_id, res_data)
        return status

    filtered_description['cluster_name'] = cluster_name
    filtered_description['project'] = project
    filtered_description['zone'] = zone
    filtered_description['env_name'] = env_name
    res_data['filtered_description'] = str(filtered_description)
    res_db.Resource().update(res_id, res_data)

    instance_type = DEFAULT_MACHINE_TYPE
    if 'instance_type' in app_deployment:
        instance_type = app_deployment['instance_type']

    try:
        self._create_network(env_id, project, cluster_name)
    except Exception as e:
        fmlogger.error(e)
        return
    try:
        self._create_firewall_rule(env_id, project, cluster_name)
    except Exception as e:
        fmlogger.error(e)
        return

    try:
        resp = self.gke_service.projects().zones().clusters().create(
            projectId=project,
            zone=zone,
            body={
                "cluster": {
                    "name": cluster_name,
                    "initialNodeCount": cluster_size,
                    "nodeConfig": {
                        "oauthScopes":
                        "https://www.googleapis.com/auth/devstorage.read_only",
                        "machineType": instance_type
                    },
                    "network": cluster_name
                }
            }).execute()
        fmlogger.debug(resp)
    except Exception as e:
        fmlogger.error(e)
        env_update = {}
        env_update['output_config'] = str({'error': str(e)})
        env_db.Environment().update(env_id, env_update)
        # Cleanup the firewall rule created above before bailing out.
        self._delete_firewall_rule(project, cluster_name)
        return

    # Poll until GKE reports the cluster running/available.
    available = False
    while not available:
        resp = self.gke_service.projects().zones().clusters().get(
            projectId=project, zone=zone,
            clusterId=cluster_name).execute()
        status = resp['status']
        res_data['status'] = status
        res_db.Resource().update(res_id, res_data)
        if status.lower() == 'running' or status.lower() == 'available':
            available = True
            break
        time.sleep(3)

    instance_ip_list = ''
    try:
        instance_ip_list = self._get_cluster_node_ip(
            env_name, project, zone)
    except Exception as e:
        cluster_status = 'unavailable ' + str(e)
    else:
        cluster_status = 'available'

    if instance_ip_list:
        # Record the node IPs on the environment so dependent resources
        # (e.g. CloudSQL authorized networks) can use them.
        env_output_config['cluster_ips'] = instance_ip_list
        env_data = {}
        env_data['output_config'] = str(env_output_config)
        env_db.Environment().update(env_id, env_data)
        res_data['status'] = cluster_status
        filtered_description['cluster_ips'] = instance_ip_list
        res_data['filtered_description'] = str(filtered_description)
        res_db.Resource().update(res_id, res_data)
        fmlogger.debug("Done creating GKE cluster.")
    else:
        resource_obj = res_db.Resource().get(res_id)
        cluster_status = 'Could not get IP address of the cluster.. Not continuing.. Deleting cluster.'
        res_data['status'] = cluster_status
        res_db.Resource().update(res_id, res_data)
        self.delete_cluster(env_id, env_info, resource_obj)
        fmlogger.debug("Done deleting GKE cluster.")

    return cluster_status