def get_tags_from_registry(self, image_data, url_to_call):
    """Fetch the tag list for an image from its registry and filter it.

    Picks Azure or Docker registry credentials based on the image
    configuration, calls the registry tag endpoint, and normalizes the
    response shape (the Azure response nests each tag under a "name"
    key; the plain Docker registry returns a flat list).

    :param image_data: image configuration used to select the registry
    :param url_to_call: full registry URL returning the tag listing
    :return: tags accepted by self.filter_out_non_valids
    """
    # Evaluate the registry choice once instead of calling
    # environment.use_azure_repository twice with the same argument.
    use_azure = environment.use_azure_repository(image_data)
    if use_azure:
        user = environment.get_env(environment.AZURE_REGISTRY_USER)
        password = environment.get_env(environment.AZURE_REGISTRY_PWD)
    else:
        user = environment.get_env(environment.DOCKER_REGISTRY_USER)
        password = environment.get_env(environment.DOCKER_REGISTRY_PWD)
    response = requests.get_urllib_json(url_to_call, auth=(user, password))
    if use_azure:
        # Azure shape: {"tags": [{"name": ...}, ...]}
        tags = [version["name"] for version in response["tags"]]
    else:
        tags = response["tags"]
    return self.filter_out_non_valids(tags)
def run_step(self, pipeline_data):
    """Decrypt vault-managed application passwords into the pipeline data.

    Validates that both the vault key file and the encrypted application
    password file exist, runs the decryption, and stores the parsed YAML
    under data_defs.APPLICATION_PASSWORDS.

    :param pipeline_data: shared pipeline state dict
    :return: pipeline_data with APPLICATION_PASSWORDS populated
    :raises exceptions.AspenError: if either configured path is not a file
    """
    self.vault_key_path = environment.get_env(environment.VAULT_KEY_PATH)
    self.app_pwd_file_path = environment.get_env(
        environment.APP_PWD_FILE_PATH)
    if not os.path.isfile(self.vault_key_path):
        raise exceptions.AspenError(
            f'Vault key path {self.vault_key_path} is not a file')
    if not os.path.isfile(self.app_pwd_file_path):
        raise exceptions.AspenError(
            f'Application pwd path {self.app_pwd_file_path} '
            f'is not a file')
    vault_output = self.decrypt_app_passwords()
    # safe_load: plain yaml.load without an explicit Loader can construct
    # arbitrary Python objects and is deprecated since PyYAML 5.1. The
    # decrypted output here is plain key/value data, so safe_load suffices.
    pipeline_data[data_defs.APPLICATION_PASSWORDS] = yaml.safe_load(
        vault_output)
    return pipeline_data
def load_cluster_status_from_file(self):
    """Load cluster status JSON from a local file.

    In file mode the CLUSTER_STATUS_API_URL environment variable holds a
    filesystem path rather than a URL (see the URL_IS_FILE toggle in the
    calling step).

    :return: parsed JSON content of the status file
    :raises exceptions.DeploymentError: when the path is not a file
    """
    cluster_file = environment.get_env(environment.CLUSTER_STATUS_API_URL)
    if not os.path.isfile(cluster_file):
        raise exceptions.DeploymentError(
            f'Could not load cluster status file {cluster_file}')
    with open(cluster_file, 'r') as file_stream:
        return json.load(file_stream)
def run_step(self, pipeline_data):
    """Report a successful deployment unless deployment was skipped.

    Builds the deployment JSON, notifies the reporter service when
    SKIP_DEPLOYMENT is not set, and always flags the pipeline data as
    deployed.

    :param pipeline_data: shared pipeline state dict
    :return: pipeline_data with WAS_DEPLOYED set to True
    """
    deployment_json = self.create_deployment_json(pipeline_data)
    if not environment.get_env(environment.SKIP_DEPLOYMENT):
        reporter_service.handle_deployment_success(deployment_json)
    pipeline_data[data_defs.WAS_DEPLOYED] = True
    return pipeline_data
def get_cache_key(pipeline_data):
    """Build the error-cache key for a pipeline run.

    The key has the form 'error/<management-res-grp>/<stack-file-path>'
    with any leading slashes stripped from the stack file path.

    :param pipeline_data: shared pipeline state dict
    :return: cache key string, or None when no stack file path is present
    """
    mgt_res_grp = environment.get_env(environment.MANAGEMENT_RES_GRP)
    if data_defs.STACK_FILE_PATH not in pipeline_data:
        return None
    stack_path = pipeline_data[data_defs.STACK_FILE_PATH]
    return f'error/{mgt_res_grp}/{stack_path.lstrip("/")}'
def main():
    """Service entry point.

    Configures logging, makes sure the SSH known-hosts entry exists,
    optionally starts the background sync thread (SYNC_START_ON_RUN),
    then serves the Flask application.
    """
    log.init_logging()
    logger = logging.getLogger(__name__)
    known_hosts.write_entry_if_missing()
    sync_on_run = environment.get_env(environment.SYNC_START_ON_RUN)
    if sync_on_run:
        logger.info('Starting sync thread on run')
        thread.create_and_start_sync_thread(sync_routine)
    FLASK_APP.run(host='0.0.0.0', port=3005)
def call_cluster_status_api(self):
    """Call the cluster status API and return its JSON response.

    :return: parsed JSON response from the status endpoint
    :raises exceptions.AspenError: wrapping any failure during the call
    """
    try:
        url = environment.get_env(environment.CLUSTER_STATUS_API_URL)
        response = requests.get_urllib_json(url)
        return response
    except Exception as err:
        # 'from err' sets __cause__ explicitly so the original traceback
        # is preserved and clearly linked in logs.
        raise exceptions.AspenError(f'Could not call cluster status api. '
                                    f'Error was: "{str(err)}"') from err
def run_step(self, pipeline_data):
    """Resolve cluster status and record deployment targets.

    Reads the cluster status either from a local file or from the status
    API depending on CLUSTER_STATUS_URL_IS_FILE, then stores the verified
    clusters to deploy and the raw docker host IP data.

    :param pipeline_data: shared pipeline state dict
    :return: pipeline_data with CLUSTERS_TO_DEPLOY and DOCKER_HOST_IPS set
    """
    url_is_file = environment.get_env(environment.CLUSTER_STATUS_URL_IS_FILE)
    fetch = (self.load_cluster_status_from_file if url_is_file
             else self.call_cluster_status_api)
    cluster_data = fetch()
    verified = self.verify_cluster_to_deploy_has_ip(cluster_data)
    pipeline_data[data_defs.CLUSTERS_TO_DEPLOY] = verified
    pipeline_data[data_defs.DOCKER_HOST_IPS] = cluster_data
    return pipeline_data
def add_known_host_entry():
    """Append KNOWN_HOST_ENTRY to the known-hosts file when missing.

    Uses KNOWN_HOST_FILE when set, defaulting to /root/.ssh/known_hosts.
    Does nothing (beyond a debug log) when the entry is already present.
    """
    logger = logging.getLogger(__name__)
    file = environment.get_with_default_string(environment.KNOWN_HOST_FILE,
                                               '/root/.ssh/known_hosts')
    entry = environment.get_env(environment.KNOWN_HOST_ENTRY)
    if file_has_text(file, entry):
        # Bugfix: this branch previously used the root logger
        # (logging.debug) instead of the module logger, so the message
        # bypassed this module's logger configuration.
        logger.debug('KNOWN_HOST_FILE already has KNOWN_HOST_ENTRY')
    else:
        logger.debug('Writing KNOWN_HOST_ENTRY to KNOWN_HOST_FILE')
        write_to_file(file, entry)
def handle_deployment_success(deployment_json):
    """Post a successful-deployment notification when Slack is enabled.

    :param deployment_json: payload with at least "cluster" and
        "applicationName" keys, sent as-is to the configured URL
    """
    logger = logging.getLogger(__name__)
    deployment_url = environment.get_env(environment.SLACK_DEPLOYMENT_POST_URL)
    if not deployment_url:
        logger.debug('Slack integration not enabled, skipping report')
        return
    logger.info(
        f'{deployment_json["cluster"]}/{deployment_json["applicationName"]} - Reporting successful deployment '
    )
    response = call_with_payload(deployment_url, deployment_json)
    if response:
        logger.debug('Response was: "%s"', response)
def run_step(self, pipeline_data):
    """Wait for every deployed service to finish replicating.

    Returns immediately when SKIP_DEPLOYMENT is set. Otherwise extracts
    the service names from the deployment output and blocks on each one
    until replication completes.

    :param pipeline_data: shared pipeline state dict
    :return: pipeline_data unchanged
    :raises exceptions.DeploymentError: when no service names can be
        parsed from the deployment output
    """
    skip_deployment = environment.get_env(environment.SKIP_DEPLOYMENT)
    if skip_deployment:
        return pipeline_data
    service_names = self.get_all_service_names(pipeline_data)
    if not service_names:
        # Typo fixed in the message ("Couldnt" -> "Could not").
        raise exceptions.DeploymentError(
            f'Could not get service names from deployment '
            f'output. Output was: '
            f'"{pipeline_data[data_defs.DEPLOY_OUTPUT]}"')
    for service in service_names:
        self.wait_for_service_replication(pipeline_data, service)
    return pipeline_data
def handle_fatal_error(error: exceptions.DeploymentError):
    """Report a fatal deployment error to Slack.

    A fatal error has no pipeline context, so no service labels and no
    @here mention are attached. Warns when SLACK_ERROR_POST_URL is unset.

    :param error: the fatal error to report
    """
    logger = logging.getLogger(__name__)
    logger.debug('Found new reportable error: reporting to Slack')
    error_url = environment.get_env(environment.SLACK_ERROR_POST_URL)
    if error_url:
        error_json = create_error_object(error, None, False)
        logger.debug('Calling "%s" with "%s"', error_url, error_json)
        response = call_with_payload(error_url, error_json)
        if response:
            logger.debug('Response was: "%s"', response)
    else:
        # Message grammar fixed: "but not ... was set" -> "but no ... was set".
        logger.warning(
            'Found error to report, but no SLACK_ERROR_POST_URL was set')
def run_pipeline(self):
    """Run the pipeline and return its resulting data.

    Only the first step is invoked directly; subsequent steps are chained
    by run_pipeline_step. All failures are logged and re-raised.

    :return: the pipeline data produced by the step chain
    :raises exceptions.AspenError: propagated after an error log
    :raises Exception: any unhandled error, logged with traceback
    """
    try:
        self.log.info(
            'Starting AspenPipeline with "%s" steps to clusters "%s"',
            len(self.pipeline_steps),
            environment.get_env(environment.CLUSTERS_TO_DEPLOY))
        first_step = self.pipeline_steps[0]
        return first_step.run_pipeline_step(self.pipeline_data)
    except exceptions.AspenError as as_err:
        self.log.error('AspenError occured: "%s"', str(as_err))
        raise
    except Exception as err:
        self.log.error('Unhandled exception occured: "%s"', str(err))
        self.log.exception(err)
        raise
def report_error_to_slack(error, add_here_to_msg):
    """Report a pipeline error to Slack and record it in the error cache.

    Unlike handle_fatal_error, this has pipeline context available, so
    combined service labels are attached and the error is cached after a
    successful post. Warns when SLACK_ERROR_POST_URL is unset.

    :param error: error carrying pipeline_data context
    :param add_here_to_msg: whether to add an @here mention to the message
    """
    logger = logging.getLogger(__name__)
    logger.debug('Found new reportable error: reporting to Slack')
    combined_labels = get_combined_service_labels(error.pipeline_data)
    error_url = environment.get_env(environment.SLACK_ERROR_POST_URL)
    if error_url:
        error_json = create_error_object(error, combined_labels,
                                         add_here_to_msg)
        logger.debug('Calling "%s" with "%s"', error_url, error_json)
        response = call_with_payload(error_url, error_json)
        if response:
            logger.debug('Response was: "%s"', response)
            error_cache.write_to_error_cache(error)
    else:
        # Message grammar fixed: "but not ... was set" -> "but no ... was set".
        logger.warning(
            'Found error to report, but no SLACK_ERROR_POST_URL was set')
def handle_recommendation(pipeline_data, application_name,
                          recommendation_text):
    """Send a recommendation message to the service's Slack channels.

    Channels are derived from the combined service labels of the pipeline
    run. Does nothing (beyond a debug log) when the recommendation post
    URL is not configured.

    :param pipeline_data: shared pipeline state dict
    :param application_name: name of the application the tip concerns
    :param recommendation_text: human-readable recommendation body
    """
    logger = logging.getLogger(__name__)
    recommendation_url = environment.get_env(
        environment.SLACK_RECOMMENDATION_POST_URL)
    if not recommendation_url:
        logger.debug(
            'Slack recommendation integration not enabled, skipping report')
        return
    combined_labels = get_combined_service_labels(pipeline_data)
    slack_channels = get_slack_channels(combined_labels)
    payload = create_recommedation_object(application_name,
                                          recommendation_text,
                                          slack_channels)
    response = call_with_payload(recommendation_url, payload)
    if response:
        logger.debug('Response was: "%s"', response)
def run_deploy(self, pipeline_data, environment):
    """Run 'docker stack deploy' against the cluster load balancer.

    NOTE(review): the 'environment' parameter here is a string of env-var
    assignments prefixed to the docker command — it shadows the project's
    environment module, which is presumably why the module is referenced
    as module_env below; confirm against the file's imports.

    :param pipeline_data: shared pipeline state dict
    :param environment: env-var assignment prefix for the docker command
    :return: pipeline_data with DEPLOY_OUTPUT set ('' when skipped)
    """
    stack_file = pipeline_data[data_defs.STACK_FILE_PATH]
    name = pipeline_data[data_defs.APPLICATION_NAME]
    cluster_lb_ip = pipeline_data[data_defs.DOCKER_HOST_IP]
    cmd = (f'{environment} DOCKER_TLS_VERIFY=1 docker '
           f'-H tcp://{cluster_lb_ip} stack deploy '
           f'--with-registry-auth '
           f'--compose-file {stack_file} {name}')
    if module_env.get_env(module_env.SKIP_DEPLOYMENT):
        pipeline_data[data_defs.DEPLOY_OUTPUT] = ''
        self.log.info('SKIP_DEPLOY set - skipping deployment')
    else:
        raw_output = self.run_docker_cmd(cmd)
        pipeline_data[data_defs.DEPLOY_OUTPUT] = raw_output.decode('utf-8')
        self.log.debug('Deployment output was: "%s"', raw_output)
    return pipeline_data
def run_step(self, pipeline_data):
    """Log in to the docker registry with the configured credentials.

    :param pipeline_data: shared pipeline state dict
    :return: pipeline_data unchanged
    """
    # Tuple assignment keeps the same left-to-right env lookup order.
    user, registry_url, pwd = (
        environment.get_env(environment.DOCKER_REGISTRY_USER),
        environment.get_env(environment.DOCKER_REGISTRY_URL),
        environment.get_env(environment.DOCKER_REGISTRY_PWD))
    self.run_docker_login(registry_url, user, pwd)
    return pipeline_data
def write_entry_if_missing():
    """Add the SSH known-host entry when KNOWN_HOST_ENTRY is configured."""
    entry = environment.get_env(environment.KNOWN_HOST_ENTRY)
    if entry:
        add_known_host_entry()