def raise_for_exception(self, has_password, has_secrets_file, has_env_file):
    """Validate deployment prerequisites, raising on any violation.

    Checks apply only to applications shipping an env file: those must
    also provide a secrets file and a password entry.

    Raises:
        exceptions.DeploymentError: when a required artifact is missing.
    """
    if not has_env_file:
        # No env file means nothing to validate.
        return
    if not has_secrets_file:
        raise exceptions.DeploymentError(
            'Stack file is missing secrets.env file')
    if not has_password:
        raise exceptions.DeploymentError(
            'Application is missing password '
            'in app.passwords.yml')
def test_add_error_data(self):
    """add_error_data must stamp step metadata onto a DeploymentError.

    Non-retryable errors get no timestamp; retryable ones do.
    """
    step = ConcreteBPS()
    plain_error = step.add_error_data(
        exceptions.DeploymentError('Test message'), {'test': 'testv'})
    self.assertIsNone(plain_error.timestamp)
    self.assertEqual(plain_error.step_name, 'ConcreteBPS')
    self.assertEqual(plain_error.pipeline_data, {'test': 'testv'})
    retryable_error = step.add_error_data(
        exceptions.DeploymentError('Test message', retryable=True),
        {'test': 'testv'})
    self.assertIsNotNone(retryable_error.timestamp)
def get_app_name_from_file_path(file_path):
    """Extract the application name from a stack file path.

    Example walk-through for '/bla/deploy/kth-azure-app/stage/docker-stack.yml':
        dirname  -> '/bla/deploy/kth-azure-app/stage'
        split #1 -> ('/bla/deploy/kth-azure-app', 'stage')
        split #2 -> ('/bla/deploy', 'kth-azure-app')

    Raises:
        exceptions.DeploymentError: when the path is too shallow to contain
            a cluster or application component.
    """
    parent_dir, cluster = path.split(path.dirname(file_path))
    if not cluster:
        raise exceptions.DeploymentError(
            'Could not parse cluster from stack file path')
    _, app_name = path.split(parent_dir)
    if not app_name:
        raise exceptions.DeploymentError(
            'Could not parse application name from stack file path')
    return app_name
def load_cluster_status_from_file(self):
    """Read and JSON-decode the cluster status file.

    The file location is taken from the CLUSTER_STATUS_API_URL env var.

    Raises:
        exceptions.DeploymentError: when the file does not exist.
    """
    status_path = environment.get_env(environment.CLUSTER_STATUS_API_URL)
    if not os.path.isfile(status_path):
        raise exceptions.DeploymentError(
            f'Could not load cluster status file {status_path}')
    with open(status_path, 'r') as handle:
        return json.loads(handle.read())
def check_environment_missing(self, pipeline_data, environment_missing):
    """Fail the step when mandatory environment variables are absent.

    A falsy *environment_missing* means everything is present: no-op.

    Raises:
        exceptions.DeploymentError: listing the missing variables.
    """
    if not environment_missing:
        return
    self.log.error(
        'Step environment missing "%s" for step "%s", and pipeline_data "%s"',
        environment_missing, self.get_step_name(), pipeline_data)
    raise exceptions.DeploymentError(
        f'Mandatory env {environment_missing} missing',
        pipeline_data=pipeline_data,
        step_name=self.get_step_name())
def check_step_data_missing(self, pipeline_data, step_data_missing):
    """Fail the step when required pipeline_data keys are absent.

    A falsy *step_data_missing* means everything is present: no-op.

    Raises:
        exceptions.DeploymentError: tagged with this step's name.
    """
    if not step_data_missing:
        return
    self.log.error(
        'Step data "%s" missing for step "%s", and pipeline_data "%s"',
        step_data_missing, self.get_step_name(), pipeline_data)
    raise exceptions.DeploymentError(
        'Step pipeline_data not ok',
        pipeline_data=pipeline_data,
        step_name=self.get_step_name())
def get_running_replicas(self, pipeline_data, service):
    """Return the regex match carrying the service's replica counts.

    Scans the `service ls` output line by line for the replica pattern.

    Raises:
        exceptions.DeploymentError: when no line matches.
    """
    listing = self.run_service_ls(pipeline_data, service)
    for line in listing.split('\n'):
        replica_match = re.match(regex.get_nr_of_replicas(), line)
        if replica_match:
            return replica_match
    raise exceptions.DeploymentError(
        'Could not find any service when running ls')
def execute_json_delete(client, key):
    """Delete *key* from redis, returning the DEL command's result.

    Raises:
        exceptions.DeploymentError: when the underlying redis call fails;
            the original RedisError is chained as the cause.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Deleting key "%s"', key)
    try:
        # Keep the try body minimal: only the redis call raises RedisError.
        return client.execute_command('DEL', key)
    except redis.RedisError as redis_err:
        # Chain the cause so the redis traceback is not lost (PEP 3134).
        raise exceptions.DeploymentError(
            f'Couldnt execute redis delete cmd. '
            f'Error was: "{str(redis_err)}"') from redis_err
def execute_json_set(client, key, value):
    """JSON-serialize *value* and store it under *key* via redis SET.

    Returns None (the SET result is intentionally discarded).

    Raises:
        exceptions.DeploymentError: when the underlying redis call fails;
            the original RedisError is chained as the cause.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Writing key "%s" and value "%s"', key, value)
    try:
        # Keep the try body minimal: only the redis call raises RedisError.
        client.execute_command('SET', key, json.dumps(value))
    except redis.RedisError as redis_err:
        # Chain the cause so the redis traceback is not lost (PEP 3134).
        raise exceptions.DeploymentError(
            f'Couldnt execute redis set cmd. '
            f'Error was: "{str(redis_err)}"') from redis_err
def get_client():
    """Build a StrictRedis client from REDIS_URL.

    Falls back to 'redis://redis:6379' when the env var is unset.

    Raises:
        exceptions.DeploymentError: when client creation fails.
    """
    try:
        url = environment.get_with_default_string(
            environment.REDIS_URL, 'redis://redis:6379')
        return redis.StrictRedis.from_url(url)
    except redis.RedisError as redis_err:
        raise exceptions.DeploymentError(
            f'Couldnt create redis client. Error was: '
            f'"{str(redis_err)}"')
def execute_command(client, command):
    """Run a raw redis command and return its result.

    Raises:
        exceptions.DeploymentError: when the command fails; the original
            RedisError is chained as the cause.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Running command "%s"', command)
    try:
        return client.execute_command(command)
    except redis.RedisError as redis_err:
        # Message fixed: a stray literal 'f' inside the f-string made the
        # original render as 'redis command f<command>'.
        raise exceptions.DeploymentError(
            f'Couldnt execute redis command {command}. '
            f'Error was: "{str(redis_err)}"') from redis_err
def execute_json_get(client, key):
    """Fetch *key* via redis GET and JSON-decode any non-empty result.

    Empty or missing values are returned as-is (e.g. None for an
    absent key).

    Raises:
        exceptions.DeploymentError: when the underlying redis call fails.
    """
    try:
        logger = logging.getLogger(__name__)
        logger.debug('Getting key "%s"', key)
        raw_value = client.execute_command('GET', key)
        return json.loads(raw_value) if raw_value else raw_value
    except redis.RedisError as redis_err:
        raise exceptions.DeploymentError(f'Couldnt execute redis get cmd. '
                                         f'Error was: "{str(redis_err)}"')
def get_semver_version_from_env(self, pipeline_data, service_name,
                                semver_env_key):
    """Look up the semver spec for *service_name* in its environment vars.

    Returns the value stored under *semver_env_key* in the service's
    docker-stack environment section.

    Raises:
        exceptions.DeploymentError: when the key cannot be found.
    """
    for name, service in pipeline_data_utils.get_parsed_services(
            pipeline_data):
        if name == service_name:
            # Environment always exists - set in init_service_pipeline_data.py
            for env_var, env_val in service['environment'].items():
                if env_var == semver_env_key:
                    return env_val
    # Message fixed: the original read 'could not be not found'.
    raise exceptions.DeploymentError(
        'SemVer definition `{}` found in image url but could not be '
        'found under *environment* in docker-stack.yml'
        .format(semver_env_key))
def run_step(self, pipeline_data):
    """Wait for every deployed service to reach full replication.

    Entirely skipped when SKIP_DEPLOYMENT is set.

    Raises:
        exceptions.DeploymentError: when no service names can be parsed
            from the deployment output.
    """
    if environment.get_env(environment.SKIP_DEPLOYMENT):
        return pipeline_data
    service_names = self.get_all_service_names(pipeline_data)
    if not service_names:
        raise exceptions.DeploymentError(
            f'Couldnt get service names from deployment '
            f'output. Output was: '
            f'"{pipeline_data[data_defs.DEPLOY_OUTPUT]}"')
    for service_name in service_names:
        self.wait_for_service_replication(pipeline_data, service_name)
    return pipeline_data
def handle_pipeline_error(self, error, pipeline_data):
    """Normalize *error* into a DeploymentError, report it, stop the pipeline.

    Order of the checks below matters: AspenError is rewrapped first
    (fatal, unexpected), CalledProcessError contributes its output as the
    message, and only then is anything still not a DeploymentError wrapped.
    """
    msg = str(error)
    if isinstance(error, exceptions.AspenError):
        msg = str(error)
        # AspenErrors become fatal, unexpected deployment errors.
        error = exceptions.DeploymentError(msg, fatal=True, expected=False)
    if isinstance(error, subprocess.CalledProcessError):
        # Prefer the subprocess output over the exception's repr.
        msg = str(error.output)  # pylint: disable=E1101
    if not isinstance(error, exceptions.DeploymentError):
        # Convert all exceptions to deployment errors
        error = exceptions.DeploymentError(msg)
        # Mark them as unexpected
        error.expected = False
    # Complement error with step data
    error = self.add_error_data(error, pipeline_data)
    self.log.error('An error occured: "%s"', str(error), exc_info=True)
    # Fatal errors take a dedicated reporting path.
    if error.fatal:
        reporter_service.handle_fatal_error(error)
    else:
        reporter_service.handle_deployment_error(error)
    # If the error was an AspenError, the Aspen pipeline will
    # be stopped. This is a more friendly solution than
    # using sys.exit()
    self.stop_pipeline()
def find_best_match(sorted_versions, semver_version):
    """Return the first version in *sorted_versions* satisfying *semver_version*.

    A '~' spec pins major.minor (any build); '^' pins major only;
    otherwise major, minor and build must all match exactly.

    Raises:
        exceptions.DeploymentError: when no candidate matches.
    """
    # Hoisted out of the loop: the spec's prefix is loop-invariant
    # (it was recomputed per candidate in the original).
    is_max_build = semver_version.startswith('~')
    is_max_minor = semver_version.startswith('^')
    for version in sorted_versions:
        if get_major(version) != get_major(semver_version):
            continue
        if is_max_minor:
            return version
        if get_minor(version) != get_minor(semver_version):
            continue
        if is_max_build:
            return version
        if get_build(version) == get_build(semver_version):
            return version
    raise exceptions.DeploymentError(
        'No matching semver version found in tags')
def wait_for_service_replication(self, pipeline_data, service):
    """Poll until *service* reports all replicas running.

    Makes up to self.wait_times attempts, sleeping self.wait_seconds
    after each failed attempt.

    Raises:
        exceptions.DeploymentError: with the service's ps output when
            replication never completes.
    """
    for attempt in range(self.wait_times):
        self.log.debug(
            'Checking if service "%s" has all replicas (attempt #%s/#%s)',
            service, attempt + 1, self.wait_times)
        replicas = self.get_running_replicas(pipeline_data, service)
        running, desired = replicas.group(1), replicas.group(2)
        if running == desired:
            self.log.debug('Service "%s" has %s/%s replicas. All clear.',
                           service, running, desired)
            return
        self.log.debug(
            'Service "%s" only at %s/%s replicas, waiting %s secs',
            service, running, desired, self.wait_seconds)
        time.sleep(self.wait_seconds)
    msg = (
        f'Application didnt start correctly. Service ps output is: \n'
        f'\n{self.get_ps_output(pipeline_data, service)}\n')
    raise exceptions.DeploymentError(msg)
def run_step(self, pipeline_data):
    """Resolve the best matching registry tag for every semver-pinned image.

    For each service whose image is semver-pinned and has registry tags,
    pick the highest satisfying tag, record it on the image data and
    inject it into the service environment.

    Raises:
        exceptions.DeploymentError: when no registry tag satisfies the
            semver spec.
    """
    for i, service in pipeline_data_utils.get_enumerated_services(
            pipeline_data):
        image_data = service[data_defs.S_IMAGE]
        self.log.debug('Found image data "%s"', image_data)
        if image_data[data_defs.IMG_IS_SEMVER] and image_data[
                data_defs.IMG_TAGS]:
            try:
                best_match = max_satisfying(
                    image_data[data_defs.IMG_TAGS],
                    image_data[data_defs.IMG_SEMVER_VERSION])
            except exceptions.DeploymentError as semver_error:
                # Chain the cause (the original dropped it) so the
                # underlying semver failure stays in the traceback.
                raise exceptions.DeploymentError(
                    "No matching version found for '{}': '{}' in the Docker registry. Not pushed yet?"
                    .format(image_data[data_defs.IMG_SEMVER_ENV_KEY],
                            image_data[data_defs.IMG_SEMVER_VERSION])
                ) from semver_error
            self.log.debug('Best match was "%s"', best_match)
            image_data[data_defs.IMG_BEST_SEMVER_MATCH] = best_match
            service = self.set_semver_environment(service, image_data,
                                                  best_match)
            pipeline_data[data_defs.SERVICES][i] = service
    return pipeline_data
def get_current_cluster_lb_ip(self, cluster_data, pipeline_data):
    """Return the load balancer IP of the application's target cluster.

    NOTE(review): entries are matched on cluster['status'] against the
    application's cluster value — confirm against the cluster status
    schema that 'status' is the intended key here.

    Raises:
        exceptions.DeploymentError: when no cluster entry matches.
    """
    target_cluster = pipeline_data[data_defs.APPLICATION_CLUSTER]
    for cluster in cluster_data:
        if cluster['status'] != target_cluster:
            continue
        return cluster['load_balancer_ip']
    raise exceptions.DeploymentError('Application not targeted for cluster')
def get_app_cluster_from_file_path(file_path):
    """Extract the cluster name from a stack file path.

    Example: '/bla/deploy/kth-azure-app/stage/docker-stack.yml' -> 'stage'.

    Raises:
        exceptions.DeploymentError: when the path has no cluster component.
    """
    _, cluster = path.split(path.dirname(file_path))
    if not cluster:
        raise exceptions.DeploymentError(
            'Could not parse cluster from stack file path')
    return cluster
def get_mock_deployment_error(expected=True):
    """Build a canned DeploymentError for use in tests.

    *expected* is forwarded to the error; the step name is fixed to
    'ParseStackFile'.
    """
    return exceptions.DeploymentError(
        'This is a deployment error',
        expected=expected,
        step_name='ParseStackFile')