def set_teams(config: ConcourseConfig):
    """Create/update all configured Concourse teams except the main team.

    Authenticates as the main team (the only team permitted to change other
    teams' credentials) and then applies every other configured team.
    """
    not_none(config)
    # Use main-team, i.e. the team that can change the other teams' credentials
    main_team_credentials = config.main_team_credentials()
    main_team_name = main_team_credentials.teamname()
    concourse_api = client.from_cfg(
        concourse_cfg=config,
        team_name=main_team_name,
    )
    for team in config.all_team_credentials():
        # We skip the main team here since we cannot update all its credentials
        # at this time. Compare against the configured main team's name rather
        # than the hard-coded literal 'main', so a differently-named main team
        # is also skipped (matches the uam-config variant of set_teams).
        if team.teamname() == main_team_name:
            continue
        concourse_api.set_team(team)
def start_worker_resurrector(
    config_name: CliHint(typehint=str, help='the config set name to use'),
    concourse_namespace='concourse',
):
    """Start the worker-resurrector against the cluster/Concourse named by
    the given config set, watching the given namespace.
    """
    factory = ctx().cfg_factory()
    cfg_set = factory.cfg_set(cfg_name=config_name)

    # point a fresh kubernetes context at the cluster from the config set
    kubernetes_cfg = cfg_set.kubernetes()
    kube_client = kube.ctx.Ctx()
    kube_client.set_kubecfg(kubernetes_cfg.kubeconfig())

    concourse_client = client.from_cfg(
        concourse_cfg=cfg_set.concourse(),
        team_name='main',
    )
    resurrect_pods(
        namespace=concourse_namespace,
        concourse_client=concourse_client,
        kubernetes_client=kube_client,
    )
def set_teams(config: ConcourseConfig):
    """Create/update all teams from the concourse-uam config, except main."""
    not_none(config)
    cfg_factory = global_ctx().cfg_factory()
    concourse_uam_cfg = cfg_factory.concourse_uam(config.concourse_uam_config())
    # Use main-team, i.e. the team that can change the other teams' credentials
    main_team = concourse_uam_cfg.main_team()
    main_team_name = main_team.teamname()
    concourse_api = client.from_cfg(
        concourse_cfg=config,
        team_name=main_team_name,
    )
    # We skip the main team here since we cannot update all its credentials at
    # this time.
    for team in concourse_uam_cfg.teams():
        if team.teamname() != main_team_name:
            concourse_api.set_team(team)
def trigger_resource_check(
    cfg_name: CliHints.non_empty_string(help="cfg_set to use"),
    team_name: CliHints.non_empty_string(help="pipeline's team name"),
    pipeline_name: CliHints.non_empty_string(help="pipeline name"),
    resource_name: CliHints.non_empty_string(help="resource to check"),
):
    '''Triggers a check of the specified Concourse resource
    '''
    factory = ctx().cfg_factory()
    concourse_cfg = factory.cfg_set(cfg_name).concourse()
    concourse_api = client.from_cfg(
        concourse_cfg=concourse_cfg,
        team_name=team_name,
    )
    concourse_api.trigger_resource_check(
        pipeline_name=pipeline_name,
        resource_name=resource_name,
    )
def deploy(self, definition_descriptor):
    """Set the descriptor's pipeline on its target Concourse team.

    Returns a DeployResult; any exception during deployment is reported as
    a FAILED result (with the traceback as error details) rather than
    propagated.
    """
    pipeline_name = definition_descriptor.pipeline_name
    try:
        api = client.from_cfg(
            concourse_cfg=definition_descriptor.concourse_target_cfg,
            team_name=definition_descriptor.concourse_target_team,
        )
        response = api.set_pipeline(
            name=pipeline_name,
            pipeline_definition=definition_descriptor.pipeline,
        )
        info(
            'Deployed pipeline: ' + pipeline_name
            + ' to team: ' + definition_descriptor.concourse_target_team
        )

        if self.unpause_pipelines:
            info(f'Unpausing pipeline {pipeline_name}')
            api.unpause_pipeline(pipeline_name=pipeline_name)
        if self.expose_pipelines:
            api.expose_pipeline(pipeline_name=pipeline_name)

        deploy_status = DeployStatus.SUCCEEDED
        if response is concourse.client.model.SetPipelineResult.CREATED:
            deploy_status |= DeployStatus.CREATED
        elif response is concourse.client.model.SetPipelineResult.UPDATED:
            pass
        else:
            # NOTE(review): raised inside the try, so an unknown result is
            # caught below and reported as a FAILED deployment.
            raise NotImplementedError
        return DeployResult(
            definition_descriptor=definition_descriptor,
            deploy_status=deploy_status,
        )
    except Exception as e:
        import traceback
        traceback.print_exc()
        warning(e)
        return DeployResult(
            definition_descriptor=definition_descriptor,
            deploy_status=DeployStatus.FAILED,
            error_details=traceback.format_exc(),
        )
def determine_previous_build_status(v, cfg_set):
    """Return the status of the build preceding the current one.

    `v` is the meta-info mapping of the running build (keys
    'build-name', 'build-team-name', 'build-pipeline-name',
    'build-job-name').

    Returns BuildStatus.SUCCEEDED for the very first build (so that a
    notification is sent), and None if the lookup fails for any reason.
    """
    concourse_api = from_cfg(cfg_set.concourse(), team_name=v['build-team-name'])
    try:
        build_number = int(v['build-name'])
        if build_number < 2:
            ci.util.info('this seems to be the first build - will notify')
            return BuildStatus.SUCCEEDED
        previous_build = concourse_api.job_build(
            pipeline_name=v['build-pipeline-name'],
            job_name=v['build-job-name'],
            build_name=str(build_number - 1),
        )
        return previous_build.status()
    except SystemExit:
        # SystemExit derives from BaseException, not Exception, so the
        # original `type(e) == SystemExit` check inside `except Exception`
        # was unreachable; make the re-raise intent explicit instead.
        raise
    except Exception:
        # in doubt, ensure notification is sent
        traceback.print_exc()
        return None
def process_results(self, results):
    """Post-process pipeline deployment results, grouped per Concourse target.

    For each target (concourse_cfg, team_name): optionally removes pipelines
    that are no longer declared, triggers resource checks for newly created
    pipelines, and optionally orders pipelines alphabetically. Finally logs a
    summary and notifies owners of broken definitions.

    Returns True iff all deployments succeeded, or every error notification
    for failed deployments could be sent.
    """
    # collect pipelines by concourse target (concourse_cfg, team_name) as key
    concourse_target_results = {}
    for result in results:
        key = result.definition_descriptor.concourse_target_key()
        concourse_target_results.setdefault(key, []).append(result)

    for concourse_results in concourse_target_results.values():
        # TODO: implement eq for concourse_cfg
        concourse_cfg, concourse_team = next(
            iter(concourse_results)
        ).definition_descriptor.concourse_target()
        concourse_api = client.from_cfg(
            concourse_cfg=concourse_cfg,
            team_name=concourse_team,
        )

        # find pipelines to remove
        if self.remove_pipelines:
            deployed_pipeline_names = {
                r.definition_descriptor.pipeline_name for r in concourse_results
            }
            pipelines_to_remove = (
                set(concourse_api.pipelines()) - deployed_pipeline_names
            )
            for pipeline_name in pipelines_to_remove:
                logger.info('removing pipeline: {p}'.format(p=pipeline_name))
                concourse_api.delete_pipeline(pipeline_name)

        # trigger resource checks in new pipelines
        self._initialise_new_pipeline_resources(concourse_api, concourse_results)

        if self.reorder_pipelines:
            # order pipelines alphabetically
            concourse_api.order_pipelines(sorted(concourse_api.pipelines()))

    # evaluate results
    failed_descriptors = [
        d for d in results
        if not d.deploy_status & DeployStatus.SUCCEEDED
    ]
    failed_count = len(failed_descriptors)

    logger.info('Successfully replicated {d} pipeline(s)'.format(
        d=len(results) - failed_count))

    if failed_count == 0:
        return True

    logger.warning(
        f'Errors occurred whilst replicating pipeline(s): {failed_count=}')

    all_notifications_succeeded = True
    for failed_descriptor in failed_descriptors:
        logger.warning(failed_descriptor.definition_descriptor.pipeline_name)
        try:
            self._notify_broken_definition_owners(failed_descriptor)
        except Exception:
            logger.warning(
                'an error occurred whilst trying to send error notifications'
            )
            traceback.print_exc()
            all_notifications_succeeded = False

    # signal error only if error notifications failed
    return all_notifications_succeeded