def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    emails = queries.get_app_interface_emails()
    smtp_client = SmtpClient(settings=settings)
    # validate no 2 emails have the same name
    email_names = set([e['name'] for e in emails])
    if len(emails) != len(email_names):
        logging.error('email names must be unique.')
        sys.exit(1)

    emails_to_send = [e for e in emails if not state.exists(e['name'])]
    for email in emails_to_send:
        logging.info(['send_email', email['name'], email['subject']])

        if not dry_run:
            names = collect_to(email['to'])
            subject = email['subject']
            body = email['body']
            smtp_client.send_mail(names, subject, body)
            state.add(email['name'])

def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    users = queries.get_users()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    mails = smtp_client.get_mails(
        criteria='SUBJECT "Sentry Access Request"',
        folder='[Gmail]/Sent Mail',
        settings=settings)
    user_names = get_sentry_users_from_mails(mails)
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)
    for user_name in user_names:
        guesses = guess_user(user_name, users)
        if not guesses:
            logging.debug(f'no users guessed for {user_name}')
            continue
        slack_username = \
            guesses[0].get('slack_username') or guesses[0]['org_username']
        if state.exists(slack_username):
            continue
        logging.info(['help_user', slack_username])
        if not dry_run:
            state.add(slack_username)
            slack.chat_post_message(
                f'yo <@{slack_username}>! it appears that you have '
                'requested access to a project in Sentry. '
                'access is managed automatically via app-interface. '
                'checkout https://url.corp.redhat.com/sentry-help')

def run_skill(state: State, modes: List = [skill_attrs.modes.default]):
    utterances = state.utterances
    bot_messages = state.bot_messages

    if bot_messages:
        skill_name = bot_messages[-1]["skill_name"]
        scores = get_emo_scores(utterances)
        state.add_skill_scores(skill_name=skill_name, scores=scores)

    return state

def run_skill(state: State, modes: List = [skill_attrs.modes.intro]):
    model_results = run_models(models, state.human_utterances)
    true_model_names = cmd_postprocessing(model_results, model_name_only=True)
    true_cmds = cmd_postprocessing(model_results, cmd_only=True)

    skill_state_update = {}

    # print(f"<{skill_attrs.skill_name}> skill_state: {skill_state}")
    # print(f"<{skill_attrs.skill_name}> true_model_names: {true_model_names}")
    # print(f"<{skill_attrs.skill_name}> true_cmds: {true_cmds}")

    # handler loop
    proceed = True
    handler_state = {}
    i = 0
    while proceed:
        i += 1
        if skill_attrs.modes.intro in modes:
            skill_state = {}
            next_step = ""
        else:
            skill_state = state.get_skill_state(skill_attrs.skill_name)
            next_step = skill_state.get("next_step", "")

        # # if next_step in ["", "are_you_ask"]:
        # if next_step in ["are_you_ask"]:
        #     proceed, handler_state, skill_state_update, state = are_you_ask_handler(
        #         handler_state, skill_state, state, true_model_names, true_cmds
        #     )
        if next_step in ["", "have_you_played"]:
            proceed, handler_state, skill_state_update, state = \
                have_you_played_handler(
                    handler_state, skill_state, state,
                    true_model_names, true_cmds)
        elif next_step in ["do_you_like"]:
            proceed, handler_state, skill_state_update, state = \
                do_you_like_handler(
                    handler_state, skill_state, state,
                    true_model_names, true_cmds)

        state.update_skill_state(skill_attrs.skill_name, skill_state_update)
        # logger.info(f"{i}: next_step = {next_step}")
        # logger.info(skill_state_update)
        # print(f"skill_state_update = {skill_state_update}")

    text = handler_state.get("text", ["Sorry, i can not answer."])
    text = " ".join(text)
    confidence = handler_state.get("confidence", 0.0)
    scenario = handler_state.get("scenario", False)

    state.add_hypothesis(
        skill_name=skill_attrs.skill_name,
        text=text,
        confidence=confidence,
        scenario=scenario,
    )

    return state

def run(dry_run, thread_pool_size=10, internal=None,
        use_jump_host=True, defer=None):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    clusters = [c for c in queries.get_clusters(minimal=True) if c.get('ocm')]
    oc_map = OC_Map(clusters=clusters,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    defer(lambda: oc_map.cleanup())
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    if not dry_run:
        slack = init_slack_workspace(QONTRACT_INTEGRATION)

    now = datetime.utcnow()
    for cluster in oc_map.clusters():
        oc = oc_map.get(cluster)
        if not oc:
            logging.log(level=oc.log_level, msg=oc.message)
            continue
        upgrade_config = oc.get(
            namespace='openshift-managed-upgrade-operator',
            kind='UpgradeConfig',
            name='osd-upgrade-config',
            allow_not_found=True)
        if not upgrade_config:
            logging.debug(f'[{cluster}] UpgradeConfig not found.')
            continue
        upgrade_spec = upgrade_config['spec']
        upgrade_at = upgrade_spec['upgradeAt']
        version = upgrade_spec['desired']['version']
        upgrade_at_obj = datetime.strptime(upgrade_at, '%Y-%m-%dT%H:%M:%SZ')
        state_key = f'{cluster}-{upgrade_at}'
        # if this is the first iteration in which 'now' has passed
        # the upgradeAt datetime, send a notification
        if upgrade_at_obj < now:
            if state.exists(state_key):
                # already notified
                continue
            logging.info(['cluster_upgrade', cluster])
            if not dry_run:
                state.add(state_key)
                usergroup = f'{cluster}-cluster'
                usergroup_id = slack.get_usergroup_id(usergroup)
                slack.chat_post_message(
                    f'Heads up <!subteam^{usergroup_id}>! '
                    f'cluster `{cluster}` is currently '
                    f'being upgraded to version `{version}`')

def test_get_estimation(self):
    state = State('RI', '2015', 0, 0.5, 10, 0, 0.1, cdist, 10)
    state.get_estimation()
    self.assertIsInstance(state.df, DataFrame)
    self.assertTrue('Centroid Latitude' in state.df.columns)
    self.assertTrue('Centroid Longitude' in state.df.columns)
    self.assertTrue('Congressional District' in state.df.columns)
    self.assertTrue('GEOID' in state.df.columns)
    self.assertTrue('Predicted 2015 Population' in state.df.columns)
    self.assertTrue('geometry' in state.df.columns)

def ls(ctx, integration):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration, accounts, settings=settings)
    keys = state.ls()
    # if 'integration' is defined, the 0th token is empty
    table_content = [
        {'integration': k.split('/')[0] or integration,
         'key': '/'.join(k.split('/')[1:])}
        for k in keys]
    print_output('table', table_content, ['integration', 'key'])

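# Many snippets in this section share the same State helper (exists/add/get/ls
# plus dict-style access). Its real implementation is not shown here; the
# class below is a minimal in-memory sketch inferred from those call sites,
# and is hypothetical rather than the actual S3-backed class.
class InMemoryState:
    def __init__(self):
        self._data = {}

    def exists(self, key):
        # used by the integrations to skip already-processed items
        return key in self._data

    def add(self, key, value=None, force=False):
        # refuse to overwrite an existing key unless force=True,
        # matching how the snippets call add(..., force=True) to update
        if not force and key in self._data:
            raise KeyError(f'state key already exists: {key}')
        self._data[key] = value

    def get(self, key, default=None):
        return self._data.get(key, default)

    def ls(self):
        return list(self._data.keys())

    def __getitem__(self, key):
        # raises KeyError for missing keys, as the sql-query snippet expects
        return self._data[key]

    def __setitem__(self, key, value):
        self._data[key] = value
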
def run(dry_run=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    credentials_requests = queries.get_credentials_requests()

    # validate no 2 requests have the same name
    credentials_requests_names = \
        set([r['name'] for r in credentials_requests])
    if len(credentials_requests) != len(credentials_requests_names):
        logging.error('request names must be unique.')
        sys.exit(1)

    error = False

    credentials_requests_to_send = \
        [r for r in credentials_requests if not state.exists(r['name'])]
    for credentials_request_to_send in credentials_requests_to_send:
        user = credentials_request_to_send['user']
        org_username = user['org_username']
        public_gpg_key = user.get('public_gpg_key')
        credentials_name = credentials_request_to_send['credentials']
        if not public_gpg_key:
            error = True
            logging.error(
                f"user {org_username} does not have a public gpg key")
            continue
        logging.info(['send_credentials', org_username, credentials_name])

        if not dry_run:
            request_name = credentials_request_to_send['name']
            names = [org_username]
            subject = request_name
            encrypted_credentials = \
                get_encrypted_credentials(credentials_name, user, settings)
            if not encrypted_credentials:
                error = True
                logging.error(
                    f"could not get encrypted credentials {credentials_name}")
                continue
            body = MESSAGE_TEMPLATE.format(
                request_name, credentials_name, encrypted_credentials)
            smtp_client.send_mail(names, subject, body, settings=settings)
            state.add(request_name)

    if error:
        sys.exit(1)

def test_create_run_and_return_kmeans(self):
    state = State('RI', '2015', 0, 0.5, 10, 0, 0.1, cdist, 10)
    state.load_estimation()
    state.K = 2
    state.X = np.stack(
        (np.asarray(state.df['Centroid Latitude'].apply(float)),
         np.asarray(state.df['Centroid Longitude'].apply(float))),
        axis=1)
    state.counts = list(state.df[state.population].apply(float))
    state.create_run_and_return_kmeans()

def test_run_stats(self):
    state = State('RI', '2015', 0, 0.5, 10, 0, 0.1, cdist, 10,
                  unique_filename_for_state='RI_2017_10_07_14_17_05')
    state.run_stats()
    self.assertTrue(os.path.isfile(state.stats_filename))
    self.assertTrue(os.path.isfile(state.means_filename))

def __init__(self):
    ns = 'voxel_map/'
    self._visual_threshold = rospy.get_param(ns + 'visual_threshold')
    voxels_per_side = rospy.get_param(ns + 'voxels_per_side')
    width_on_a_side = rospy.get_param(ns + 'width_on_a_side')
    initial_prob = rospy.get_param(ns + 'initial_prob')
    pD = rospy.get_param(ns + 'pD')
    pFA = rospy.get_param(ns + 'pFA')
    ps = rospy.get_param(ns + 'ps')
    pt = rospy.get_param(ns + 'pt')
    self.map = VoxelMap(voxels_per_side, width_on_a_side, initial_prob,
                        pD, pFA, ps, pt)

    rospy.Subscriber("truth/NED", Odometry,
                     callback=self.state_cb, queue_size=1)
    rospy.Subscriber("triang_points", PointCloud,
                     callback=self.measurement_cb)
    self.map_pub = rospy.Publisher("voxel_map", PointCloud, queue_size=1)
    self.vis_pub = rospy.Publisher("voxel_map_visual", PointCloud,
                                   queue_size=1)
    self.map_center_pub = rospy.Publisher("voxel_map_center", Odometry,
                                          queue_size=1)

    self.state = State()
    self.blank_cloud = PointCloud()
    zero_point = Point32(0, 0, 0)
    self.blank_cloud.points = [zero_point] * self.map.N**3

def slack_notify(self, dry_run, aws_accounts, ri):
    result = self._get_deployment_result(dry_run, ri)
    state = State(
        integration=self.integration,
        accounts=aws_accounts,
        settings=self.settings
    )
    for saas_file in self.saas_files:
        self.github = self._initiate_github(saas_file)
        saas_file_name = saas_file['name']
        for resource_template in saas_file['resourceTemplates']:
            url = resource_template['url']
            hash_length = resource_template['hash_length']
            resource_template_name = resource_template['name']
            for target in resource_template['targets']:
                if not target.get('notify'):
                    continue
                cluster, namespace = \
                    self._get_cluster_and_namespace(target)
                target_hash = target['hash']
                desired_commit_sha = \
                    self._get_commit_sha(url, target_hash, hash_length)
                state_key_format = "{}/{}/{}/{}"
                state_key = state_key_format.format(
                    saas_file_name,
                    resource_template_name,
                    cluster,
                    namespace
                )
                current_commit_sha = state.get(state_key, None)
                if current_commit_sha != desired_commit_sha:
                    slack_info = saas_file.get('slack')
                    if slack_info:
                        slack = self._init_slack(slack_info)
                        msg_format = "[{}] {} deployment to {}/{}: {}"
                        msg = msg_format.format(
                            saas_file_name,
                            resource_template_name,
                            cluster,
                            namespace,
                            result
                        )
                        channel = slack.chat_kwargs['channel']
                        logging.info(['slack_notify', channel, msg])
                        if not dry_run:
                            state[state_key] = desired_commit_sha
                            slack.chat_post_message(msg)

def test_plot(self):
    state = State('RI', '2015', 0, 0.5, 10, 0, 0.1, cdist, 10,
                  unique_filename_for_state='RI_2017_10_07_14_17_05')
    state.plot()
    self.assertTrue(
        os.path.isfile(
            state.cong_dist_plot_dir + state.state + '_cong_dist'))
    self.assertTrue(os.path.isfile(state.clusters_plot_filename + '.png'))
    self.assertTrue(os.path.isfile(state.clusters_plot_filename + '.pdf'))

def test_get_stats(self):
    state = State('RI', '2015', 0, 0.5, 10, 0, 0.1, cdist, 10,
                  unique_filename_for_state='RI_2017_10_07_14_17_05')
    stats_df = state.get_stats()
    self.assertIsInstance(stats_df, DataFrame)
    self.assertTrue('Population' in stats_df.columns)
    self.assertTrue('Number of Blocks' in stats_df.columns)
    self.assertTrue('Is Cluster' in stats_df.columns)
    self.assertTrue('Mean Pairwise Distance' in stats_df.columns)
    self.assertTrue(
        'Cluster/Congressional District ID' in stats_df.columns)

def strategy(self):
    # past state
    self.past_state = self.state
    # current state
    self.state = State(
        self.my_pose,       # (1, 2)
        self.lidar_ranges,  # (1, 1, 360)
        self.image,         # (1, 3, 480, 640)
        self.mask,          # (1, 18)
    )

    if self.action is not None:
        current_score = copy.deepcopy(self.score)
        reward = self.get_reward(self.past_score, current_score)
        print("reward: {}".format(reward))
        self.past_score = current_score
        reward = torch.LongTensor([reward])
        self.agent.memorize(self.past_state, self.action, self.state, reward)

    # manual wall avoidance
    if not self.punish_if_facing_wall:
        avoid, linear_x, angular_z = manual_avoid_wall_2(
            self.lidar_ranges, dist_th=0.13, back_vel=0.2)
    else:
        avoid = False

    if avoid:
        self.action = None
    else:
        # get action from agent
        if self.step % 3 == 0:
            policy = "boltzmann"
        else:
            policy = "epsilon"
        self.action = self.agent.get_action(
            self.state, self.episode, policy, self.debug)
        choice = int(self.action.item())
        linear_x = ACTION_LIST[choice][0]
        angular_z = ACTION_LIST[choice][1]

    print("step: {}, vel:{}, omega:{}".format(self.step, linear_x, angular_z))

    # update twist
    twist = Twist()
    twist.linear.x = linear_x
    twist.linear.y = 0.0
    twist.linear.z = 0.0
    twist.angular.x = 0.0
    twist.angular.y = 0.0
    twist.angular.z = angular_z
    self.twist_pub.publish(twist)

    self.step += 1

def emigrate(self, fraction):
    """
    Generates a State describing a population slice. This population
    slice is then deducted from the Agent's population pool.

    :param fraction: float in range 0.0 < x < 1.0 denoting what fraction
        of the population is emigrating.
    """
    if not 0.0 < fraction < 1.0:
        raise ValueError(
            "Emigration fraction not in valid range 0.0 < x < 1.0")

    # Compute emigrant slice
    S, E, I, R, N = self.state()
    n_emigrants = int(N * fraction)  # int() floors/truncates the number.

    # Update the current Agent's population count
    new_state = State(S, E, I, R, N - n_emigrants)
    self.set_state(new_state)

    return State(S, E, I, R, n_emigrants)

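# A minimal usage sketch for emigrate(), assuming the Agent/State/Parameters
# constructors shown in the SEIRS script later in this section; the concrete
# numbers are illustrative only.
agent = Agent('A', State(0.99, 0, 0.01, 0, 10000), initial_params)
emigrants = agent.emigrate(0.1)  # 10% of the population leaves
# The slice keeps the same S/E/I/R distribution; only N changes, and the
# total population is conserved across the two resulting states.
assert emigrants.N == 1000
assert agent.state().N == 9000
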
def run(dry_run, io_dir='throughput/', defer=None):
    jjb, additional_repo_urls = init_jjb()
    defer(lambda: jjb.cleanup())

    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=jjb.settings)

    if dry_run:
        validate_repos_and_admins(jjb, additional_repo_urls)
        jjb.generate(io_dir, 'desired')
        jjb.overwrite_configs(state)
        jjb.generate(io_dir, 'current')
        jjb.print_diffs(io_dir)
    else:
        jjb.update()
        configs = jjb.get_configs()
        for name, desired_config in configs.items():
            state.add(name, value=desired_config, force=True)

def test_create_unique_filename_for_state(self):
    state = State('RI', '2015', 0, 0.5, 10, 0, 0.1, cdist, 10)
    # Check new filename is a unicode string:
    self.assertIsInstance(state.unique_filename_for_state, unicode)
    # Check it really is unique:
    self.assertFalse(
        os.path.isfile(state.clustered_geojson_results_dir +
                       state.unique_filename_for_state))
    self.assertFalse(
        os.path.isfile(state.clustered_csv_results_dir +
                       state.unique_filename_for_state))

def test_init(self):
    state = State('RI', '2015', 0, 0.5, 10, 0, 0.1, cdist, 10)
    self.assertTrue(state.state == 'RI')
    self.assertTrue(state.year == '2015')
    self.assertTrue(state.alpha == 0)
    self.assertTrue(state.alpha_increment == 0.5)
    self.assertTrue(state.alpha_max == 10)
    self.assertTrue(state.beta == 0)
    self.assertTrue(state.beta_increment == 0.1)
    self.assertTrue(state.dist == cdist)
    self.assertTrue(state.max_runs == 10)
    self.assertTrue(state.population == 'Predicted 2015 Population')
    self.assertIsInstance(state.cong_dist, DataFrame)

def run(dry_run):
    unleash_instances = queries.get_unleash_instances()
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    for unleash_instance in unleash_instances:
        instance_name = unleash_instance['name']
        current_state = fetch_current_state(unleash_instance)
        previous_state = fetch_previous_state(state, instance_name)
        diffs = calculate_diff(current_state, previous_state)
        if diffs:
            act(dry_run, state, unleash_instance, diffs)

def run(dry_run):
    jira_boards = [j for j in queries.get_jira_boards() if j.get('slack')]
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    for index, jira_board in enumerate(jira_boards):
        if not is_in_shard_round_robin(jira_board['name'], index):
            continue
        jira, current_state = fetch_current_state(jira_board, settings)
        previous_state = fetch_previous_state(state, jira.project)
        if previous_state:
            diffs = calculate_diff(jira.server, current_state, previous_state)
            act(dry_run, jira_board, diffs)
        if not dry_run:
            write_state(state, jira.project, current_state)

def run(dry_run):
    unleash_instances = queries.get_unleash_instances()
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)
    for unleash_instance in unleash_instances:
        instance_name = unleash_instance['name']
        current_state = fetch_current_state(unleash_instance)
        if not current_state:
            logging.warning('not acting on empty Unleash instances. '
                            'please create a feature toggle to get started.')
            continue
        previous_state = fetch_previous_state(state, instance_name)
        diffs = calculate_diff(current_state, previous_state)
        if diffs:
            act(dry_run, state, unleash_instance, diffs)

def get_outputs(self, state: GameTickPacket) -> Dict[int, Action]:
    """Returns a dictionary where the keys are indices of your drones and
    the values are PlayerInput objects (the controller inputs)"""
    # check if round is active
    if state.game_info.is_round_active:
        # round is active, update state and make bot decisions
        self.stopped = False

        current_state = State.from_packet(
            state, self.drone_actions, self.enemy_actions)
        current_state.dt = C.DT
        self.drone_actions, self.enemy_actions = self.choose_action(
            current_state)

        # finalize stored packets
        for i in self.drone_actions:
            current_state.drones[i].current_action = self.drone_actions[i]
        for i in self.enemy_actions:
            current_state.enemies[i].current_action = self.enemy_actions[i]

        last_state = self.initial_states[-1]
        if last_state is not None:
            last_state.dt = state.game_info.seconds_elapsed - self.time
            self.final_states.append(current_state)
            self.initial_states.append(current_state)
        else:
            self.initial_states[-1] = current_state
        self.time = state.game_info.seconds_elapsed

        # transform into RLBot compatible inputs
        self.controls = {
            i: PlayerInput(*self.drone_actions[i])
            for i in self.drone_indices
        }
    elif not self.stopped:
        # first interrupted frame, restart data capture
        self.stopped = True
        self.initial_states[-1] = None

        # train predictor if possible
        if self.final_states:
            self.predictor.train(self.initial_states[:-1], self.final_states)

        # set controls to nothing
        self.controls = {
            i: PlayerInput(*C.NOTHING)
            for i in self.drone_indices
        }

    return self.controls

def immigrate(self, immigration_slice):
    # Get immigrant data
    n_im = immigration_slice.N
    distribution_im = np.asarray(immigration_slice)[:4]

    # Get local data
    n_local = self.__history[-1].N
    distribution_local = np.asarray(self.state())[:4]

    # Compute new local distribution (population-weighted average)
    distribution_post = np.add(distribution_im * n_im,
                               distribution_local * n_local) / (n_im + n_local)

    # Replace the current state with a state that includes the new immigration
    state_post = State(distribution_post[0],
                       distribution_post[1],
                       distribution_post[2],
                       distribution_post[3],
                       n_im + n_local)
    self.__history[-1] = state_post

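# A minimal sketch of a migration step between two agents, pairing the
# emigrate()/immigrate() methods above; the agents and numbers are
# illustrative, reusing the constructors from the SEIRS script below.
agent_a = Agent('A', State(0.99, 0, 0.01, 0, 10000), initial_params)
agent_b = Agent('B', State(1.0, 0, 0, 0, 10000), initial_params)

migrants = agent_a.emigrate(0.05)  # 5% of A's population leaves
agent_b.immigrate(migrants)        # B absorbs the slice

# B's compartment distribution becomes the population-weighted average of
# its own distribution and the migrants', and total population is conserved.
assert agent_a.state().N + agent_b.state().N == 20000
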
def __init__(self):
    ns = 'voxel_map/'
    self._visual_threshold = rospy.get_param(ns + 'visual_threshold')
    self.map = VoxelMap()

    rospy.Subscriber("truth/NED", Odometry,
                     callback=self.state_cb, queue_size=1)
    rospy.Subscriber("triang_points", PointCloud,
                     callback=self.measurement_cb)
    self.map_pub = rospy.Publisher("voxel_map", PointCloud, queue_size=1)
    self.vis_pub = rospy.Publisher("voxel_map_visual", PointCloud,
                                   queue_size=1)

    self.state = State()
    self.blank_cloud = PointCloud()
    zero_point = Point32(0, 0, 0)
    self.blank_cloud.points = [zero_point] * self.map.N**3

def run_skill(state: State, modes: List = [skill_attrs.modes.intro]):
    skill_state = state.get_skill_state(skill_attrs.skill_name)
    model_results = run_models(models, state.human_utterances)
    true_model_names = cmd_postprocessing(model_results, model_name_only=True)
    true_cmds = cmd_postprocessing(model_results, cmd_only=True)

    text = "Sorry, have no idea what to say."
    confidence = 0.0
    scenario = False
    skill_state_update = {}

    # print(f"<{skill_attrs.skill_name}> true_model_names: {true_model_names}")
    # print(f"<{skill_attrs.skill_name}> true_cmds: {true_cmds}")

    if skill_attrs.modes.intro in modes:
        if set(true_model_names) & set(
                ["last_year", "this_year", "month", "week"]):
            state, text, confidence, skill_state_update, scenario = \
                select_top_handler(
                    state, skill_state, true_model_names, true_cmds)
        else:
            state, text, confidence, skill_state_update, scenario = \
                intro_handler(
                    state, skill_state, true_model_names, true_cmds)
    else:
        current_step = skill_state.get("next_step", "")
        if current_step == "describe_top":
            state, text, confidence, skill_state_update, scenario = \
                describe_top_handler(
                    state, skill_state, true_model_names, true_cmds)
            state.update_st2_policy({"game_conversation": True})
        elif current_step == "select_top" or (set(true_model_names) & set(
                ["last_year", "this_year", "month", "week"])):
            state, text, confidence, skill_state_update, scenario = \
                select_top_handler(
                    state, skill_state, true_model_names, true_cmds)

    state.add_hypothesis(
        skill_name=skill_attrs.skill_name,
        text=text,
        confidence=confidence,
        scenario=scenario,
    )
    state.update_skill_state(skill_attrs.skill_name, skill_state_update)

    return state

class SaasHerder():
    """Wrapper around SaaS deployment actions."""

    def __init__(self, saas_files,
                 thread_pool_size,
                 gitlab,
                 integration,
                 integration_version,
                 settings,
                 jenkins_map=None,
                 accounts=None,
                 validate=False):
        self.saas_files = saas_files
        if validate:
            self._validate_saas_files()
            if not self.valid:
                return
        self.thread_pool_size = thread_pool_size
        self.gitlab = gitlab
        self.integration = integration
        self.integration_version = integration_version
        self.settings = settings
        self.secret_reader = SecretReader(settings=settings)
        self.namespaces = self._collect_namespaces()
        self.jenkins_map = jenkins_map
        # each namespace is in fact a target,
        # so we can use it to calculate.
        divisor = len(self.namespaces) or 1
        self.available_thread_pool_size = \
            threaded.estimate_available_thread_pool_size(
                self.thread_pool_size,
                divisor)
        # if called by a single saas file, it may
        # specify that it manages resources exclusively.
        self.take_over = self._get_saas_file_attribute('takeover')
        self.compare = self._get_saas_file_attribute('compare')
        self.publish_job_logs = \
            self._get_saas_file_attribute('publishJobLogs')
        if accounts:
            self._initiate_state(accounts)

    def _get_saas_file_attribute(self, attribute):
        return len(self.saas_files) == 1 and self.saas_files[0].get(attribute)

    def _validate_saas_files(self):
        self.valid = True
        saas_file_name_path_map = {}
        for saas_file in self.saas_files:
            saas_file_name = saas_file['name']
            saas_file_path = saas_file['path']
            saas_file_name_path_map.setdefault(saas_file_name, [])
            saas_file_name_path_map[saas_file_name].append(saas_file_path)

            saas_file_owners = [u['org_username']
                                for r in saas_file['roles']
                                for u in r['users']]
            if not saas_file_owners:
                msg = 'saas file {} has no owners: {}'
                logging.error(msg.format(saas_file_name, saas_file_path))
                self.valid = False

            for resource_template in saas_file['resourceTemplates']:
                resource_template_name = resource_template['name']
                for target in resource_template['targets']:
                    target_parameters = target['parameters']
                    if not target_parameters:
                        continue
                    target_parameters = json.loads(target_parameters)
                    target_namespace = target['namespace']
                    namespace_name = target_namespace['name']
                    cluster_name = target_namespace['cluster']['name']
                    environment = target_namespace['environment']
                    environment_name = environment['name']
                    environment_parameters = environment['parameters']
                    if not environment_parameters:
                        continue
                    environment_parameters = \
                        json.loads(environment_parameters)
                    msg = \
                        f'[{saas_file_name}/{resource_template_name}] ' + \
                        'parameter found in target ' + \
                        f'{cluster_name}/{namespace_name} ' + \
                        f'should be reused from env {environment_name}'
                    for t_key, t_value in target_parameters.items():
                        if not isinstance(t_value, str):
                            continue
                        for e_key, e_value in \
                                environment_parameters.items():
                            if not isinstance(e_value, str):
                                continue
                            if '.' not in e_value:
                                continue
                            if e_value not in t_value:
                                continue
                            if t_key == e_key and t_value == e_value:
                                details = \
                                    f'consider removing {t_key}'
                            else:
                                replacement = t_value.replace(
                                    e_value, '${' + e_key + '}')
                                details = \
                                    f'target: "{t_key}: {t_value}". ' + \
                                    f'env: "{e_key}: {e_value}". ' + \
                                    f'consider "{t_key}: {replacement}"'
                            logging.warning(f'{msg}: {details}')

        duplicates = {saas_file_name: saas_file_paths
                      for saas_file_name, saas_file_paths
                      in saas_file_name_path_map.items()
                      if len(saas_file_paths) > 1}
        if duplicates:
            self.valid = False
            msg = 'saas file name {} is not unique: {}'
            for saas_file_name, saas_file_paths in duplicates.items():
                logging.error(msg.format(saas_file_name, saas_file_paths))

    def _collect_namespaces(self):
        # namespaces may appear more than once in the result
        namespaces = []
        for saas_file in self.saas_files:
            managed_resource_types = saas_file['managedResourceTypes']
            resource_templates = saas_file['resourceTemplates']
            for rt in resource_templates:
                targets = rt['targets']
                for target in targets:
                    namespace = target['namespace']
                    if target.get('disable'):
                        logging.debug(
                            f"[{saas_file['name']}/{rt['name']}] target " +
                            f"{namespace['cluster']['name']}/" +
                            f"{namespace['name']} is disabled.")
                        continue
                    # managedResourceTypes is defined per saas_file
                    # add it to each namespace in the current saas_file
                    namespace['managedResourceTypes'] = \
                        managed_resource_types
                    namespaces.append(namespace)
        return namespaces

    def _initiate_state(self, accounts):
        self.state = State(
            integration=self.integration,
            accounts=accounts,
            settings=self.settings
        )

    @staticmethod
    def _collect_parameters(container):
        parameters = container.get('parameters') or {}
        if isinstance(parameters, str):
            parameters = json.loads(parameters)
        # adjust Python's True/False
        for k, v in parameters.items():
            if v is True:
                parameters[k] = 'true'
            elif v is False:
                parameters[k] = 'false'
            elif any([isinstance(v, t) for t in [dict, list, tuple]]):
                parameters[k] = json.dumps(v)
        return parameters

    @retry()
    def _get_file_contents(self, options):
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = os.path.join(url, 'blob', ref, path)
        content = None
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            f = repo.get_contents(path, ref)
            content = f.decoded_content
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            f = project.files.get(file_path=path.lstrip('/'), ref=ref)
            content = f.decode()
        return yaml.safe_load(content), html_url

    @retry()
    def _get_directory_contents(self, options):
        url = options['url']
        path = options['path']
        ref = options['ref']
        github = options['github']
        html_url = os.path.join(url, 'tree', ref, path)
        resources = []
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            for f in repo.get_contents(path, ref):
                file_path = os.path.join(path, f.name)
                file_contents = repo.get_contents(file_path, ref)
                resource = yaml.safe_load(file_contents.decoded_content)
                resources.append(resource)
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            for f in project.repository_tree(path=path.lstrip('/'),
                                             ref=ref, all=True):
                file_contents = \
                    project.files.get(file_path=f['path'], ref=ref)
                resource = yaml.safe_load(file_contents.decode())
                resources.append(resource)
        return resources, html_url

    @retry()
    def _get_commit_sha(self, options):
        url = options['url']
        ref = options['ref']
        github = options['github']
        hash_length = options.get('hash_length')
        commit_sha = ''
        if 'github' in url:
            repo_name = url.rstrip("/").replace('https://github.com/', '')
            repo = github.get_repo(repo_name)
            commit = repo.get_commit(sha=ref)
            commit_sha = commit.sha
        elif 'gitlab' in url:
            if not self.gitlab:
                raise Exception('gitlab is not initialized')
            project = self.gitlab.get_project(url)
            commits = project.commits.list(ref_name=ref)
            commit_sha = commits[0].id

        if hash_length:
            return commit_sha[:hash_length]

        return commit_sha

    @staticmethod
    def _get_cluster_and_namespace(target):
        cluster = target['namespace']['cluster']['name']
        namespace = target['namespace']['name']
        return cluster, namespace

    def _process_template(self, options):
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        url = options['url']
        path = options['path']
        provider = options['provider']
        target = options['target']
        github = options['github']
        target_ref = target['ref']

        resources = None
        html_url = None
        if provider == 'openshift-template':
            hash_length = options['hash_length']
            parameters = options['parameters']
            environment = target['namespace']['environment']
            environment_parameters = self._collect_parameters(environment)
            target_parameters = self._collect_parameters(target)

            consolidated_parameters = {}
            consolidated_parameters.update(environment_parameters)
            consolidated_parameters.update(parameters)
            consolidated_parameters.update(target_parameters)

            for replace_key, replace_value in \
                    consolidated_parameters.items():
                if not isinstance(replace_value, str):
                    continue
                replace_pattern = '${' + replace_key + '}'
                for k, v in consolidated_parameters.items():
                    if not isinstance(v, str):
                        continue
                    if replace_pattern in v:
                        consolidated_parameters[k] = \
                            v.replace(replace_pattern, replace_value)

            get_file_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }

            try:
                template, html_url = \
                    self._get_file_contents(get_file_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching template: {str(e)}")
                return None, None

            if "IMAGE_TAG" not in consolidated_parameters:
                template_parameters = template.get('parameters')
                if template_parameters is not None:
                    for template_parameter in template_parameters:
                        if template_parameter['name'] == 'IMAGE_TAG':
                            # add IMAGE_TAG only if it is required
                            get_commit_sha_options = {
                                'url': url,
                                'ref': target_ref,
                                'hash_length': hash_length,
                                'github': github
                            }
                            image_tag = self._get_commit_sha(
                                get_commit_sha_options)
                            consolidated_parameters['IMAGE_TAG'] = \
                                image_tag

            oc = OC('server', 'token', local=True)
            try:
                resources = oc.process(template, consolidated_parameters)
            except StatusCodeError as e:
                logging.error(
                    f"[{saas_file_name}/{resource_template_name}] " +
                    f"{html_url}: error processing template: {str(e)}")
        elif provider == 'directory':
            get_directory_contents_options = {
                'url': url,
                'path': path,
                'ref': target_ref,
                'github': github
            }
            try:
                resources, html_url = \
                    self._get_directory_contents(
                        get_directory_contents_options)
            except Exception as e:
                logging.error(f"[{url}/{path}:{target_ref}] " +
                              f"error fetching directory: {str(e)}")
                return None, None
        else:
            logging.error(
                f"[{saas_file_name}/{resource_template_name}] " +
                f"unknown provider: {provider}")

        return resources, html_url

    def _collect_images(self, resource):
        images = set()
        # resources with pod templates
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # init containers
        try:
            template = resource["spec"]["template"]
            for c in template["spec"]["initContainers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CronJob
        try:
            template = resource["spec"]["jobTemplate"]["spec"]["template"]
            for c in template["spec"]["containers"]:
                images.add(c["image"])
        except KeyError:
            pass
        # CatalogSource templates
        try:
            images.add(resource["spec"]["image"])
        except KeyError:
            pass

        return images

    @staticmethod
    def _check_image(image, image_patterns, image_auth, error_prefix):
        error = False
        if image_patterns and \
                not any(image.startswith(p) for p in image_patterns):
            error = True
            logging.error(
                f"{error_prefix} Image is not in imagePatterns: {image}")
        try:
            valid = Image(image, **image_auth)
            if not valid:
                error = True
                logging.error(
                    f"{error_prefix} Image does not exist: {image}")
        except Exception as e:
            error = True
            logging.error(f"{error_prefix} Image is invalid: {image}. " +
                          f"details: {str(e)}")

        return error

    def _check_images(self, options):
        saas_file_name = options['saas_file_name']
        resource_template_name = options['resource_template_name']
        html_url = options['html_url']
        resources = options['resources']
        image_auth = options['image_auth']
        image_patterns = options['image_patterns']
        error_prefix = \
            f"[{saas_file_name}/{resource_template_name}] {html_url}:"

        images_list = threaded.run(self._collect_images, resources,
                                   self.available_thread_pool_size)
        images = set([item for sublist in images_list for item in sublist])
        if not images:
            return False  # no errors
        errors = threaded.run(self._check_image, images,
                              self.available_thread_pool_size,
                              image_patterns=image_patterns,
                              image_auth=image_auth,
                              error_prefix=error_prefix)
        error = True in errors
        return error

    def _initiate_github(self, saas_file):
        auth = saas_file.get('authentication') or {}
        auth_code = auth.get('code') or {}
        if auth_code:
            token = self.secret_reader.read(auth_code)
        else:
            # use the app-sre token by default
            default_org_name = 'app-sre'
            config = get_config(desired_org_name=default_org_name)
            token = config['github'][default_org_name]['token']

        base_url = os.environ.get('GITHUB_API', 'https://api.github.com')
        return Github(token, base_url=base_url)

    def _initiate_image_auth(self, saas_file):
        """
        This function initiates a dict required for image authentication.
        This dict will be used as kwargs for sretoolbox's Image.

        The image authentication secret specified in the saas file must
        contain the 'user' and 'token' keys, and may optionally contain a
        'url' key specifying the image registry url, used to decide whether
        an image should be checked using these credentials.

        The function returns the keys extracted from the secret in the
        structure expected by sretoolbox's Image:
        'user' --> 'username'
        'token' --> 'password'
        'url' --> 'auth_server' (optional)
        """
        auth = saas_file.get('authentication')
        if not auth:
            return {}

        auth_image_secret = auth.get('image')
        if not auth_image_secret:
            return {}

        creds = self.secret_reader.read_all(auth_image_secret)
        required_keys = ['user', 'token']
        ok = all(k in creds.keys() for k in required_keys)
        if not ok:
            logging.warning(
                "the specified image authentication secret " +
                f"found in path {auth_image_secret['path']} " +
                f"does not contain all required keys: {required_keys}")
            return {}

        image_auth = {'username': creds['user'],
                      'password': creds['token']}
        url = creds.get('url')
        if url:
            image_auth['auth_server'] = url

        return image_auth

    def populate_desired_state(self, ri):
        results = threaded.run(self.init_populate_desired_state_specs,
                               self.saas_files,
                               self.thread_pool_size)
        desired_state_specs = \
            [item for sublist in results for item in sublist]
        threaded.run(self.populate_desired_state_saas_file,
                     desired_state_specs,
                     self.thread_pool_size,
                     ri=ri)

    def init_populate_desired_state_specs(self, saas_file):
        specs = []
        saas_file_name = saas_file['name']
        github = self._initiate_github(saas_file)
        image_auth = self._initiate_image_auth(saas_file)
        instance_name = saas_file['instance']['name']
        managed_resource_types = saas_file['managedResourceTypes']
        image_patterns = saas_file['imagePatterns']
        resource_templates = saas_file['resourceTemplates']
        saas_file_parameters = self._collect_parameters(saas_file)
        # iterate over resource templates (multiple per saas_file)
        for rt in resource_templates:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            provider = rt.get('provider') or 'openshift-template'
            hash_length = rt.get('hash_length') or \
                self.settings['hashLength']
            parameters = self._collect_parameters(rt)

            consolidated_parameters = {}
            consolidated_parameters.update(saas_file_parameters)
            consolidated_parameters.update(parameters)

            # iterate over targets (each target is a namespace)
            for target in rt['targets']:
                if target.get('disable'):
                    # a warning is logged during SaasHerder initiation
                    continue
                cluster, namespace = \
                    self._get_cluster_and_namespace(target)
                process_template_options = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'url': url,
                    'path': path,
                    'provider': provider,
                    'hash_length': hash_length,
                    'target': target,
                    'parameters': consolidated_parameters,
                    'github': github
                }
                check_images_options_base = {
                    'saas_file_name': saas_file_name,
                    'resource_template_name': rt_name,
                    'image_auth': image_auth,
                    'image_patterns': image_patterns
                }
                spec = {
                    'saas_file_name': saas_file_name,
                    'cluster': cluster,
                    'namespace': namespace,
                    'managed_resource_types': managed_resource_types,
                    'process_template_options': process_template_options,
                    'check_images_options_base': check_images_options_base,
                    'instance_name': instance_name,
                    'upstream': target.get('upstream')
                }
                specs.append(spec)

        return specs

    def populate_desired_state_saas_file(self, spec, ri):
        saas_file_name = spec['saas_file_name']
        cluster = spec['cluster']
        namespace = spec['namespace']
        managed_resource_types = spec['managed_resource_types']
        process_template_options = spec['process_template_options']
        check_images_options_base = spec['check_images_options_base']
        instance_name = spec['instance_name']
        upstream = spec['upstream']

        resources, html_url = \
            self._process_template(process_template_options)
        if resources is None:
            ri.register_error()
            return
        # filter resources
        resources = [resource for resource in resources
                     if isinstance(resource, dict)
                     and resource['kind'] in managed_resource_types]
        # check images
        skip_check_images = upstream and self.jenkins_map and \
            self.jenkins_map[instance_name].is_job_running(upstream)
        if skip_check_images:
            logging.warning("skipping check_image since " +
                            f"upstream job {upstream} is running")
        else:
            check_images_options = {
                'html_url': html_url,
                'resources': resources
            }
            check_images_options.update(check_images_options_base)
            image_error = self._check_images(check_images_options)
            if image_error:
                ri.register_error()
                return

        # add desired resources
        for resource in resources:
            resource_kind = resource['kind']
            resource_name = resource['metadata']['name']
            oc_resource = OR(
                resource,
                self.integration,
                self.integration_version,
                caller_name=saas_file_name,
                error_details=html_url)
            ri.add_desired(
                cluster,
                namespace,
                resource_kind,
                resource_name,
                oc_resource
            )

    def get_moving_commits_diff(self, dry_run):
        results = threaded.run(self.get_moving_commits_diff_saas_file,
                               self.saas_files,
                               self.thread_pool_size,
                               dry_run=dry_run)
        return [item for sublist in results for item in sublist]

    def get_moving_commits_diff_saas_file(self, saas_file, dry_run):
        saas_file_name = saas_file['name']
        instance_name = saas_file['instance']['name']
        github = self._initiate_github(saas_file)
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            for target in rt['targets']:
                # don't trigger if there is a linked upstream job
                if target.get('upstream'):
                    continue
                ref = target['ref']
                get_commit_sha_options = {
                    'url': url,
                    'ref': ref,
                    'github': github
                }
                desired_commit_sha = \
                    self._get_commit_sha(get_commit_sha_options)
                # don't trigger on refs which are commit shas
                if ref == desired_commit_sha:
                    continue
                namespace = target['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}/{ref}"
                current_commit_sha = self.state.get(key, None)
                # skip if there is no change in commit sha
                if current_commit_sha == desired_commit_sha:
                    continue
                # don't trigger if this is the first time
                # this target is being deployed.
                # that will be taken care of by
                # openshift-saas-deploy-trigger-configs
                if current_commit_sha is None:
                    # store the value to take over from now on
                    if not dry_run:
                        self.state.add(key, value=desired_commit_sha)
                    continue
                # we finally found something we want to trigger on!
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'ref': ref,
                    'commit_sha': desired_commit_sha
                }
                trigger_specs.append(job_spec)

        return trigger_specs

    def update_moving_commit(self, job_spec):
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        ref = job_spec['ref']
        commit_sha = job_spec['commit_sha']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}/{ref}"
        self.state.add(key, value=commit_sha, force=True)

    def get_configs_diff(self):
        results = threaded.run(self.get_configs_diff_saas_file,
                               self.saas_files,
                               self.thread_pool_size)
        return [item for sublist in results for item in sublist]

    def get_configs_diff_saas_file(self, saas_file):
        saas_file_name = saas_file['name']
        saas_file_parameters = saas_file.get('parameters')
        saas_file_managed_resource_types = saas_file['managedResourceTypes']
        instance_name = saas_file['instance']['name']
        trigger_specs = []
        for rt in saas_file['resourceTemplates']:
            rt_name = rt['name']
            url = rt['url']
            path = rt['path']
            rt_parameters = rt.get('parameters')
            for desired_target_config in rt['targets']:
                namespace = desired_target_config['namespace']
                cluster_name = namespace['cluster']['name']
                namespace_name = namespace['name']
                env_name = namespace['environment']['name']
                desired_target_config['namespace'] = \
                    self.sanitize_namespace(namespace)
                # add parent parameters to target config
                desired_target_config['saas_file_parameters'] = \
                    saas_file_parameters
                # add managed resource types to target config
                desired_target_config['saas_file_managed_resource_types'] = \
                    saas_file_managed_resource_types
                desired_target_config['url'] = url
                desired_target_config['path'] = path
                desired_target_config['rt_parameters'] = rt_parameters
                # get current target config from state
                key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
                    f"{namespace_name}/{env_name}"
                current_target_config = self.state.get(key, None)
                # skip if there is no change in target configuration
                if current_target_config == desired_target_config:
                    continue
                job_spec = {
                    'saas_file_name': saas_file_name,
                    'env_name': env_name,
                    'instance_name': instance_name,
                    'rt_name': rt_name,
                    'cluster_name': cluster_name,
                    'namespace_name': namespace_name,
                    'target_config': desired_target_config
                }
                trigger_specs.append(job_spec)

        return trigger_specs

    @staticmethod
    def sanitize_namespace(namespace):
        """Only keep fields that should trigger a new job."""
        new_job_fields = {
            'namespace': ['name', 'cluster', 'app'],
            'cluster': ['name', 'serverUrl'],
            'app': ['name']
        }
        namespace = {k: v for k, v in namespace.items()
                     if k in new_job_fields['namespace']}
        cluster = namespace['cluster']
        namespace['cluster'] = {k: v for k, v in cluster.items()
                                if k in new_job_fields['cluster']}
        app = namespace['app']
        namespace['app'] = {k: v for k, v in app.items()
                            if k in new_job_fields['app']}
        return namespace

    def update_config(self, job_spec):
        saas_file_name = job_spec['saas_file_name']
        env_name = job_spec['env_name']
        rt_name = job_spec['rt_name']
        cluster_name = job_spec['cluster_name']
        namespace_name = job_spec['namespace_name']
        target_config = job_spec['target_config']
        key = f"{saas_file_name}/{rt_name}/{cluster_name}/" + \
            f"{namespace_name}/{env_name}"
        self.state.add(key, value=target_config, force=True)

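# A short worked example of sanitize_namespace() above, using a hypothetical
# namespace dict. Fields outside new_job_fields (here 'environment' and
# 'managedResourceTypes') are dropped so changes to them do not retrigger jobs.
ns = {
    'name': 'my-namespace',
    'environment': {'name': 'prod'},
    'managedResourceTypes': ['Deployment'],
    'cluster': {'name': 'my-cluster',
                'serverUrl': 'https://api.example.com:6443',
                'jumpHost': 'bastion'},
    'app': {'name': 'my-app', 'description': 'demo'},
}
print(SaasHerder.sanitize_namespace(ns))
# {'name': 'my-namespace',
#  'cluster': {'name': 'my-cluster',
#              'serverUrl': 'https://api.example.com:6443'},
#  'app': {'name': 'my-app'}}
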
from agent.agent import Agent
from utils.state import State, Parameters
from utils.plotting import plot_agent_history, plot_compartment_comparison

# Initialize SEIRS model params
a = 0.2   # exposure rate
b = 1.75  # measure of time to infection once exposed
g = 0.5   # rate of recovery
d = 0     # rate at which loss of immunity occurs
r = 1     # cost level??
initial_params = Parameters(a, b, g, d, r)

# Initialize agent states
init_agent_a = State(0.99, 0, 0.01, 0, 10000)
init_agent_b = State(1.0, 0, 0, 0, 10000)
init_agent_c = State(1.0, 0, 0, 0, 10000)

# Initialize agent objects
agent_a = Agent('A', init_agent_a, initial_params)
agent_b = Agent('B', init_agent_b, initial_params)
agent_c = Agent('C', init_agent_c, initial_params)
agents = [agent_a, agent_b, agent_c]

# Print initial status
print(f"Agent a initial: {init_agent_a}")
print(f"Agent b initial: {init_agent_b}")
print(f"Agent c initial: {init_agent_c}")

# Simulate for 80 iterations
for _ in range(80):
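    # The snippet omits the loop body; the lines below are a hedged sketch.
    # It assumes Agent exposes a per-tick update, called step() here; the
    # method name is hypothetical and not confirmed by the source.
    for agent in agents:
        agent.step()

# Plot the resulting trajectories with the helpers imported above
# (their exact signatures are assumptions).
plot_agent_history(agent_a)
plot_compartment_comparison(agents)
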
def run(dry_run=False, enable_deletion=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION,
                  accounts=accounts,
                  settings=settings)

    queries_list = collect_queries()
    remove_candidates = []
    for query in queries_list:
        query_name = query['name']

        # Checking the sql-query state:
        # - No state: up for execution.
        # - State is a timestamp: executed and up for removal
        #   after the JOB_TTL
        # - State is 'DONE': executed and removed.
        try:
            query_state = state[query_name]
            if query_state != 'DONE':
                remove_candidates.append({'name': query_name,
                                          'timestamp': query_state})
            continue
        except KeyError:
            pass

        job_yaml = process_template(query)
        job = yaml.safe_load(job_yaml)
        job_resource = OpenshiftResource(job, QONTRACT_INTEGRATION,
                                         QONTRACT_INTEGRATION_VERSION)
        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=queries.get_app_interface_settings(),
                        internal=None)

        openshift_base.apply(dry_run=dry_run,
                             oc_map=oc_map,
                             cluster=query['cluster'],
                             namespace=query['namespace']['name'],
                             resource_type='job',
                             resource=job_resource)

        if not dry_run:
            state[query_name] = time.time()

    for candidate in remove_candidates:
        if time.time() < candidate['timestamp'] + JOB_TTL:
            continue

        try:
            query = collect_queries(query_name=candidate['name'])[0]
        except IndexError:
            raise RuntimeError(f'sql-query {candidate["name"]} not present '
                               f'in the app-interface while its Job is still '
                               f'not removed from the cluster. Manual clean '
                               f'up is needed.')

        oc_map = OC_Map(namespaces=[query['namespace']],
                        integration=QONTRACT_INTEGRATION,
                        settings=queries.get_app_interface_settings(),
                        internal=None)

        openshift_base.delete(dry_run=dry_run,
                              oc_map=oc_map,
                              cluster=query['cluster'],
                              namespace=query['namespace']['name'],
                              resource_type='job',
                              name=query['name'],
                              enable_deletion=enable_deletion)

        if not dry_run:
            state[candidate['name']] = 'DONE'