def get_slack_usernames_from_pagerduty(pagerduties, users, usergroup):
    settings = queries.get_app_interface_settings()
    all_slack_usernames = []
    all_pagerduty_names = [get_pagerduty_name(u) for u in users]
    for pagerduty in pagerduties or []:
        pd_token = pagerduty['token']
        pd_schedule_id = pagerduty['scheduleID']
        if pd_schedule_id is not None:
            pd_resource_type = 'schedule'
            pd_resource_id = pd_schedule_id
        pd_escalation_policy_id = pagerduty['escalationPolicyID']
        if pd_escalation_policy_id is not None:
            pd_resource_type = 'escalationPolicy'
            pd_resource_id = pd_escalation_policy_id

        pd = PagerDutyApi(pd_token, settings=settings)
        pagerduty_names = pd.get_pagerduty_users(pd_resource_type,
                                                 pd_resource_id)
        if not pagerduty_names:
            continue
        # strip any '+suffix' from the PagerDuty user names
        pagerduty_names = [name.split('+', 1)[0] for name in pagerduty_names]
        slack_usernames = [get_slack_username(u)
                           for u in users
                           if get_pagerduty_name(u) in pagerduty_names]
        not_found_pagerduty_names = \
            [pagerduty_name for pagerduty_name in pagerduty_names
             if pagerduty_name not in all_pagerduty_names]
        if not_found_pagerduty_names:
            msg = (
                '[{}] PagerDuty username not found in app-interface: {} '
                '(hint: user files should contain '
                'pagerduty_username if it is different than org_username)'
            ).format(usergroup, not_found_pagerduty_names)
            logging.warning(msg)
        all_slack_usernames.extend(slack_usernames)

    return all_slack_usernames
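# A minimal usage sketch for the function above. The pagerduty entry is
# hypothetical; its shape is inferred from the keys the function reads
# ('token', 'scheduleID', 'escalationPolicyID'). Real data comes from
# app-interface, and queries.get_users() is assumed to return user files.
pagerduties = [{
    'token': {'path': 'app-sre/pagerduty', 'field': 'token'},
    'scheduleID': 'PABC123',
    'escalationPolicyID': None,
}]
users = queries.get_users()
slack_usernames = get_slack_usernames_from_pagerduty(
    pagerduties, users, usergroup='sre-team')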
def run(dry_run):
    unleash_instances = queries.get_unleash_instances()
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    for unleash_instance in unleash_instances:
        instance_name = unleash_instance['name']
        current_state = fetch_current_state(unleash_instance)
        if not current_state:
            logging.warning(
                'not acting on empty Unleash instances. '
                'please create a feature toggle to get started.'
            )
            continue
        previous_state = fetch_previous_state(state, instance_name)
        diffs = calculate_diff(current_state, previous_state)
        if diffs:
            act(dry_run, state, unleash_instance, diffs)
def fetch_current_state(dry_run, namespaces, thread_pool_size,
                        internal, use_jump_host):
    ri = ResourceInventory()
    if dry_run:
        return ri, None
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size)
    state_specs = ob.init_specs_to_fetch(
        ri,
        oc_map,
        namespaces=namespaces,
        override_managed_types=['Secret']
    )
    threaded.run(populate_oc_resources, state_specs, thread_pool_size, ri=ri)

    return ri, oc_map
def run(dry_run=False, thread_pool_size=10):
    settings = queries.get_app_interface_settings()
    zones = queries.get_dns_zones()

    desired_state, err = build_desired_state(zones)
    if err:
        sys.exit(ExitCodes.ERROR)

    participating_accounts = [z['account'] for z in zones]
    awsapi = AWSApi(thread_pool_size, participating_accounts, settings)

    current_state, err = build_current_state(awsapi)
    if err:
        sys.exit(ExitCodes.ERROR)

    actions, err = reconcile_state(current_state, desired_state)
    if err:
        sys.exit(ExitCodes.ERROR)

    # each action is a (handler, *args) tuple; the handler is invoked
    # with dry_run, the AWS API client and the remaining args
    for action in actions:
        err = action[0](dry_run, awsapi, *action[1:])
        if err:
            sys.exit(ExitCodes.ERROR)
def root_owner(ctx, cluster, namespace, kind, name):
    settings = queries.get_app_interface_settings()
    clusters = [c for c in queries.get_clusters(minimal=True)
                if c['name'] == cluster]
    oc_map = OC_Map(clusters=clusters,
                    integration='qontract-cli',
                    thread_pool_size=1,
                    settings=settings,
                    init_api_resources=True)
    oc = oc_map.get(cluster)
    obj = oc.get(namespace, kind, name)
    root_owner = oc.get_obj_root_owner(namespace, obj,
                                       allow_not_found=True,
                                       allow_not_controller=True)

    # TODO(mafriedm): fix this
    # do not sort
    ctx.obj['options']['sort'] = False
    # a bit hacky, but ¯\_(ツ)_/¯
    if ctx.obj['options']['output'] != 'json':
        ctx.obj['options']['output'] = 'yaml'

    print_output(ctx.obj['options'], root_owner)
def run(dry_run=False, thread_pool_size=10, defer=None):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    aws_accounts = queries.get_aws_accounts()
    gl = GitLabApi(instance, settings=settings)

    saas_files = queries.get_saas_files()
    saasherder = SaasHerder(
        saas_files,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION)
    defer(oc_map.cleanup)
    saasherder.populate_desired_state(ri)
    # do not delete resources if errors were registered while
    # populating the desired state
    enable_deletion = not ri.has_error_registered()
    ob.realize_data(dry_run, oc_map, ri, enable_deletion=enable_deletion)
    saasherder.slack_notify(dry_run, aws_accounts, ri)
def run(dry_run, gitlab_project_id=None, gitlab_merge_request_id=None) -> None:
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, project_id=gitlab_project_id, settings=settings)
    project_labels = gl.get_project_labels()
    labels = gl.get_merge_request_labels(gitlab_merge_request_id)
    changed_paths = \
        gl.get_merge_request_changed_paths(gitlab_merge_request_id)
    guessed_labels = guess_labels(project_labels, changed_paths)
    labels_to_add = [label for label in guessed_labels
                     if label not in labels]
    labels_to_create = [label for label in labels_to_add
                        if label not in project_labels]

    # This integration cannot honor dry-run mode: it is always
    # invoked with the dry_run flag set to true.
    if labels_to_create:
        logging.info(['create_labels', labels_to_create])
        for label in labels_to_create:
            gl.create_label(label, LABEL_COLOR)

    if labels_to_add:
        logging.info(['add_labels', labels_to_add])
        gl.add_labels_to_merge_request(gitlab_merge_request_id, labels_to_add)
def init_slack(jira_board):
    settings = queries.get_app_interface_settings()
    slack_info = jira_board['slack']
    slack_integrations = slack_info['workspace']['integrations']
    jira_config = [i for i in slack_integrations
                   if i['name'] == QONTRACT_INTEGRATION]
    # unpack; raises if there is not exactly one matching integration
    [jira_config] = jira_config

    token = jira_config['token']
    default_channel = jira_config['channel']
    icon_emoji = jira_config['icon_emoji']
    username = jira_config['username']
    channel = slack_info.get('channel') or default_channel

    slack = SlackApi(token,
                     settings=settings,
                     init_usergroups=False,
                     channel=channel,
                     icon_emoji=icon_emoji,
                     username=username)

    return slack
def run(dry_run: bool, thread_pool_size: int = 10,
        internal: Optional[bool] = None, use_jump_host: bool = True,
        defer=None, raise_errors=False):
    _LOG.debug('Collecting GQL data ...')
    namespaces = get_gql_namespaces_in_shard()

    inventory = LabelInventory()

    _LOG.debug('Initializing OC_Map ...')
    oc_map = get_oc_map(namespaces, internal, use_jump_host, thread_pool_size)
    defer(oc_map.cleanup)

    _LOG.debug('Collecting desired state ...')
    get_desired(inventory, oc_map, namespaces)

    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION, accounts=accounts,
                  settings=settings)
    _LOG.debug('Collecting managed state ...')
    get_managed(inventory, state)

    _LOG.debug('Collecting current state ...')
    get_current(inventory, oc_map, thread_pool_size)

    inventory.reconcile()

    realize(inventory, state, oc_map, dry_run, thread_pool_size)

    if inventory.has_any_error():
        error_messages = []
        for cluster, namespace, errs in inventory.iter_errors():
            for err in errs:
                msg = f'{cluster}/{namespace}: {err}'
                _LOG.error(msg)
                error_messages.append(msg)
        if raise_errors:
            raise NamespaceLabelError('\n'.join(error_messages))
        sys.exit(1)
def run(dry_run):
    gqlapi = gql.get_api()
    result = gqlapi.query(REPOS_QUERY)
    config = get_config()['github-repo-invites']
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    secret = {'path': config['secret_path'],
              'field': config['secret_field']}
    token = secret_reader.read(secret)
    g = raw_github_api.RawGithubApi(token)

    urls = set()
    known_orgs = set()
    for app in result['apps_v1']:
        code_components = app['codeComponents']
        if code_components is None:
            continue
        for code_component in code_components:
            url = code_component['url']
            urls.add(url)
            # the org URL is everything up to the last '/', e.g.
            # https://github.com/app-sre/qontract-reconcile
            # -> https://github.com/app-sre
            org = url[:url.rindex('/')]
            known_orgs.add(org)

    for i in g.repo_invitations():
        invitation_id = i['id']
        invitation_url = i['html_url']
        url = os.path.dirname(invitation_url)
        accept = url in urls or any(url.startswith(org) for org in known_orgs)
        if accept:
            logging.info(['accept', url])
            if not dry_run:
                g.accept_repo_invitation(invitation_id)
        else:
            logging.debug(['skipping', url])
def fetch_current_state(thread_pool_size, internal, use_jump_host):
    clusters = [c for c in queries.get_clusters() if is_in_shard(c["name"])]
    ocm_clusters = [c["name"] for c in clusters if c.get("ocm") is not None]
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(
        clusters=clusters,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
    )
    groups_list = create_groups_list(clusters, oc_map)
    results = threaded.run(get_cluster_state, groups_list, thread_pool_size,
                           oc_map=oc_map)
    current_state = list(itertools.chain.from_iterable(results))

    return oc_map, current_state, ocm_clusters
def run(dry_run):
    settings = queries.get_app_interface_settings()
    gqlapi = gql.get_api()
    github = init_github()
    secret_reader = SecretReader(settings=settings)

    # Reconcile against all Sentry instances
    instances = gqlapi.query(SENTRY_INSTANCES_QUERY)['instances']
    tokens = {i['name']: secret_reader.read(i['automationToken'])
              for i in instances}
    skip_users = {i['name']: secret_reader.read(i['adminUser'])
                  for i in instances}
    for instance in instances:
        instance_name = instance['name']
        token = tokens[instance_name]
        host = instance['consoleUrl']
        sentry_client = SentryClient(host, token)
        skip_user = skip_users[instance_name]
        current_state = fetch_current_state(sentry_client, [skip_user])
        desired_state = fetch_desired_state(gqlapi, instance, github)
        reconciler = SentryReconciler(sentry_client, dry_run)
        reconciler.reconcile(current_state, desired_state)
def fetch_current_state(clusters):
    settings = queries.get_app_interface_settings()
    ocm_map = OCMMap(clusters=clusters, integration=QONTRACT_INTEGRATION,
                     settings=settings)

    current_state = []
    for cluster in clusters:
        cluster_name = cluster['name']
        ocm = ocm_map.get(cluster_name)
        labels = ocm.get_external_configuration_labels(cluster_name)
        for key, value in labels.items():
            item = {
                'label': {'key': key, 'value': value},
                'cluster': cluster_name
            }
            current_state.append(item)

    return ocm_map, current_state
def get_quay_api_store():
    """
    Returns a dictionary with a key for each Quay organization
    managed in app-interface. Each value holds the org's base URL,
    an initialized QuayApi instance and related org metadata.
    """
    quay_orgs = queries.get_quay_orgs()
    settings = queries.get_app_interface_settings()
    secret_reader = SecretReader(settings=settings)
    store = {}
    for org_data in quay_orgs:
        instance_name = org_data['instance']['name']
        org_name = org_data['name']
        org_key = OrgKey(instance_name, org_name)
        base_url = org_data['instance']['url']
        token = secret_reader.read(org_data['automationToken'])

        if org_data.get('mirror'):
            mirror = OrgKey(org_data['mirror']['instance']['name'],
                            org_data['mirror']['name'])
        else:
            mirror = None

        if org_data.get('pushCredentials'):
            push_token = secret_reader.read_all(org_data['pushCredentials'])
        else:
            push_token = None

        store[org_key] = {
            'url': base_url,
            'api': QuayApi(token, org_name, base_url=base_url),
            'push_token': push_token,
            'teams': org_data.get('managedTeams'),
            'managedRepos': org_data.get('managedRepos'),
            'mirror': mirror,
        }

    return store
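# A minimal sketch of consuming the store built above; the printed
# fields mirror the dict keys the function populates.
store = get_quay_api_store()
for org_key, org_info in store.items():
    print(f"{org_key}: url={org_info['url']}, mirror={org_info['mirror']}")
    quay_api = org_info['api']  # an initialized QuayApi client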
def run(
    dry_run: bool,
    thread_pool_size=10,
    internal: Optional[bool] = None,
    use_jump_host=True,
    defer=None,
):
    all_namespaces = queries.get_namespaces(minimal=True)
    shard_namespaces, duplicates = get_shard_namespaces(all_namespaces)

    desired_state = get_desired_state(shard_namespaces)

    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(
        namespaces=shard_namespaces,
        integration=QONTRACT_INTEGRATION,
        settings=settings,
        internal=internal,
        use_jump_host=use_jump_host,
        thread_pool_size=thread_pool_size,
        init_projects=True,
    )
    defer(oc_map.cleanup)

    results = threaded.run(
        manage_namespaces,
        desired_state,
        thread_pool_size,
        return_exceptions=True,
        dry_run=dry_run,
        oc_map=oc_map,
    )

    err = check_results(desired_state, results)
    if err or duplicates:
        sys.exit(ExitCodes.ERROR)
def run(dry_run, gitlab_project_id):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    sqs_cli = SQSGateway(accounts, settings=settings)
    instance = queries.get_gitlab_instance()
    saas_files = queries.get_saas_files_minimal()
    gitlab_cli = GitLabApi(instance, project_id=gitlab_project_id,
                           settings=settings, saas_files=saas_files)

    while True:
        messages = sqs_cli.receive_messages()
        logging.info('received %s messages', len(messages))

        if not messages:
            break

        for message in messages:
            # Let's first delete all the messages we received,
            # otherwise they will come back in 30s.
            receipt_handle = message[0]
            sqs_cli.delete_message(str(receipt_handle))

        for message in messages:
            # Time to process the messages. Any failure here is not
            # critical, even though we already deleted the messages,
            # since the producers will keep re-sending the message
            # until the MR gets merged to app-interface
            receipt_handle, body = message[0], message[1]
            logging.info('received message %s with body %s',
                         receipt_handle[:6], json.dumps(body))

            if not dry_run:
                merge_request = mr.init_from_sqs_message(body)
                merge_request.submit_to_gitlab(gitlab_cli=gitlab_cli)
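# Based on how `run` indexes each message above, receive_messages() is
# expected to yield (receipt_handle, body) pairs. The values below are
# hypothetical, inferred from the code rather than from SQSGateway docs.
messages = [
    ('AQEB6nR4...', {'pr_type': 'example'}),  # hypothetical message
]
for receipt_handle, body in messages:
    print(receipt_handle[:6], json.dumps(body))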
def _fetch_oc_secret(self) -> str:
    parts = self._command_data.openshift_path.split("/")
    if len(parts) != 3:
        raise ArgumentException(
            f"Wrong format! --openshift-path must be of format "
            f"{{cluster}}/{{namespace}}/{{secret}}. "
            f"Got {self._command_data.openshift_path}"
        )
    cluster_name, namespace, secret = parts
    clusters = queries.get_clusters_by(
        filter=queries.ClusterFilter(
            name=cluster_name,
        )
    )
    if not clusters:
        raise ArgumentException(f"No cluster found with name '{cluster_name}'")

    settings = queries.get_app_interface_settings()
    data = {}
    try:
        oc_map = OC_Map(
            clusters=clusters,
            integration="qontract-cli",
            settings=settings,
            use_jump_host=True,
            thread_pool_size=1,
            init_projects=False,
        )
        oc = oc_map.get(cluster_name)
        data = oc.get(namespace, "Secret", name=secret,
                      allow_not_found=False)["data"]
    except Exception as e:
        raise OpenshiftException(
            f"Could not fetch secret from Openshift cluster {cluster_name}"
        ) from e

    return GPGEncryptCommand._format(data)
def run(dry_run, gitlab_project_id=None, thread_pool_size=10,
        enable_deletion=False):
    accounts = filter_accounts(queries.get_aws_accounts())
    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    deleted_keys = get_deleted_keys(accounts)
    existing_keys = aws.get_users_keys()
    aws_support_cases = aws.get_support_cases()
    keys_to_delete_from_cases = get_keys_to_delete(aws_support_cases)
    keys_to_delete = []
    for ktd in keys_to_delete_from_cases:
        ktd_account = ktd["account"]
        ktd_key = ktd["key"]

        # skip keys that were already deleted
        account_deleted_keys = deleted_keys.get(ktd_account)
        if account_deleted_keys and ktd_key in account_deleted_keys:
            continue

        # skip keys that no longer exist in the account
        account_existing_keys = existing_keys.get(ktd_account)
        if account_existing_keys:
            keys_only = \
                itertools.chain.from_iterable(account_existing_keys.values())
            if ktd_key not in keys_only:
                continue

        keys_to_delete.append(ktd)

    act(dry_run, gitlab_project_id, accounts, keys_to_delete)
def ocm_aws_infrastructure_access_switch_role_links(ctx):
    settings = queries.get_app_interface_settings()
    clusters = queries.get_clusters()
    clusters = [c for c in clusters if c.get('ocm') is not None]
    ocm_map = OCMMap(clusters=clusters, settings=settings)

    results = []
    for cluster in clusters:
        cluster_name = cluster['name']
        ocm = ocm_map.get(cluster_name)
        role_grants = \
            ocm.get_aws_infrastructure_access_role_grants(cluster_name)
        for user_arn, access_level, _, switch_role_link in role_grants:
            item = {
                'cluster': cluster_name,
                'user_arn': user_arn,
                'access_level': access_level,
                'switch_role_link': switch_role_link,
            }
            results.append(item)

    columns = ['cluster', 'user_arn', 'access_level', 'switch_role_link']
    print_output(ctx.obj['output'], results, columns)
def setup(print_only, thread_pool_size, internal, use_jump_host):
    gqlapi = gql.get_api()
    accounts = queries.get_aws_accounts()
    settings = queries.get_app_interface_settings()
    namespaces = gqlapi.query(TF_NAMESPACES_QUERY)['namespaces']
    tf_namespaces = [namespace_info for namespace_info in namespaces
                     if namespace_info.get('managedTerraformResources')]
    ri, oc_map = fetch_current_state(tf_namespaces, thread_pool_size,
                                     internal, use_jump_host)
    ts, working_dirs = init_working_dirs(accounts, thread_pool_size,
                                         print_only=print_only,
                                         oc_map=oc_map,
                                         settings=settings)
    tf = Terraform(QONTRACT_INTEGRATION,
                   QONTRACT_INTEGRATION_VERSION,
                   QONTRACT_TF_PREFIX,
                   working_dirs,
                   thread_pool_size)
    existing_secrets = tf.get_terraform_output_secrets()
    ts.populate_resources(tf_namespaces, existing_secrets)
    ts.dump(print_only, existing_dirs=working_dirs)

    return ri, oc_map, tf
def init_slack(slack_info, integration, init_usergroups=True):
    settings = queries.get_app_interface_settings()
    workspace_name = slack_info['workspace']['name']
    slack_integrations = slack_info['workspace']['integrations']
    slack_config = [i for i in slack_integrations
                    if i['name'] == integration]
    [slack_config] = slack_config

    token = slack_config['token']
    default_channel = slack_config['channel']
    icon_emoji = slack_config['icon_emoji']
    username = slack_config['username']
    channel = slack_info.get('channel') or default_channel

    slack = SlackApi(workspace_name,
                     token,
                     settings=settings,
                     init_usergroups=init_usergroups,
                     channel=channel,
                     icon_emoji=icon_emoji,
                     username=username)

    return slack
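# A hypothetical slack_info payload for the function above; the shape is
# inferred from the keys init_slack reads. Real data comes from
# app-interface, and the integration name here is made up.
slack_info = {
    'workspace': {
        'name': 'example-workspace',
        'integrations': [{
            'name': 'my-integration',
            'token': {'path': 'app-sre/slack', 'field': 'token'},
            'channel': 'default-channel',
            'icon_emoji': ':robot_face:',
            'username': 'bot',
        }],
    },
    'channel': 'override-channel',  # optional; falls back to the default
}
slack = init_slack(slack_info, 'my-integration', init_usergroups=False)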
def run(
    dry_run,
    thread_pool_size=10,
    disable_service_account_keys=False,
    account_name=None,
    defer=None,
):
    accounts = filter_accounts(queries.get_aws_accounts(), account_name)
    if not accounts:
        logging.debug("nothing to do here")
        # using return because terraform-resources
        # may be the calling entity, and has more to do
        return

    settings = queries.get_app_interface_settings()
    aws = AWSApi(thread_pool_size, accounts, settings=settings)
    keys_to_delete = get_keys_to_delete(accounts)
    working_dirs = init_tf_working_dirs(accounts, thread_pool_size, settings)
    defer(lambda: cleanup(working_dirs))
    error = aws.delete_keys(dry_run, keys_to_delete, working_dirs,
                            disable_service_account_keys)
    if error:
        sys.exit(1)
def fetch_data(namespaces, thread_pool_size, internal, use_jump_host,
               init_api_resources=False, overrides=None):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    logging.debug(f"Overriding keys {overrides}")
    oc_map = OC_Map(namespaces=namespaces,
                    integration=QONTRACT_INTEGRATION,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_api_resources=init_api_resources)
    state_specs = ob.init_specs_to_fetch(ri, oc_map,
                                         namespaces=namespaces,
                                         override_managed_types=overrides)
    threaded.run(fetch_states, state_specs, thread_pool_size, ri=ri)

    return oc_map, ri
def fetch_current_state(namespaces=None,
                        clusters=None,
                        thread_pool_size=None,
                        integration=None,
                        integration_version=None,
                        override_managed_types=None,
                        internal=None,
                        use_jump_host=True,
                        init_api_resources=False,
                        cluster_admin=False):
    ri = ResourceInventory()
    settings = queries.get_app_interface_settings()
    oc_map = OC_Map(namespaces=namespaces,
                    clusters=clusters,
                    integration=integration,
                    settings=settings,
                    internal=internal,
                    use_jump_host=use_jump_host,
                    thread_pool_size=thread_pool_size,
                    init_api_resources=init_api_resources,
                    cluster_admin=cluster_admin)
    state_specs = init_specs_to_fetch(
        ri,
        oc_map,
        namespaces=namespaces,
        clusters=clusters,
        override_managed_types=override_managed_types
    )
    threaded.run(populate_current_state, state_specs, thread_pool_size,
                 ri=ri, integration=integration,
                 integration_version=integration_version)

    return ri, oc_map
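# A minimal usage sketch for the generic helper above, as an integration
# might call it; the integration name and version are hypothetical.
ri, oc_map = fetch_current_state(
    namespaces=queries.get_namespaces(),
    thread_pool_size=10,
    integration='example-integration',
    integration_version='0.1.0',
    override_managed_types=['Secret', 'ConfigMap'],
)
try:
    pass  # compare ri against the desired state here
finally:
    oc_map.cleanup()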
def run(dry_run, wait_for_pipeline):
    default_days_interval = 15
    default_limit = 8
    default_enable_closing = False
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    repos = queries.get_repos_gitlab_housekeeping(server=instance['url'])
    for repo in repos:
        hk = repo['housekeeping']
        project_url = repo['url']
        days_interval = hk.get('days_interval') or default_days_interval
        enable_closing = hk.get('enable_closing') or default_enable_closing
        limit = hk.get('limit') or default_limit
        gl = GitLabApi(instance, project_url=project_url, settings=settings)
        handle_stale_items(dry_run, gl, days_interval, enable_closing,
                           'issue')
        handle_stale_items(dry_run, gl, days_interval, enable_closing,
                           'merge-request')
        rebase = hk.get('rebase')
        try:
            merge_merge_requests(dry_run, gl, limit, rebase, insist=True,
                                 wait_for_pipeline=wait_for_pipeline)
        except Exception:
            # retry once without insisting if the first pass fails
            merge_merge_requests(dry_run, gl, limit, rebase,
                                 wait_for_pipeline=wait_for_pipeline)
        if rebase:
            rebase_merge_requests(dry_run, gl, limit,
                                  wait_for_pipeline=wait_for_pipeline)
def init_slack_map(unleash_instance):
    settings = queries.get_app_interface_settings()
    slack_notifications = unleash_instance['notifications']['slack']
    slack_map = {}
    for slack_info in slack_notifications:
        workspace = slack_info['workspace']
        workspace_name = workspace['name']
        slack_integrations = workspace['integrations']
        client_config = workspace.get('api_client')
        slack_config = [i for i in slack_integrations
                        if i['name'] == QONTRACT_INTEGRATION]
        [slack_config] = slack_config

        token = slack_config['token']
        channel = slack_info['channel']
        icon_emoji = slack_info['icon_emoji']
        username = slack_info['username']

        slack_api_kwargs = {
            'settings': settings,
            'init_usergroups': False,
            'channel': channel,
            'icon_emoji': icon_emoji,
            'username': username
        }
        if client_config:
            slack_api_kwargs['api_config'] = \
                SlackApiConfig.from_dict(client_config)

        slack = SlackApi(workspace_name, token, **slack_api_kwargs)
        slack_map[channel] = slack

    return slack_map
def run(dry_run):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_state_aws_accounts()
    state = State(
        integration=QONTRACT_INTEGRATION,
        accounts=accounts,
        settings=settings
    )
    emails = queries.get_app_interface_emails()
    smtp_client = SmtpClient(settings=settings)
    # validate no 2 emails have the same name
    email_names = {e["name"] for e in emails}
    if len(emails) != len(email_names):
        logging.error("email names must be unique.")
        sys.exit(1)

    emails_to_send = [e for e in emails if not state.exists(e["name"])]
    for email in emails_to_send:
        logging.info(["send_email", email["name"], email["subject"]])

        if not dry_run:
            names = collect_to(email["to"])
            subject = email["subject"]
            body = email["body"]
            smtp_client.send_mail(names, subject, body)
            state.add(email["name"])
def run(dry_run=False):
    settings = queries.get_app_interface_settings()
    dependency_map = settings.get('dependencies')
    if not dependency_map:
        sys.exit()

    gqlapi = gql.get_api()
    apps = gqlapi.query(APPS_QUERY)['apps']
    error = False
    for app in apps:
        app_name = app['name']
        app_deps = app.get('dependencies')
        current_deps = [a['name'] for a in app_deps] if app_deps else []
        desired_deps = get_desired_dependency_names(app, dependency_map)

        missing_deps = list(desired_deps.difference(current_deps))
        if missing_deps:
            error = True
            msg = f"App '{app_name}' has missing dependencies: {missing_deps}"
            logging.error(msg)

    if error:
        sys.exit(1)
def run(dry_run):
    instance = queries.get_gitlab_instance()
    settings = queries.get_app_interface_settings()
    gl = GitLabApi(instance, settings=settings)
    previous_urls = queries.get_jenkins_instances_previous_urls()
    repos = queries.get_repos(server=gl.server)

    for repo in repos:
        found_hook_urls = set()
        hooks = gl.get_project_hooks(repo)
        for hook in hooks:
            hook_url = hook.url
            if hook_url in found_hook_urls:
                # duplicate! remove
                logging.info(['delete_hook', repo, hook_url])
                if not dry_run:
                    hook.delete()
                continue
            found_hook_urls.add(hook_url)
            for previous_url in previous_urls:
                if hook_url.startswith(previous_url):
                    # hook points to a previous Jenkins instance URL - remove
                    logging.info(['delete_hook', repo, hook_url])
                    if not dry_run:
                        hook.delete()
def run(dry_run=False):
    settings = queries.get_app_interface_settings()
    accounts = queries.get_aws_accounts()
    state = State(integration=QONTRACT_INTEGRATION, accounts=accounts,
                  settings=settings)
    emails = queries.get_app_interface_emails()
    # validate no 2 emails have the same name
    email_names = {e['name'] for e in emails}
    if len(emails) != len(email_names):
        logging.error('email names must be unique.')
        sys.exit(1)

    emails_to_send = [e for e in emails if not state.exists(e['name'])]
    for email in emails_to_send:
        logging.info(['send_email', email['name'], email['subject']])

        if not dry_run:
            names = collect_to(email['to'])
            subject = email['subject']
            body = email['body']
            smtp_client.send_mail(names, subject, body, settings=settings)
            state.add(email['name'])