def process_cloudwatch_metric_event():
    """ Trigger scheduled update of all configured alarm metrics """
    alarms = get_cloudwatch_alarms()
    stats = defaultdict(int)
    for alarm in alarms:
        alarm = Dict(alarm)
        current_state = alarm.StateValue
        statistics = None
        if current_state != "INSUFFICIENT_DATA":
            statistics = get_cloudwatch_metric_statistics(alarm)
        if statistics is not None:
            metric_event = cloudwatch_metric_to_standard_health_data_model(
                alarm, statistics)
            response = send_to_health_monitor(metric_event)
            LOG.debug("Lambda invoke status: %s", response.StatusCode)
            if response.StatusCode == 200:
                stats["sent"] += 1
            else:
                stats["failed"] += 1
        else:
            stats["no_data"] += 1
            LOG.debug("%s state is %s", alarm.MetricName, current_state)
    return stats

def GetByEnterprise(enterprise_id):
    LOG.debug('Lexicon:GetByEnterprise:', enterprise_id)
    r = collection.find({
        'enterprise_id': enterprise_id,
    })
    LOG.debug(r)
    return r

def Add(
    communication_log_id,
    communication_ids,
    matched_keywords,
    participants,
    last_message_time,
    source,
    enterprise_id,
    state=None,
    error=None,
):
    LOG.debug('Conversations:Add:', communication_log_id, communication_ids)
    r = collection.insert_one({
        'communication_log_id': communication_log_id,
        'communication_ids': communication_ids,
        'matched_keywords': matched_keywords,
        'participants': participants,
        'last_message_time': last_message_time,
        'source': source,
        'enterprise_id': enterprise_id,
        'state': state,
        'error': error,
    })
    LOG.debug(r)
    return r

def get_github_api_paged_data(url: str) -> List[Any]:
    token = get_github_access_token()
    page = 1
    page_size = 100
    page_items = 1
    items = []
    while page_items > 0:
        page_items = 0
        page_url = f"{url}?page={page}&per_page={page_size}"
        headers = {"authorization": f"token {token}"}
        response = requests.get(page_url, headers=headers)
        if response.status_code == 200:
            response_items = response.json()
            LOG.debug("Got item page %s for URL %s", page, url)
            page_items = len(response_items)
            # append page to parent array
            items.extend(response_items)
        else:
            raise GithubApiError(response.text)
        page += 1
        # this sleep is important to prevent rate limiting
        time.sleep(4)
    return items

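# Usage sketch for get_github_api_paged_data. The org URL below is a
# hypothetical placeholder; the function assumes a token is available via
# get_github_access_token() and walks ?page=N until an empty page comes back.
repos = get_github_api_paged_data("https://api.github.com/orgs/example-org/repos")
LOG.debug("Fetched %s repositories in total", len(repos))
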
def Execute(parse_source, parse_state):
    LOG.info('STARTED: ' + os.path.basename(__file__))
    LOG.info('parse_source: \'' + str(parse_source) + '\'')
    if not parse_state:
        # production mode: parsing newly downloaded logs
        parse_state = db.CommunicationLogs.States.DOWNLOADED
    LOG.info('parse_state: \'' + str(parse_state) + '\'')
    communication_logs = db.CommunicationLogs.GetByStateSource(
        state=parse_state,
        source=parse_source,
    )
    LOG.debug(communication_logs)
    for communication_log in communication_logs:
        try:
            file_path = (settings.DOWNLOAD_DIR + '/' +
                         communication_log['source'] + '/' +
                         communication_log['file_name'])
            if not os.path.exists(file_path):
                LOG.error('Does not exist: ' + file_path)
                continue
            LOG.info('Parsing: ' + file_path)
            parsers = imp.load_source(
                'parsers', 'parsers/' + communication_log['source'] + '.py')
            # class_ = getattr(parsers, settings.COMMUNICATION_LOG_SOURCES[communication_log['source']]['ParserClass'])
            parser_class = getattr(parsers, communication_log['source'])
            parser = parser_class()
            parser.Parse(communication_log)
        except Exception:
            LOG.exception(sys.exc_info()[0])
    LOG.info('COMPLETED')

def process_message(raw_message):
    header = raw_message['header']
    if not validate_header(header):
        return True  # Effectively drop the message

    decoded = TrainMovementsMessage(raw_message['body'])
    if (decoded.event_type == EventType.arrival
            and decoded.status == VariationStatus.late
            and decoded.location.is_public_station
            and decoded.operating_company
            and decoded.operating_company.is_delay_repay_eligible(
                decoded.minutes_late)):
        LOG.info('{} {} arrival at {} ({}) - eligible for '
                 'compensation from {}: {}'.format(
                     decoded.actual_datetime,
                     decoded.early_late_description,
                     decoded.location.name,
                     decoded.location.three_alpha,
                     decoded.operating_company,
                     str(decoded)))
    else:
        LOG.debug('Dropping {} {} {} message'.format(
            decoded.status, decoded.event_type,
            decoded.early_late_description))
    return True

def SetParsed(communication_log_id, error=None):
    LOG.debug('CommunicationLogs:SetParsed:', communication_log_id, error)
    update_fields = {
        'parsed_time': datetime.datetime.now(),
    }
    if error:
        update_fields['error'] = 'PARSING: ' + str(error)
        update_fields['state'] = States.PARSING_ERROR
    else:
        update_fields['error'] = None
        update_fields['state'] = States.PARSED
    r = collection.update_one(
        {'_id': communication_log_id},
        {'$set': update_fields},
        upsert=False,
    )
    LOG.debug(r)
    return r

def DeleteBySource(source):
    LOG.debug('Conversations:DeleteBySource:{0}'.format(source))
    r = collection.remove({
        'source': source,
    })
    LOG.debug(r)
    return r

def process_cloudwatch_alarm_event(event):
    """ Receive raw event from lambda invoke """
    message = parse_sns_message(event)
    standardised_data = cloudwatch_alarm_to_standard_health_data_model(message)
    response = send_to_health_monitor(standardised_data)
    LOG.debug("Lambda invoke status: %s", response.StatusCode)
    return response.StatusCode == 200

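# A minimal sketch of the SNS-wrapped event this handler receives, assuming
# parse_sns_message() unwraps the standard Records[0].Sns.Message envelope.
# The alarm payload is illustrative only.
sample_event = {
    "Records": [{
        "Sns": {
            "Message": json.dumps({
                "AlarmName": "example-alarm",
                "NewStateValue": "ALARM",
            })
        }
    }]
}
handled_ok = process_cloudwatch_alarm_event(sample_event)
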
def metric_resource_exists(cls, metric):
    """
    Check the resource exists before defining an alarm

    aws cloudwatch list-metrics returns metrics for resources
    that no longer exist
    """
    region = cls.get_metric_region(metric)
    namespace = metric.Namespace
    resource_exists = True
    try:
        LOG.debug("Getting boto client for %s in %s", namespace, region)
        client = cls.get_client_from_namespace(namespace, region)
        if client:
            queue = cls.get_metric_dimension_value(metric, "QueueName")
            LOG.debug("Get tags for sqs queue: %s", queue)
            if queue:
                client.get_queue_url(QueueName=queue)
            else:
                resource_exists = False
    except AttributeError as err:
        LOG.debug(json.dumps(metric, indent=2))
        LOG.debug(str(err))
    except botocore.exceptions.ClientError as err:
        LOG.debug(str(err))
        resource_exists = False
    return resource_exists

def collect_parsed_data(self, data: dict) -> Dict:
    """
    Collect the recognised data from an uploaded resume file.

    :param data: server response
    :type data: dict
    :return: dictionary with data for uploading the candidate
    :rtype: Dict
    """
    LOG.debug("Collect data from resume.")

    def get_recursively(data: dict,
                        keys: Iterable) -> Optional[Union[float, str]]:
        res = data
        for key in keys:
            if res and isinstance(res, dict):
                res = res.get(key)
            if res and isinstance(res, list):
                res = res[0]
        return res

    experience = get_recursively(data, ("fields", "experience"))
    position = None
    company = None
    if experience:
        position = experience.get("position")
        company = experience.get("company")
    external_data = {}
    if text := data.get("text"):
        external_data.setdefault("data", {"body": text})
    # Assumed completion: the original snippet is truncated here. The
    # docstring promises a dict for uploading the candidate, so return the
    # parsed fields plus any external resume data (field names are a guess).
    return {
        "position": position,
        "company": company,
        "externals": [external_data] if external_data else [],
    }

def send_health_monitoring_data_to_splunk(payload_to_send):
    """ Send the Health Monitoring payload to Splunk Cloud HEC """
    try:
        splunk_hec_token = get_splunk_hec_token(SPLUNK_HEC_SSM_PARAMETER,
                                                AWS_REGION)
        splunk_hec_endpoint = (
            "https://http-inputs-gds.splunkcloud.com/services/collector")
        headers = {"Authorization": "Splunk " + splunk_hec_token}
        # NB: verify=False disables TLS certificate verification
        response = requests.post(splunk_hec_endpoint,
                                 payload_to_send,
                                 headers=headers,
                                 verify=False)
        if response.status_code != 200:
            LOG.debug("Received a non 200 HTTP status code from Splunk Cloud HEC")
            LOG.debug("Response code: %s: message: %s", response.status_code,
                      response.text)
        else:
            LOG.info("Successful: message: %s", response.text)
    except (ValueError, KeyError):
        LOG.error("Failed to send health monitoring data to Splunk Cloud HEC")

def _handle_multiple_messages(self, messages):
    """
    Train movement message comprises a `header` and a `body`. The `header`
    http://nrodwiki.rockshore.net/index.php/Train_Movement
    """
    def send_batch(sqs_entries):
        # http://boto3.readthedocs.org/en/latest/reference/services/sqs.html#SQS.Queue.send_messages
        result = self.queue.send_messages(Entries=sqs_entries)
        if len(result['Successful']) != len(sqs_entries):
            LOG.error('Some messages failed to send to SQS: {}'.format(result))

    with batcher(send_batch, batch_size=10) as b:
        for raw_message in messages:
            message_id = str(uuid.uuid4())
            pretty_message = json.dumps(raw_message, indent=4)
            LOG.debug('Sending to queue with id {}: {}'.format(
                message_id, pretty_message))
            b.push({
                'Id': message_id,
                'MessageBody': pretty_message
            })
    # count the messages handled, not the keys of the last message dict
    self.increment_message_counter(len(messages))

def main():
    args = parser.parse_args()
    try:
        uploader = ApplicantUploader(args.token)
    except AssertionError:
        LOG.error("Error during uploader initialization")
        sys.exit(1)
    if files := scan_directory(args.base_dir, BASE_FILENAME):
        for applicant in get_applicants_info(files, args.row):
            LOG.debug("Start uploading")
            # upload the resume
            resume_info = uploader.upload_file(applicant.file_path)
            # build the JSON body for uploading the candidate
            body = uploader.collect_parsed_data(resume_info)
            # data from the .xlsx file takes priority
            body.update(get_fio(applicant))
            body.update({"money": applicant.salary})
            # upload the candidate
            if response := uploader.upload_applicant(body):
                # attach the candidate to a vacancy
                applicant_id = response.get("id")
                uploader.set_vacancy(applicant, applicant_id)

def get_environment_account_id(environment):
    """ Match production-like environment names and default to test """
    prod_envs = ["live", "prod", "production"]
    account_var = ("PROD_ACCOUNT"
                   if environment.lower() in prod_envs else "TEST_ACCOUNT")
    account_id = os.environ.get(account_var)
    LOG.debug("Forward to account: %s", account_var)
    return account_id

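# Usage sketch: with both account variables set, production-like names map to
# PROD_ACCOUNT and anything else falls back to TEST_ACCOUNT. The account IDs
# are made-up placeholders.
os.environ["PROD_ACCOUNT"] = "111111111111"
os.environ["TEST_ACCOUNT"] = "222222222222"
assert get_environment_account_id("Live") == "111111111111"
assert get_environment_account_id("staging") == "222222222222"
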
def _get_vacancies_statuses(self):
    """ Get the list of vacancy statuses """
    url = f"{self.endpoint}/account/{self.account_id}/vacancy/statuses"
    LOG.debug("Get vacancies statuses")
    response = self._handle_request(
        requests.get(url=url, headers=self.base_headers))
    if items := response.get("items"):
        return {el["name"]: el["id"] for el in items}

def _get_vacancies(self):
    """ Get the list of vacancy identifiers """
    url = f"{self.endpoint}/account/{self.account_id}/vacancies"
    LOG.debug("Get vacancies")
    response = self._handle_request(
        requests.get(url=url, headers=self.base_headers))
    if items := response.get("items"):
        return {el["position"]: el["id"] for el in items}

def _get_account_id(self):
    """ Get the account identifier. """
    url = f"{self.endpoint}/accounts"
    LOG.debug("Get account id")
    response = self._handle_request(
        requests.get(url=url, headers=self.base_headers))
    if items := response.get("items"):
        return items[0].get("id")

def DeleteByCommunicationLogId(communication_log_id):
    LOG.debug('Conversations:DeleteByCommunicationLogId:{0}'.format(
        communication_log_id))
    r = collection.remove({
        'communication_log_id': str(communication_log_id),
    })
    LOG.debug(r)
    return r

def test_zb_connection() -> bool:
    zeebe_connection = HELPER.zb()
    try:
        res = next(zeebe_connection.get_topology())
        LOG.debug(f'connected to {res.brokers}')
        return True
    except ZeebeError as zer:
        LOG.error(f'Could not connect to ZB: {zer}')
        return False

def stub_response_cognito_list_user_pools(stubber, env="testing"):
    mock_list_user_pools = {
        "UserPools": [{
            "Id": MOCK_COGNITO_USER_POOL_ID,
            "Name": f"corona-cognito-pool-{env}"
        }]
    }
    LOG.debug(mock_list_user_pools)
    stubber.add_response("list_user_pools", mock_list_user_pools,
                         {"MaxResults": 10})

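# Usage sketch with botocore's Stubber: attach the canned response to a real
# cognito-idp client, then call list_user_pools with the expected parameters.
# The region is an arbitrary choice for the sketch.
import boto3
from botocore.stub import Stubber

client = boto3.client("cognito-idp", region_name="eu-west-2")
stubber = Stubber(client)
stub_response_cognito_list_user_pools(stubber)
with stubber:
    pools = client.list_user_pools(MaxResults=10)
    assert pools["UserPools"][0]["Name"] == "corona-cognito-pool-testing"
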
def get_environment(event):
    """ Get environment from resource tags or default to DEF_ENVIRONMENT var """
    event_env = event.get_attribute("environment")
    if event_env is not None:
        environment = event_env
    else:
        environment = os.environ.get("DEF_ENVIRONMENT")
    LOG.debug("Environment: %s", environment)
    return environment

def get_slack_channel(message):
    """ Identify target slack channel """
    if "AlarmName" in message:
        LOG.debug("Get target channel for alarm: %s", message["AlarmName"])
    default_channel = "cyber-security-service-health"
    # correct this to do something that might happen
    if "SlackChannel" in message:
        target_channel = message["SlackChannel"]
    else:
        target_channel = default_channel
    return target_channel

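# Usage sketch: messages may override the default channel via "SlackChannel".
assert get_slack_channel({"AlarmName": "example"}) == "cyber-security-service-health"
assert get_slack_channel({"SlackChannel": "my-team-alerts"}) == "my-team-alerts"
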
def _check_xinc():
    if not checkEnvironment('XINC_HOME'):
        return False
    exe_name = '\\bin\\windows\\xinc.exe' if win32 else 'bin/unix/xinc'
    full_exe_name = os.path.join(xinc_home, exe_name)
    if not os.path.exists(full_exe_name):
        LOG.debug('%s does not exist' % full_exe_name)
        return False
    return True

def DeleteById(_id):
    LOG.debug('CommunicationLogs:DeleteById:{0}'.format(_id))
    r = collection.remove({
        '_id': _id,
    })
    LOG.debug(r)
    return r

def _check_fop():
    if not checkEnvironment('FOP_HOME'):
        return False
    exe_name = 'fop.bat' if win32 else 'fop'
    full_exe_name = os.path.join(fop_home, exe_name)
    if not os.path.exists(full_exe_name):
        LOG.debug('%s does not exist' % full_exe_name)
        return False
    return True

def _check_xinc():
    if not checkEnvironment("XINC_HOME"):
        return False
    exe_name = "\\bin\\windows\\xinc.exe" if win32 else "bin/unix/xinc"
    full_exe_name = os.path.join(xinc_home, exe_name)
    if not os.path.exists(full_exe_name):
        LOG.debug("%s does not exist" % full_exe_name)
        return False
    return True

def startup():
    for _ in range(START_TIMEOUT):
        if test_connections():
            LOG.debug('Found brokers, waiting for startup to complete')
            time.sleep(10)
            return
        else:
            time.sleep(1)
    LOG.error(f'System could not connect within {START_TIMEOUT} seconds.')
    raise TimeoutError(
        f'System could not connect within {START_TIMEOUT} seconds.')

def GetByStateSource(state, source):
    query = {
        'state': state,
    }
    if source:
        query['source'] = source
    r = collection.find(query)
    LOG.debug('CommunicationLogs:GetByStateSource:', r)
    return r

def handle_change(self, change: Event, _file_type: FileType, _id, _contents):
    _resource = _file_type.name.lower()
    LOG.debug(f'helper handling {change} on {_resource}/{_id}')
    if change is Event.DELETE and _file_type is not FileType.BPMN:
        self._delete(_file_type, _id)
    elif change is Event.DELETE and _file_type is FileType.BPMN:
        LOG.info('We cannot _yet_ automatically delete BPMNs from the brokers')
    elif _file_type is FileType.BPMN:
        LOG.debug('Working on adding BPMN')
        self.add_bpmn(_id, _contents)
    else:
        self._update(_resource, _id, _contents)

def _check_xfc():
    if not checkEnvironment("XFC_DIR"):
        return False
    # check only for fo2rtf (we expect that all other fo2XXX
    # converters are also installed properly)
    full_exe_name = os.path.join(xfc_dir, "fo2rtf")
    if not os.path.exists(full_exe_name):
        LOG.debug("%s does not exist" % full_exe_name)
        return False
    return True

def search_ga(max_gens, pop_size, ad_mut_stp, mu_lambda):
    generation = 0
    population = init_population(pop_size)
    best = fitness_func(population)
    while generation < max_gens:
        new_population = mutate_random(tuple(population))
        new_best = fitness_func(new_population)
        if new_best <= best:
            LOG.debug("Better population>{0}".format(new_population))
            population = new_population
            best = new_best
        LOG.rbf("Generation>{0}:new best>{1}".format(generation, best))
        generation += 1
    # return the best fitness found (mirrors search_es below)
    return best

def create_sns_message(alert):
    """Create the message to publish to SNS"""
    try:
        payload = json.dumps({"alert": alert})
        message = json.dumps(
            {"default": "Default payload", "sqs": payload, "lambda": payload}
        )
        LOG.debug("MESSAGE: %s", message)
        return message
    except TypeError as err:
        raise ServerError("Error creating SNS message") from err

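# Sketch of publishing the per-protocol message above. SNS only applies the
# "sqs"/"lambda" keys when MessageStructure="json" is set; the topic ARN is a
# placeholder.
import boto3

sns = boto3.client("sns")
sns.publish(
    TopicArn="arn:aws:sns:eu-west-2:123456789012:example-topic",
    Message=create_sns_message({"severity": "high"}),
    MessageStructure="json",
)
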
def checkEnvironment(envname):
    """ Check if the given name of an environment variable exists
        and if it points to an existing directory.
    """
    dirname = os.environ.get(envname, None)
    if dirname is None:
        LOG.debug('Environment variable $%s is unset' % envname)
        return False
    if not os.path.exists(dirname):
        LOG.debug('The directory referenced through the environment '
                  'variable $%s does not exist (%s)' % (envname, dirname))
        return False
    return True

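# Usage sketch: the _check_* helpers above all gate on checkEnvironment.
# The path below is a placeholder; the call returns True only if the
# variable is set and the directory actually exists.
os.environ["FOP_HOME"] = "/opt/fop"
fop_available = checkEnvironment("FOP_HOME")
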
def on_message(self, stomp_headers, json_encoded_messages):
    LOG.debug('STOMP headers {}'.format(stomp_headers))
    try:
        messages = json.loads(json_encoded_messages)
    except ValueError as e:
        LOG.error('Failed to decode {} bytes as JSON: {}'.format(
            len(json_encoded_messages), json_encoded_messages))
        LOG.exception(e)
        return
    try:
        self._handle_multiple_messages(messages)
    except Exception as e:
        LOG.exception(e)
        return

def search_es(max_gens, pop_range, ad_mut_stp, mu_lambda):
    pop = init_population(pop_range)
    best = fitness_func(pop[0])
    p = 1.5
    for gen in range(0, max_gens - 1):
        children = mutate(pop[0], pop[1], p)
        LOG.debug("children>{0}".format(children))
        fitness = fitness_func(children[0])
        if fitness <= best:
            best = fitness
            pop = children
            p = 1.5
        else:
            p = 1.5 ** (-1 / 4)
        if mu_lambda:
            pop = init_population(pop_range)
            best = fitness_func(pop[0])
        LOG.rbf("Generation>{0}:new best>{1}".format(gen, best))
    return best

def validate_header(header):
    """
    ```
    "header": {
        "user_id": "",
        "msg_type": "0003",
        "msg_queue_timestamp": "1455883630000",
        "source_dev_id": "",
        "original_data_source": "SMART",
        "source_system_id": "TRUST"
    }
    ```
    """
    if header['msg_type'] != '0003':
        LOG.debug('Dropping unsupported message type `{}`'.format(
            header['msg_type']))
        return False
    return True

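# Usage sketch with the header shape from the docstring above: type "0003"
# passes, anything else is dropped.
assert validate_header({"msg_type": "0003", "source_system_id": "TRUST"})
assert not validate_header({"msg_type": "0001"})
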