def perform(to, subject, content, from_email):
    from_obj = Email(from_email, 'Quokka')
    to_obj = Email(email_override or to)
    mail = Mail(from_obj, subject, to_obj, content)

    if not perform_deliveries:
        logger.warning(
            'Not sending email from {} to {} -- Mailer not configured to perform deliveries.'
            .format(from_obj.email, to_obj.email))
        return False

    try:
        logger.info('Sending email from {} to {}...'.format(
            from_obj.email, to_obj.email))
        resp = sg.client.mail.send.post(request_body=mail.get())
    except BaseException:
        logger.error('Error sending email to {}'.format(to_obj.email))
        return False

    if resp.status_code not in [200, 202]:
        logger.error('Email failed with error code {}: {}'.format(
            resp.status_code, resp.body))
        return False

    logger.info('Successfully sent email.')
    return True
def create_function(self):
    try:
        response = self.client.create_function(self.get_property("name"),
                                               self.get_property("runtime"),
                                               self.get_property("iam", "role"),
                                               self.get_property("handler"),
                                               self.get_property("code"),
                                               self.get_property("environment"),
                                               self.get_property("description"),
                                               self.get_property("time"),
                                               self.get_property("memory"),
                                               self.get_property("tags"))
        if response and 'FunctionArn' in response:
            self.properties["function_arn"] = response['FunctionArn']
        response_parser.parse_lambda_function_creation_response(response,
                                                                self.get_function_name(),
                                                                self.client.get_access_key(),
                                                                self.get_output_type())
    except ClientError as ce:
        error_msg = "Error initializing lambda function."
        logger.error(error_msg, error_msg + ": %s" % ce)
        utils.finish_failed_execution()
    finally:
        # Remove the files created in the operation
        utils.delete_file(self.properties["zip_file_path"])
def get_country_and_continent_from_location(loc_string, user_name, website_name):
    """
    Finds the user's location (country and continent) from their location description.
    Because the API is sometimes unavailable (for several reasons), the function works as follows:
    1. Try once - on failure, log a warning and try again after a sleep (an immediate retry
       could hit the same problem again).
    2. On a second failure, log an ERROR message with the user details so the location can be
       scraped individually later (this feature will be completed in milestone 3).
    :param loc_string: user location description (str)
    :param user_name: user name, used in log messages (str)
    :param website_name: website name, used in log messages (str)
    :return: country and continent (str, str) or (None, None)
    """
    country, continent = None, None  # initialize the returned variables
    if not re.search(config.GMT_REGEX, loc_string):  # handle "GMT {-8:00}" - time zone location input
        try:
            country, continent = GeoLocation.geolocator_process(loc_string)
        except GeocoderUnavailable:
            logger.warning(config.GeocoderUnavailable_WARNING_STRING.format(
                user_name, website_name, loc_string))
            time.sleep(config.SLEEP_TIME_FOR_LOCATIONS_API)
            try:
                country, continent = GeoLocation.geolocator_process(loc_string)
            except GeocoderUnavailable:
                logger.error(config.GeocoderUnavailable_ERROR_STRING.format(
                    user_name, website_name, loc_string))
    return country, continent
def get_function_info(self, function_name_or_arn):
    try:
        # If this call works the function exists
        return self.client.get_function_info(function_name_or_arn)
    except ClientError as ce:
        error_msg = "Error while looking for the lambda function"
        logger.error(error_msg, error_msg + ": %s" % ce)
def delete_rule_by_id(ids, delete_all=False, delete_related_usernames=True):
    if not ids:
        ids = list(map(lambda x: x.decode(), redis_cli.hkeys('rules')))
    payload = {"delete": {"ids": ids}}
    response = requests.post(TWITTER_RULES_URL, headers=HEADERS, json=payload)
    if response.status_code == 200:
        if delete_all:
            redis_cli.delete('rules')
            redis_cli.delete('username_rid')
        else:
            for rid in ids:
                rule = redis_cli.hget('rules', rid)
                if delete_related_usernames:
                    parsed_rules = list(
                        map(lambda x: x.replace('from:', ''),
                            re.findall(r'from:\w+\b', rule.decode())))
                    for rule in parsed_rules:
                        redis_cli.hdel('username_rid', rule)
                redis_cli.hdel("rules", rid)
        logger.info("delete rules from twitter successfully")
        return 200
    [redis_cli.hset('garbage', rid, 1) for rid in ids]
    logger.error("delete rules from twitter failed: {} \n\t {}".format(
        response.status_code, response.json()))
    return response.status_code
def wait_job(self, name, namespace, delete=False, sleep=5):
    jobs_path = self.jobs_path.format(namespace)
    url = 'https://{0}:{1}{2}/{3}'.format(self.kubernetes_service_host,
                                          self.kubernetes_service_port,
                                          jobs_path,
                                          name)
    while True:
        try:
            r = requests.get(url, verify=self.cert_verify, headers=self.auth_header)
            if r.status_code != 200:
                raise Exception(f'Error obtaining {name} info - {str(r.status_code)}\n{str(r.content)}')
            job = r.json()
            if (utils.is_value_in_dict(job['status'], 'succeeded') and
                    utils.is_value_in_dict(job['spec'], 'completions')):
                if job['status']['succeeded'] >= job['spec']['completions']:
                    # Delete succeeded jobs if delete=True
                    if delete:
                        self.delete_job(name, namespace)
                    break
            if (utils.is_value_in_dict(job['status'], 'failed') and
                    utils.is_value_in_dict(job['spec'], 'backoffLimit')):
                if job['status']['failed'] >= job['spec']['backoffLimit']:
                    logger.error(f'{name} failed! See pod logs for details')
                    break
            time.sleep(sleep)
        except Exception as e:
            logger.error(e)
            break
def delete_compute_environment(self, name):
    # First disable and delete the job queue associated with the compute environment
    temp = True
    while temp:
        creation_args = self.get_describe_job_queue_args(name_j=name)
        response = self.client.describe_job_queues(**creation_args)
        state = response["jobQueues"][0]["state"]
        status = response["jobQueues"][0]["status"]
        if state == "ENABLED" and status == "VALID":
            updating_args = self.get_update_job_queue_args(name_j=name)
            response = self.client.update_job_queue(**updating_args)
        elif state == "DISABLED" and status == "VALID":
            deleting_args = self.get_delete_job_queue_args(name_j=name)
            response = self.client.delete_job_queue(**deleting_args)
            temp = False

    # Then disable and delete the compute environment itself
    temp = True
    while temp:
        creation_args = self.get_describe_compute_env_args(name_c=name)
        response = self.client.describe_compute_environments(**creation_args)
        state = response["computeEnvironments"][0]["state"]
        status = response["computeEnvironments"][0]["status"]
        if state == "ENABLED":
            update_args = self.get_update_compute_env_args(name_c=name)
            response = self.client.update_compute_environment(**update_args)
        elif state == "DISABLED" and status == "VALID" and not self.exist_jobs_queue(name):
            delete_args = self.get_delete_compute_env_args(name_c=name)
            response = self.client.delete_compute_environment(**delete_args)
            temp = False

    logger.info("Compute environment deleted")
def delete_function(self, function_name):
    try:
        # Delete the lambda function
        return self.get_client().delete_function(FunctionName=function_name)
    except ClientError as ce:
        error_msg = "Error deleting the lambda function"
        logger.error(error_msg, error_msg + ": %s" % ce)
def post(self):
    user = current_user()

    if not user or not user.is_admin:
        return '', 403

    school = user.school
    sponsor_name = api.payload['name']
    logo_name = '{}-{}'.format(slugify(sponsor_name, separator='-', to_lower=True),
                               uuid4().hex)

    try:
        # TODO: Once you have apscheduler, run this as a delayed job
        logo = upload_image(
            data=api.payload['logo'],
            name=logo_name,
            location='sponsors/'
        )
    except BaseException:
        logger.error('Error uploading image to S3')
        return 'Error uploading provided image', 500

    dbi.create(Sponsor, {
        'school': school,
        'name': sponsor_name,
        'logo': logo,
        'url': api.payload['url']
    })

    sponsors = format_sponsors(school.sponsors)

    return sponsors, 201
def _create_bucket(self, bucket_name):
    try:
        self.client.make_bucket(bucket_name)
    except minio.error.BucketAlreadyOwnedByYou as err:
        logger.warning(err)
    except minio.error.ResponseError as err:
        logger.error(err)
def loop_function():
    while self.is_running:
        try:
            # retries 10 times over 486secs
            # before raising error/exception
            # check binance_futures_api.py line 113
            # for implementation details
            #client.stream_keepalive()
            listenKey = client.stream_get_listen_key()
            if self.listenKey != listenKey:
                logger.info("listenKey Changed!")
                notify("listenKey Changed!")
                self.listenKey = listenKey
                self.ws.close()

            # Send a heartbeat to Healthchecks.io
            if self.use_healthcecks:
                try:
                    requests.get(conf['healthchecks.io'][self.account]['listenkey_heartbeat'])
                    #logger.info("Listen Key Heart Beat sent!")
                except Exception as e:
                    pass

            time.sleep(600)
        except Exception as e:
            logger.error(f"Keep Alive Error - {str(e)}")
            #logger.error(traceback.format_exc())
            notify(f"Keep Alive Error - {str(e)}")
def post(self):
    # Parse our payload.
    email = api.payload['email']
    name = api.payload['name']
    password = api.payload['password']

    # Ensure the email isn't taken already.
    if dbi.find_one(User, {'email': email}):
        return ACCOUNT_ALREADY_EXISTS

    try:
        # Create the new user
        user = dbi.create(User, {
            'email': email,
            'name': name,
            'hashed_pw': auth_util.hash_pw(password)
        })
    except BaseException as e:
        logger.error('Error creating new user, with error: {}'.format(e))
        return ERROR_CREATING_USER

    # Create a new session for the user
    session = user.new_session()
    token = auth_util.serialize_token(session.id, session.token)

    # Return success with newly created session token as response header
    return {
        'ok': True,
        'message': 'Successfully Created User'
    }, 201, {
        auth_header_name: token
    }
def __on_message(self, ws, message):
    """
    On Message listener
    :param ws:
    :param message:
    :return:
    """
    try:
        obj = json.loads(message)
        if 'e' in obj['data']:
            e = obj['data']['e']
            action = ""
            datas = obj['data']

            if e.startswith("kline"):
                data = [{
                    "timestamp": datas['k']['T'],
                    "high": float(datas['k']['h']),
                    "low": float(datas['k']['l']),
                    "open": float(datas['k']['o']),
                    "close": float(datas['k']['c']),
                    "volume": float(datas['k']['v'])
                }]
                data[0]['timestamp'] = datetime.fromtimestamp(
                    data[0]['timestamp'] / 1000).strftime('%Y-%m-%dT%H:%M:%S')
                data[0]['timestamp'] = datetime.strptime(
                    data[0]['timestamp'], '%Y-%m-%dT%H:%M:%S')
                self.__emit(obj['data']['k']['i'], action, to_data_frame([data[0]]))
            elif e.startswith("24hrTicker"):
                self.__emit(e, action, datas)
            elif e.startswith("ACCOUNT_UPDATE"):
                self.__emit(e, action, datas['a']['P'])
                self.__emit('wallet', action, datas['a']['B'][0])
                self.__emit('margin', action, datas['a']['B'][0])
            # todo ORDER_TRADE_UPDATE
            elif e.startswith("ORDER_TRADE_UPDATE"):
                self.__emit(e, action, datas['o'])
            # todo orderbook stream
            # elif table.startswith(""):
            #     self.__emit(e, action, data)
            elif e.startswith("listenKeyExpired"):
                self.__emit('close', action, datas)
                self.__get_auth_user_data_streams()
                logger.info(f"listenKeyExpired!!!")
                #self.__on_close(ws)
        elif 'e' not in obj['data']:
            e = 'IndividualSymbolBookTickerStreams'
            action = ''
            data = obj['data']
            #logger.info(f"{data}")
            self.__emit(e, action, data)
    except Exception as e:
        logger.error(e)
        logger.error(traceback.format_exc())
def get_api_gateway_url(self, function_name):
    api_id = self.get_api_gateway_id(function_name)
    if api_id is None or api_id == "":
        error_msg = "Error retrieving API ID for lambda function {0}".format(function_name)
        logger.error(error_msg)
        utils.finish_failed_execution()
    return 'https://{0}.execute-api.{1}.amazonaws.com/scar/launch'.format(
        api_id, self.get_property("region"))
def judge(*args, **kwargs):
    try:
        return suspect(*args, **kwargs)
    except Exception as crime:
        logger.error(
            "an error occurred while calling {} function - detail: \n\t{}".format(
                suspect.__name__, crime))
        return 500
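# Note (assumption, not part of the original source): `suspect` is a free variable in
# `judge` above, which suggests `judge` is the inner wrapper returned by a decorator.
# A minimal, self-contained sketch of such an enclosing decorator, using the hypothetical
# name `catch_errors`; the original project may wire this up differently.
import functools
import logging

logger = logging.getLogger(__name__)


def catch_errors(suspect):
    """Return `suspect` wrapped so that any exception is logged and 500 is returned."""
    @functools.wraps(suspect)
    def judge(*args, **kwargs):
        try:
            return suspect(*args, **kwargs)
        except Exception as crime:
            logger.error(
                "an error occurred while calling {} function - detail: \n\t{}".format(
                    suspect.__name__, crime))
            return 500
    return judge


@catch_errors
def _risky_example():
    # Hypothetical usage: the raised error is logged and 500 is returned to the caller.
    raise ValueError("boom")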
def create_input_source(self):
    try:
        self.s3.create_input_bucket()
        self._lambda.link_function_and_input_bucket()
        self.s3.set_input_bucket_notification()
    except ClientError as ce:
        error_msg = "Error creating the event source"
        logger.error(error_msg, error_msg + ": %s" % ce)
def update_function_memory(self, function_name, memory):
    try:
        self.get_client().update_function_configuration(FunctionName=function_name,
                                                        MemorySize=validators.validate_memory(memory))
    except ClientError as ce:
        error_msg = "Error updating lambda function memory"
        logger.error(error_msg, error_msg + ": %s" % ce)
        utils.finish_failed_execution()
def __on_error(self, ws, message):
    """
    On Error listener
    :param ws:
    :param message:
    """
    logger.error(message)
    logger.error(traceback.format_exc())
def set_log_retention_policy(self, log_group_name, log_retention_policy_in_days):
    try:
        logger.debug("Setting log group policy.")
        self.get_client().put_retention_policy(logGroupName=log_group_name,
                                               retentionInDays=log_retention_policy_in_days)
    except ClientError as ce:
        logger.error("Error setting log retention policy",
                     "Error setting log retention policy: %s" % ce)
def update_function_timeout(self, function_name, timeout):
    try:
        self.get_client().update_function_configuration(FunctionName=function_name,
                                                        Timeout=validators.validate_time(timeout))
    except ClientError as ce:
        error_msg = "Error updating lambda function timeout"
        logger.error(error_msg, error_msg + ": %s" % ce)
        utils.finish_failed_execution()
def clearTables():
    with app.app_context():
        try:
            meta = db.metadata
            for table in reversed(meta.sorted_tables):
                db.session.execute(table.delete())
            db.session.commit()
        except Exception as e:
            logger.error("clearTables: error = {0}".format(e))
def parse_arguments(self):
    '''Command parsing and selection'''
    try:
        return self.parser.parse_args()
    except AttributeError as ae:
        logger.error("Incorrect arguments: use scar -h to see the options available",
                     "Error parsing arguments: %s" % ae)
        utils.finish_failed_execution()
def get_log_events_by_group_name_and_stream_name(self, log_group_name, log_stream_name):
    try:
        return self.get_client().get_log_events(logGroupName=log_group_name,
                                                logStreamName=log_stream_name,
                                                startFromHead=True)
    except ClientError as ce:
        logger.error("Error getting log events for log group '%s' and log stream name '%s': %s"
                     % (log_group_name, log_stream_name, ce))
        utils.finish_failed_execution()
def consume_url(url, session=None):
    if session is None:
        session = init_session()
    try:
        post: PttPost = PttPost(url, session)
        p = post.to_json()
        return p
    except Exception as e:
        logger.error("error: {} {}".format(url, e))
def __on_message(self, ws, message):
    """
    On Message listener
    :param ws:
    :param message:
    :return:
    """
    try:
        obj = json.loads(message)
        if 'table' in obj:
            if len(obj['data']) <= 0:
                return

            table = obj['table']
            action = obj['action']
            data = obj['data']

            if table.startswith("tradeBin"):
                data[0]['timestamp'] = datetime.strptime(
                    data[0]['timestamp'][:-5], '%Y-%m-%dT%H:%M:%S')
                new_data = []
                new_data.append(data[0])
                # add placeholder tick so it resamples correctly
                new_data.append({
                    "timestamp": data[0]['timestamp'] + timedelta(seconds=0.01),
                    "open": data[0]['close'],
                    "high": data[0]['close'],
                    "low": data[0]['close'],
                    "close": data[0]['close'],
                    "volume": 0
                })
                self.__emit(table, action, to_data_frame(new_data))
            elif table.startswith("instrument"):
                self.__emit(table, action, data[0])
            elif table.startswith("margin"):
                self.__emit(table, action, data[0])
            elif table.startswith("position"):
                self.__emit(table, action, data[0])
            elif table.startswith("wallet"):
                self.__emit(table, action, data[0])
            elif table.startswith("orderBookL2"):
                self.__emit(table, action, data)
    except Exception as e:
        logger.error(e)
        logger.error(traceback.format_exc())
def delete_log_group(self, log_group_name):
    try:
        # Delete the cloudwatch log group
        return self.get_client().delete_log_group(logGroupName=log_group_name)
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
            logger.warning("Cannot delete log group '%s'. Group not found." % log_group_name)
        else:
            logger.error("Error deleting the cloudwatch log",
                         "Error deleting the cloudwatch log: %s" % ce)
def get_log_events_by_group_name(self, log_group_name, next_token=None):
    try:
        if next_token:
            return self.get_client().filter_log_events(logGroupName=log_group_name,
                                                       nextToken=next_token)
        else:
            return self.get_client().filter_log_events(logGroupName=log_group_name)
    except ClientError as ce:
        logger.error("Error getting log events for log group '%s': %s"
                     % (log_group_name, ce))
        utils.finish_failed_execution()
def add_invocation_permission(self, function_name, principal, source_arn):
    try:
        self.get_client().add_permission(FunctionName=function_name,
                                         StatementId=utils.get_random_uuid4_str(),
                                         Action="lambda:InvokeFunction",
                                         Principal=principal,
                                         SourceArn=source_arn)
    except ClientError as ce:
        error_msg = "Error setting lambda permissions"
        logger.error(error_msg, error_msg + ": %s" % ce)
def update_function_env_variables(self, function_name, env_vars):
    try:
        # Retrieve the global variables already defined
        lambda_env_variables = self.get_function_environment_variables(function_name)
        lambda_env_variables['Variables'].update(env_vars)
        self.get_client().update_function_configuration(FunctionName=function_name,
                                                        Environment=lambda_env_variables)
    except ClientError as ce:
        error_msg = "Error updating the environment variables of the lambda function"
        logger.error(error_msg, error_msg + ": %s" % ce)
def get_resources(self, api_id):
    '''
    Default type REGIONAL, other possible type EDGE.
    More info in https://boto3.readthedocs.io/en/latest/reference/services/apigateway.html#APIGateway.Client.get_resources
    '''
    try:
        return self.get_client().get_resources(restApiId=api_id)
    except ClientError as ce:
        error_msg = "Error getting resources for the API ID '{0}'".format(api_id)
        logger.error(error_msg, error_msg + ": {0}".format(ce))