def run_pipeline() -> None:
    """Steps of the pipeline to run nightly."""
    try:
        get_firehook_scanfile_mirror().sync()
        get_firehook_routeview_mirror().sync()
        get_censoredplanet_mirror().sync()

        # This is a very weird hack.
        # We execute the beam pipeline as a separate process
        # because beam really doesn't like it when the main file for a pipeline
        # execution is not the same file the pipeline run call is made in.
        # It would require all the deps to be packaged and installed on the
        # workers, which in our case requires packaging up many google cloud
        # packages, which is slow (hangs basic worker machines) and wasteful.
        subprocess.run([
            sys.executable, '-m', 'pipeline.run_beam_tables', '--env=prod',
            '--scan_type=all'
        ],
                       check=True,
                       stdout=subprocess.PIPE)

        rebuild_all_tables()
    except Exception:
        # If something goes wrong, also log to the GCP error console.
        error_reporting.Client().report_exception()
        raise
def __init__(self):
    self.isServiceAvailable = True
    signal.signal(signal.SIGINT, self.exit_gracefully)
    signal.signal(signal.SIGTERM, self.exit_gracefully)

    self.logging = error_reporting.Client()
    self.cache = Cache(ttl=5)
    self.coinGecko = CoinGeckoAPI()
    self.lastBitcoinQuote = {
        "quotePrice": [0],
        "quoteVolume": None,
        "ticker": Ticker("BTCUSD", "BTCUSD", "BTC", "USD", "BTC/USD", hasParts=False),
        "exchange": None,
        "timestamp": time.time()
    }
    try:
        rawData = self.coinGecko.get_coin_by_id(
            id="bitcoin",
            localization="false",
            tickers=False,
            market_data=True,
            community_data=False,
            developer_data=False)
        self.lastBitcoinQuote["quotePrice"] = [rawData["market_data"]["current_price"]["usd"]]
        self.lastBitcoinQuote["quoteVolume"] = rawData["market_data"]["total_volume"]["usd"]
    except Exception:
        # CoinGecko may be unreachable at startup; fall back to the placeholder quote.
        pass

    context = zmq.Context.instance()
    self.socket = context.socket(zmq.ROUTER)
    self.socket.bind("tcp://*:6900")

    print("[Startup]: Quote Server is online")
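A minimal client sketch for the ROUTER socket above, assuming the server answers each request with a single reply frame addressed to the client's identity; the payload format and the function name are assumptions, only the endpoint port comes from the snippet.

import zmq

def request_quote(payload: bytes) -> bytes:
    # A DEALER socket pairs with the server's ROUTER; the ROUTER side sees
    # our identity frame followed by the payload.
    context = zmq.Context.instance()
    socket = context.socket(zmq.DEALER)
    socket.connect("tcp://localhost:6900")
    socket.send(payload)
    reply = socket.recv()  # blocks until the server responds
    socket.close()
    return reply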
def server_error(e):
    client = error_reporting.Client(app.config['PROJECT_ID'])
    client.report_exception(
        http_context=error_reporting.build_flask_context(request))
    return """
    An internal error occurred.
    """, 500
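For this handler to fire, it has to be registered with the Flask app; a minimal sketch using Flask's standard registration API.

# Equivalent to decorating server_error with @app.errorhandler(500).
app.register_error_handler(500, server_error)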
def translate_image(request: flask.Request) -> flask.Response:
    """
    Translates a manga.

    :param request: flask request object with an image file "manga"
    :return: flask response with a base64-encoded png on success OR an error
        message on failure
    """
    error_report_client = error_reporting.Client()
    try:
        image = request.files['manga'].read()
        if image:
            translator = MangaTranslator()
            translated_manga = translator.translate(image)
            encoded_translated_manga = base64.b64encode(translated_manga)
            response = flask.make_response(
                (encoded_translated_manga, 200, {'Content-Type': 'image/png'}))
            return response
        else:
            return flask.make_response(('Invalid Argument', 406))
    except Exception:
        error_report_client.report_exception()
        return flask.make_response(('Failed to process manga :(', 500))
def simulate_error():
    client = error_reporting.Client()
    try:
        # simulate calling a method that's not defined
        raise NameError
    except Exception:
        client.report_exception()
def predict(model_name, project, data, model_version='v2'):
    """
    Makes an API call to AI Platform and returns the prediction.

    :param model_name: REQUIRED. STRING. Name of the model on AI Platform.
    :param project: your project id.
    :param data: cleaned and preprocessed data, in the shape your model
        expects, in JSON format: "{instances: [data]}"
    :param model_version: model version you want to make the request to
    :return: prediction
    """
    project_id = "projects/{}".format(project)
    service = discovery.build('ml', 'v1')
    name = "{}/models/{}".format(project_id, model_name)
    if model_version is not None:
        name += "/versions/{}".format(model_version)

    data_pred = json.loads(data)
    instances = data_pred['instances']
    try:
        response = service.projects().predict(
            name=name, body={"instances": instances}).execute()
        return response['predictions']
    except Exception:
        error_client = error_reporting.Client()
        error_client.report_exception()
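A hedged usage sketch; the model name, project id, and instance shape are placeholders that depend entirely on the deployed model.

import json

# Hypothetical payload: a model that takes one 4-feature vector per instance.
payload = json.dumps({"instances": [[5.1, 3.5, 1.4, 0.2]]})
predictions = predict("my_model", "my-gcp-project", payload, model_version="v2")
print(predictions)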
def to_savedmodel(keras_model, export_path):
    """
    Converts a Keras model into the TensorFlow SavedModel format.

    :param keras_model: loaded Keras model
    :param export_path: local directory where you want to save your
        TensorFlow SavedModel
    :return: None. Creates the directory and a saved_model.pb file.
    """
    try:
        builder = saved_model_builder.SavedModelBuilder(export_path)
        signature = predict_signature_def(
            inputs={'input': keras_model.inputs[0]},
            outputs={'output': keras_model.outputs[0]})

        with K.get_session() as sess:
            builder.add_meta_graph_and_variables(
                sess=sess,
                tags=[tag_constants.SERVING],
                signature_def_map={
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        signature
                })
            builder.save()
    except Exception:
        error_client = error_reporting.Client()
        error_client.report_exception()
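A sketch of exporting with the helper above, assuming a TF 1.x / standalone-Keras environment (where K.get_session() exists); the file paths are placeholders.

from keras.models import load_model

# SavedModelBuilder requires that the export directory not exist yet.
keras_model = load_model('model.h5')
to_savedmodel(keras_model, './export/1')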
def post(instring):
    errorLog = error_reporting.Client(
        project='gcpoc-173120', service="GCPOCGetService", version="1.0.0")
    logClient = logging.Client()
    logger = logClient.logger('GCPOCPostLog')
    connection = pymysql.connect(
        host=os.environ['GCPOC_DB_HOST'],
        user=os.environ['GCPOC_DB_USER'],
        password=os.environ['GCPOC_DB_PASSWORD'],
        db=os.environ['GCPOC_DB_DATABASE'],
        charset='utf8mb4',
        cursorclass=pymysql.cursors.Cursor)
    try:
        with connection.cursor() as cursor:
            sql = "INSERT INTO gcpoc (instring) VALUES (%s)"
            cursor.execute(sql, (instring,))
            logger.log_text("Added %s to database" % instring)
        connection.commit()
    except Exception:
        errorLog.report_exception()
        return instring, 500
    finally:
        connection.close()
    return instring, 201
def process(self, KVelement, offsetData):
    fileName = '<unknown>'  # keep a value for error reporting if parsing fails early
    try:
        key, element = KVelement
        imgMetadata = element['imgMetadata']
        fileName = imgMetadata['fileName']
        if 'Nt' not in imgMetadata:
            assert imgMetadata['Nz'] == 1
            assert offsetData[key].size > 0
            imgMetadata['Nt'] = offsetData[key].size
        else:
            assert 'Nz' in imgMetadata
            # Nframes = imgMetadata['Nt']*imgMetadata['Nz']
            # assert Nframes == offsetData[key].size
        splitter = self._getSplitter(imgMetadata)
        IFD = element['IFD']
        sortedIFDs = offsetData[key]
        assert IFD in sortedIFDs
        n = searchsorted(sortedIFDs, IFD) + 1
        for chunk in splitter.iterChunks(n, element['frame']):
            yield beam.pvalue.TaggedOutput('chunks', chunk)
    except Exception:
        client = error_reporting.Client()
        client.report('File Not Processed: ' + fileName)
        client.report_exception()
        logging_client = logging.Client()
        log_name = 'TIF-reader'
        logger = logging_client.logger(log_name)
        logmessage = {'Error': 'File cannot be read', 'Filename': fileName}
        logger.log_struct(logmessage)
        yield beam.pvalue.TaggedOutput('readError', ('File Not Processed', fileName))
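This reads like an apache_beam DoFn with two tagged outputs. A sketch of how a pipeline might consume them; the DoFn class name TiffSplitFn, the input collection, and the side-input wiring are assumptions, only the tag names come from the snippet.

import apache_beam as beam

# Hypothetical wiring: TiffSplitFn is the DoFn containing process() above,
# and offset_data is supplied as an extra argument / side input.
results = (
    pipeline
    | 'CreateElements' >> beam.Create(kv_elements)
    | 'SplitTiffs' >> beam.ParDo(TiffSplitFn(), offset_data).with_outputs(
        'chunks', 'readError'))

chunks = results.chunks          # successfully split image chunks
read_errors = results.readError  # ('File Not Processed', fileName) tuples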
def search() -> list:
    errorLog = error_reporting.Client(
        project='gcpoc-173120', service="GCPOCGetService", version="1.0.0")
    logClient = logging.Client()
    logger = logClient.logger('GCPOCGetLog')
    connection = pymysql.connect(
        host=os.environ['GCPOC_DB_HOST'],
        user=os.environ['GCPOC_DB_USER'],
        password=os.environ['GCPOC_DB_PASSWORD'],
        db=os.environ['GCPOC_DB_DATABASE'],
        charset='utf8mb4',
        cursorclass=pymysql.cursors.Cursor)
    try:
        with connection.cursor() as cursor:
            sql = "SELECT instring from gcpoc"
            cursor.execute(sql)
            result = [item[0] for item in cursor.fetchall()]
            logger.log_text("Found %d items in database" % len(result))
    except Exception:
        errorLog.report_exception()
        return None, 500
    finally:
        # Was `connection.close` (no parens), which never actually closed the connection.
        connection.close()
    return result, 200
def try_execute():
    try:
        delete()
    except Exception:
        traceback.print_exc()
        error_reporting.Client(
            service="oanda_history." + ENVIRONMENT).report_exception()
def create_version(model_name, version_name, project_id, deployment_uri):
    """
    Create a model version on AI Platform.

    :param model_name: STRING. REQUIRED. Name of the model.
    :param version_name: STRING. REQUIRED. Version name (should be kept
        simple, e.g. v1). Must be unique within the model it is created in.
    :param project_id: STRING. REQUIRED. Project id.
    :param deployment_uri: STRING. REQUIRED. GCS URI of the folder containing
        the saved_model.pb to use for predictions.
    :return: None. The model version is created on AI Platform.
    """
    project = f"projects/{project_id}"
    model_id = f"{project}/models/{model_name}"
    ml = discovery.build('ml', 'v1')
    request_dict = {
        'name': version_name,
        'deploymentUri': deployment_uri,
        'runtimeVersion': '1.14',
        'framework': 'TENSORFLOW',
        'pythonVersion': '3.5'
    }
    request = ml.projects().models().versions().create(
        parent=model_id, body=request_dict)

    # Make the API request.
    try:
        response = request.execute()
        print(response)
    except Exception:
        error_client = error_reporting.Client()
        error_client.report_exception()
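A usage sketch; all values are placeholders, and the GCS folder must already contain the exported SavedModel.

create_version(
    model_name="my_model",
    version_name="v2",
    project_id="my-gcp-project",
    deployment_uri="gs://my-bucket/models/my_model/1")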
def manualError(self, __text):
    from google.cloud import error_reporting
    client = error_reporting.Client()
    try:
        client.report(__text)
    except Exception:
        client.report_exception()
def try_execute():
    try:
        datetime_limit = deletion_limit()
        history.delete_old(datetime_limit)
    except Exception:
        traceback.print_exc()
        error_reporting.Client(
            service="kraken_ohlc_clean." + ENVIRONMENT).report_exception()
def try_execute():
    try:
        start_loop()
    except Exception:
        traceback.print_exc()
        if ENVIRONMENT == 'dev':
            return
        client = greport.Client(service=f'{SERVICE_NAME}.{ENVIRONMENT}')
        client.report_exception()
def exceptionError(self):
    from google.cloud import error_reporting
    client = error_reporting.Client()
    try:
        client.report_exception()
    except Exception:
        # Reporting is best-effort; never let it crash the caller.
        pass
def server_error(e):
    client = error_reporting.Client()
    client.report_exception(
        http_context=error_reporting.build_flask_context(request))
    return """
    An internal error occurred: <pre>{}</pre>
    See logs for full stacktrace.
    """.format(e), 500
def server_error(e):
    client = error_reporting.Client()
    client.report_exception()
    client.report("An error occurred during a request")
    return """
    An internal error occurred: <pre>{}</pre>
    See logs for full stacktrace.
    """.format(e), 500
def try_execute():
    try:
        add_to_history()
    except EmptyCandles:
        pass
    except Exception:
        traceback.print_exc()
        error_reporting.Client(
            service="oanda_history." + ENVIRONMENT).report_exception()
def try_execute():
    try:
        status_list = build_status()
        firestore.save_all(status_list)
    except Exception:
        traceback.print_exc()
        if ENVIRONMENT == 'dev':
            return
        client = greport.Client(service=f'health_mongo.{ENVIRONMENT}')
        client.report_exception()
def try_execute():
    try:
        diagnostics = firestore.get_documents()
        diagnostics = prune_dev(diagnostics)
        process_diagnotics(diagnostics, MAILGUN_API_KEY)
    except Exception:
        traceback.print_exc()
        client = greport.Client(service='health_check')
        client.report_exception()
def wrapper(*args, **kwargs):
    try:
        client = error_reporting.Client()
    except Exception:  # pylint: disable=broad-except
        # No reporting client available (e.g. missing credentials);
        # run the wrapped function unprotected.
        return func(*args, **kwargs)
    else:
        try:
            return func(*args, **kwargs)
        except Exception:  # pylint: disable=broad-except
            client.report_exception()
            # Re-raise so the wrapper doesn't silently return None
            # (the original swallowed the exception after reporting it).
            raise
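The wrapper above looks like the inner function of a decorator; a minimal sketch of the enclosing decorator it might belong to. The name report_exceptions is hypothetical; the body mirrors the snippet.

import functools

from google.cloud import error_reporting


def report_exceptions(func):
    # Hypothetical enclosing decorator for the wrapper above.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            client = error_reporting.Client()
        except Exception:  # pylint: disable=broad-except
            return func(*args, **kwargs)
        else:
            try:
                return func(*args, **kwargs)
            except Exception:  # pylint: disable=broad-except
                client.report_exception()
                raise
    return wrapper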
def run(self):
    while self.isRunning:
        try:
            msg = self.queue.get(block=True, timeout=2)
            self.__diffuse__(msg)
        except Empty:
            # No message within the timeout; loop again so isRunning is re-checked.
            pass
        except Exception:
            traceback.print_exc()
            error_reporting.Client(
                service="oanda_history." + ENVIRONMENT).report_exception()
def __init__(self, name, to_cloud=True):
    self.to_cloud = to_cloud
    if self.to_cloud:
        # Use the Stackdriver logging and error reporting clients.
        self.logger = logging.Client().logger(name)
        self.error_client = error_reporting.Client()
    else:
        # Log to a local file.
        self.logger = getLogger(name)
        basicConfig(format=LOGS_FORMAT, level=NOTSET, filename=LOG_FILE)
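A usage sketch for this dual-mode logger, assuming the enclosing class is named Logger; only its __init__ is shown above, so the class name is an assumption.

# Hypothetical usage: Stackdriver-backed in production, file-backed locally.
cloud_log = Logger('my-service')                  # cloud logger + error client
local_log = Logger('my-service', to_cloud=False)  # stdlib logger writing to LOG_FILE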
def try_to_execute():
    try:
        execute()
    except (EmptyCandles, refobj.EmptyReferenceObject):
        # Note: the original `except EmptyCandles or refobj.EmptyReferenceObject:`
        # only ever caught EmptyCandles; a tuple is needed to catch both.
        pass
    except Exception:
        traceback.print_exc()
        if ENVIRONMENT == 'dev':
            # Skip cloud reporting in dev, matching the sibling helpers
            # (the original `pass` fell through and reported anyway).
            return
        error_reporting.Client(
            service="kraken_ohlc_bigquery." + ENVIRONMENT).report_exception()
def google_error_reporting_handler(service=None):  # pragma: no coverage
    try:
        service_name = service or defaults.ERROR_HANDLER_SERVICE
        from google.cloud import error_reporting
        client = error_reporting.Client(service=service_name)
        return client.report_exception
    except Exception:
        logging.warning("Could not start error reporting client")
        return None
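A sketch of using the returned callable from an except block; do_work is hypothetical, the factory is the function above.

report = google_error_reporting_handler(service="my-service")
try:
    do_work()  # hypothetical workload
except Exception:
    if report is not None:
        report()  # report_exception() picks up the active exception
    raise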
def _initialize_cloud_clients():
    """Initialize clients for Google Cloud Logging and Error Reporting."""
    assert not utils.is_local()

    global _log_client
    if _log_client:
        return
    _log_client = google.cloud.logging.Client()
    logging_handler = CloudLoggingHandler(_log_client)
    logging.getLogger().addHandler(logging_handler)

    global _error_reporting_client
    _error_reporting_client = error_reporting.Client()
def bug():
    client = error_reporting.Client()
    saludo = "hola"
    cuenta = 5 + 10
    logging.info(saludo)
    try:
        # simulate calling a method that's not defined
        raise NameError
    except Exception:
        client.report_exception()
    return "Bug!"
def lazy_error_reporting_client() -> error_reporting.Client:
    """
    Return an error reporting client that may be shared between cloud
    function invocations.

    https://cloud.google.com/functions/docs/monitoring/error-reporting
    """
    global ERROR_REPORTING_CLIENT
    if not ERROR_REPORTING_CLIENT:
        ERROR_REPORTING_CLIENT = error_reporting.Client()
    return ERROR_REPORTING_CLIENT
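A sketch of the intended call pattern from a Cloud Function; handle_request and do_work are hypothetical.

def handle_request(request):
    # Hypothetical HTTP Cloud Function entry point.
    try:
        return do_work(request)  # hypothetical
    except Exception:
        # Reuse one client across warm invocations rather than constructing
        # a new one per request.
        lazy_error_reporting_client().report_exception()
        raise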
def testErrorReporting(self):
    """Test if we can report an error using the Error Reporting API."""
    client = error_reporting.Client(project=getProjectId())
    try:
        client.report("Testing reachability!")
    except exceptions.NotFound:
        # The Error Reporting API is not enabled, so we can't report errors.
        raise
    except Exception:
        # Unexpected error.
        raise