def oauth2callback():
    # Specify the state when creating the flow in the callback so that it can
    # be verified in the authorization server response.
    state = flask.session['state']
    uid = flask.session['uid']
    flow = google_auth_oauthlib.flow.Flow.from_client_secrets_file(
        CLIENT_SECRETS_FILE, scopes=SCOPES, state=state)
    flow.redirect_uri = flask.url_for('oauth2callback', _external=True)

    # Use the authorization server's response to fetch the OAuth 2.0 tokens.
    authorization_response = flask.request.url
    flow.fetch_token(authorization_response=authorization_response)

    # Store credentials in the session.
    # ACTION ITEM: In a production app, you likely want to save these
    # credentials in a persistent database instead.
    credentials = flow.credentials
    credentials_dict = credentials_to_dict(credentials)
    flask.session['credentials'] = credentials_dict
    logging.info(f"Current credentials are {credentials_dict}")
    sql_handler.save_token(uid, credentials_dict)
    response = flask.jsonify(credentials_dict)
    response.status_code = 200
    return response
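
# A minimal sketch of the credentials_to_dict() helper called above, following
# the pattern from Google's OAuth 2.0 Flask samples; the exact field set in
# the original helper may differ.
def credentials_to_dict(credentials):
    """Serialize google.oauth2.credentials.Credentials into a plain dict."""
    return {
        'token': credentials.token,
        'refresh_token': credentials.refresh_token,
        'token_uri': credentials.token_uri,
        'client_id': credentials.client_id,
        'client_secret': credentials.client_secret,
        'scopes': credentials.scopes,
    }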
def analyze_picture(file_name):
    # Instantiates a client
    client = vision.ImageAnnotatorClient()

    # The name of the image file to annotate
    file_name = os.path.abspath(file_name)

    # Loads the image into memory
    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
    image = types.Image(content=content)

    # Performs label detection on the image file
    response = client.label_detection(image=image)
    labels = response.label_annotations
    logging.info(
        'Results of Vision AI check. Search label: {}'.format(label_to_detect))
    score = 0
    other_objects = ""
    for label in labels:
        other_objects = other_objects + label.description + "(" + str(
            round(label.score, 2)) + "), "
        print(label.description, round(label.score, 2))
        if label.description == label_to_detect:
            score = label.score
    return score, other_objects
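
# A minimal usage sketch (hypothetical, not from the original source): the
# function reads the module-level global label_to_detect, so it must be set
# before the call. The target label and file path here are placeholders.
def demo_analyze_picture():
    global label_to_detect
    label_to_detect = 'Dog'  # hypothetical target label
    score, other_objects = analyze_picture('/tmp/photo.jpg')
    logging.info(f'Match score {score}; all labels: {other_objects}')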
def prefix_filters_handler(modash_profile_ref, prefix_filters):
    """
    The prefix filters have to be supported in SUPPORTED_FILTER_PREFIXES,
    and only the first valid one will be applied.
    :param modash_profile_ref: Firestore collection reference to filter on.
    :param prefix_filters:
        "prefix_filters": [
            {
                "prefix": "languages",
                "value": "Chinese",
                "min": 0,
                "max": 0.10
            }
        ]
    :return: a filtered query if a valid prefix filter is found, otherwise
        the original modash_profile_ref.
    """
    for prefix_filter in prefix_filters:
        cur_prefix = prefix_filter.get('prefix')
        cur_value = prefix_filter.get('value')
        if not cur_prefix or cur_prefix not in SUPPORTED_FILTER_PREFIXES or not cur_value:
            continue
        field_name = f'{cur_prefix}{FIELD_DELIMITER}{cur_value}'
        logging.info(f'filtering on field: {field_name}')
        cur_min = prefix_filter.get('min') or 0
        cur_max = prefix_filter.get('max') or 1
        if cur_max > 1:
            # Treat values above 1 as percentages.
            cur_max = cur_max / 100
        prefix_query = modash_profile_ref.where(field_name, u'>=', cur_min).where(
            field_name, u'<=', cur_max)
        return prefix_query
    return modash_profile_ref
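
# A minimal usage sketch (collection name is hypothetical): apply a single
# language filter to a profiles collection, assuming FIELD_DELIMITER joins
# prefix and value into the stored field name.
def demo_prefix_filters():
    db = firestore.Client(project=PROJECT)
    profiles_ref = db.collection(u'modash_profiles')  # hypothetical collection
    filters = [{'prefix': 'languages', 'value': 'Chinese', 'min': 0, 'max': 0.10}]
    query = prefix_filters_handler(profiles_ref, filters)
    for doc in query.stream():
        logging.info(f'{doc.id} => {doc.to_dict()}')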
def track():
    """
    public endpoint (no auth)
    Note: Shopify client side is using the following code snippet to send
    tracking events:
    # req.send(JSON.stringify({
    #     lifo_tracker_id: lifo_tracker_id,
    #     shop: getShop(),
    #     location: document.location,
    #     navigator: navigator.userAgent,
    #     referrer: document.referrer,
    #     discount_code,
    # })),
    """
    data = flask.request.json
    logging.info(f'Receiving /track request {data}')
    if not data.get('shop'):
        logging.warning(f'Invalid shop data received {data}')
    elif not data.get('lifo_tracker_id'):
        logging.debug(f'Skip non-lifo event {data}')
    else:
        try:
            res = sql_handler.save_track_visit(data)
            if res.status_code == 200:
                logging.info('Data saved to cloud SQL')
        except Exception as e:
            logging.error(f'Saving events error: {e}')
    response = flask.jsonify({'status': 'OK'})
    response.status_code = 200
    return response
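
# A minimal request sketch (host and field values are hypothetical) showing
# the payload shape the /track handler above expects:
def demo_track_request():
    import requests
    payload = {
        'lifo_tracker_id': 'lt_123',                 # hypothetical tracker id
        'shop': 'example-shop.myshopify.com',        # hypothetical shop domain
        'location': 'https://example-shop.myshopify.com/products/x',
        'navigator': 'Mozilla/5.0',
        'referrer': 'https://www.google.com',
        'discount_code': None,
    }
    requests.post('https://example.com/track', json=payload)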
def orders_paid():
    """
    This is the public endpoint (no auth) for the Shopify orders_paid webhook.
    """
    data = flask.request.json
    logging.info(f'Receiving orders_paid request {data}')
    if data.get('topic') != 'ORDERS_PAID':
        logging.warning(f'Invalid non-ORDERS_PAID data received {data}')
    elif not data.get('domain') or not data.get('payload'):
        logging.warning(f'Invalid shop/customer data received {data}')
    else:
        try:
            shop = data.get('domain')
            order_id = data.get('payload').get('id')
            customer_id = data.get('payload').get('customer').get('id')
            payload = data.get('payload')
            res = sql_handler.save_orders_paid(shop, order_id, customer_id, payload)
            if res.status_code == 200:
                logging.info('Data saved to cloud SQL')
        except Exception as e:
            logging.error(f'Saving events error: {e}')
    response = flask.jsonify('OK')
    response.status_code = 200
    return response
def init(app):
    logging.info('Initializing application')
    app.config['db'] = firestore.Client(project=PROJECT)
    app.config['FIREBASE_PROJECT_ID'] = PROJECT
    app.config['FIREBASE_AUTH_SIGN_IN_OPTIONS'] = PROVIDERS
    if os.getenv('GAE_ENV', '').startswith('standard') or os.getenv(
            'CLOUD_RUN', '') == "True":
        app.secret_key = get_secret(PROD_FLASK_SECRET)
        app.config['FIREBASE_API_KEY'] = get_secret(FIREBASE_API_KEY)
        app.config['IS_DEV'] = False
        app.debug = False
        app.config['UPLOAD_BUCKET'] = UPLOAD_BUCKET
        client = google.cloud.logging.Client()
        client.setup_logging()
        logging.basicConfig(level=logging.INFO)
    else:
        app.config['IS_DEV'] = True
        app.config['FIREBASE_API_KEY'] = "dev"
        if os.getenv('DEV_FLASK_SECRET'):
            app.secret_key = os.getenv('DEV_FLASK_SECRET')
        else:
            app.secret_key = open(DEV_FLASK_SECRET, 'rb').read()
        app.debug = True
        app.config['UPLOAD_BUCKET'] = "dev_uploads"
        logging.basicConfig(level=logging.INFO)
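
# A minimal wiring sketch (hypothetical, not from the original source),
# assuming flask is imported in this module, showing how init() would
# typically be applied at app startup:
def create_app():
    app = flask.Flask(__name__)
    init(app)
    return app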
def archiveLog(self, job, wsdef, workstepLog):
    proj = self.projects[job.projectId]

    # un-backslash filename in case of windows shenanigans
    workstepLog = self.unbackslashPath(workstepLog)

    # Check that we're configured to publish stuff
    if not proj.bucketName:
        logging.warning("archiveLog: No bucketName set in project, can't archive log.")
        return False

    # Make sure the file exists
    if not os.path.exists(workstepLog):
        logging.warning(f"archiveLog: Workstep log file {workstepLog} does not exist.")
        return False
    else:
        logging.info(f"Archiving {workstepLog} to bucket {proj.bucketName}")

    if self.storage_client is None:
        self.storage_client = google.cloud.storage.Client()

    logFilename = os.path.split(workstepLog)[-1]
    bucket = self.storage_client.bucket(proj.bucketName)
    blobName = self.unbackslashPath(
        os.path.join(proj.projectId, job.jobKey, "logs", logFilename))
    blob = bucket.blob(blobName)
    result = blob.upload_from_filename(workstepLog, content_type="text/plain;charset=UTF-8")

    logArchiveUrl = f"https://{bucket.name}.storage.googleapis.com/{blob.name}"
    logging.info(f"Result of upload is {logArchiveUrl}")
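
# A minimal sketch of the unbackslashPath() helper used above (hypothetical;
# the original is not shown): normalize Windows-style separators to forward
# slashes so GCS object names stay consistent across platforms.
def unbackslashPath(self, path):
    return path.replace("\\", "/")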
def instagram_search():
    """
    AM use. This searches Instagram accounts via the Modash API.
    """
    try:
        data = flask.request.json
        url = f'{MODASH_API_ENDPINT}/instagram/search'
        logging.info(f'Receiving request for url {url} and body {data}')
        headers = {
            'Content-type': 'application/json',
            'Authorization': MODASH_AUTH_HEADER
        }
        modash_search = requests.post(url, data=json.dumps(data), headers=headers)
        search_res = modash_search.json()
        logging.info(f'Modash search returned {search_res}')
        if search_res.get('error'):
            logging.error('Search returned error')
            response = flask.jsonify({'Error': 'Failed to search'})
            response.status_code = 400
        else:
            response = flask.jsonify(search_res)
            response.status_code = 200
    except Exception as e:
        logging.error(f'Search error: {e}')
        response = flask.jsonify({'Error': 'Failed to search'})
        response.status_code = 400
    return response
def onJobsListChanged(self, jobs, changes, read_time):
    # print("On jobslist changed: ", jobs)
    logging.info("Job list changed:")
    self.updateJobsList(jobs)

    # alert the main build that we might need to do some work
    self.changeEvent.set()
def gen_lifo_tracker_id(account_id, contract_data):
    """
    Create and save a tracking url for the given influencer. Currently only
    the Shopify platform is supported.
    :param account_id: influencer account_id
    :param contract_data: the data used for signing the influencer-brand
        contract, which includes contractual details.
    """
    lifo_tracker_id = create_tracker_id(
        uid=account_id,
        brand_campaign_id=contract_data.get('brand_campaign_id'))
    try:
        domain_or_url = contract_data.get('brand_id')
        shop_url = generate_shop_url(domain_or_url)
        if not shop_url:
            response = flask.jsonify({'Status': 'Invalid shop url'})
            response.status_code = 422
            return response
        tracking_url = f'{shop_url}/?lftracker={lifo_tracker_id}'
        commission = contract_data.get('fixed_commission')
        commission_type = contract_data.get('commission_type')
        commission_percentage = contract_data.get('percentage_commission')
        brand_campaign_id = contract_data.get('brand_campaign_id')
        res = sql_handler.save_lifo_tracker_id(account_id, lifo_tracker_id,
                                               domain_or_url, commission,
                                               commission_type,
                                               commission_percentage,
                                               brand_campaign_id, tracking_url)
        if len(res) > 0:
            logging.info(f'Data saved to cloud SQL: {tracking_url}')
    except Exception as e:
        logging.error(f'Saving events error: {e}')
        response = flask.jsonify({'Status': 'Failed'})
        response.status_code = 400
        return response
    return tracking_url
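
# A hypothetical sketch of the create_tracker_id() helper called above (the
# original implementation is not shown, and the id format here is a guess);
# any unique, uid/campaign-derived string would fit the calling convention:
def create_tracker_id(uid, brand_campaign_id):
    import uuid
    return f'{uid}-{brand_campaign_id}-{uuid.uuid4().hex[:8]}'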
def main(request):
    """HTTP Cloud Function.
    :param flask.Request request: The request object
        - http://flask.pocoo.org/docs/1.0/api/#flask.Request
    """
    # initialise Houston client
    h = Houston(plan="houston-quickstart", api_key=KEY)

    stage = request.args.get('stage')
    if stage == "start":
        # start a new mission and get an ID for it
        mission_id = h.create_mission()
    else:
        mission_id = request.args.get('mission_id')

    # call houston to start the stage
    response = h.start_stage(stage, mission_id=mission_id)
    logging.info(response)

    # # # # run the main process - depending on the stage name
    if 'upload-file' in stage:
        upload_file(request.args.get('file_name'))
    elif 'run-query' in stage:
        run_query(request.args.get('query_name'))
    elif 'build-report' in stage:
        build_report(request.args.get('source_table'))
    elif stage == "start":
        pass
    else:
        raise ValueError(f"Didn't know what task to run for stage: '{stage}'")
    # # # #

    # call houston to end the stage - response from houston will tell us
    # which stages are next
    response = h.end_stage(stage, mission_id)
    logging.info(response)

    # trigger the next stage(s)
    for next_stage in response['next']:
        headers = get_authorization_headers(response['params'][next_stage]['uri'])
        # don't wait for requests to resolve - this allows stages to run concurrently
        fire_and_forget(response['params'][next_stage]['uri'],
                        headers=headers,
                        params=response['params'][next_stage])

    return "stage finished", 200
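
# A minimal sketch of the fire_and_forget() helper used above (hypothetical;
# the original is not shown): send the POST on a daemon thread so the function
# can return without waiting for the next stage to respond.
def fire_and_forget(uri, headers=None, params=None):
    import threading
    import requests
    threading.Thread(
        target=requests.post,
        kwargs={'url': uri, 'headers': headers, 'json': params},
        daemon=True,
    ).start()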
def web_entities_detection_gcf(data, context):
    """
    For each image uploaded to the GCS image bucket, perform web entities
    detection.
    To deploy:
    (from local)
    gcloud functions deploy web_entities_detection_gcf --runtime python37 --trigger-resource influencer-272204.appspot.com --trigger-event google.storage.object.finalize
    or (remotely)
    gcloud functions deploy web_entities_detection_gcf --source https://source.developers.google.com/projects/influencer-272204/repos/github_rnap_influencer/moveable-aliases/first/paths/python_functions/ --runtime python37 --trigger-resource influencer-272204.appspot.com --trigger-event google.storage.object.finalize
    :param data: The Cloud Functions event payload.
    :param context: (google.cloud.functions.Context): Metadata of triggering event.
    :return: None
    """
    logging.info(f"Receiving data {data} with {context}")
    if not data['contentType'].startswith('image/'):
        logging.info('This is not an image')
        return None
    name = data['name']
    bucket = data['bucket']
    image_uri = f'gs://{bucket}/{name}'
    try:
        uid, campaign_id, file_name = uri_parser(name)
    except Exception as e:
        print(f'Parsing image URI {image_uri} error: {e}')
        return None
    res = web_entities_include_geo_results_uri(image_uri)

    # note: here we use the campaign history_id to distinguish videos
    return (db.collection(u'campaigns').document(campaign_id).collection(
        u'images').document(file_name).set(
            {
                u'entity_detect_res': res,
                u'media_object_path': name
            }, merge=True))
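
# A hypothetical sketch of the uri_parser() helper called above (the original
# is not shown), assuming object names follow "<uid>/<campaign_id>/<file_name>";
# a malformed name raises ValueError, which the caller catches:
def uri_parser(name):
    uid, campaign_id, file_name = name.split('/', 2)
    return uid, campaign_id, file_name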
def update_door_status(doorMessage):
    logging.info(
        f"Publishing door status update {doorMessage.status.value} at {str(doorMessage.timestamp)}"
    )
    statusCollection.document(str(doorMessage.timestamp)).set(doorMessage.to_json())
    statusCollection.document("latest").set(doorMessage.to_json())
def hello():
    """Return a friendly HTTP greeting."""
    who = request.args.get('who', 'there')
    # Write a log entry
    logging.info(f'who: {who}')
    return f'Hello {who}!\n'
def load_info():
    info_lock.acquire()
    global info_list, reply_markup, last_update
    if last_update and datetime.now() < last_update + timedelta(minutes=1):
        info_lock.release()
        return
    try:
        info_list_local = info_db.fetch()
        keyboard = []
        current_ary = []
        count = 0
        for info in info_list_local:
            # skip the header row
            if count == 0:
                count += 1
                continue
            current_ary.append(
                InlineKeyboardButton(info[0], callback_data=info[0]))
            # two buttons per keyboard row
            if len(current_ary) == 2:
                keyboard.append(current_ary)
                current_ary = []
            count += 1
        if len(current_ary) > 0:
            keyboard.append(current_ary)
        info_list = info_list_local
        reply_markup = InlineKeyboardMarkup(keyboard)
        last_update = datetime.now()
        logging.info("{} - Finished updating info".format(str(datetime.now())))
    except Exception:
        logging.error('Error occurred while updating info.')
    finally:
        info_lock.release()
def happening():
    logging.info('user wants to add a happening')
    try:
        auth.authorize(request)
    except Exception:
        return redirect('/login')
    return render_template("happening.html")
def receive():
    email_json = request.get_json()
    add_email_json("last-raw-json", str(email_json))
    received_to = email_json['to']['value'][0]['address']
    received_from = email_json['from']['value'][0]['address']
    logging.info("received email from {} to {}".format(received_from, received_to))
    add_email_json(received_to, str(email_json))
    return Response(status=200)
def subscribe_to_garagecommand():
    streaming_pull_future = subscriber.subscribe(
        subscription_path, callback=garagecommand_callback)
    logging.info(f"Listening for messages on {subscription_path}")
    with subscriber:
        try:
            streaming_pull_future.result()
        except TimeoutError:
            streaming_pull_future.cancel()
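
# A minimal sketch of the garagecommand_callback used above (hypothetical;
# the original is not shown): decode the Pub/Sub message and ack it so it
# is not redelivered.
def garagecommand_callback(message):
    command = message.data.decode("utf-8")
    logging.info(f"Received garage command: {command}")
    message.ack()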
def connectCloudStuff(agent, do_cloud_logging):
    """ Connects to logging and firestore DB and returns the db connection """
    # Initialize google stuff
    credKey = "GOOGLE_APPLICATION_CREDENTIALS"
    if credKey not in os.environ:
        print("Creds:", agent.googleCredentialFile)
        os.environ[credKey] = agent.googleCredentialFile

    cred = credentials.Certificate(os.environ.get(credKey))
    firebase_admin.initialize_app(cred, {'projectId': agent.googleProjectId})
    db = firestore.client()

    # Initialize logging
    if do_cloud_logging:
        # logger = logging_client.logger("tkbuild-agent-" + agent.name)
        logging_client = google.cloud.logging.Client()
        logname = "tkbuild-agent-" + agent.name
        print("Log name is:", logname)
        logging_handler = google.cloud.logging.handlers.CloudLoggingHandler(
            logging_client, name=logname)
        google.cloud.logging.handlers.setup_logging(logging_handler)

        # Also echo to stdout
        rootLogger = logging.getLogger()
        # rootLogger.setLevel(logging.DEBUG)

        stdoutHandler = logging.StreamHandler(sys.stdout)
        # stdoutHandler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(levelname)s: %(message)s')
        stdoutHandler.setFormatter(formatter)
        rootLogger.addHandler(stdoutHandler)
    else:
        print("Cloud logging is off")
        # Just run with stdout logging for testing
        logging.basicConfig(level=logging.INFO)

    # logging.debug("log debug")
    # logging.info("log info")
    # logging.warning("log warn")
    # logging.error("log error")

    logging.info(f"Agent: {agent.name}: {agent.desc}")
    testRepoProj = None
    for p in agent.projects.values():
        fetchRepoUrl = "(No Fetch Step Defined)"
        pfetch = p.getFetchWorkstep()
        if pfetch:
            fetchRepoUrl = pfetch.repoUrl
        logging.info(f"Project: {p.projectId} -- {fetchRepoUrl}")

    return db
def publish_message(status, message):
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(PROJECT, TOPIC)
    # print(f"Topic path: {topic_path}")
    msg = DoorMessage(status, message)
    data = str(msg.to_json())
    logging.info(f"Publishing status update: {data}")
    data = data.encode("utf-8")
    publisher.publish(topic_path, data)
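
# A minimal usage sketch (hypothetical; DoorStatus is a placeholder for
# whatever status enum DoorMessage expects in the surrounding module):
def demo_publish():
    publish_message(DoorStatus.OPEN, "Door opened by motion sensor")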
def shop_customer_info():
    """
    Called by AM to access Shopify shop customer info using the shop's stored
    access token.
    """
    shop = flask.request.args.get('shop')
    days_range = flask.request.args.get('days_range')
    try:
        days_range = int(days_range)
    except Exception as e:
        logging.warning('Illegal days_range, revert to default value')
        days_range = DEFFAULT_DATE_RANGE

    res = get_shopify_access_token(shop)
    if not res:
        res = {'status': 'access token not found'}
        response = flask.jsonify(res)
        response.status_code = 404
        return response
    shop_access_token = res

    if request.method == 'PUT':
        created_at_min = datetime.datetime.now() - datetime.timedelta(
            days=days_range)
        url = f'https://{shop}/admin/api/{API_VERSION}/customers.json'
        logging.info(f'Receiving request for url {url}')
        headers = {"X-Shopify-Access-Token": shop_access_token}
        params = {
            'limit': MAX_SHOPIFY_RESULTS_LIMIT,
            'created_at_min': created_at_min.isoformat()
        }
        res = requests.get(url, headers=headers, params=params)
        data = res.json()
        logging.info(f'Obtained shop information for shop {shop}: {data}')
        customers = data.get('customers')
        if customers:
            for customer_json in customers:
                customer_id = customer_json.get('id')
                sql_handler.save_customer_info(shop, customer_id, customer_json)
            logging.info(f'Saved {len(customers)} customers for shop {shop}')
        else:
            logging.info('No customers found')
    else:
        logging.info(f'Getting customer location data for shop {shop}')
        query_results = sql_handler.get_shop_customers_locations(shop)
        data = []
        for row in query_results:
            cur_res = {}
            cur_res['city'] = row[0]
            cur_res['province'] = row[1]
            cur_res['location_cnt'] = row[2]
            data.append(cur_res)
    response = flask.jsonify(data)
    response.status_code = 200
    return response
def using_extras(client):
    import logging

    # [START logging_extras]
    my_labels = {"foo": "bar"}
    my_http = {"requestUrl": "localhost"}
    my_trace = "01234"

    logging.info(
        "hello",
        extra={"labels": my_labels, "http_request": my_http, "trace": my_trace},
    )
    # [END logging_extras]
def sendCaseEmail(case):
    global CASE, STOP
    caseinfo = case.information + '\n \n' + case.url + \
        '\n Time Found: \n' + case.time
    print("DECIDED")
    headerCase = "Decision Made for {}".format(case.decision)
    logging.info(headerCase)
    logging.info(caseinfo)
    sendEmail(headerCase, caseinfo)
    if case == "Trump v. Hawaii":
        STOP = True
def button(bot, update):
    load_info()
    query = update.callback_query
    message = "發生錯誤,請再試。"  # "An error occurred, please try again."
    for info in info_list:
        if query.data == info[0]:
            message = info[1]
            break
    logging.info("{} - Query {}".format(str(datetime.now()), info[0]))
    query.edit_message_text(text=message + " ", reply_markup=reply_markup)
def peekVersion(self, job, versionFile):
    if not os.path.exists(versionFile):
        logging.warning(f"Version file {versionFile} does not exist.")
        return

    with open(versionFile) as fp:
        verLine = fp.readline().strip()
        if verLine:
            job.version = verLine
            logging.info(f"PeekVersion: Version is {job.version}")
def like(id):
    # uncomment the following code to enforce authentication for liking
    # try:
    #     logging.info('checking user has logged in')
    #     auth.authorize(request)
    # except Exception:
    #     logging.info('user attempted to like a happening when not logged in')
    #     return redirect('/login')
    logging.info('user liking a happening')
    url = URL + '/happening/like/' + id
    likes = requests.get(url)
    return jsonify(likes.text)
def updateJobsList(self, jobs_ref):
    newJobsList = []
    for jobRef in jobs_ref:
        proj = self.projects[jobRef.get('projectId')]
        job = TKBuildJob.createFromFirebaseDict(proj, jobRef.id, jobRef)
        newJobsList.append(job)
        # TODO: wrap log_struct with something that can log to console too
        # self.logger.log_struct({'jobkey': job.jobKey, 'worksteps': job.worksteps})

    self.jobList = newJobsList
    logging.info(f"Updated jobs list (length {len(self.jobList)}).")
def shopify_products():
    shop = flask.request.args.get('shop')
    res = get_shopify_access_token(shop)
    if not res:
        res = {'status': 'access token not found'}
        response = flask.jsonify(res)
        response.status_code = 204
        return response
    shop_access_token = res

    # TODO: no need to get shop info every time.
    shop_info = get_shopify_shop_info(shop)

    if request.method == 'PUT':
        url = f'https://{shop}/admin/api/{API_VERSION}/products.json'
        logging.info(f'Receiving request for url {url}')
        headers = {"X-Shopify-Access-Token": shop_access_token}
        params = {'limit': MAX_SHOPIFY_RESULTS_LIMIT}
        res = requests.get(url, headers=headers, params=params)
        logging.info(
            f'Obtained shop information token {shop_access_token} for shop {shop}: {res.json()}'
        )
        data = res.json()
        products = data.get('products')
        if products:
            for product_json in products:
                product_id = product_json.get('id')
                sql_handler.save_product_info(shop, product_id, product_json)
            logging.info(f'Saved {len(products)} products for shop {shop}')
        else:
            logging.info('No products found')
    else:
        logging.info(f'Retrieving product information from shop {shop}')
        tags_count = sql_handler.get_product_tags_counts(shop)
        product_images = sql_handler.get_product_images(shop)
        data = {}
        tags_count_res = [{
            'vendor': row[0],
            'tags': row[1],
            'count': row[2]
        } for row in tags_count]
        images_res = [{
            'title': row[0],
            'image': row[1],
            'product_id': row[2]
        } for row in product_images]
        data['tags_count'] = tags_count_res
        data['product_images'] = images_res
    response = flask.jsonify(data)
    response.status_code = 200
    return response
def serverUpdate(self):
    logging.info(f"Agent update ... {self.updCount}")
    self.updCount += 1
    print(f" {len(self.jobList)} avail jobs:")

    # Report our status to the server
    self.updateCurrentStatus(AgentStatus.IDLE)

    # Check if there are any obsolete jobs, and delete them
    self.cleanupObsoleteJobs()

    # Check if there are any jobdirs that do not exist in the job list.
    # If so, clean up those job dirs.
    self.cleanupOldJobDirs()

    # Check if there are jobs we can do
    for job in self.jobList:
        proj = self.projects[job.projectId]

        # Ignore jobs with tags we can't match
        if not self.matchTags(job.tags, self.agentInfo.tags):
            print("Skipping job with tags ", job.tags, "our tags are ", self.agentInfo.tags)
            continue

        # Ignore jobs marked "RUN" ... this might be running on another node (todo) but
        # probably is just stale because firebase updates are not instant.
        if JobStatus.RUN in job.worksteps.values():
            logging.warning("Job is marked RUN?? but we're not running it.")
            # sys.exit(1)
            continue

        # If the job has work left to do
        if job.hasWorkRemaining(proj.workstepNames):
            print("job ", job, "has work left...")
            self.currentJob = job
            break
        else:
            print("No work remains", job, job.worksteps)

    # Did we find a job to run?
    if self.currentJob is None:
        logging.info("No matching jobs found to run.")
    else:
        # run the job
        self.runNextJobStep(self.currentJob)

        # clear the current job
        self.currentJob = None
def cleanupOldJobDirs(self):
    # Make a list of the jobkeys we have for easy lookup
    haveJobKeys = set()
    for job in self.jobList:
        haveJobKeys.add(job.jobKey)

    # Look in the project workdir for any jobdirs that
    # match the pattern for a jobdir
    for proj in self.projects.values():
        if not os.path.exists(proj.workDir):
            print(f"Workdir for {proj.projectId} does not exist, skipping.")
            continue

        for dir in os.listdir(proj.workDir):
            dsplit = dir.split("_")
            if len(dsplit) != 2:
                continue

            dirProj, jobKey = dsplit
            if dirProj != proj.projectId:
                continue

            if len(jobKey) != 20:
                continue

            # At this point we are pretty sure this is a work dir, and
            # can infer the jobkey from the workdir
            if jobKey in haveJobKeys:
                print("Nope this is an active job")
                continue

            # Also look for other dirs listed in cleanupDirs
            workDir = os.path.join(proj.workDir, dir)
            cleanupDirs = [workDir]
            workname = proj.projectId + "_" + jobKey
            for extraDir in proj.cleanupDirs:
                dir2 = self.replacePathVars2(extraDir, workDir, proj, None, workname)
                # Make sure there are no unexpanded vars, kind of a hack but
                if dir2.find("$") == -1:
                    cleanupDirs.append(dir2)

            for cleanDir in cleanupDirs:
                if os.path.exists(cleanDir):
                    logging.info(f"Cleaning up old workdir {cleanDir}")
                    self.cleanupDir(cleanDir)