def get_new_relic_service_url(service_name, cluster_name):
    """
    Get the New Relic application URL for a given service.
    Assumes any one of the tasks in the service reports to the same New Relic 'app'.
    :param service_name: str. Name of the service
    :param cluster_name: str. Name of the cluster the service is in
    :return: str. URL of the New Relic page for the app
    """
    try:
        task_ids = ecs_api.get_task_ids_from_service(service_name, cluster_name)
    except Exception:
        raise NewRelicAPIException
    else:
        # Take the first task ID we found, since we only need one
        task_id = task_ids[0]
    logger.info('Found task_id {} for service {}'.format(task_id, service_name))
    resultset = db.search_domain(
        'select * from `ecs_id_mapper_hash` where task_id="{task_id}" '
        'and desired_status="RUNNING"'.format(task_id=task_id),
        'ecs_id_mapper_hash')
    try:
        r = resultset.next()
        new_relic_url = r['new_relic_url']
    except StopIteration:
        logger.info('Unable to find task {} details in our database'.format(task_id))
        raise NewRelicAPIException
    except KeyError:
        # The record exists but has no URL cached yet; fetch it and store it back
        new_relic_url = get_new_relic_app_instance_url(r['container_id'])
        db.put(r.name, {"new_relic_url": new_relic_url}, settings.hash_schema, replace=True)
    return new_relic_url.split('_')[0]
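# A minimal usage sketch for the function above; the service and cluster
# names are placeholders, not values from the original code.
try:
    url = get_new_relic_service_url('my-service', 'my-cluster')
    print('New Relic app page: {}'.format(url))
except NewRelicAPIException:
    print('Could not resolve a New Relic app for that service')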
def register():
    if request.method == 'GET':
        return render_template('register.html')
    elif request.method == 'POST':
        form = forms.AccountForm(request.form)
        if not form.validate():
            return render_template('error.html', message='Invalid registration'), 400
        username, password = form.username.data.lower(), form.password.data
        if db.has(username):
            return render_template('error.html', message='User already exists!'), 400
        db.put(
            username, {
                'tasks': [],
                'password': bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode('utf-8')
            })
        session['username'] = username
        return redirect('/tasks')
def get_newrelic_url_by_task_id(task_id):
    """
    Get the New Relic URL for a given container. This will be the app-instance-specific
    URL for New Relic.
    :param task_id: Task ID, a uuid-like string generated by ECS for each instance of a task
    :return: str.
    """
    resultset = db.search_domain(
        'select * from `ecs_id_mapper_hash` where task_id="{task_id}"'.format(task_id=task_id),
        'ecs_id_mapper_hash')
    try:
        d = resultset.next()
        new_relic_url = d['new_relic_url']
        if request.args.get('redir') and request.args.get('redir').lower() == "true":
            return redirect(new_relic_url, 302)
        else:
            return new_relic_url
    except StopIteration:
        abort(404)
    except KeyError:
        # We don't have the New Relic URL yet
        try:
            logger.info('NR URL not found in DB. Querying NR API')
            new_relic_url = new_relic_url_generator.get_new_relic_app_instance_url(d['container_id'])
            db.put(d.name, {"new_relic_url": new_relic_url}, settings.hash_schema, replace=True)
            if request.args.get('redir') and request.args.get('redir').lower() == "true":
                return redirect(new_relic_url, 302)
            else:
                return new_relic_url
        except KeyError:
            logger.error('Unable to find container id for task_id {}'.format(task_id))
            abort(404, 'Unable to find container id for task_id {}'.format(task_id))
        except new_relic_url_generator.NewRelicAPIException:
            logger.error('Invalid response from New Relic API')
            abort(404, 'Unable to find New Relic app for that container/service.')
def edit(task_id):
    if 'username' not in session:
        return redirect('/')
    try:
        task_id = int(task_id)
    except ValueError:
        return render_template('error.html', message='Invalid task'), 400
    user = db.get(session['username'])
    task = next((task for task in user['tasks'] if task['id'] == task_id), None)
    if task is None:
        return render_template('error.html', message='Task not found'), 404
    if request.method == 'GET':
        return render_template('edit.html', id=task['id'])
    elif request.method == 'POST':
        form = forms.EditForm(request.form)
        if not form.validate():
            return render_template('error.html', message='Invalid edit'), 400
        # Only overwrite attributes that were actually supplied in the form
        for attribute in ['title', 'content', 'priority']:
            if form[attribute].data:
                task[attribute] = form[attribute].data
        db.put(session['username'], user)
        return redirect('/tasks')
def add():
    if 'username' not in session:
        return redirect('/')
    if request.method == 'GET':
        return render_template('add.html')
    elif request.method == 'POST':
        form = forms.TaskForm(request.form)
        if not form.validate():
            return render_template('error.html', message='Invalid task'), 400
        user = db.get(session['username'])
        if len(user['tasks']) >= 5:
            return render_template('error.html', message='Maximum task limit reached!'), 400
        task = {
            'title': form.title.data,
            'content': form.content.data,
            'priority': form.priority.data,
            'id': len(user['tasks'])
        }
        user['tasks'].append(task)
        db.put(session['username'], user)
        return redirect('/tasks')
def write(self): db.put( {"qid": self.qid, "qstring": self.qstring, "point_val": self.point_val, "answers": self.answers, "child_questions": self.child_questions, "last_updated": self.last_updated}, table_name='questions', overwrite=True)
def write(self):
    data = {
        'uid': self.uid,
        'point_count': self.point_count,
        'last_question': self.last_question,
        'demographics': self.demographics
    }
    db.put(data, table_name='users', overwrite=True)
def update_nr_URL():
    for result in get_map_entries():
        nr_url = get_new_relic_app_instance_url(result['container_id'])
        _result = result
        _result['newrelic_url'] = nr_url
        # 'is not' compares identity, not equality; use != for the string check
        if len(nr_url) > 0 and nr_url != 'unknown':
            logger.info('Updating record with New Relic URL')
            logger.debug('{} {} ecs_id_mapper_hash'.format(result.name, _result))
            db.put(result.name, _result, 'ecs_id_mapper_hash')
        else:
            logger.info('New Relic URL not found')
def dbtest():
    try:
        indigo.server.log("db is " + str(db))
        indigo.server.log(str(dir(db)))
        indigo.server.log(str(type(db.GLOBALSETTINGS)))
        indigo.server.log(str(db.get("mykey")))
        db.put("mykey", "1")
        indigo.server.log(str(db.get("mykey")))
        db.put("mykey", "2")
        indigo.server.log(str(db.get("mykey")))
    except Exception as e:
        return str(e) + "::: " + str(db)
def run(url):
    """
    Consume a URL and orchestrate the classification process
    :param url:
    :return: classification object
    """
    retresult = db.get(url)
    if not retresult:
        path_to_file = download.image(url)
        prepared_obj = preprocess.prepare_image(path_to_file)
        retresult = caffenet.classify(prepared_obj)
        db.put(url, path_to_file, retresult)
    return retresult
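# A minimal usage sketch for run(); the URL is a placeholder. The first call
# downloads, preprocesses, classifies, and caches the result; a repeat call
# for the same URL is served straight from db.get.
result = run('https://example.com/cat.jpg')
print(result)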
def do_search(api, db, keyword_query, geocode, from_id, to_id, next_id):
    # Alternative streaming endpoint:
    # r = api.request('statuses/filter', {'locations': '112.5,-37.5,154.1,-12.8'})
    next_id = -1
    cur_id = -1
    if from_id == -1:
        from_id = None
    if to_id == -1:
        to_id = 0
    count = 0
    pager = TwitterPager(api, 'search/tweets',
                         {'q': keyword_query, 'geocode': geocode, 'count': '100',
                          'max_id': str(from_id), 'since_id': str(to_id)})
    while True:
        try:
            for item in pager.get_iterator():
                if 'text' in item:
                    cur_id = int(item["id"])
                    # If next_id != -1 we are running in re-start mode, so don't reset it.
                    # Otherwise, update next_id when the first item of this iteration
                    # arrives; the next iteration's to_id will be set to this next_id.
                    if next_id == -1:
                        next_id = cur_id
                    if cur_id <= to_id:
                        break
                    info = get_dict_object_from_tweet(item)
                    if not info:
                        print("Error parsing the tweet, ignoring it")
                        continue
                    # Put the data in the db
                    db.put(info)
                    count += 1
                    if count % 1000 == 0:
                        print(count)
                    # Persist the progress to ensure we can resume the harvester from here
                    progress.update(cur_id, to_id, next_id)
                elif 'message' in item:
                    # Something needs to be fixed before re-connecting
                    raise Exception(item['message'])
            return count
        except TwitterAPI.TwitterError.TwitterRequestError as e:
            if e.status_code == 429:
                print("Too Many Requests, now sleeping...")
                sleep(60)
            else:
                raise
def report_event():
    """
    Update DB with a new container task state-change event
    :return: str. 'true' if successful
    """
    if not request.json:
        logger.error('received non-json data')
        abort(400)
    logger.info('Received event from {}'.format(request.remote_addr))
    logger.debug('Event payload {}'.format(request.json))
    event_id = request.json['event_id']
    event = request.json['event']
    timestamp = request.json['timestamp']
    db.put(str(timestamp) + "_" + str(event_id),
           {'container_id': event_id, 'event_action': event, 'timestamp': timestamp},
           'ecs_id_mapper_events')
    return 'true'
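# A client-side sketch of the payload this endpoint expects, using the
# 'requests' library. The route and host are assumptions (they are not shown
# above); only the three JSON keys come from the handler itself.
import time

import requests

payload = {
    'event_id': 'abc123',        # example container/task identifier
    'event': 'STOPPED',          # example state-change action
    'timestamp': int(time.time()),
}
resp = requests.post('http://localhost:5000/report_event', json=payload)
print(resp.status_code, resp.text)  # expect 200 and body 'true' on success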
def home():
    # Handle GET
    if request.method == 'GET':
        return render_template('index.html')
    # Handle POST and PUT
    else:
        # Get the reading
        json_data = json.loads(request.data)
        # Print the readings
        print(str(json_data))
        # Store it in the DB
        db.put(json_data)
        # Send okie-dokie response
        return Response(status=200)
def main(filename, depth=5):
    print('Reading file...')
    with open(filename, 'r') as file:
        words = file.read().split()
    if len(words) < depth + 1:
        depth = len(words) - 1
    print('Establishing a connection with the database...')
    db.connect()
    for curr_depth in range(1, depth + 1):
        print('Processing depth %d...' % curr_depth)
        # Slide a window of curr_depth words over the text, storing each
        # prefix -> successor pair
        left = words[:curr_depth]
        for right in words[curr_depth:]:
            db.put(left, right)
            left = left[1:] + [right]
    print('Finalizing...')
    db.close()
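# To make db.put(left, right) concrete: a toy in-memory stand-in for the db
# module, under the assumption that it maps each word-prefix to the words
# that follow it (the real backing store is not shown above).
from collections import defaultdict

class InMemoryDB:
    def __init__(self):
        # Keys are tuples of prefix words, values are lists of successor words
        self.table = defaultdict(list)

    def connect(self):
        pass

    def put(self, left, right):
        # 'left' arrives as a list, so convert it to a hashable tuple
        self.table[tuple(left)].append(right)

    def close(self):
        pass

db = InMemoryDB()
for left, right in [(['the'], 'cat'), (['the'], 'dog'), (['cat', 'sat'], 'on')]:
    db.put(left, right)
print(db.table[('the',)])  # ['cat', 'dog']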
def aggregateStats():
    # Collect per-account counts, filtering out low-activity records
    ncollabs, nsent, nrcv = [], [], []
    for stat in db_direct.statistics.find():
        if 'ncollaborators' in stat and stat['ncollaborators'] >= 10:
            ncollabs.append(stat['ncollaborators'])
        if 'nsent' in stat and stat['nsent'] > 100:
            nsent.append(stat['nsent'])
        if 'nrcv' in stat and stat['nrcv'] > 100:
            nrcv.append(stat['nrcv'])
    ncollabs.sort()
    nsent.sort()
    nrcv.sort()
    db.put('ncollaborators', ncollabs)
    db.put('nsent', nsent)
    db.put('nrcv', nrcv)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import db
from data_models import Quote, Tag

# Note: this rebinds the module name 'db' to a store instance
db = db.SQLQuoteStore()

for i in range(100):
    quote = Quote("this is a quote %s" % (i))
    quote.tags = [Tag("foo"), Tag("bar")]
    db.put(quote)
yt = build("youtube", "v3", developerKey=api_key) # https://developers.google.com/youtube/v3/docs/videos/list op = yt.videos().list(part='statistics', id=video_id) def extract_stats(result): # result here is something like # {'kind': 'youtube#videoListResponse', 'etag': 'E6-Bzb4X_fDK5xKsNzx6LWpuOdY', 'items': [{'kind': 'youtube#video', 'etag': 'CTSoVMDunhrUb-G04W0MqgsMtFI', 'id': 'drvH4XbZoPs', 'statistics': {'viewCount': '281260', 'likeCount': '2271', 'dislikeCount': '29', 'favoriteCount': '0', 'commentCount': '50'}}], 'pageInfo': {'totalResults': 1, 'resultsPerPage': 1}} return result['items'][0]['statistics'] import db db_conn = db.open() while True: result = op.execute() stats = extract_stats(result) now = datetime.datetime.now(datetime.timezone.utc) print("{}, {}".format(now, stats['viewCount'])) db.put(db_conn, now, video_id, stats) # it seems that the statistics are updated every 5 minutes. time.sleep(5 * 60)
for filename in glob.glob(os.path.join(path, '*.json')):
    files_count += 1
    with open(filename) as f:
        # Read data
        data_str = f.read()
    # Replace 'vin' with 'vehicleid' (column names)
    data_str = data_str.replace("\"vin\":", "\"vehicleid\":")
    # Now load the modified string as a JSON document
    data = json.loads(data_str)
    # For each JSON element...
    for element in data:
        readings_count += 1
        try:
            db.put(element)
        except IntegrityError:
            err_count += 1
            db.getConn().rollback()
        except Exception:
            print("Error on element {}".format(element))
            raise

print("Processed {} readings across {} files with {} errors.".format(
    readings_count, files_count, err_count))
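# The loop above assumes counters and imports defined earlier in the script;
# a plausible preamble (the driver module is an assumption -- IntegrityError
# could equally come from sqlalchemy or another DB-API implementation):
import glob
import json
import os

from psycopg2 import IntegrityError  # assumed Postgres-backed store

path = 'data/'  # placeholder directory containing the JSON reading files
files_count = 0
readings_count = 0
err_count = 0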