def test_response_not_cached_for_req_with_no_cache(self, header_name, header_value):
    val1 = get_timestamp('/deals', extra_headers={header_name: header_value})
    val2 = get_timestamp('/deals')
    assert val1 != val2
def housenumbers_diff_last_modified(relation: areas.Relation) -> str:
    """Gets the update date for missing/additional housenumbers."""
    t_ref = util.get_timestamp(relation.get_files().get_ref_housenumbers_path())
    t_osm = util.get_timestamp(relation.get_files().get_osm_housenumbers_path())
    return webframe.format_timestamp(max(t_ref, t_osm))
def download(from_date_time, to_date_time):
    from_timestamp = util.get_timestamp(from_date_time)
    to_timestamp = util.get_timestamp(to_date_time)
    for timestamp in range(from_timestamp, to_timestamp + 1, 3600):
        date_time_str = util.get_date_time_str(timestamp)
        file_name = 'SUOh_' + date_time_str + '.PWV'
        url = 'https://www.suominet.ucar.edu/data/pwvConusHourly/' + file_name
        wget.download(url, out=file_name)
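The downloader above relies on a pair of util helpers that convert between a date-time string and Unix epoch seconds; their implementation is not shown. A minimal sketch consistent with these calls follows, where the names `get_timestamp` / `get_date_time_str`, the hourly date-time format, and UTC handling are all assumptions rather than the real module's behavior.

# Hypothetical sketch of the util helpers assumed by download(); the real
# module may use a different date-time format or time zone handling.
import calendar
import time

def get_timestamp(date_time_str, fmt='%Y.%m.%d.%H'):
    # Parse a date-time string (assumed UTC) into Unix epoch seconds.
    return calendar.timegm(time.strptime(date_time_str, fmt))

def get_date_time_str(timestamp, fmt='%Y.%m.%d.%H'):
    # Format Unix epoch seconds back into the same date-time string form.
    return time.strftime(fmt, time.gmtime(timestamp))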
def ref_housenumbers_last_modified(relations: areas.Relations, name: str) -> str:
    """Gets the update date for missing house numbers."""
    relation = relations.get_relation(name)
    t_ref = util.get_timestamp(relation.get_files().get_ref_housenumbers_path())
    t_housenumbers = util.get_timestamp(relation.get_files().get_osm_housenumbers_path())
    return webframe.format_timestamp(max(t_ref, t_housenumbers))
def add_answer_by_question_id(cursor, new_answer):
    submission_time = util.get_timestamp()
    new_answer['submission_time'] = submission_time
    new_answer['vote_number'] = 0
    query = """
        INSERT INTO answer (submission_time, vote_number, question_id, message, image)
        VALUES (%(submission_time)s, %(vote_number)s, %(question_id)s, %(message)s, %(image)s)
        """
    cursor.execute(query, new_answer)
def main():
    print('--- get_timestamp(today)')
    for i in range(100):
        now = util.get_timestamp(datetime.datetime.today())
        print(str(now))
    print('--- get_timestamp()')
    for i in range(100):
        now = util.get_timestamp()
        print(str(now))
    print('--- now()')
    for i in range(100):
        now = util.now()
        print(str(now))
def count_users(self, timestamp=None):
    if timestamp is None:
        timestamp = get_timestamp()
    res = self._execute_sql(_COUNT_USER_RECORDS, timestamp=timestamp)
    row = res.fetchone()
    res.close()
    return row[0]
def allocate_user(self, email, generation=0, client_state='',
                  keys_changed_at=0, node=None, timestamp=None):
    if timestamp is None:
        timestamp = get_timestamp()
    if node is None:
        nodeid, node = self.get_best_node()
    else:
        nodeid = self.get_node_id(node)
    params = {
        'service': self._get_service_id(SERVICE_NAME),
        'email': email,
        'nodeid': nodeid,
        'generation': generation,
        'keys_changed_at': keys_changed_at,
        'client_state': client_state,
        'timestamp': timestamp,
    }
    res = self._execute_sql(_CREATE_USER_RECORD, **params)
    return {
        'email': email,
        'uid': res.lastrowid,
        'node': node,
        'generation': generation,
        'keys_changed_at': keys_changed_at,
        'client_state': client_state,
        'old_client_states': {},
        'first_seen_at': timestamp,
    }
def show_tweets(self, tweets):
    count = 1
    arr_tweets = tweets[:]
    arr_tweets.reverse()
    for tweet in arr_tweets:
        timestamp = util.get_timestamp(tweet)
        if tweet.has_key('user'):
            user = tweet['user']['screen_name']
        else:
            user = tweet['sender']['screen_name']
        client = util.detect_client(tweet)
        if client:
            header = "%d. @%s - %s desde %s" % (count, user, timestamp, client)
        else:
            header = "%d. @%s - %s" % (count, user, timestamp)
        print header
        print '-' * len(header)
        print tweet['text']
        print
        count += 1
    self.show_rate_limits()
def test_count_users(self):
    self.database.allocate_user('*****@*****.**')
    self.database.allocate_user('*****@*****.**')
    self.database.allocate_user('*****@*****.**')
    timestamp = get_timestamp()
    filename = '/tmp/' + str(uuid.uuid4())
    try:
        count_users_script(args=['--output', filename, '--timestamp', str(timestamp)])
        with open(filename) as f:
            info = json.loads(f.readline())
        self.assertEqual(info['total_users'], 3)
        self.assertEqual(info['op'], 'sync_count_users')
    finally:
        os.remove(filename)
    filename = '/tmp/' + str(uuid.uuid4())
    try:
        args = ['--output', filename, '--timestamp', str(timestamp - 10000)]
        count_users_script(args=args)
        with open(filename) as f:
            info = json.loads(f.readline())
        self.assertEqual(info['total_users'], 0)
        self.assertEqual(info['op'], 'sync_count_users')
    finally:
        os.remove(filename)
def archive_success_make_subrecord(record):
    '''
    Makes the 'submission' subrecord.
    Takes the record.
    Returns the subrecord value (dict).
    '''
    id1 = record.get('_id')
    subrecord = {}
    for key in ['job_id', 'when_ready_for_pbs', 'when_archival_queued',
                'when_archival_started']:
        if key not in record:
            raise Exception(util.gen_msg(
                f"Expected key '{key}' not found in record w/ id '{id1}'."))
        if not record[key]:
            raise Exception(util.gen_msg(
                f"Unexpected value '{record[key]}' for key '{key}' in record w/ id '{id1}'."))
        subrecord[key] = record[key]
    subrecord['when_archival_completed'] = util.get_timestamp()
    return subrecord
def route_add_question():
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # If the user does not select a file, the browser may also
        # submit an empty part without a filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            values = [
                util.get_timestamp(), '0', '0',
                request.form['title'], request.form['message'], filename
            ]
            data_manager.add_question(values)
            return redirect('/')
    else:
        return render_template('add-question.html', id=None, question=None)
def acc_file(filename):
    # Creates an ACC csv file from recorded data in a .dat format.
    savefile = 'acc_save_file.csv'
    f = open(savefile, 'a', newline='')
    writer = csv.writer(f)
    payloads = util.payload_retv(filename, 'ACC')
    for i in range(len(payloads)):
        timestamp = util.get_timestamp(payloads[i])
        curr_milli = timestamp[3]
        sample_set = util.get_sample_set(payloads[i])
        sample_list = util.parser_acc(sample_set)
        # Writes the data to the file and increments the milliseconds on the timestamp.
        for j in range(len(sample_list)):
            curr_milli = curr_milli + 20
            millisec = util.millis(curr_milli)
            time = timestamp[0:3] + [millisec]
            curr_sample = sample_list[j]
            x_sample = curr_sample[0]
            y_sample = curr_sample[1]
            z_sample = curr_sample[2]
            writer.writerow([time, x_sample, y_sample, z_sample])
def route_add_comment(what, id_):
    if request.method == 'GET':
        question = None
        answer = None
        if what == 'question':
            question = data_manager.get_question_by_id(id_)
        elif what == 'answer':
            answer = data_manager.get_answer_by_id(id_)
        return render_template('comment.html', question=question, answer=answer, comment=None)
    elif request.method == 'POST':
        answer_id = None
        question_id = None
        if what == 'question':
            question_id = id_
        elif what == 'answer':
            answer_id = id_
        values = [
            question_id, answer_id, request.form['message'],
            util.get_timestamp(), '0'
        ]
        data_manager.add_comment(values)
        if answer_id:
            question_id = data_manager.get_question_id_by_answer_id(answer_id)
        return redirect(f'/question/{question_id}')
def poll():
    while True:
        try:
            timestamp = get_timestamp()
            filename = u"data/coinbase_prices_ETH-EUR_{}".format(
                timestamp_to_Y_m_d_str(timestamp))
            spot = get_spot_price()
            buy = get_buy_price()
            sell = get_sell_price()
            log_spot_buy_sell_price(filename, timestamp, spot, buy, sell)
            filename = u"data/coinbase_prices_ETH-USD_{}".format(
                timestamp_to_Y_m_d_str(timestamp))
            spot = get_spot_price(currency="USD")
            buy = get_buy_price(currency="USD")
            sell = get_sell_price(currency="USD")
            log_spot_buy_sell_price(filename, timestamp, spot, buy, sell)
            sleep(60)  # 1 minute
        except Exception as e:
            print(e)
def _fetch_lorem(self):
    url = "http://loremricksum.com/api/?paragraphs=1"
    self.logger.info(f"Fetching lorem ipsum from: {url}")
    message = Message()
    response = requests.get(url)
    if 200 <= response.status_code < 300:
        self.logger.debug("Response ok, mapping data")
        self.logger.debug(f"Got: {response.text}")
        data = json.loads(response.text)
        self.logger.debug(f"Response: {data}")
        message.content = data["data"][0]
        message.timeStamp = get_timestamp()
        self.logger.debug(f"Returning: '{message}'")
        return message
    else:
        self.logger.error("Failed to get data, returning None!")
        return None
def route_add_answer(question_id):
    if request.method == 'GET':
        question = data_manager.get_question_by_id(question_id)
        answers = data_manager.get_answers_by_question_id(question_id)
        return render_template('answer.html', question=question, answers=answers)
    elif request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # If the user does not select a file, the browser may also
        # submit an empty part without a filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            values = [
                util.get_timestamp(), '0', question_id,
                request.form['message'], filename
            ]
            data_manager.add_answer(values)
            return redirect(f'/question/{question_id}')
def process_file(filename, filepath):
    # TODO: assume that cuckoo is already running. In future, maybe launch with the server
    # TODO: support for different cuckoo VMs/OSes
    # processor_modules = ['Strace_Analyzer()']
    # print 'processing'
    # for analyzer_str in processor_modules:
    #     analyzer = eval(analyzer_str)
    #     print 'evalled'
    #     output = analyzer.run_analyzer(filepath)
    #     print 'eval finished'
    #     updateTableEntry(filename, output)
    # cuckoo_modules = ['classloader']
    # here are a list of classes that process the results of the cuckoo execution
    # each class implements a 'process' function that takes the result number and cuckoo
    # dir as arguments and return an AnalysisOutput object for storage
    # cuckoo_postprocessors = ['Classloader_Processor()']
    # SUBMISSION_PATH = '/tasks/create/file'
    # for cuckoo_module in cuckoo_modules:
    #     cuckoo_call_tokens = [CUCKOO_DIR + '/utils/submit.py', '--package', cuckoo_module, filepath]
    #     cuckoo_call_str = " ".join(cuckoo_call_tokens)
    #     cuckoo_shell_args = shlex.split(cuckoo_call_str)
    #     # this submits to the running cuckoo instance if there is one
    #     subprocess.call(cuckoo_shell_args)
    # Instead, use the REST API. assumes the cuckoo REST server is running
    #     with open(filepath, 'rb') as sample:
    #         multipart_file = {"file:": (filename, sample)}
    #         request = requests.post(CUCKOO_API + SUBMISSION_PATH, files=multipart_file, package=cuckoo_module)
    #     json_decoder = json.JSONDecoder()
    #     task_id = json_decoder.decode(request.text)["task_id"]
    #     updateTableEntry(filename, None)
    task = AnalysisTask(filename, filepath, CUCKOO_MODULES.keys(), get_timestamp())
    upload_queue.put(task)
    return task.file_hash
def __init__(self, task):
    # Task (environment) information
    self.task = task  # should contain observation_space and action_space
    self.state_size = np.prod(3)
    self.state_range = self.task.observation_space.high[0:3] - self.task.observation_space.low[0:3]
    self.action_size = np.prod(3)
    self.action_range = self.task.action_space.high[0:3] - self.task.action_space.low[0:3]

    # Policy parameters
    self.w = np.random.normal(
        size=(self.state_size, self.action_size),  # weights for simple linear policy: state_space x action_space
        scale=(self.action_range / (2 * self.state_size)).reshape(1, -1))  # start producing actions in a decent range

    # Score tracker and learning parameters
    self.best_w = None
    self.best_score = -np.inf
    self.noise_scale = 0.1
    self.episode_num = 1
    self.stats_filename = os.path.join(
        util.get_param('out'),
        "stats_{}.csv".format(util.get_timestamp()))  # path to CSV file
    self.stats_columns = ['episode', 'total_reward']  # specify columns to save

    # Episode variables
    self.reset_episode_vars()
def test_get_timestamp():
    s = ''
    timestamp = util.get_timestamp()
    s += str(timestamp) + '\n'
    dt = util.get_datetime('20190102123456987654', '%Y%m%d%H%M%S%f')
    timestamp = util.get_timestamp(dt)
    s += str(timestamp) + '\n'
    timestamp = util.get_timestamp('2019-01-02 12:34:56.789')
    s += str(timestamp) + '\n'
    timestamp = util.get_timestamp('197001020900', '%Y%m%d%H%M')
    s += str(timestamp) + '\n'
    return s
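The test above exercises several call signatures: no argument, a datetime object, a string with an implied default format, and a string with an explicit format. The util module itself is not shown here; a minimal sketch of a helper that would satisfy these calls follows, with the default format, the return unit (epoch seconds as a float), and local-time handling all being assumptions.

# Hypothetical sketch only: the real util.get_timestamp may differ in its default
# format, time zone handling, and whether it returns seconds or milliseconds.
import datetime

def get_timestamp(value=None, fmt='%Y-%m-%d %H:%M:%S.%f'):
    # No argument: current time as Unix epoch seconds.
    if value is None:
        return datetime.datetime.now().timestamp()
    # datetime object: convert directly.
    if isinstance(value, datetime.datetime):
        return value.timestamp()
    # String: parse with the given (or default) format, then convert.
    return datetime.datetime.strptime(value, fmt).timestamp()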
def test_purge_cassandra(self):
    val1_before = get_timestamp('/timestamp/purge/1')
    val2_before = get_timestamp('/timestamp/purge/2')
    val3_before = get_timestamp('/long_ttl/no-purge')

    response = purge_resource({'namespace': 'backend.main', 'cache_name': 'timestamp'})
    assert response == 'Purged namespace: backend.main & cache_name: timestamp'

    val1_after = get_timestamp('/timestamp/purge/1')
    val2_after = get_timestamp('/timestamp/purge/2')
    val3_after, miss_values = get_timestamp_until_hit('/long_ttl/no-purge')

    # Both val1 and val2 should be deleted after the purge;
    # however, val3 should remain as it was before the purge.
    assert val1_before != val1_after
    assert val2_before != val2_after
    assert val3_before in [val3_after] + miss_values
def grab_box_coords_for_timestep(fname, label_df, time_step):
    ts = get_timestamp(fname)
    final_df = label_df.ix[(label_df.month == ts.month)
                           & (label_df.day == ts.day)
                           & (label_df.year == ts.year)
                           & (label_df.time_step == time_step)].copy()
    final_df = final_df[["xmin", "xmax", "ymin", "ymax", "category"]]
    return final_df
def retire_user(self, email):
    now = get_timestamp()
    params = {
        'email': email,
        'timestamp': now,
        'generation': MAX_GENERATION,
    }
    # Pass through explicit engine to help with sharded implementation,
    # since we can't shard by service name here.
    res = self._execute_sql(_RETIRE_USER_RECORDS, **params)
    res.close()
def retrieve_processing(args, user_dict, mongo_collection):
    '''
    Changes status of retrieval job matching args['obj_id'] and args['job_id']
    from 'queued' to 'processing'.

    Takes:
        args (dict) with obj_id (str), job_id (str);
        user_dict (dict): not used;
        mongo_collection: MongoDB database.collection
    Returns:
        job_id (str)

    MongoDB record changed from:
        "retrievals": [{
            "job_id": "8649.ctarchive.jax.org",
            "retrieval_status": "queued",
            "when_ready_for_pbs": "2020-01-02 07:34:38 EDT-0400",
            "when_retrieval_queued": "2020-01-02 07:34:39 EDT-0400",
            "when_retrieval_started": null,
            "when_retrieval_completed": null
        }]
    to:
        "retrievals": [{
            "job_id": "8649.ctarchive.jax.org",
           *"retrieval_status": "processing",
            "when_ready_for_pbs": "2020-01-02 07:34:38 EDT-0400",
            "when_retrieval_queued": "2020-01-02 07:34:39 EDT-0400",
           *"when_retrieval_started": "2020-01-02 07:36:25 EDT-0400",
            "when_retrieval_completed": null
        }]
    '''
    expected_status = 'queued'
    obj_id, job_id = get_args_objid_jobid(args)
    condition = {'_id': obj_id}
    cursor = mongo_collection.find(condition)
    if cursor.count() != 1:
        raise Exception(util.gen_msg(
            f"{cursor.count()} records match {condition}.\n"))
    idx = get_retrievals_idx(job_id, expected_status, cursor[0])
    prefix = 'retrievals.' + str(idx)
    result = mongo_collection.update_one(
        {'_id': obj_id},
        {'$set': {
            f'{prefix}.retrieval_status': 'processing',
            f'{prefix}.when_retrieval_started': util.get_timestamp(),
        }})
    if not result.acknowledged:
        raise Exception(util.gen_msg(
            f"MongoDB update on _id '{obj_id}' not acknowledged."))
    return job_id
def get_token():
    post_data = {"timestamp": str(util.get_timestamp())}
    response = session.post(
        url=util.get_url("xsxkapp/sys/xsxkapp/student/4/vcode.do"),
        headers=headers,
        data=post_data)
    # Note: the response body here is JSON text;
    # convert it to an object with the json module.
    obj = json.loads(response.text)
    return obj['data']['token']
def write_question(cursor, question):
    question['vote_number'] = 0
    question['view_number'] = 0
    question['submission_time'] = util.get_timestamp()
    query = """
        INSERT INTO question (submission_time, view_number, vote_number, title, message, image)
        VALUES (%(submission_time)s, %(view_number)s, %(vote_number)s, %(title)s, %(message)s, %(image)s)
        """
    cursor.execute(query, question)
def replace_user_records(self, email, timestamp=None):
    """Mark all existing records for a user as replaced."""
    if timestamp is None:
        timestamp = get_timestamp()
    params = {
        'service': self._get_service_id(SERVICE_NAME),
        'email': email,
        'timestamp': timestamp,
    }
    res = self._execute_sql(_REPLACE_USER_RECORDS, **params)
    res.close()
def replace_user_record(self, uid, timestamp=None):
    """Mark an existing service record as replaced."""
    if timestamp is None:
        timestamp = get_timestamp()
    params = {
        'service': self._get_service_id(SERVICE_NAME),
        'uid': uid,
        'timestamp': timestamp,
    }
    res = self._execute_sql(_REPLACE_USER_RECORD, **params)
    res.close()
def test_pragma_no_cache(self, header_name, header_value):
    cached_value, miss_values = get_timestamp_until_hit('/timestamp/cached')
    assert cached_value in miss_values

    # Should fetch the resource from the master
    no_cache_value = get_timestamp('/timestamp/cached',
                                   extra_headers={header_name: header_value})
    assert no_cache_value != cached_value

    # Check that the new response was cached
    hit_value, miss_values = get_timestamp_until_hit('/timestamp/cached')
    assert hit_value in [no_cache_value] + miss_values
def timestamp_filter(self, line, start_time, stop_time):
    # Controller logs may start earlier than the transcoder.log start time; we need another
    # way to find the controller start time, or to apply an offset (5 minutes?) before it
    # to get the controller channel start time.
    log_timestamp = util.get_timestamp(line)
    if log_timestamp != "":
        if start_time <= log_timestamp <= stop_time:
            self.timestamp_filter_rv = True
        else:
            self.timestamp_filter_rv = False
    # Return the previously saved state if there is no timestamp in the log line.
    return self.timestamp_filter_rv
def test_expires_after_ttl(self):
    """We set the expiry times to > 1 sec because the Docker image we use for
    Cassandra acceptance testing does not run an in-memory Cassandra.
    This needs to be fixed with PERF-1679."""
    cached_value, miss_values = get_timestamp_until_hit('/timestamp/ttl')
    assert cached_value in miss_values

    # The cache entry should be expired after 2 seconds
    time.sleep(2)
    new_value = get_timestamp('/timestamp/ttl')
    assert new_value != cached_value
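Several of the caching tests above (test_purge_cassandra, test_pragma_no_cache, test_expires_after_ttl) rely on a get_timestamp_until_hit helper that is not shown in this collection. Based on how its return value is used (a hit value plus the list of values seen on cache misses), a plausible sketch follows; the base URL, the 'X-Cache' status header, and the retry limit are assumptions, not the real test suite's API.

# Hypothetical test helpers: poll an endpoint until a response is served from
# cache, returning the cached value plus all values seen on misses.
# BASE_URL, the 'X-Cache' header name, and max_attempts are assumptions.
import requests

BASE_URL = 'http://localhost:8080'

def get_timestamp(path, extra_headers=None):
    # Fetch the timestamp resource once and return its body.
    response = requests.get(BASE_URL + path, headers=extra_headers or {})
    return response.text

def get_timestamp_until_hit(path, max_attempts=10):
    # Keep requesting until a response is marked as a cache hit.
    miss_values = []
    for _ in range(max_attempts):
        response = requests.get(BASE_URL + path)
        if response.headers.get('X-Cache', '').lower().startswith('hit'):
            return response.text, miss_values
        miss_values.append(response.text)
    raise AssertionError(f'No cache hit for {path} after {max_attempts} attempts')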
def dprint(string):
    """Print debugging info"""
    if const.print_debug:
        timestamp = get_timestamp()
        print "DEBUG [%s]: %s" % (timestamp, string)
def cuckoo_task_handler_process_task(task_id, file_hash, module):
    print 'starting task handler watcher process'
    VIEW_PATH = "/tasks/view/"
    view_url = CUCKOO_API + VIEW_PATH + str(task_id)
    task_finished = False
    json_decoder = json.JSONDecoder()
    while task_finished is False:
        request = requests.get(view_url)
        # print 'request text:\n' + str(request.text)
        data = json_decoder.decode(request.text)
        # print 'json data:\n' + str(data)
        if data['task']['status'] == 'reported':
            task_finished = True
        else:
            sleep(1)
    print 'task has \'reported\' state'
    REPORT_PATH = "/tasks/report/"
    report_url = CUCKOO_API + REPORT_PATH + str(task_id) + "/all"
    request = requests.get(report_url)
    report = request.content
    report64 = base64.b64encode(report)
    report64bin = bson.binary.Binary(report64)
    local_mongo = pymongo.MongoClient(MONGOURI)
    local_db = local_mongo.minecraft
    update_record = {"file_hash": file_hash}
    update_request = {"$set": {str(module) + ".finished": "True",
                               str(module) + ".report": report64bin,
                               str(module) + ".finished_timestamp": get_timestamp()}}
    result = local_db.mods.update_one(update_record, update_request)
    print 'updated record with the report data'
    # Now, check if all of the other modules have been completed.
    matching_mods = local_db.mods.find({"file_hash": file_hash})
    if matching_mods.count() > 1:
        # TODO: handle this; it should not occur where we have multiple entries with the same hash.
        raise ValueError
    elif matching_mods.count() == 0:
        # TODO: handle if there is no matching record - most likely will not happen, but might?
        # For now, just give up, since we have no way of getting the mod file without a record.
        print 'could not find record for hash: ' + str(file_hash)
    else:
        print 'got a matching record in watcher process'
        mod = matching_mods[0]
        running_mods = mod['running_modules']
        all_finished = True
        for module in running_mods:
            if mod[module]['finished'] == 'False':
                all_finished = False
                break
        if all_finished:
            print 'all modules finished'
            message = "{{'file_hash':'{0}'}}".format(file_hash)
            connection = pika.BlockingConnection(pika.ConnectionParameters(host=RABBITMQ))
            channel = connection.channel()
            channel.queue_declare(queue='task_finished')
            channel.basic_publish(exchange='',
                                  routing_key='task_finished',
                                  body=message,
                                  properties=pika.BasicProperties(
                                      delivery_mode=2,  # make message persistent
                                  ))
            print 'wrote a task finished message to rabbitmq'