def __filter(n):
    current_time = points[n]['time']
    if current_time is None:
        return 0.
    current_time = parse_time(points[n]['time'])
    if 0 < n < size - 1:
        previous_time = points[n - 1]['time']
        next_time = points[n + 1]['time']
        if previous_time is None or next_time is None:
            return 0.
        previous_time = parse_time(previous_time)
        next_time = parse_time(next_time)
        if previous_time is not None and current_time is not None and next_time is not None:
            time = math.fabs((previous_time - next_time).total_seconds())
            distance = cheap_ruler_distance([points[n - 1], points[n]]) + \
                cheap_ruler_distance([points[n], points[n + 1]])
            return distance / time if time > 0 else 0.
    return 0.
def update(self, nodeset=None):
    res = requests.get(NODES_JSON_URL)
    if not res.ok:
        print("warning: NodesJSONCache could not download " + NODES_JSON_URL + "!",
              file=sys.stderr)
        return

    nodes = []
    try:
        json = res.json()
        for node in json['nodes']:
            nodeinfo = node['nodeinfo']
            if nodeset:
                db_node = nodeset.find_by_nodeid(nodeinfo['node_id'])
                if db_node:
                    nodes += [db_node]
                    continue
            n = Node(nodeinfo['hostname'], nodeinfo['node_id'])
            if 'lastseen' in node:
                n.last_seen_at = parse_time(node['lastseen'])
            n.last_updated_at = parse_time(json['timestamp'])
            nodes += [n]
    except KeyError:
        print("warning: NodesJSONCache detected wrong format for " + NODES_JSON_URL + "!",
              file=sys.stderr)
        return

    self.nodes = nodes
async def get_images(hdbpp, request):
    "Get images for a bunch of attributes; one image per y-axis"
    # TODO: probably makes more sense to send one image per attribute
    # instead. The overhead is pretty low anyway and it makes it
    # possible to do more dynamic stuff on the client like hiding/
    # showing attributes, changing color...
    params = await request.json()
    attributes = params["attributes"]
    time_range = [parse_time(params["time_range"][0]),
                  parse_time(params["time_range"][1])]
    size = params["size"]
    axes = params.get("axes")

    logging.debug("Attributes: %r", attributes)
    logging.debug("Time range: %r", time_range)
    logging.debug("Image size: %r", size)
    logging.debug("Axis config: %r", axes)

    # Note: unfortunately, the way things work right now it's not
    # possible to run these steps in parallel. E.g. in order to create
    # the final image, we need all the data since we must know the
    # global max and min values. Luckily, usually the dominating
    # factor will be the database calls, and these can be
    # parallelized.

    # get archived data from cassandra
    with timer("Fetching from database"):
        attr_names = [a["name"] for a in attributes]
        data = await get_data(hdbpp, attr_names, time_range)

    # calculate the max/min for each y-axis
    with timer("Calculating extrema"):
        per_axis = get_extrema(attributes, data, time_range, axes)

    # Now generate one image for each y-axis.
    loop = asyncio.get_event_loop()
    with timer("Making images"):
        # TODO: for now, we're running this in the default thread pool.
        # I haven't benchmarked this, but I'm hoping that this will speed
        # things up (apart from not blocking) since numpy etc can release
        # the GIL. Maybe look into using a process pool?
        images, descs = await loop.run_in_executor(
            None, partial(make_axis_images, per_axis, time_range, size, axes))

    # Now wrap all the results up in a JSON response.
    data = json.dumps({"images": images, "descs": descs})
    response = web.Response(body=data.encode("utf-8"),
                            content_type="application/json")
    # With compression, the size of the data goes down even further, almost
    # an order of magnitude. Typical size is a few 10s of kB! It's up to the
    # client to allow it, though.
    response.enable_compression()
    return response
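The TODO in get_images is cheap to experiment with: loop.run_in_executor accepts any concurrent.futures executor as its first argument, so a process pool can be swapped in without changing the call site. A minimal sketch under that assumption (make_images is a hypothetical stand-in for make_axis_images):

import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial

def make_images(n):
    # placeholder for the CPU-bound rendering work
    return [i * i for i in range(n)]

async def main():
    loop = asyncio.get_running_loop()
    # passing a pool instead of None bypasses the default thread pool
    with ProcessPoolExecutor() as pool:
        result = await loop.run_in_executor(pool, partial(make_images, 5))
    print(result)  # [0, 1, 4, 9, 16]

asyncio.run(main())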
def _format_text(hostvars):
    now = parse_time(datetime.utcnow().isoformat() + 'Z')
    for key in ('package', 'type'):
        if not hostvars[key]:
            hostvars[key] = '???'
    print('{ago}, {hostname}, {ip}, {type}, {package}'.format(
        ago=_format_lastmod_time(now, parse_time(hostvars['modified'])),
        **hostvars))
def cond(self, target: dict) -> bool:
    start_time = parse_time(self.config["date"][0])
    end_time = parse_time(self.config["date"][1])
    if start_time > end_time:
        raise Exception(
            f"Date filter with wrong date order: {self.config}")
    publish_time = parse_time(target["publish_time"])
    return start_time <= publish_time < end_time
def plot_data_new(self, plot_num=2):
    start_position = parse_time(self.timeEdit_from.text())  # .split()[1]
    fin_position = parse_time(self.timeEdit_to.text())  # .split()[1]
    if plot_num == 1:
        self.dredging.report_creat()
        self.plot1_data()
    elif plot_num == 2:
        if (self.old_start_position == start_position
                and self.old_fin_position == fin_position
                and self.select_columns is None):
            return
        self.plot2_data(start_position, fin_position)
def process_user_input(inp, typ, tz):
    """
    INPUT:

    - ``inp`` -- unsanitized input, as a string
    - ``typ`` -- a Postgres type, as a string
    """
    if inp is None:
        return None
    if typ == "timestamp with time zone":
        return localize_time(parse_time(inp), tz)
    elif typ == "time":
        # Note that parse_time, when passed a time with no date, returns
        # a datetime object with the date set to today. This could cause
        # different relative orders around daylight savings time, so we
        # store all times as datetimes on Jan 1, 2020.
        t = parse_time(inp)
        t = t.replace(year=2020, month=1, day=1)
        return localize_time(t, tz)
    elif typ == "date":
        return parse_time(inp).date()
    elif typ == "boolean":
        if inp in ["yes", "true", "y", "t"]:
            return True
        elif inp in ["no", "false", "n", "f"]:
            return False
        raise ValueError
    elif typ == "text":
        # should sanitize somehow?
        return "\n".join(inp.splitlines())
    elif typ in ["int", "smallint", "bigint", "integer"]:
        return int(inp)
    elif typ == "text[]":
        inp = inp.strip()
        if inp:
            if inp[0] == "[" and inp[-1] == "]":
                res = [elt.strip().strip("'") for elt in inp[1:-1].split(",")]
                if res == [""]:  # was an empty array
                    return []
                else:
                    return res
            else:
                # Temporary measure until we incorporate
                # https://www.npmjs.com/package/select-pure
                # (demo: https://www.cssscript.com/demo/multi-select-autocomplete-selectpure/)
                return [inp]
        else:
            return []
    else:
        raise ValueError("Unrecognized type %s" % typ)
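The comment in the "time" branch is worth demonstrating: dateutil's parser fills in today's date when given a bare time, so comparisons made on different days (or across a DST boundary) could order the same wall-clock times differently. A minimal sketch, assuming parse_time is dateutil.parser.parse:

from dateutil.parser import parse as parse_time

t = parse_time("14:30")  # date component defaults to today
print(t)                 # e.g. 2024-05-17 14:30:00, depending on the day
pinned = t.replace(year=2020, month=1, day=1)
print(pinned)            # 2020-01-01 14:30:00, stable across runs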
def datetime(value):
    try:
        dt.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S+0000")
        result = parse_time(value)
        return True, result
    except ValueError:
        return False, gettext("VALIDATION_INVALID_DATETIME I.E: 2014-12-11T08:23:01+0000")
def _format_key(self, obj, with_timestamp=True):
    """
    Transform a boto3 s3 Object or ObjectSummary object into a (simpler,
    implementation-abstracted) dict

    :param obj: either a boto3 s3 Object or ObjectSummary
    :param with_timestamp: custom timestamps require an extra API call;
        set this to False if you don't need them (defaults to True).
    :return: dict
    """
    filename, ext = os.path.splitext(os.path.basename(obj.key))
    if with_timestamp and hasattr(obj, "Object"):
        # obj is presumably an ObjectSummary, but we'll need an Object if we
        # want the timestamp, which should get auto-fetched when the
        # attribute is accessed
        obj = obj.Object()
    keydict = {
        'path': obj.key,
        'filename': filename,
        'ext': ext[1:],
        # ObjectSummary has .size, Object has .content_length
        'size': obj.size if hasattr(obj, "size") else obj.content_length,
    }
    if with_timestamp:
        # First look for the custom "timestamp" metadata field that is
        # explicitly set by our S3 uploader; fall back to AWS's
        # "last_modified" if it doesn't exist
        keydict["last_modified"] = (
            (obj.metadata.get("timestamp") and parse_time(obj.metadata["timestamp"]))
            or obj.last_modified).strftime(DATETIME_FORMAT)
    return keydict
def __filter(n):
    cur_p = parsed_points[n]
    last_p = parsed_points[n - 1]
    cheap_distance = 0
    duration = 0.
    if parsed_start_datetime is not None and cur_p['time'] is not None:
        duration = (parse_time(cur_p['time']) - parsed_start_datetime).total_seconds()
    cur_p['duration'] = duration
    # p['smoothed_elevation'] = smoothed_elevations[n]
    speed = smoothed_speeds[n] * 3600. / 1000.
    cur_p['speed'] = speed if duration < 60 else 0.
    if n < 1:
        cur_p['total_distance'] = 0.
    else:
        cheap_distance = cheap_ruler_distance([last_p, cur_p])
        total_distance['value'] += cheap_distance / 1000.
        cur_p['total_distance'] = total_distance['value']
    cur_p['distance'] = cheap_distance
    cur_p['slope'] = 100 * (
        smoothed_elevations[n - 1] - smoothed_elevations[n]
    ) / cheap_distance if cheap_distance > 0 else 0.
    return cur_p
def run():
    s3 = boto.connect_s3()
    bucket = s3.get_bucket('rediscloud')
    keys = bucket.list()
    k = max(keys, key=parse_last_modified)
    try:
        with open(last_backup_time_filename) as f:
            last_backup_time = parse_time(f.read())
        if parse_last_modified(k) <= last_backup_time:
            print("Current backup is already the most recent one; exiting.")
            return
    except IOError:
        print("First backup ever!")
    print("Got new backup! Downloading %s..." % k.name)
    for suffix in ['', '.gz']:
        path = tmp_rdb_filename + suffix
        if os.path.exists(path):
            os.unlink(path)
    k.get_contents_to_filename(tmp_rdb_filename + ".gz", cb=cb)
    try_cmd('uncompressing backup', 'gunzip %s.gz' % tmp_rdb_filename)
    try_cmd('stopping redis server', 'service redis-server stop')
    print("Replacing dump file...")
    os.rename(tmp_rdb_filename, live_rdb_filename)
    try_cmd('starting redis server', 'service redis-server start')
    print("Saving new backup time...")
    with open(last_backup_time_filename, 'w') as f:
        f.write(k.last_modified)
    print("Done.")
def parse_line(line):
    (time, load_avg, cpu_user, cpu_sys, cpu_io, cpu_idle, mempc, memact,
     swappc, swaptx, diskpc, disktx, sent, recv, err, drop) = line.strip().split('|')
    return sample(parse_time(time), float(load_avg), float(cpu_user),
                  float(cpu_sys), float(cpu_io), float(cpu_idle),
                  float(mempc), int(memact), float(swappc), int(swaptx),
                  float(diskpc), int(disktx), int(sent), int(recv),
                  int(err), int(drop))
def get_str_to_py_cast_func(str_type):
    '''
    This is bad, needs to be rewritten, and should not be casting keys to
    strings.
    '''
    casting_func_dict = {
        'int': lambda x: int(x),
        'float': lambda x: float(x),
        'str': lambda x: '\'%s\'' % str(mysql_escape(x), 'utf-8'),
        'datetime': lambda x: parse_time(x),
        'bool': lambda x: bool(x),
        'set': lambda x: set(x.split(',')),
        'list': lambda x: x.split(','),
        # 'list': lambda x: [
        #     parse_time(i.split(',')[0]) if
        #     i.split(',')[1] == 'datetime' else
        #     locate(i.split(',')[1])(i.split(',')[0])
        #     for i in x.split(';') if i.split(',')[1] in supported_python_types
        # ],
        # 'set': lambda x: [
        #     parse_time(i.split(',')[0]) if
        #     i.split(',')[1] == 'datetime' else
        #     locate(i.split(',')[1])(i.split(',')[0])
        #     for i in list(x).split(';') if i.split(',')[1] in supported_python_types
        # ],
    }
    return casting_func_dict[str_type]
def update_required(self):
    if datetime.now(tzlocal()) - self.latest_update_check < timedelta(seconds=5):
        return False
    latest_change_time = parse_time(self.sheet.cell(1, 1).value)
    self.latest_update_check = datetime.now(tzlocal())
    if self.latest_update < latest_change_time:
        print("Update required...")
    return self.latest_update < latest_change_time
def extract_bill_period(pdf_filename):
    """Convert the PDF to a string so we can determine the dates this bill covers."""
    try:
        text = pdf_to_str(pdf_filename)
    except PDFSyntaxError:
        log.exception("Downloaded bill file failed to parse as a PDF.")
        return None, None
    pattern = r"Service Period\n(\d+/\d+/\d+)\n(\d+/\d+/\d+)"
    match = re.search(pattern, text)
    if match:
        period_a = parse_time(match.group(1)).date()
        period_b = parse_time(match.group(2)).date()
        return min(period_a, period_b), max(period_a, period_b)
    return None, None
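A minimal sketch of the pattern above against the text layout it expects; the sample text is hypothetical and parse_time is assumed to be dateutil.parser.parse (which reads these dates month-first):

import re
from dateutil.parser import parse as parse_time

text = "Account Summary\nService Period\n01/05/2021\n02/04/2021\nAmount Due"
match = re.search(r"Service Period\n(\d+/\d+/\d+)\n(\d+/\d+/\d+)", text)
if match:
    period_a = parse_time(match.group(1)).date()
    period_b = parse_time(match.group(2)).date()
    print(min(period_a, period_b), "to", max(period_a, period_b))
    # 2021-01-05 to 2021-02-04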
async def post_raw_query_http(hdbpp, request):
    "Handle queries for data in 'raw' (csv or json) form from the browser"
    params = await request.json()
    attributes = params["attributes"]
    time_range = [parse_time(params["time_range"][0]),
                  parse_time(params["time_range"][1])]
    data = await get_data(hdbpp, attributes, time_range)
    response = negotiation.Response(data=data)
    response.enable_compression()
    return response
def get_pending_events(self, until: datetime = None) -> [Event]:
    events = []
    for item in self.state["items"]:
        event = Event()

        # Get name
        event.content = re.sub(r"\[.+\]\s?", "", item["content"]).strip()
        project_names = self._project_names(item["project_id"])
        if len(project_names) > 0:
            event.content = project_names[-1] + " / " + event.content

        # Find & load times
        event.inception = parse_time(item["date_added"])
        due_date = None
        if item["due"] is not None:
            # If there is a due date, it's treated as the do-after date
            # (inception time) UNLESS it has the 'deadline' label, in which
            # case it's considered the deadline.
            due_date = parse_time(item["due"]["date"])
            if not is_aware(due_date):
                due_date = make_aware(due_date, timezone=self._timezone())
            if item["due"]["is_recurring"]:
                event.recurrence_id = str(item["id"])
            if self._label_id("deadline") in item["labels"]:
                event.deadline = due_date
            else:
                event.inception = due_date

        # Extract Tim-specific metadata
        metadata_matches = re.finditer(r"\[(?P<metadata>.+)\]", item["content"])
        for match in metadata_matches:
            for component in match.group("metadata").split(","):
                if match := re.match(
                    r"(?P<progression>([\w\-])+)(\s+)?#(?P<ordering>\d+(.\d+)?)",
                    component.strip(),
                ):
                    event.progression = match.group("progression")
                    event.progression_order = float(match.group("ordering"))
                if (duration := parse_duration(component)):
                    event.duration = duration

        events.append(event)
    return events
def update_required():
    global sheet, latest_update, latest_update_check
    if datetime.now(tzlocal()) - latest_update_check < timedelta(minutes=5):
        return False
    latest_change_time = parse_time(sheet.cell(1, 1).value)
    latest_update_check = datetime.now(tzlocal())
    if latest_update < latest_change_time:
        print("Update required...")
    return latest_update < latest_change_time
def test_list_suppliers_with_agreements_returned(self):
    with self.app.app_context():
        response = self.client.get(
            '/frameworks/g-cloud-7/suppliers?agreement_returned=true')
        assert response.status_code == 200

        data = json.loads(response.get_data())
        assert len(data['supplierFrameworks']) == 2
        times = [parse_time(item['agreementReturnedAt'])
                 for item in data['supplierFrameworks']]
        assert times[0] > times[1]
def __init__(self, node, stats):
    # type: (client.V1Node, str) -> None
    super(Node, self).__init__(node.metadata)
    self._status = node.status
    # kubelet replies statistics for the last 2 minutes with 10s
    # intervals. We only need the latest state.
    self.stats = eval(stats)['stats'][-1]
    # The timestamps are returned in RFC3339Nano format which cannot be
    # parsed by Python's time module. Therefore we use dateutil's parse
    # function here.
    self.stats['timestamp'] = time.mktime(
        parse_time(self.stats['timestamp']).utctimetuple())
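The comment above points at a real gap: strptime's %f directive accepts at most six fractional digits, so RFC3339Nano's nine-digit fractions raise ValueError, while dateutil truncates them to microseconds. A minimal sketch, assuming parse_time is dateutil.parser.parse and a hypothetical kubelet timestamp:

from datetime import datetime
from dateutil.parser import parse as parse_time

ts = "2018-10-19T12:34:56.123456789Z"  # hypothetical RFC3339Nano timestamp
try:
    datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError as err:
    print("strptime failed:", err)
print("dateutil:", parse_time(ts))  # 2018-10-19 12:34:56.123456+00:00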
def test_user_not_bind(self):
    self.wechat_server.mock_timezone_now(
        parse_time('2018-10-19 00:00:00 UTC'))
    resp = self.wechat_server.send_text(
        '取票 ' + self.activity_map['7e'].key,
        'B72AAF5F26554351B768642D7618ECCE42EA2BEEA9DE4B108E59744CFC028044')
    self.assertEqual(self.wechat_server.get_msg_type(resp), 'text')
    self.assertEqual(self.wechat_server.get_text(resp),
                     get_template('messages/id_not_bind.html').render())
def test_user_not_bind(self):
    self.wechat_server.mock_timezone_now(
        parse_time('2018-10-19 00:00:00 UTC'))
    resp = self.wechat_server.send_click(
        CustomWeChatView.event_keys['get_ticket'],
        'B72AAF5F26554351B768642D7618ECCE42EA2BEEA9DE4B108E59744CFC028044')
    self.assertEqual(self.wechat_server.get_msg_type(resp), 'text')
    self.assertEqual(self.wechat_server.get_text(resp),
                     get_template('messages/id_not_bind.html').render())
def get_completed_events(self, after: datetime = None):
    # `after` keyword arg not yet implemented
    events = []
    for item in self.completed:
        event = Event()
        event.completed = True
        event.completed_at = parse_time(item["completed_date"])
        self._apply_source_metadata(event, item)
        events.append(event)
    return events
async def post_raw_query(hdbpp, request):
    "Handle queries for data in 'raw' (csv or json) form"
    params = await request.json()
    attributes = ["{cs}/{target}".format(**t) for t in params["targets"]]
    time_range = [parse_time(params["range"]["from"]),
                  parse_time(params["range"]["to"])]
    interval = params.get("interval")
    data = await get_data(hdbpp, attributes, time_range, interval,
                          restrict_time=True)
    response = negotiation.Response(data=data)
    response.enable_compression()
    return response
def get_moving_data(parsed_points):
    moving_time = 0
    moving_points = []
    for n in range(len(parsed_points)):
        current_point = parsed_points[n]
        current_time = current_point['time']
        current_time = parse_time(current_time) if current_time else None
        if current_point['speed'] > 1 and n > 1 and current_time:
            previous_time = parse_time(parsed_points[n - 1]['time'])
            time = math.fabs((current_time - previous_time).total_seconds())
            moving_time += time
            moving_points.append(current_point)
    moving_distance = cheap_ruler_distance(moving_points) if len(moving_points) > 2 else 0.
    return moving_time, moving_distance
def copy_file(src_path, target_path):
    src_key = src_bucket.bucket.get_key(src_path)
    target_bucket.bucket.copy_key(
        target_path,
        src_bucket_name=src_bucket.bucket_name,
        src_key_name=src_path,
        preserve_acl=True,
        metadata={
            "timestamp": parse_time(src_key.last_modified).strftime(DATETIME_FORMAT)
        })
def test_after_ddl_cannot_see(self):
    self.wechat_server.mock_timezone_now(
        parse_time('2018-10-19 01:00:00 UTC'))  # right after the ddl
    resp = self.wechat_server.send_click(
        CustomWeChatView.event_keys['book_what'],
        'B72AAF5F26554351B768642D7618ECCE42EA2BEEA9DE4B108E59744CFC028044')
    self.assertEqual(self.wechat_server.get_msg_type(resp), 'text',
                     'reply not text')
    self.assertEqual(self.wechat_server.get_text(resp),
                     get_template('messages/book_empty.html').render(),
                     'now should have no activity to snap up')
def test_before_start_book(self):
    self.wechat_server.mock_timezone_now(
        parse_time('2018-10-18 23:59:59 UTC'))
    resp = self.wechat_server.send_click(
        CustomWeChatView.event_keys['book_header'] + str(self.activity_map['7e'].id),
        '48A3CB2513F049A98A7DFD2453ED717296F3D2B76DC7407DB3886D5F4F4B5C04')
    self.assertEqual(self.wechat_server.get_msg_type(resp), 'text')
    self.assertEqual(self.wechat_server.get_text(resp),
                     get_template('messages/book_not_start.html').render(),
                     'cannot book before start_book time')
def test_after_book_end(self):
    self.wechat_server.mock_timezone_now(
        parse_time('2018-10-19 01:00:01 UTC'))
    resp = self.wechat_server.send_text(
        '抢票 ' + self.activity_map['7e'].key,
        '48A3CB2513F049A98A7DFD2453ED717296F3D2B76DC7407DB3886D5F4F4B5C04')
    self.assertEqual(self.wechat_server.get_msg_type(resp), 'text')
    self.assertEqual(self.wechat_server.get_text(resp),
                     get_template('messages/book_end_already.html').render(),
                     'cannot book after book_end time')
def copy_file_with_content_disposition(src_path, target_path, download_filename):
    src_key = src_bucket.bucket.get_key(src_path)
    target_bucket.bucket.copy_key(
        target_path,
        src_bucket_name=src_bucket.bucket_name,
        src_key_name=src_path,
        preserve_acl=True,
        metadata={
            "timestamp": parse_time(src_key.last_modified).strftime(DATETIME_FORMAT),
            "Content-Disposition": 'attachment; filename="{}"'.format(download_filename),
        })
def pygraf_trying(self, plot_num=3, x=None, y=None,
                  start_position='00:00:00', fin_position='23:00:00'):
    start_position = parse_time(self.timeEdit_from.text())
    fin_position = parse_time(self.timeEdit_to.text())
    data1 = self.dredging.dredging_df.loc[start_position:fin_position, :]
    data2 = self.dredging.dredging_df_out.loc[start_position:fin_position, :]
    if plot_num == 1:
        self.Graphic.canvas.ax.clear()
        self.Graphic.canvas.ax.plot(data1[['BucketZ', 'BoomAng']])
        # self.Graphic.canvas.ax.plot(data[['BucketY', 'BargeY']])
        self.Graphic.canvas.ax.set_title(y)
        self.Graphic.canvas.draw()
def dump_idrac(
    ip: str,
    idrac_metrics: dict,
    metric_dtype_mapping: dict,
    ip_id_mapping: dict,
    conn: object,
):
    """dump_idrac Dump iDRAC Metrics

    Dump node metrics to TimeScaleDB

    Args:
        ip (str): ip address of iDRAC
        idrac_metrics (dict): iDRAC Metrics
        metric_dtype_mapping (dict): Metric-Datatype mapping
        ip_id_mapping (dict): ip-id mapping
        conn (object): TimeScaleDB connection object
    """
    try:
        schema_name = 'idrac'
        nodeid = ip_id_mapping[ip]

        for table_name, table_metrics in idrac_metrics.items():
            all_records = []
            dtype = metric_dtype_mapping[table_name]

            table_name = table_name.lower()
            target_table = f"{schema_name}.{table_name}"

            cols = ('timestamp', 'nodeid', 'source', 'fqdd', 'value')
            for metric in table_metrics:
                # We have to offset the timestamp by -6/-5 hours. For some
                # unknown reason, the timestamp reported by iDRAC is not
                # configured correctly.
                timestamp = parse_time(metric['Timestamp'])
                timestamp = timestamp.astimezone(tz.tzlocal())
                timestamp = timestamp.replace(tzinfo=tz.tzutc())
                timestamp = timestamp.astimezone(tz.tzlocal())

                source = metric['Source']
                fqdd = metric['FQDD']
                if metric['Value']:
                    value = utils.cast_value_type(metric['Value'], dtype)
                    all_records.append((timestamp, nodeid, source, fqdd, value))

            mgr = CopyManager(conn, target_table, cols)
            mgr.copy(all_records)

        conn.commit()
    except Exception as err:
        log.error(f"Fail to dump idrac metrics ({ip}): {err}")
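The three astimezone/replace steps amount to subtracting the local UTC offset from the instant, which matches the "-6/-5 hours" in the comment for a US Central deployment. A minimal sketch of that arithmetic, assuming dateutil's tz and America/Chicago as a hypothetical local zone:

from datetime import datetime
from dateutil import tz

local = tz.gettz("America/Chicago")  # hypothetical local zone (CST/CDT)
t = datetime(2021, 1, 15, 12, 0, tzinfo=tz.tzutc())

shifted = t.astimezone(local)                 # 06:00-06:00, same instant
shifted = shifted.replace(tzinfo=tz.tzutc())  # relabel 06:00 as UTC
shifted = shifted.astimezone(local)           # 00:00-06:00, six hours earlier
print(t, "->", shifted)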
def write_article(article, site):
    published = parse_time(article.published).date()
    article_text = get_article_from_html(article.link,
                                         site.article_class_name_or_id,
                                         site.stop_phrase)
    # f**k it
    if article_text and 'function' in article_text:
        return
    article_corpus = BaseCorpus(article_text)
    article_corpus.tokenize().lemmatize().remove_stopwords()
    article_text = ' '.join(article_corpus.tokens)
    Article(title=article.title, published=published, link=article.link,
            text=article_text, site=site).save()
def test_cancel_used_canceled_ticket(self):
    self.wechat_server.mock_timezone_now(
        parse_time('2018-10-19 00:00:00 UTC'))
    resp = self.wechat_server.send_text(
        '抢票 ' + self.activity_map['7e'].key,
        '921E1460FD86481C9087C7E2A9B7C6322967F79BDFC34ED2873EFC8106EDC38A')
    news = self.wechat_server.get_news(resp)
    self.assertEqual(len(news), 1)

    ticket = Ticket.objects.filter(student_id='2016012345',
                                   activity=self.activity_map['7e'],
                                   status=Ticket.STATUS_VALID).first()
    self.assertIsNotNone(ticket)

    ticket.status = Ticket.STATUS_USED
    ticket.save()
    resp = self.wechat_server.send_text(
        '退票 ' + self.activity_map['7e'].key,
        '921E1460FD86481C9087C7E2A9B7C6322967F79BDFC34ED2873EFC8106EDC38A')
    self.assertEqual(
        self.wechat_server.get_text(resp),
        get_template('messages/no_ticket_in_hand.html').render())

    ticket.status = Ticket.STATUS_USED
    ticket.save()
    resp = self.wechat_server.send_text(
        '退票 ' + self.activity_map['7e'].key,
        '921E1460FD86481C9087C7E2A9B7C6322967F79BDFC34ED2873EFC8106EDC38A')
    self.assertEqual(
        self.wechat_server.get_text(resp),
        get_template('messages/no_ticket_in_hand.html').render())

    ticket.status = Ticket.STATUS_CANCELLED
    ticket.save()
    resp = self.wechat_server.send_text(
        '退票 ' + self.activity_map['7e'].key,
        '921E1460FD86481C9087C7E2A9B7C6322967F79BDFC34ED2873EFC8106EDC38A')
    self.assertEqual(
        self.wechat_server.get_text(resp),
        get_template('messages/no_ticket_in_hand.html').render())

    ticket.status = Ticket.STATUS_VALID
    ticket.save()
    resp = self.wechat_server.send_text(
        '退票 ' + self.activity_map['7e'].key,
        '921E1460FD86481C9087C7E2A9B7C6322967F79BDFC34ED2873EFC8106EDC38A')
    self.assertEqual(
        self.wechat_server.get_text(resp),
        get_template('messages/cancel_complete.html').render(
            {'activity': self.activity_map['7e']}))
def _format_key(self, key, load_timestamps, timestamp=None):
    """
    transform a boto s3 Key object into a (simpler) dict

    :param key: http://boto.readthedocs.org/en/latest/ref/s3.html#boto.s3.key.Key
    :param load_timestamps: by default custom timestamps are not loaded as
        they require an extra API call. If you need to show the timestamp
        set this to True.
    :return: dict
    """
    filename, ext = os.path.splitext(os.path.basename(key.name))
    if load_timestamps:
        key = self.bucket.get_key(key.name)
        timestamp = key.get_metadata('timestamp')
    timestamp = timestamp or key.last_modified
    timestamp = parse_time(timestamp)
    return {
        'path': key.name,
        'filename': filename,
        'ext': ext[1:],
        'last_modified': timestamp.strftime(DATETIME_FORMAT),
        'size': key.size
    }
def parse_last_modified(k):
    return parse_time(k.last_modified)
def peak_period_end_time(self):
    if 'peak_period_end_time' in self._structure:
        return parse_time(self._structure['peak_period_end_time'])
def eta_begin(self):
    if 'eta_begin' in self._structure:
        return parse_time(self._structure['eta_begin'])
def spec_parse_time(timestr):
    t = parse_time(timestr)
    # have every date be relative to this one
    # (the only important part of a date is its weekday)
    return t.replace(year=2016, month=2, day=8 + t.weekday())
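February 8, 2016 fell on a Monday, so day=8 + t.weekday() maps every input onto the week of Feb 8-14, 2016, preserving only the weekday and the time-of-day. A minimal sketch, assuming parse_time is dateutil.parser.parse:

from dateutil.parser import parse as parse_time

for s in ("2023-06-05 09:00", "1999-12-31 17:30"):  # a Monday and a Friday
    t = parse_time(s)
    print(s, "->", t.replace(year=2016, month=2, day=8 + t.weekday()))
# 2023-06-05 09:00 -> 2016-02-08 09:00:00
# 1999-12-31 17:30 -> 2016-02-12 17:30:00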
def start_time(self):
    if 'start_time' in self._event:
        return parse_time(self._event['start_time'])
def end_time(self):
    if 'end_time' in self._event:
        return parse_time(self._event['end_time'])
def urls_expire_time(self):
    if 'urls_expire_time' in self._event:
        return parse_time(self._event['urls_expire_time'])