def test_format_property_time_of_day():
    formatter = format_property("time_of_day")
    assert_equal(formatter(11.5), "11:30")
    assert_equal(formatter(11.008), "11:00:28")
    assert_equal(formatter(isoparse("2016-10-20T08:20:03")), "08:20:03")
    assert_equal(formatter(isoparse("2016-08-12T13:37")), "13:37")
def time_from_string_timestamp_and_timezone(timestamp, timezone):
    # If t is a string, try to interpret it as an ISO time string
    try:
        # Can't use strptime with %z in Python 2
        # https://stackoverflow.com/a/23940673
        result = isoparse(timestamp)
    except ValueError as e:
        raise CraftAiTimeError(
            """Unable to instantiate Time from given string. {}""".format(e))

    if result.tzinfo is None:
        # Handle formats like: Time(t="2017-01-01 00:00:00")
        if timezone:
            # Handle formats like: Time(t="2017-01-01 00:00:00", timezone="-03:00")
            result = pyutc.localize(result)
            result = set_timezone(result, timezone)
        else:
            raise CraftAiTimeError("The given datetime string must be tz-aware,"
                                   " or you must provide an explicit timezone.")
    else:
        if timezone:
            # Handle formats like: Time("2011-04-22 01:00:00+0900", timezone="-03:00")
            raise CraftAiTimeError("You must provide one timezone, but two were provided:"
                                   " in the datetime string and in the timezone parameter.")
    return result
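A minimal sketch (added here for illustration, not from the original source) of the tz-handling split the function above relies on: dateutil's isoparse returns a naive datetime when the string carries no UTC offset and an aware one when it does.

from dateutil.parser import isoparse

naive = isoparse("2017-01-01 00:00:00")        # no offset -> tzinfo is None
aware = isoparse("2011-04-22 01:00:00+09:00")  # offset present -> tz-aware
assert naive.tzinfo is None
assert aware.tzinfo is not None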
def test_timespec_auto(dt, sep):
    if dt.tzinfo is not None:
        # Assume offset has no sub-second components
        assume(dt.utcoffset().total_seconds() % 60 == 0)

    sep = str(sep)  # Python 2.7 requires bytes
    dtstr = dt.isoformat(sep=sep)
    dt_rt = isoparse(dtstr)

    assert dt_rt == dt
def clean_committer_date(self):
    """Parse the date and time in the committer_date field.

    Returns:
        datetime.datetime: The parsed date and time.
    """
    try:
        return isoparse(self.cleaned_data['committer_date'])
    except ValueError:
        raise ValidationError(ugettext(
            'This date must be in ISO 8601 format.'))
def __parse_periods__(cls, value1: Union[dt.datetime, int, str],
                      value2: Union[dt.datetime, int, str]) -> Tuple[int, int]:
    # Note that the earliest date that is possible to take into consideration
    # is platform-dependent. For compatibility reasons, we do not accept
    # timestamps prior to epoch time 0.
    if isinstance(value1, str):
        try:
            period1 = int(du.isoparse(value1).timestamp())
        except (OSError, OverflowError):
            period1 = 0
    else:
        period1 = max(0, int(time.mktime(value1.timetuple()))) \
            if isinstance(value1, dt.datetime) else max(0, value1)

    if value1 == value2:
        period2 = period1  # identical inputs yield an empty range
    elif isinstance(value2, str):
        try:
            period2 = int(du.isoparse(value2).timestamp())
        except (OSError, OverflowError):
            period2 = int(dt.datetime.now().timestamp())
    else:
        period2 = max(period1, int(time.mktime(value2.timetuple()))) \
            if isinstance(value2, dt.datetime) else max(period1, value2)

    return period1, period2
def _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset,
                            microsecond_precision=None):
    tzi, offset_str = tzoffset
    fmt = date_fmt + 'T' + time_fmt
    dt = dt.replace(tzinfo=tzi)
    dtstr = dt.strftime(fmt)

    if microsecond_precision is not None:
        if not fmt.endswith('%f'):
            raise ValueError('Time format has no microseconds!')

        if microsecond_precision < 6:
            dtstr = dtstr[:-(6 - microsecond_precision)]
        elif microsecond_precision > 6:
            raise ValueError('Precision must be 1-6')

    dtstr += offset_str

    assert isoparse(dtstr) == dt
def get(self, obj_id):
    """
    ---
    description: Generate a PDF finding chart to aid in spectroscopy
    parameters:
    - in: path
      name: obj_id
      required: true
      schema:
        type: string
    - in: query
      name: imsize
      schema:
        type: float
        minimum: 2
        maximum: 15
      description: Image size in arcmin (square)
    - in: query
      name: facility
      nullable: true
      schema:
        type: string
        enum: [Keck, Shane, P200]
    - in: query
      name: image_source
      nullable: true
      schema:
        type: string
        enum: [desi, dss, ztfref]
      description: Source of the image used in the finding chart
    - in: query
      name: obstime
      nullable: True
      schema:
        type: string
      description: |
        datetime of observation in isoformat (e.g. 2020-12-30T12:34:10)
    responses:
      200:
        description: A PDF finding chart file
        content:
          application/pdf:
            schema:
              type: string
              format: binary
      400:
        content:
          application/json:
            schema: Error
    """
    source = Source.get_obj_if_owned_by(obj_id, self.current_user)
    if source is None:
        return self.error('Invalid source ID.')

    imsize = self.get_query_argument('imsize', '4.0')
    try:
        imsize = float(imsize)
    except ValueError:
        # could not handle inputs
        return self.error('Invalid argument for `imsize`')

    if imsize < 2.0 or imsize > 15.0:
        return self.error('The value for `imsize` is outside the allowed range')

    facility = self.get_query_argument('facility', 'Keck')
    image_source = self.get_query_argument('image_source', 'desi')
    how_many = 3
    obstime = self.get_query_argument(
        'obstime', datetime.datetime.utcnow().isoformat()
    )
    if not isinstance(isoparse(obstime), datetime.datetime):
        return self.error('obstime is not valid isoformat')
    if facility not in facility_parameters:
        return self.error('Invalid facility')
    if image_source not in source_image_parameters:
        return self.error('Invalid source image')

    radius_degrees = facility_parameters[facility]["radius_degrees"]
    mag_limit = facility_parameters[facility]["mag_limit"]
    min_sep_arcsec = facility_parameters[facility]["min_sep_arcsec"]
    mag_min = facility_parameters[facility]["mag_min"]

    rez = get_finding_chart(
        source.ra, source.dec, obj_id,
        image_source=image_source,
        output_format='pdf',
        imsize=imsize,
        how_many=how_many,
        radius_degrees=radius_degrees,
        mag_limit=mag_limit,
        mag_min=mag_min,
        min_sep_arcsec=min_sep_arcsec,
        starlist_type=facility,
        obstime=obstime,
        use_source_pos_in_starlist=True,
        allowed_queries=2,
        queries_issued=0,
    )

    filename = rez["name"]
    image = rez["data"]

    # do not send result via `.success`, since that creates a JSON
    self.set_status(200)
    self.set_header("Content-Type", "application/pdf; charset='utf-8'")
    self.set_header("Content-Disposition", f"attachment; filename={filename}")
    self.set_header('Cache-Control',
                    'no-store, no-cache, must-revalidate, max-age=0')

    return self.write(image)
def _datetime(reader, elem):
    # Truncate sub-microsecond precision, which datetime/isoparse cannot represent
    text, n = re.subn(r"(.*\.)(\d{6})\d+(\+.*)", r"\1\2\3", elem.text)
    if n > 0:
        log.debug(f"Truncate sub-microsecond time in <{QName(elem).localname}>")

    reader.push(elem, isoparse(text))
def paginate_query(query, sort_attribute=None, sort_desc=True, ignore_last_fetched=False):
    """
    Paginates an sqlalchemy query, gracefully managing missing queries.
    Default ordering is to show most recently created first.
    Unlike raw paginate, defaults to showing all results if args aren't supplied.

    The pagination function can be sorted by a user specified attribute (in sort_attribute).
    We return the value of this attribute for the last fetched item, which can be used
    to return the next set of results. The reason we don't just return the id is because
    a given item's position in a list can change significantly.

    :param query: base query
    :param sort_attribute: override option for the sort parameter.
    :param sort_desc: sort in desc order
    :argument updated_after: only return items updated after a certain date
    :argument per_page: how many results to return per request. Defaults to unlimited
    :argument page: the page number of the results to return. Defaults to first page
    :returns: tuple of (
        item list,
        total number of items,
        total number of pages,
        the last fetched item, identified by the sort key
    )
    """
    updated_after = request.args.get('updated_after')
    per_page = request.args.get('per_page')
    page = request.args.get('page')
    last_fetched = request.args.get('last_fetched')

    # Unfortunately SQLAlchemy doesn't have a better way to expose the queried object
    queried_object = query._primary_entity.mapper.class_

    if updated_after:
        parsed_time = parser.isoparse(updated_after)
        query = query.filter(queried_object.updated > parsed_time)

    # If sort attribute isn't defined but the queried_object is, default to sorting by id
    if not sort_attribute:
        sort_attribute = queried_object.id

    if sort_attribute.expression.comparator.type.python_type == datetime.datetime and last_fetched:
        last_fetched = parser.isoparse(last_fetched)

    if sort_desc:
        query = query.order_by(sort_attribute.desc())
        # If we have a descending sort order, the next object to return has a lower
        # val than the last. Eg: if the last ID fetched was 10, the next allowable
        # one to return is 9.
        if last_fetched:
            query = query.filter(sort_attribute < last_fetched)
    else:
        query = query.order_by(sort_attribute.asc())
        # If we have an ascending sort order, the next object to return has a higher
        # val than the last. Eg: if the last ID fetched was 10, the next allowable
        # one to return is 11.
        if last_fetched:
            query = query.filter(sort_attribute > last_fetched)

    if per_page is None:
        items = query.all()
        if len(items) > 0 and not ignore_last_fetched:
            new_last_fetched_obj = items[-1]
            new_last_fetched = getattr(new_last_fetched_obj, sort_attribute.key)
        else:
            new_last_fetched = None
        return items, len(items), 1, new_last_fetched

    per_page = int(per_page)

    if page is None:
        paginated = query.paginate(0, per_page, error_out=False)
    else:
        page = int(page)
        paginated = query.paginate(page, per_page, error_out=False)

    if len(paginated.items) > 0 and not ignore_last_fetched:
        new_last_fetched_obj = paginated.items[-1]
        new_last_fetched = getattr(new_last_fetched_obj, sort_attribute.key)
    else:
        new_last_fetched = None

    return paginated.items, paginated.total, paginated.pages, new_last_fetched
def test_extra_subsecond_digits(dt_str):
    assert isoparse(dt_str) == datetime(2018, 7, 3, 14, 7, 0, 123456)
def convert_dt_str_to_dt_object(dt_str):
    return parser.isoparse(dt_str)
def get_report_for_date(required_date, user):
    for report in user['dates']:
        if isoparse(report['date']).date() == required_date:
            return report
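A hypothetical call to get_report_for_date above; the user dict mirrors the shape the function reads (a 'dates' list of reports with ISO 'date' strings), but every key other than 'date' and all values are invented for illustration.

import datetime

user = {"dates": [{"date": "2021-03-01T08:00:00", "pages": 12},
                  {"date": "2021-03-02T08:00:00", "pages": 7}]}
print(get_report_for_date(datetime.date(2021, 3, 2), user))
# -> {'date': '2021-03-02T08:00:00', 'pages': 7}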
def submit_perf_metrics(self, container_tags, container_id, container_stats):
    try:
        if container_stats is None:
            self.log.debug("Empty stats for container %s", container_id)
            return

        tags = container_tags[container_id]

        # CPU metrics
        cpu_stats = container_stats.get('cpu_stats', {})
        prev_cpu_stats = container_stats.get('precpu_stats', {})

        value_system = cpu_stats.get('cpu_usage', {}).get('usage_in_kernelmode')
        if value_system is not None:
            self.rate('ecs.fargate.cpu.system', value_system, tags)

        value_user = cpu_stats.get('cpu_usage', {}).get('usage_in_usermode')
        if value_user is not None:
            self.rate('ecs.fargate.cpu.user', value_user, tags)

        value_total = cpu_stats.get('cpu_usage', {}).get('total_usage')
        if value_total is not None:
            self.rate('ecs.fargate.cpu.usage', value_total, tags)

        available_cpu = cpu_stats.get('system_cpu_usage')
        preavailable_cpu = prev_cpu_stats.get('system_cpu_usage')
        prevalue_total = prev_cpu_stats.get('cpu_usage', {}).get('total_usage')

        # This is always false on Windows because the available cpu is not exposed
        if (available_cpu is not None and preavailable_cpu is not None
                and value_total is not None and prevalue_total is not None):
            cpu_delta = float(value_total) - float(prevalue_total)
            system_delta = float(available_cpu) - float(preavailable_cpu)
        else:
            cpu_delta = 0.0
            system_delta = 0.0

        # Not reported on Windows
        active_cpus = float(cpu_stats.get('online_cpus', 0.0))

        cpu_percent = 0.0
        if system_delta > 0 and cpu_delta > 0 and active_cpus > 0:
            if system_delta > cpu_delta:
                cpu_percent = (cpu_delta / system_delta) * active_cpus * 100.0
                cpu_percent = round_value(cpu_percent, 2)
                self.gauge('ecs.fargate.cpu.percent', cpu_percent, tags)
            else:
                # There is a bug where container CPU usage is occasionally reported
                # as greater than system CPU usage (which, in fact, represents the
                # maximum available CPU time during this timeframe), leading to a
                # nonsensical CPU percentage being reported. To mitigate this we
                # substitute the system_delta with (t1 - t0) * active_cpus (with a
                # scale factor to convert to nanoseconds).
                self.log.debug(
                    "Anomalous CPU value for container_id: %s. cpu_percent: %f",
                    container_id, cpu_percent,
                )
                self.log.debug(
                    "ECS container_stats for container_id %s: %s",
                    container_id, container_stats)

                # example format: '2021-09-22T04:55:52.490012924Z'
                t1 = container_stats.get('read', '')
                t0 = container_stats.get('preread', '')
                try:
                    t_delta = int((parser.isoparse(t1) - parser.isoparse(t0)).total_seconds())
                    # Simplified formula for cpu_percent where
                    # system_delta = t_delta * active_cpus * (10 ** 9)
                    cpu_percent = (cpu_delta / (t_delta * (10 ** 9))) * 100.0
                    cpu_percent = round_value(cpu_percent, 2)
                    self.gauge('ecs.fargate.cpu.percent', cpu_percent, tags)
                except ValueError:
                    pass

        # Memory metrics
        memory_stats = container_stats.get('memory_stats', {})

        for metric in MEMORY_GAUGE_METRICS:
            value = memory_stats.get('stats', {}).get(metric)
            if value is not None and value < CGROUP_NO_VALUE:
                self.gauge('ecs.fargate.mem.' + metric, value, tags)
        for metric in MEMORY_RATE_METRICS:
            value = memory_stats.get('stats', {}).get(metric)
            if value is not None:
                self.rate('ecs.fargate.mem.' + metric, value, tags)

        value = memory_stats.get('max_usage')
        if value is not None:
            self.gauge('ecs.fargate.mem.max_usage', value, tags)

        value = memory_stats.get('usage')
        if value is not None:
            self.gauge('ecs.fargate.mem.usage', value, tags)

        value = memory_stats.get('limit')
        # When there is no hard limit defined, the ECS API returns a value of 8 EiB.
        # It's not exactly 2^63, but a rounded value of it, most probably because of
        # an int -> float -> int conversion.
        if value is not None and value != 9223372036854771712:
            self.gauge('ecs.fargate.mem.limit', value, tags)

        # I/O metrics
        for blkio_cat, metric_name in iteritems(IO_METRICS):
            read_counter = write_counter = 0

            blkio_stats = container_stats.get("blkio_stats", {}).get(blkio_cat)
            # On Windows this is always "None" (string), so don't report anything
            if blkio_stats == 'None':
                continue
            elif blkio_stats is None:
                blkio_stats = []

            for blkio_stat in blkio_stats:
                if blkio_stat["op"] == "Read" and "value" in blkio_stat:
                    read_counter += blkio_stat["value"]
                elif blkio_stat["op"] == "Write" and "value" in blkio_stat:
                    write_counter += blkio_stat["value"]
            self.rate(metric_name + 'read', read_counter, tags)
            self.rate(metric_name + 'write', write_counter, tags)

        # Network metrics
        networks = container_stats.get('networks', {})
        for network_interface, network_stats in iteritems(networks):
            network_tags = tags + ["interface:{}".format(network_interface)]
            for field_name, metric_name in iteritems(NETWORK_GAUGE_METRICS):
                metric_value = network_stats.get(field_name)
                if metric_value is not None:
                    self.gauge(metric_name, metric_value, network_tags)
            for field_name, metric_name in iteritems(NETWORK_RATE_METRICS):
                metric_value = network_stats.get(field_name)
                if metric_value is not None:
                    self.rate(metric_name, metric_value, network_tags)

    except Exception as e:
        self.warning("Cannot retrieve metrics for %s: %s", container_id, e)
def get(self, obj_id):
    """
    ---
    description: Retrieve offset stars to aid in spectroscopy
    parameters:
    - in: path
      name: obj_id
      required: true
      schema:
        type: string
    - in: query
      name: facility
      nullable: true
      schema:
        type: string
        enum: [Keck, Shane, P200]
      description: Which facility to generate the starlist for
    - in: query
      name: how_many
      nullable: true
      schema:
        type: integer
        minimum: 0
        maximum: 10
      description: |
        Requested number of offset stars (set to zero to get starlist
        of just the source itself)
    - in: query
      name: obstime
      nullable: True
      schema:
        type: string
      description: |
        datetime of observation in isoformat (e.g. 2020-12-30T12:34:10)
    responses:
      200:
        content:
          application/json:
            schema:
              allOf:
                - $ref: '#/components/schemas/Success'
                - type: object
                  properties:
                    data:
                      type: object
                      properties:
                        facility:
                          type: string
                          enum: [Keck, Shane, P200]
                          description: Facility queried for starlist
                        starlist_str:
                          type: string
                          description: formatted starlist in facility format
                        starlist_info:
                          type: array
                          description: |
                            list of source and offset star information
                          items:
                            type: object
                            properties:
                              str:
                                type: string
                                description: single-line starlist format per object
                              ra:
                                type: number
                                format: float
                                description: object RA in degrees (J2000)
                              dec:
                                type: number
                                format: float
                                description: object DEC in degrees (J2000)
                              name:
                                type: string
                                description: object name
                              dras:
                                type: string
                                description: offset from object to source in RA
                              ddecs:
                                type: string
                                description: offset from object to source in DEC
                              mag:
                                type: number
                                format: float
                                description: |
                                  magnitude of object (from Gaia phot_rp_mean_mag)
                        ra:
                          type: number
                          format: float
                          description: source RA in degrees (J2000)
                        dec:
                          type: number
                          format: float
                          description: source DEC in degrees (J2000)
                        queries_issued:
                          type: integer
                          description: |
                            Number of times the catalog was queried to find noffsets
                        noffsets:
                          type: integer
                          description: |
                            Number of suitable offset stars found (may be less
                            than requested)
                        query:
                          type: string
                          description: SQL query submitted to Gaia
      400:
        content:
          application/json:
            schema: Error
    """
    source = Source.get_obj_if_owned_by(obj_id, self.current_user)
    if source is None:
        return self.error('Invalid source ID.')

    facility = self.get_query_argument('facility', 'Keck')
    how_many = self.get_query_argument('how_many', '3')
    obstime = self.get_query_argument(
        'obstime', datetime.datetime.utcnow().isoformat()
    )
    if not isinstance(isoparse(obstime), datetime.datetime):
        return self.error('obstime is not valid isoformat')

    if facility not in facility_parameters:
        return self.error('Invalid facility')

    radius_degrees = facility_parameters[facility]["radius_degrees"]
    mag_limit = facility_parameters[facility]["mag_limit"]
    min_sep_arcsec = facility_parameters[facility]["min_sep_arcsec"]
    mag_min = facility_parameters[facility]["mag_min"]

    try:
        how_many = int(how_many)
    except ValueError:
        # could not handle inputs
        return self.error('Invalid argument for `how_many`')

    try:
        starlist_info, query_string, queries_issued, noffsets = \
            get_nearby_offset_stars(
                source.ra, source.dec, obj_id,
                how_many=how_many,
                radius_degrees=radius_degrees,
                mag_limit=mag_limit,
                min_sep_arcsec=min_sep_arcsec,
                starlist_type=facility,
                mag_min=mag_min,
                obstime=obstime,
                allowed_queries=2
            )
    except ValueError:
        return self.error('Error while querying for nearby offset stars')

    starlist_str = "\n".join([x["str"].replace(" ", " ") for x in starlist_info])

    return self.success(
        data={
            'facility': facility,
            'starlist_str': starlist_str,
            'starlist_info': starlist_info,
            'ra': source.ra,
            'dec': source.dec,
            'noffsets': noffsets,
            'queries_issued': queries_issued,
            'query': query_string,
        }
    )
if (rec_info := soup.find('BililiveRecorderRecordInfo')):
    # a latest BiliRec recording
    self.rid = str(rec_info.get('roomid', ''))
    self.name = str(rec_info.get('name', ''))
    self.st = isoparse(rec_info.get('start_time'))
elif (m := re.match(BILIREC_FILENAME_PATTERN, path.stem)):
    # a legacy BiliRec recording
    self.rid = m['room_id']
    self.name = ''
    if (d := soup.find('d')):
        self.st = datetime.fromtimestamp(
            int((d0 := Danmaku(d)).millisec) / 1000.0 - float(d0.ts), TIME_ZONE)
    else:
        self.st = isoparse(f"{m['date']}-{m['time']}")
        # datetime.replace returns a new object, so assign the result back
        self.st = self.st.replace(tzinfo=TIME_ZONE)
elif (m := re.match(MATSURI_FILENAME_PATTERN, path.stem)):
    # downloaded from https://matsuri.icu
    self.rid = ''
    self.name = ''
    self.st = datetime.fromtimestamp(int(m['millisec']) / 1000, TIME_ZONE)
else:
    raise ValueError(f'Cannot parse the start time from \'{path}\'')

self.danmaku_lst = [
    [*map(partial(Danmaku, parent=self), soup.find_all(name='d'))],
    [*map(partial(Danmaku, parent=self), soup.find_all(name='sc'))],
    [*map(partial(Danmaku, parent=self), soup.find_all(name='gift'))],
    [*map(partial(Danmaku, parent=self), soup.find_all(name='guard'))],
]
def to_datetime(value):
    """Converts a string to a datetime."""
    if isinstance(value, int):
        return parser.parse(value)
    return parser.isoparse(value)
def last_release_date() -> datetime:
    response = requests.get(f"https://api.github.com/repos/{REPO_NAME}/releases/latest")
    return isoparse(response.json()["published_at"])
def show_votes(_community, _post_id):
    try:
        response = Posts_Store.hgetall(_post_id)
        if len(response) == 0:
            message = {
                'status': 404,
                'message': "Post Not Found"
            }
            resp = jsonify(message)
            resp.status_code = 404
        else:
            message = {
                'status': 200,
                'upvote': int(response[b'UpVotes']),
                'downvote': int(response[b'DownVotes']),
                'message': 'Post: ' + _post_id + ' votes reported'
            }
            time_posted = response[b"date_created"].decode("utf-8")
            Top.zadd("HOT", {_post_id: hot(int(response[b"UpVotes"]),
                                           int(response[b"DownVotes"]),
                                           parser.isoparse(time_posted))})
            resp = jsonify(message)
            resp.status_code = 200
    except redis.exceptions.RedisError:
        message = {
            'status': 400,
            'message': "Bad Request - Community or Post does not exist"
        }
        resp = jsonify(message)
        resp.status_code = 400
    return resp
def downvote_post(_community, _post_id):
    try:
        response = Posts_Store.hgetall(_post_id)
        if len(response) == 0:
            message = {
                'status': 404,
                'message': "Post Not Found"
            }
            resp = jsonify(message)
            resp.status_code = 404
        else:
            response[b"DownVotes"] = str(int(response[b"DownVotes"]) + 1).encode("utf-8")
            response[b"Total_Score"] = str(int(response[b"Total_Score"]) - 1).encode("utf-8")
            message = {
                'status': 200,
                'downvote': int(response[b"DownVotes"]),
                'message': 'Post: ' + _post_id + ' has been downvoted',
            }
            Posts_Store.hmset(_post_id, response)
            Top.zadd(_community, {_post_id: int(response[b"Total_Score"])})
            Top.zadd("All", {_post_id: int(response[b"Total_Score"])})
            time_posted = response[b"date_created"].decode("utf-8")
            Top.zadd("HOT", {_post_id: hot(int(response[b"UpVotes"]),
                                           int(response[b"DownVotes"]),
                                           parser.isoparse(time_posted))})
            resp = jsonify(message)
            resp.status_code = 200
    except redis.exceptions.RedisError:
        message = {
            'status': 400,
            'message': "Bad Request - Community or Post does not exist"
        }
        resp = jsonify(message)
        resp.status_code = 400
    return resp
def test_year_only(dt):
    dtstr = dt.strftime('%Y')
    assert isoparse(dtstr) == dt
def test_deposit_period_expires(cluster):
    """
    - proposal and partially deposit
    - wait for deposit period end and check
      - proposal deleted
      - no refund
    """
    amount1 = cluster.balance(cluster.address("community"))
    rsp = cluster.gov_propose(
        "community",
        "param-change",
        {
            "title": "Increase number of max validators",
            "description": "ditto",
            "changes": [{
                "subspace": "staking",
                "key": "MaxValidators",
                "value": 1,
            }],
            "deposit": "5000basecro",
        },
    )
    assert rsp["code"] == 0, rsp["raw_log"]

    ev = parse_events(rsp["logs"])["submit_proposal"]
    assert ev["proposal_type"] == "ParameterChange", rsp
    proposal_id = ev["proposal_id"]

    proposal = cluster.query_proposal(proposal_id)
    assert proposal["total_deposit"] == [{"denom": "basecro", "amount": "5000"}]
    assert cluster.balance(cluster.address("community")) == amount1 - 5000

    amount2 = cluster.balance(cluster.address("ecosystem"))
    rsp = cluster.gov_deposit("ecosystem", proposal["proposal_id"], "5000basecro")
    assert rsp["code"] == 0, rsp["raw_log"]

    proposal = cluster.query_proposal(proposal_id)
    assert proposal["total_deposit"] == [{"denom": "basecro", "amount": "10000"}]
    assert cluster.balance(cluster.address("ecosystem")) == amount2 - 5000

    # wait for deposit period to pass
    wait_for_block_time(
        cluster, isoparse(proposal["submit_time"]) + timedelta(seconds=10))

    # proposal deleted
    with pytest.raises(Exception):
        cluster.query_proposal(proposal_id)

    # deposits don't get refunded
    assert cluster.balance(cluster.address("community")) == amount1 - 5000
    assert cluster.balance(cluster.address("ecosystem")) == amount2 - 5000
def test_year_month_day(dt, fmt):
    dtstr = dt.strftime(fmt)
    assert isoparse(dtstr) == dt
def test_param_proposal(cluster, vote_option):
    """
    - send proposal to change max_validators
    - all validators vote the same option (None means don't vote)
    - check the result
    - check deposit refunded
    """
    max_validators = json.loads(
        cluster.raw("q", "staking", "params", output="json",
                    node=cluster.node_rpc(0)))["max_validators"]

    rsp = cluster.gov_propose(
        "community",
        "param-change",
        {
            "title": "Increase number of max validators",
            "description": "ditto",
            "changes": [{
                "subspace": "staking",
                "key": "MaxValidators",
                "value": max_validators + 1,
            }],
        },
    )
    assert rsp["code"] == 0, rsp["raw_log"]

    # get proposal_id
    ev = parse_events(rsp["logs"])["submit_proposal"]
    assert ev["proposal_type"] == "ParameterChange", rsp
    proposal_id = ev["proposal_id"]

    proposal = cluster.query_proposal(proposal_id)
    assert proposal["content"]["changes"] == [{
        "subspace": "staking",
        "key": "MaxValidators",
        "value": str(max_validators + 1),
    }], proposal
    assert proposal["status"] == "PROPOSAL_STATUS_DEPOSIT_PERIOD", proposal

    amount = cluster.balance(cluster.address("ecosystem"))
    rsp = cluster.gov_deposit("ecosystem", proposal_id, "1cro")
    assert rsp["code"] == 0, rsp["raw_log"]
    assert cluster.balance(cluster.address("ecosystem")) == amount - 100000000

    proposal = cluster.query_proposal(proposal_id)
    assert proposal["status"] == "PROPOSAL_STATUS_VOTING_PERIOD", proposal

    if vote_option is not None:
        rsp = cluster.gov_vote("validator", proposal_id, vote_option)
        assert rsp["code"] == 0, rsp["raw_log"]
        rsp = cluster.gov_vote("validator", proposal_id, vote_option, i=1)
        assert rsp["code"] == 0, rsp["raw_log"]
        assert (int(cluster.query_tally(proposal_id)[vote_option])
                == cluster.staking_pool()), "all voted"
    else:
        assert cluster.query_tally(proposal_id) == {
            "yes": "0",
            "no": "0",
            "abstain": "0",
            "no_with_veto": "0",
        }

    wait_for_block_time(cluster, isoparse(proposal["voting_end_time"]))

    proposal = cluster.query_proposal(proposal_id)
    if vote_option == "yes":
        assert proposal["status"] == "PROPOSAL_STATUS_PASSED", proposal
    else:
        assert proposal["status"] == "PROPOSAL_STATUS_REJECTED", proposal

    new_max_validators = json.loads(
        cluster.raw("q", "staking", "params", output="json",
                    node=cluster.node_rpc(0)))["max_validators"]
    if vote_option == "yes":
        assert new_max_validators == max_validators + 1
    else:
        assert new_max_validators == max_validators

    if vote_option in ("no_with_veto", None):
        # not refunded
        assert cluster.balance(cluster.address("ecosystem")) == amount - 100000000
    else:
        # refunded, no matter passed or rejected
        assert cluster.balance(cluster.address("ecosystem")) == amount
def block_time(self):
    return isoparse(self.status()["SyncInfo"]["latest_block_time"])
def schedule_reminder(self, reminder: dict) -> None:
    """A coroutine which sends the reminder once the time is reached,
    and cancels the running task."""
    reminder_datetime = isoparse(reminder['expiration']).replace(tzinfo=None)
    self.scheduler.schedule_at(reminder_datetime, reminder["id"],
                               self.send_reminder(reminder))
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
    d = src_dict.copy()
    inputs = CreationTextClassificationRecordInputs.from_dict(d.pop("inputs"))

    def _parse_id(data: Any) -> Union[Unset, int, str]:
        data = None if isinstance(data, Unset) else data
        id: Union[Unset, int, str]
        return cast(Union[Unset, int, str], data)

    id = _parse_id(d.pop("id", UNSET))

    metadata: Union[CreationTextClassificationRecordMetadata, Unset] = UNSET
    _metadata = d.pop("metadata", UNSET)
    if not isinstance(_metadata, Unset):
        metadata = CreationTextClassificationRecordMetadata.from_dict(_metadata)

    event_timestamp: Union[Unset, datetime.datetime] = UNSET
    _event_timestamp = d.pop("event_timestamp", UNSET)
    if not isinstance(_event_timestamp, Unset):
        event_timestamp = isoparse(_event_timestamp)

    status: Union[Unset, TaskStatus] = UNSET
    _status = d.pop("status", UNSET)
    if not isinstance(_status, Unset):
        status = TaskStatus(_status)

    prediction: Union[TextClassificationAnnotation, Unset] = UNSET
    _prediction = d.pop("prediction", UNSET)
    if not isinstance(_prediction, Unset):
        prediction = TextClassificationAnnotation.from_dict(_prediction)

    annotation: Union[TextClassificationAnnotation, Unset] = UNSET
    _annotation = d.pop("annotation", UNSET)
    if not isinstance(_annotation, Unset):
        annotation = TextClassificationAnnotation.from_dict(_annotation)

    multi_label = d.pop("multi_label", UNSET)

    explanation: Union[CreationTextClassificationRecordExplanation, Unset] = UNSET
    _explanation = d.pop("explanation", UNSET)
    if not isinstance(_explanation, Unset):
        explanation = CreationTextClassificationRecordExplanation.from_dict(_explanation)

    creation_text_classification_record = cls(
        inputs=inputs,
        id=id,
        metadata=metadata,
        event_timestamp=event_timestamp,
        status=status,
        prediction=prediction,
        annotation=annotation,
        multi_label=multi_label,
        explanation=explanation,
    )

    creation_text_classification_record.additional_properties = d
    return creation_text_classification_record
def __from_dict(self, values: dict):
    for prop in self.properties():
        if getattr(type(self), prop).fset is None:
            continue

        prop_value = values.get(
            prop,
            values.get(inflection.camelize(prop, uppercase_first_letter=False)))

        if prop_value is not None:
            if isinstance(prop_value, np.generic):
                prop_value = prop_value.item()

            additional_types = []
            prop_type = self.prop_type(prop, additional=additional_types)

            if prop_type is None:
                # This shouldn't happen
                setattr(self, prop, prop_value)
            elif issubclass(prop_type, dt.datetime):
                if isinstance(prop_value, int):
                    setattr(self, prop,
                            dt.datetime.fromtimestamp(prop_value / 1000).isoformat())
                else:
                    import re
                    matcher = re.search('\\.([0-9]*)Z$', prop_value)
                    if matcher:
                        sub_seconds = matcher.group(1)
                        if len(sub_seconds) > 6:
                            prop_value = re.sub(matcher.re,
                                                '.{}Z'.format(sub_seconds[:6]),
                                                prop_value)
                    try:
                        setattr(self, prop, isoparse(prop_value))
                    except ValueError:
                        if str in additional_types:
                            setattr(self, prop, prop_value)
            elif issubclass(prop_type, dt.date) and type(prop_value) is not dt.date:
                date_value = None
                if isinstance(prop_value, float):
                    # Assume it's an Excel date
                    if prop_value > 59:
                        prop_value -= 1  # Excel leap year bug, 1900 is not a leap year!
                    date_value = (dt.datetime(1899, 12, 31) +
                                  dt.timedelta(days=prop_value)).date()
                elif isinstance(prop_value, str):
                    for format in _valid_date_formats:
                        try:
                            date_value = dt.datetime.strptime(prop_value, format).date()
                            break
                        except ValueError:
                            pass
                setattr(self, prop, date_value or prop_value)
            elif issubclass(prop_type, float) and isinstance(prop_value, str):
                if prop_value.endswith('%'):
                    setattr(self, prop, float(prop_value[:-1]) / 100)
                else:
                    setattr(self, prop, float(prop_value))
            elif issubclass(prop_type, EnumBase):
                setattr(self, prop, get_enum_value(prop_type, prop_value))
            elif issubclass(prop_type, Base):
                if isinstance(prop_value, Base):
                    setattr(self, prop, prop_value)
                else:
                    setattr(self, prop, prop_type.from_dict(prop_value))
            elif issubclass(prop_type, (list, tuple)):
                item_type = self.prop_item_type(prop)
                if issubclass(item_type, Base):
                    item_values = tuple(
                        v if isinstance(v, (Base, EnumBase)) else item_type.from_dict(v)
                        for v in prop_value)
                elif issubclass(item_type, EnumBase):
                    item_values = tuple(get_enum_value(item_type, v) for v in prop_value)
                else:
                    item_values = tuple(prop_value)
                setattr(self, prop, item_values)
            else:
                setattr(self, prop, prop_value)
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(25, GPIO.RISING, callback=button_callback, bouncetime=2000)

while True:
    try:
        report = session.next()
        # print(report)

        # Only append to gps_log if the report includes time (and time differs
        # from the previous entry), lat, and lon. This prevents duplicate entries
        # for the same time, and entries where data is missing.
        if report['class'] == 'TPV':
            if hasattr(report, 'time') and report.time != prev_time:
                timestamp = isoparse(report.time)
                logging.debug(f'report time: {report.time}')
                logging.debug(f'prev time: {prev_time}')
                if hasattr(report, 'lat'):
                    lat = round(report.lat, 6)
                if hasattr(report, 'lon'):
                    lon = round(report.lon, 6)
                prev_time = report.time
                gps_log.append(GPSDatapoint(timestamp, lat, lon))
                logging.debug(
                    f'Appended! {[gps_log[-1].timestamp, gps_log[-1].lattitude, gps_log[-1].longitude]}'
                )

        if len(gps_log) > 1:
            # Before line is drawn, display GPS coords.
            if len(line_coords) < 2:
def test_isoparse_sep_none(datestr, sep):
    isostr = datestr + sep + '14:33:09'
    assert isoparse(isostr) == datetime(2014, 1, 1, 14, 33, 9)
def _query_args(self, query_id):
    if query_id == 1:
        return {'date': isoparse('1998-09-22')}  # 1998-12-01 - 70 days
    elif query_id == 3:
        return {'date': isoparse('1995-03-07')}
    elif query_id == 4:
        return {
            'begin_date': isoparse('1994-01-01'),
            'end_date': isoparse('1994-04-01')
        }
    elif query_id == 5:
        return {
            'begin_date': isoparse('1993-01-01'),
            'end_date': isoparse('1994-01-01')
        }
    elif query_id == 6:
        return {
            'begin_date': isoparse('1993-01-01'),
            'end_date': isoparse('1994-01-01')
        }
    elif query_id == 7 or query_id == 8:
        return {
            'begin_date': isoparse('1995-01-01'),
            'end_date': isoparse('1996-12-31')
        }
    elif query_id == 10:
        return {
            'begin_date': isoparse('1993-07-01'),
            'end_date': isoparse('1993-10-01')
        }
    elif query_id == 12:
        return {
            'begin_date': isoparse('1996-01-01'),
            'end_date': isoparse('1997-01-01')
        }
    elif query_id == 14:
        return {
            'begin_date': isoparse('1996-01-01'),
            'end_date': isoparse('1996-02-01')
        }
    elif query_id == 15:
        return {
            'begin_date': isoparse('1995-10-01'),
            'end_date': isoparse('1996-01-01')
        }
    elif query_id == 20:
        return {
            'begin_date': isoparse('1994-01-01'),
            'end_date': isoparse('1995-04-01')
        }
    else:
        return {}
def test_iso_ordinal(isoord, dt_expected):
    for fmt in ('{:04d}-{:03d}', '{:04d}{:03d}'):
        dtstr = fmt.format(*isoord)
        assert isoparse(dtstr) == dt_expected
def block_time(self, i=0):
    return isoparse(self.status(i)["sync_info"]["latest_block_time"])
def test_iso_raises(isostr, exception):
    with pytest.raises(exception):
        isoparse(isostr)
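For context, a small illustration (not one of the parametrized cases) of the failure mode this test asserts: isoparse raises ValueError on input that is not valid ISO 8601.

from dateutil.parser import isoparse

try:
    isoparse("2014-04-32")  # day 32 is out of range for April
except ValueError as exc:
    print("rejected:", exc)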
def parse_ical():
    global time001
    global toby
    global ical03
    time001 = int(time.time())
    toby = 100000000000
    ical00 = open("ical01.ics").read().split("\n")
    del ical00[0:6]
    ical01 = []
    ical02 = []
    ical03 = []
    for x0 in ical00:
        if search("DTEND|DTSTAMP|SEQUENCE|END:VCALENDAR|UID|END:VEVENT|BEGIN:VEVENT", x0):
            continue
        else:
            ical01.append(x0)
    for v0, y in enumerate(ical01):
        if search("DTSTART", y):
            ical02.append("|".join(ical01[v0:v0 + 3]))
            ical02[-1] = ical02[-1].split("|")
        else:
            continue
    for v1, z in enumerate(ical02):
        if (z[1] != "LOCATION:Online Delivery") \
                or (search("Weekly Tutorial", z[2]) is not None) \
                or (search("ELEC", z[2]) is not None):
            del ical02[v1]
            continue
        else:
            ical03.append(z)
    for v2, x1 in enumerate(ical03):
        for v3, y in enumerate(x1):
            ical03[v2][v3] = y[y.find(":") + 1:]
        if search("COMP", x1[2]):
            ical03[v2].append(x1[2][0:8])
        elif search("Foundations", x1[2]):
            ical03[v2].append("COMP1215")
        elif search("Programming", x1[2]):
            ical03[v2].append("COMP1202")
        elif search("Professional", x1[2]):
            ical03[v2].append("COMP1205")
        else:
            ical03[v2].append("")

    ## time zone convert
    ## add a custom event
    ical03.append(
        open("TESTLECTURE.csv", mode="r").read().split("\n")[1].split(","))

    global future
    future = []
    ## return all events in the future
    for v4, a in enumerate(ical03):
        a[0] = int(datetime.datetime.timestamp(parser.isoparse(a[0])))
        # if the time of the meeting minus now is > 0, it's in the future
        if int(a[0]) - int(time001) > 0:
            next_event = ical03[v4]
            future.append(a)
    future = sorted(future, key=sortbystring)
    future = future[0:5]
    # returns as [unix, online delivery, course desc, course code],
    # including test or custom lectures
    return future
draw.text((Column2, 274 + 1 * 18), twind, font=font16, fill=0)
draw.text((Column2, 274 + 2 * 18), tgust, font=font16, fill=0)
draw.text((Column2, 274 + 3 * 18), tdir, font=font16, fill=0)
draw.text((Column2, 274 + 4 * 18), ttemp, font=font16, fill=0)

# convert m/s -> knots
for x in range(6):
    ftime = timeseries[x]['time']
    wspeed = timeseries[x]['windSpeed10m']
    wspeed = wspeed * 1.943844
    gspeed = timeseries[x]['windGustSpeed10m']
    gspeed = gspeed * 1.943844
    wdir = timeseries[x]['windDirectionFrom10m']
    wtemp = timeseries[x]['screenTemperature']
    idate = parser.isoparse(ftime)
    itime = str("{:%H:%M}".format(idate))
    iwind = str("{:>2d}".format(int(round(wspeed))))
    igust = str("{:>2d}".format(int(round(gspeed))))
    idir = str("{:03d}".format(int(round(wdir))))
    itemp = str("{:>2d}".format(int(round(wtemp))))

    # note the 'width' for column calc
    dw, h = draw.textsize(itime, font=font16)
    ww, h = draw.textsize(iwind, font=font16)
    wg, h = draw.textsize(igust, font=font16)
    wd, h = draw.textsize(idir, font=font16)
    wt, h = draw.textsize(itemp, font=font16)

    draw.text((Column2 + 95 + x * 48 - dw, 274 + 0 * 18), itime,
def test_datetime_midnight(dt_str):
    assert isoparse(dt_str) == datetime(2014, 4, 11, 0, 0, 0, 0)
def getAllQueries(self, client=None, domain=None, date_from=None, date_to=None,
                  return_type='raw'):
    """
    This function allows querying the pihole DB. It can take client, domain or dates.
    Dates can come in one of the following formats:
        ISO formatted string
        an instance of datetime
        one of the shorthand strings listed above under 'known_time_ranges'

    The return type can either be returned as is (default) or formatted
    (return_type=array_dict) in order to make using the data easier.
    """
    if self.auth_data is None:
        print("Unable to get queries. Please authenticate")
        exit(1)

    url = "http://" + self.ip_address + "/admin/api_db.php?getAllQueries&auth=" + self.auth_data.token

    if client and domain:
        print("Cannot search for both client AND domain")
        exit(1)

    start = None
    until = None

    if isinstance(date_from, str):
        try:
            start = date_parser.isoparse(date_from)
        except Exception:
            if date_from in self.known_time_ranges and date_to is None:
                start, until = self.known_time_ranges[date_from]
    elif isinstance(date_from, dt.datetime):
        start = date_from

    if isinstance(date_to, str):
        try:
            until = date_parser.isoparse(date_to)
        except Exception:
            pass
    elif isinstance(date_to, dt.datetime):
        until = date_to

    if start is not None:
        url += "&from=" + str(start.timestamp())
    if until is not None:
        url += "&until=" + str(until.timestamp())

    if client:
        url += "&client=" + client
    if domain:
        url += "&domain=" + domain

    result = requests.get(url).json()
    if 'data' not in result:
        raise QueryException("Empty results returned: something is wrong with your query")

    if return_type == 'array_dict':
        data = [{
            'datetime': dt.datetime.fromtimestamp(item[0]),
            'type': item[1],
            'requested_domain': item[2],
            'client': item[3],
            'status': QueryActionType(item[4])
        } for item in result['data']]
    else:
        data = result['data']

    return data
def test_isoweek_day(isocal, dt_expected):
    # TODO: Figure out how to parametrize this on formats, too
    for fmt in ('{:04d}-W{:02d}-{:d}', '{:04d}W{:02d}{:d}'):
        dtstr = fmt.format(*isocal)
        assert isoparse(dtstr) == dt_expected
def sync_folder(self, url, subpath=None):
    folder_id = self.get_folder_id(url)

    project_dir = os.path.join(common.vcdir, "projects")
    # create if needed
    if not os.path.exists(project_dir):
        print("Creating:", project_dir)
        os.makedirs(project_dir)

    # Get shared folder details
    try:
        results = self.service.files().get(fileId=folder_id,
                                           supportsAllDrives=True,
                                           fields='*').execute()
    except:
        print("Unexpected error:", sys.exc_info())
        print("Folder not found, check path and check it is shared outside your organization")
        return False
    # print("results:", results)
    if "name" in results and not self.folder_name:
        self.folder_name = results["name"]
    else:
        self.folder_name = folder_id
    print("Found folder name:", self.folder_name)

    # Call the Drive v3 API
    results = self.service.files().list(
        q="'" + folder_id + "' in parents",
        pageSize=1000,
        fields="nextPageToken, files(id, name, mimeType, createdTime, modifiedTime, size, trashed)"
    ).execute()
    items = results.get('files', [])

    if not items:
        print('No files found.')
        return False

    # work_dir
    if subpath:
        work_dir = subpath
    else:
        work_dir = os.path.join(project_dir, folder_id)
    # create new folder if needed
    if not os.path.exists(work_dir):
        print("Creating:", work_dir)
        os.makedirs(work_dir)

    # find/remove existing files that no longer exist on the remote side
    remote_names = []
    for item in items:
        if not item['trashed']:
            name = self.fix_extension(item['name'], item['mimeType'])
            remote_names.append(name)
    for file in sorted(os.listdir(work_dir)):
        basename, ext = os.path.splitext(file)
        # protect some files
        if file == "cache" or file == "results":
            print("INFO: Preserving local work directory:", file)
        elif ext == ".lof":
            print("INFO: Preserving local .lof file:", file)
        elif ext == ".txt":
            # maybe I created a hints.txt locally that I'd like to preserve
            print("INFO: Preserving local file:", file)
        elif ext == ".aup" or (file.endswith("_data")
                               and os.path.isdir(os.path.join(work_dir, file))):
            print("INFO: Preserving audacity project:", file)
        elif file not in remote_names:
            trashed_file = os.path.join(work_dir, file)
            print("NOTICE: deleting local file:", trashed_file)
            os.unlink(trashed_file)

    # download / update folder items and recurse to subfolders
    for item in items:
        if item['trashed']:
            continue
        print(item)
        # dt = datetime.strptime(item['createdTime'], '%Y-%m-%dT%H:%M:%S.%fZ')
        # created = time.mktime(dt.timetuple())
        dt = parser.isoparse(item['createdTime'])
        created = dt.timestamp()
        # dt = datetime.strptime(item['modifiedTime'], '%Y-%m-%dT%H:%M:%S.%fZ')
        # modified = time.mktime(dt.timetuple())
        dt = parser.isoparse(item['modifiedTime'])
        modified = dt.timestamp()
        print(" ts:", created, modified)
        if item['mimeType'].endswith("folder"):
            # recurse folders
            newurl = "https://drive.google.com/drive/folders/" + item['id']
            newpath = os.path.join(work_dir, item['name'])
            self.sync_folder(newurl, newpath)
        elif item["mimeType"].endswith("shortcut"):
            print("Shortcut encountered, don't know how to deal with this file:",
                  item["name"])
        elif "google-apps." in item["mimeType"]:
            print("skipping google app file")
        else:
            # fetch file
            print("%s (%s) %.0f Kb" % (item["name"], item["mimeType"],
                                       int(item["size"]) / 1024))
            name = self.fix_extension(item['name'], item['mimeType'])
            dest_file = os.path.join(work_dir, name)
            if os.path.exists(dest_file):
                statinfo = os.stat(dest_file)
                mtime = statinfo.st_mtime
                # skip only when the local copy is at least as new and the same
                # size (the Drive API reports size as a string, so compare as int)
                if modified <= mtime and int(item['size']) == statinfo.st_size:
                    print(" Skipping, already downloaded")
                    continue
            print(" Downloading to:", dest_file)
            request = self.service.files().get_media(fileId=item['id'])
            fh = io.BytesIO()
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while done is False:
                try:
                    status, done = downloader.next_chunk()
                    print(" Download %d%%." % int(status.progress() * 100))
                except Exception as e:
                    print("Error downloading chunk:", str(e))
                    print("Sleeping 15 seconds and trying again")
                    time.sleep(15)
            print(" downloaded bytes:", len(fh.getvalue()))
            with open(dest_file, 'wb') as f:
                f.write(fh.getvalue())
            os.utime(dest_file, times=(created, modified))
    return True
def test_bytes(isostr, dt):
    assert isoparse(isostr) == dt
def get_UTC_time():
    dt_string = datetime.utcnow().replace(tzinfo=simple_utc()).isoformat()
    dt_string = str(parser.isoparse(dt_string))
    return dt_string
def test_iso_raises_failing(isostr, exception):
    # These are test cases where the current implementation is too lenient
    # and need to be fixed
    with pytest.raises(exception):
        isoparse(isostr)
for ev in cal.subcomponents:
    estart = ev.decoded('DTSTART')
    eend = ev.decoded('DTEND')
    esummary = ev.decoded('SUMMARY').decode('UTF-8')
    if estart.__class__ != datetime:
        continue
    yield Event(calendar=name, start=estart, end=eend, summary=esummary)


if __name__ == '__main__':
    LOCAL_TIMEZONE = datetime.now(timezone(timedelta(0))).astimezone().tzinfo
    config = configparser.ConfigParser()
    config.read(os.path.expanduser('~/') + '.config/nextcloud_cal.ini')

    if len(sys.argv) > 1:
        today = isoparse(sys.argv[1])
        tdelta = timedelta(days=1)
    else:
        today = date.today()
        tdelta = timedelta(days=int(config['DEFAULT']['time_delta']))

    cal_filter = '|'.join(config['DEFAULT']['cals'].split(','))

    # create client
    client = caldav.DAVClient(
        config['DEFAULT']['url'],
        proxy=None,
        username=config['DEFAULT']['user'],
        password=config['DEFAULT']['pwd'],
        auth=None,
        ssl_verify_cert=bool(config['DEFAULT']['ssl'] == 'True'))
    # create connection
def test_year_month(dt):
    fmt = '%Y-%m'
    dtstr = dt.strftime(fmt)
    assert isoparse(dtstr) == dt
def convert(self, value, param, ctx):
    try:
        return parser.isoparse(value)
    except ValueError:
        self.fail(f"Failed to parse date '{value}' as ISO formatted date")
def __init__(self, id: str, username: str, displayname: str, avatar_url: str,
             bio: str, last_seen: str):
    super().__init__(id, username, displayname, avatar_url)
    self.bio = bio
    self.last_seen = isoparse(last_seen)
def filter_time_range(messages, time_key, start_time, end_time):
    return [
        td for td in messages
        if start_time <= isoparse(td.get(time_key)) <= end_time
    ]
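A hedged usage sketch for filter_time_range above; the message list and the 'created_at' key are assumptions for illustration, not taken from the source. Note that start_time, end_time, and the parsed timestamps must agree on tz-awareness, or the chained comparison raises TypeError.

from dateutil.parser import isoparse

# Hypothetical messages; only the ISO timestamp under time_key matters.
messages = [
    {"created_at": "2021-05-01T09:00:00+00:00", "text": "early"},
    {"created_at": "2021-05-01T12:30:00+00:00", "text": "midday"},
    {"created_at": "2021-05-02T08:00:00+00:00", "text": "next day"},
]
start = isoparse("2021-05-01T00:00:00+00:00")
end = isoparse("2021-05-01T23:59:59+00:00")
print(filter_time_range(messages, "created_at", start, end))
# -> the two messages dated 2021-05-01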