def w3c_datetime_str(dt: _datetime = None, date_only: bool = False) -> str:
    """Format a date/time string according to the W3C datetime profile.

    :param dt: datetime to format; defaults to the current local time.
    :param date_only: when True, emit only the ``YYYY-MM-DD`` part.
    :return: W3C-formatted date/time string.
    """
    if not dt:
        dt = _datetime.now()
    if not dt.tzinfo:
        # Attach the local timezone so the %z offset is populated.
        dt = _pytz.timezone(_tzname[0]).localize(dt)
    fmt = '%Y-%m-%d' if date_only else '%Y-%m-%dT%H:%M:%S%z'
    return dt.strftime(fmt)
def format_time():
    """Print a fixed date and the current local time in two formats.

    Fixed: the original used Python 2 ``print`` statements, which are a
    SyntaxError under Python 3 (the rest of this file is Python 3).
    """
    drbd1 = DateTime(1941, 9, 9)
    now = time.localtime()
    print(drbd1.strftime('%m/%d/%y'))
    print(drbd1.strftime('%B %d, %Y'))
    print()
    print(time.strftime('%m/%d/%y', now))
    print(time.strftime('%B %d, %Y', now))
    print()
def timestamp(tn: datetime.datetime) -> str:
    """Create a properly formatted timestream timestamp.

    :param tn: datetime to format
    :return: underscore-separated ``YYYY_MM_DD_HH_MM_SS`` string
    """
    fmt = '%Y_%m_%d_%H_%M_%S'
    return tn.strftime(fmt)
def get_date_regex_string(dt: datetime.datetime) -> str:
    """Build a regex matching common textual renderings of *dt*'s date.

    Matches e.g. "13 Sep 2014", "Sep 13, 2014", "2014/09/13", with
    optional leading zeroes, ordinal suffixes (1st/2nd/...), abbreviated
    or full month names, and 2- or 4-digit years.

    :param dt: the date the regex should recognize
    :return: a single parenthesized alternation, word-boundary anchored
    """
    boundary = "\\b"   # word boundary (slash escaped)
    space = "\\s"      # whitespace, including newlines

    # Day: optional leading zeroes and optional ordinal suffix.
    day_re = f"0*{dt.day}(st|nd|rd|th)?"

    # Month: numeric (leading zeroes allowed) or name (3-letter prefix
    # with the remainder optional, e.g. Sep(tember)?).
    month_name = dt.strftime("%B")
    month_re = f"(0*{dt.month}|{month_name[:3]}({month_name[3:]})?)"

    # Year: a 4-digit year's century becomes optional, e.g. (19)?86.
    year_re = str(dt.year)
    if len(year_re) == 4:
        year_re = f"({year_re[:2]})?{year_re[2:]}"

    # Separator: one or more of whitespace, /, comma, hyphen.
    # The hyphen sits at the end of the class so it isn't a range.
    sep_re = f"[{space}/,-]+"

    orderings = [
        f"{day_re}{sep_re}{month_re}{sep_re}{year_re}",    # 13 Sep 2014
        f"{month_re}{sep_re}{day_re}{sep_re}{year_re}",    # Sep 13, 2014
        f"{year_re}{sep_re}{month_re}{sep_re}{day_re}",    # 2014/09/13
    ]
    return "(" + "|".join(boundary + alt + boundary for alt in orderings) + ")"
def agregators_per_month(self, date: datetime) -> json:
    """Fetch NBU monetary aggregates (M0..M3) for the month of *date*.

    Endpoint:
    https://bank.gov.ua/NBUStatService/v1/statdirectory/monetary?date=YYYYMM

    M0 = cash outside depository corporations; M1 = M0 + transferable
    deposits in national currency; M2 = M1 + FX transferable and other
    deposits; M3 (money supply) = M2 + securities other than shares.

    :param date: any date inside the month of interest (day forced to 01)
    :return: decoded JSON payload, or ``{'error': <status>}`` on non-200
    """

    def _date_object(obj: dict) -> dict:
        # json's object_hook runs for EVERY decoded object. Only translate
        # those that actually carry a 'dt' key: the original deleted 'dt'
        # unconditionally after a bare except, so any object without it
        # raised an uncaught KeyError.
        if 'dt' in obj:
            try:
                obj['date'] = datetime.strptime(obj['dt'], '%Y%m%d')
            except (TypeError, ValueError):
                pass  # malformed value: drop it, leave 'date' absent
            del obj['dt']
        return obj

    params = {'date': date.strftime('%Y%m01'), 'json': ''}
    url = self.url + 'monetary'
    logger.debug(f'requested url= {url}, params= {params}')
    resp = requests.get(url, params=params)
    if resp.status_code != 200:
        logger.error(f'server error= {resp.status_code}')
        return {'error': resp.status_code}
    logger.debug(f'resp.json(object_hook=_date_object)= {resp.json(object_hook=_date_object)}')
    return resp.json(object_hook=_date_object)
def name_file(lane: int, flowcell: str, sample: str, read: int,
              undetermined: bool=False, date: dt.datetime=None,
              index: str=None) -> str:
    """Name a FASTQ file following MIP conventions.

    :param lane: sequencing lane number
    :param flowcell: flowcell id; suffixed '-undetermined' when requested
    :param sample: sample id
    :param read: read number (1 or 2)
    :param undetermined: mark the file as undetermined reads
    :param date: run date; defaults to the literal '171015' when absent
    :param index: barcode index; defaults to 'XXXXXX' when absent
    """
    if undetermined:
        flowcell = f"{flowcell}-undetermined"
    date_str = '171015' if not date else date.strftime('%y%m%d')
    index = index if index else 'XXXXXX'
    return f"{lane}_{date_str}_{flowcell}_{sample}_{index}_{read}.fastq.gz"
def strftime(dt: datetime.datetime, format_str: str):
    """Custom strftime with manual fast paths.

    Checks for the most frequently used format strings (HHMM_FMT and
    FULL_DATE_FMT) and renders those by hand; any other format string is
    handed off to ``dt.strftime()``.

    Args:
        dt: a datetime object.
        format_str: a valid strftime format string.

    Returns:
        A string representation of dt as described by format_str.

    TODO: Test if this is any faster than calling dt.strftime. Since
    dt.strftime is a C function, it's PROBABLY NOT.
    """
    if format_str == HHMM_FMT:
        return "{:02d}:{:02d}".format(dt.hour, dt.minute)
    if format_str == FULL_DATE_FMT:
        return "{year:04d}-{mon:02d}-{day:02d} " \
            "{hour:02d}:{min:02d}:{sec:02d}".format(
                year=dt.year, mon=dt.month, day=dt.day,
                hour=dt.hour, min=dt.minute, sec=dt.second)
    return dt.strftime(format_str)
def fetch_posts(ch_monitor_id: int, day: datetime.datetime) -> dict:
    """Return a mock ch response to the posts end point.

    Generate the mock response by sending back data from a consistent but
    semirandom selection of ch-posts-2016-01-0[12345].json.
    """
    assert MOCK_TWEETS_PER_DAY <= MAX_MOCK_TWEETS_PER_DAY

    base_dir = mediawords.util.paths.mc_root_path() + '/mediacloud/test-data/ch/'
    json_path = base_dir + "ch-posts-" + day.strftime('%Y-%m-%d') + '.json'
    with open(json_path, 'r', encoding='utf-8') as fh:
        raw_json = fh.read()

    data = dict(decode_json(raw_json))
    assert 'posts' in data
    assert len(data['posts']) >= MOCK_TWEETS_PER_DAY
    data['posts'] = data['posts'][:MOCK_TWEETS_PER_DAY]

    # replace tweets with the epoch of the start date so that we can infer
    # the date of each tweet in tweet_urler_lookup below
    for i, ch_post in enumerate(data['posts']):
        ch_post['url'] = re.sub(r'status/(\d+)/', '/status/' + str(i), ch_post['url'])

    return data
def build_url(cls, symbol: str, begin_datetime: datetime, end_datetime: datetime,
              granularity: Granularity):
    """Build the Yahoo Finance charts-API URL for *symbol* over a range.

    :param symbol: ticker symbol
    :param begin_datetime: start of the requested range
    :param end_datetime: end of the requested range
    :param granularity: key into ``cls.GRANULARITY_RANGE_MAPPINGS``
    :return: fully formatted request URL

    Fixed: the original used ``strftime('%s')``, a non-portable glibc
    extension (it silently fails on Windows/BSD); epoch seconds are now
    computed with ``datetime.timestamp()``.
    """
    query_string = json.dumps(dict(s=symbol + '+Interactive'))
    return 'http://finance.yahoo.com/_td_charts_api/resource/' \
           'charts;comparisonTickers=;events=div%7Csplit%7Cearn;' \
           'gmtz=9;indicators=quote;period1={};period2={};' \
           'queryString=%7B%22s%22%3A%22{}%2BInteractive%22%7D;' \
           'range={};rangeSelected=undefined;ticker={};' \
           'useMock=false'.format(
               str(int(begin_datetime.timestamp())),
               str(int(end_datetime.timestamp())),
               urllib.parse.quote_plus(query_string),
               cls.GRANULARITY_RANGE_MAPPINGS[granularity]['str'],
               urllib.parse.quote_plus(symbol)
           )
def auction_results(date: datetime) -> dict:
    """Scrape the result of an NBU FX auction held on *date*.

    :param date: auction date
    :return: dict with time/source/currency/operation and the parsed
        amount/rate figures found on the auction details page
    """
    # The site expects dd.mm.YYYY plus a separate 'year' parameter.
    date = date.strftime('%d.%m.%Y')
    year = date.split('.')[2]
    url = 'https://www.bank.gov.ua/control/uk/auction/details'
    payload = {'date': date, 'year': year}
    responce_get = requests.get(url, headers=headers, params=payload)
    soup = BeautifulSoup(responce_get.text, "html.parser")
    document = {}
    # Numeric cells live in <td class="cell_c">; parse them as floats.
    get_float = lambda tag: float(tag.find('td', attrs={'class': 'cell_c'}).get_text(strip=True))
    document['time'] = datetime.strptime(date, '%d.%m.%Y')
    document['source'] = 'nbu_auction'
    # Rows of interest are identified by the HTML comment immediately
    # preceding each <tr>; the literals below are the known comment markers.
    for field in soup.body.table.find('table', attrs={'border': '0', 'width': '650px'}).find_all('tr'):
        if isinstance(field.td, type(None)):
            continue
        # 'Валюта аукціону' = auction currency; 'Долар США' = US dollar.
        if field.td.string == 'Валюта аукціону':
            if field.td.next_sibling.next_sibling.get_text(strip=True) == 'Долар США':
                document['currency'] = 'USD'
            else:
                document['currency'] = None
        elif type(field.next_element) == Comment:
            if field.next_element in [' 1 # 1.0.1 || 1.0.2 ', ' 1.0.1 || 1.0.2 ']:
                # 'КУПІВЛЯ' = buy, 'ПРОДАЖ' = sell.
                if field.td.get_text(strip=True) == 'КУПІВЛЯ':
                    document['operation'] = 'buy'
                elif field.td.get_text(strip=True) == 'ПРОДАЖ':
                    document['operation'] = 'sell'
            elif field.next_element in [' 2 # 1.1 ', ' 1.1 ', ' 3 ']:
                # Total volume of market participants' bids accepted for the
                # auction (millions of currency units).
                document['amount_requested'] = get_float(field)
            elif field.next_element in [' 6 # 1.2.1 ', ' 1.2.1 ']:
                # Hryvnia rates declared by auction participants
                # (UAH per 1 unit of currency) — maximum.
                document['rate_r_max'] = get_float(field)
            elif field.next_element in [' 7 # 1.2.2 ', ' 1.2.2 ']:
                document['rate_r_min'] = get_float(field)
            elif field.next_element in [' 9 # 1.3.1 ', ' 1.3.1 ']:
                document['rate_acc_med'] = get_float(field)
            elif field.next_element in [' 10 # 1.3.2 ', ' 1.3.2 ']:
                document['rate_acc_max'] = get_float(field)
            elif field.next_element in [' 11 # 1.3.3 ', ' 1.3.3 ']:
                document['rate_acc_min'] = get_float(field)
            elif field.next_element in [' 12 # 1.4 ', ' 1.4 ', ' 7 ']:
                # Total volume of satisfied bids (millions of currency units).
                document['amount_accepted_all'] = get_float(field)
            elif field.next_element == ' 13 - 1.5 || 1.6 ':
                # Share of bids satisfied at the auction's maximum rate in the
                # total volume of satisfied bids (%).
                document['amount_accepted_p_min_max'] = get_float(field)
    return document
def format_time(dt: datetime.datetime=None) -> str:
    """Serialize a (naive UTC) datetime as ``YYYY-MM-DDTHH:MM:SS.ffffffZ``.

    >>> dt = datetime.datetime.utcnow(); (dt.timestamp() - time.timezone) - parse_time(format_time(dt))
    0.0
    """
    target = dt if dt else datetime.datetime.utcnow()
    return target.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
def datetime_to_utc(dt: datetime):
    """Convert a given datetime to UTC time for timezone compatibility.

    A time-only value parses to the epoch date 1900-01-01; in that case the
    clock time is grafted onto today's date before conversion.
    """
    if dt.strftime(DiscordClient.DATE_FORMAT) == "1900-01-01":
        dt = datetime.combine(datetime.now().date(), dt.time())
    # Interpret the naive value as local time, then shift to UTC.
    localized = dt.replace(tzinfo=tz.tzlocal())
    return localized.astimezone(tz.tzutc())
def rfc822_datetime_str(dt: _datetime = None) -> str:
    """Format a date/time string according to RFC-822.

    :param dt: datetime to format; defaults to the current local time.
    :return: e.g. 'Mon, 02 Jan 2020 03:04:05 +0000'
    """
    if not dt:
        dt = _datetime.now()
    if not dt.tzinfo:
        # Localize naive datetimes so %z yields a real offset.
        dt = _pytz.timezone(_tzname[0]).localize(dt)
    rfc822_fmt = '%a, %d %b %Y %H:%M:%S %z'
    return dt.strftime(rfc822_fmt)
def set_date(self, dt: datetime.datetime):
    """Type *dt* (as MM/DD/YYYY) into the page's date input and commit it."""
    self.check_has_fetched()
    formatted = dt.strftime('%m/%d/%Y')
    field = self.date_input_element
    # Select-all then overwrite, and tab away twice so the widget's
    # change handlers fire; finally nudge the form to register the value.
    field.send_keys(Keys.LEFT_CONTROL, 'a')
    field.send_keys(formatted)
    field.send_keys(Keys.TAB)
    field.send_keys(Keys.TAB)
    self._form_nudge(self.date_input_id)
def query_stars_by_repo(repo_id: int, date_from: datetime, date_to: datetime):
    """Render the BigQuery SQL counting stars/forks per day for one repo.

    :param repo_id: GitHub repository id
    :param date_from: inclusive start month (used as a YYYYMM table suffix)
    :param date_to: inclusive end month
    :return: formatted #standardSQL query string
    """
    template = """
    #standardSQL
    SELECT
      COUNT(1) AS stars,
      EXTRACT(YEAR FROM created_at) AS y,
      EXTRACT(DAYOFYEAR FROM created_at) AS doy,
      EXTRACT(MONTH FROM created_at) AS mon
    FROM `githubarchive.month.*`
    WHERE (_TABLE_SUFFIX BETWEEN '{date_from}' AND '{date_to}')
      AND repo.id = {id}
      AND type IN ('WatchEvent', 'ForkEvent')
    GROUP BY y, mon, doy
    """
    return template.format(id=repo_id,
                           date_from=date_from.strftime('%Y%m'),
                           date_to=date_to.strftime('%Y%m'))
def format_time(d: datetime) -> str:
    """Format time relatively if necessary.

    A week or older: absolute date. Within a week: humanized day.
    Within a day but over a minute: humanized time. Otherwise a
    fixed "just now" phrase.
    """
    age: timedelta = datetime.now() - d
    if age.days >= 7:
        return d.strftime("%-d %B, %Y")
    if 0 < age.days < 7:
        return humanize.naturalday(d).capitalize()
    if age.seconds > 60:
        return humanize.naturaltime(d).capitalize()
    return "A few seconds ago"
def cal_open(self, widget, event, model=None, window=None):
    """Open a modal GTK date/time picker and store the chosen value.

    Builds a dialog with hour/minute spin buttons and a calendar,
    pre-seeds it from the widget's current value (or the local clock),
    and on OK formats the selection with DHM_FORMAT and shows it.
    """
    if self.readonly:
        common.message(_('This widget is readonly !'))
        return True
    win = gtk.Dialog(_('OpenERP - Date selection'), window,
                     gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,
                     (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK))
    # Hour/minute spinners in a horizontal row above the calendar.
    hbox = gtk.HBox()
    hbox.pack_start(gtk.Label(_('Hour:')), expand=False, fill=False)
    hour = gtk.SpinButton(gtk.Adjustment(0, 0, 23, 1, 5), 1, 0)
    hbox.pack_start(hour, expand=True, fill=True)
    hbox.pack_start(gtk.Label(_('Minute:')), expand=False, fill=False)
    minute = gtk.SpinButton(gtk.Adjustment(0, 0, 59, 1, 10), 1, 0)
    hbox.pack_start(minute, expand=True, fill=True)
    win.vbox.pack_start(hbox, expand=False, fill=True)
    cal = gtk.Calendar()
    cal.display_options(gtk.CALENDAR_SHOW_HEADING|gtk.CALENDAR_SHOW_DAY_NAMES|gtk.CALENDAR_SHOW_WEEK_NUMBERS)
    # Double-clicking a day acts like pressing OK.
    cal.connect('day-selected-double-click', lambda *x: win.response(gtk.RESPONSE_OK))
    win.vbox.pack_start(cal, expand=True, fill=True)
    win.show_all()
    try:
        val = self.get_value(model, timezone=False)
        if val:
            # Parse the stored string by fixed offsets
            # ('YYYY-MM-DD ... HH:MM' layout — minutes taken from the tail).
            hour.set_value(int(val[11:13]))
            minute.set_value(int(val[-5:-3]))
            cal.select_month(int(val[5:7])-1, int(val[0:4]))  # gtk months are 0-based
            cal.select_day(int(val[8:10]))
        else:
            # No stored value: default to the current local clock time.
            hour.set_value(time.localtime()[3])
            minute.set_value(time.localtime()[4])
    except ValueError:
        pass  # unparsable stored value: leave the dialog at its defaults
    response = win.run()
    if response == gtk.RESPONSE_OK:
        hr = int(hour.get_value())
        mi = int(minute.get_value())
        dt = cal.get_date()
        month = int(dt[1])+1  # back to 1-based months
        day = int(dt[2])
        date = DT(dt[0], month, day, hr, mi)
        try:
            value = date.strftime(DHM_FORMAT)
        except ValueError:
            # strftime rejects pre-1900 dates on some platforms.
            common.message(_('Invalid datetime value! Year must be greater than 1899 !'))
        else:
            self.show(value, timezone=False)
            self._focus_out()
    win.destroy()
def datetime_to_string(dt: datetime) -> str:
    """Convert a datetime object to a string.

    :param dt: datetime object
    :return: ``YYYY-MM-DDTHH:MM:SS.ffffff`` representation

    Note: make sure that the datetime object is in UTC!
    """
    iso_like_fmt = "%Y-%m-%dT%H:%M:%S.%f"
    return dt.strftime(iso_like_fmt)
def get_id(date: datetime.datetime) -> str:
    """Generate a Processing Block (PB) Instance ID.

    Args:
        date (datetime.datetime): UTC date of the PB

    Returns:
        str, Processing Block ID of the form ``PB-YYYYMMDD-sip-NNN``
        where NNN is a random zero-padded integer in [0, 100].
    """
    stamp = date.strftime('%Y%m%d')
    return 'PB-{}-{}-{:03d}'.format(stamp, 'sip', randint(0, 100))
def trades(self, market_id: str, start: datetime=None, end: datetime=None,
           page: int=None, limit: int=_c.ORDERS_LIMIT):
    """Fetch trades for a market, optionally bounded by [start, end].

    datetime bounds are serialized as YYYY-MM-DD; raw JSON is returned
    when ``self.return_json`` is set, otherwise a Trades model.
    """
    if isinstance(start, datetime):
        start = start.strftime('%Y-%m-%d')
    if isinstance(end, datetime):
        end = end.strftime('%Y-%m-%d')
    payload = {
        'market': str(market_id),
        'start': start,
        'end': end,
        'page': page,
        'limit': limit,
    }
    data = self.get('trades', params=payload)
    if self.return_json:
        return data
    return _m.Trades.create_from_json(data['data'], data.get('pagination'))
def to_iso8601(when: datetime=None) -> str:
    """Return a datetime as string in ISO-8601 format.

    If no time given, default to now (UTC); naive values are localized
    to UTC before formatting.

    :param when: datetime to serialize
    """
    if not when:
        when = datetime.now(pytz.utc)
    elif not when.tzinfo:
        when = pytz.utc.localize(when)
    return when.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
def get_abs_rel_time(self, now: datetime.datetime = None) \
        -> Tuple[str, float]:
    """
    Returns tuple:
        absolute time in ISO-8601 format
        relative time in seconds (elapsed since ``self.start_time``)
    """
    if now is None:
        now = get_now()
    elapsed = now - self.start_time
    return now.strftime(ISO8601_FMT), elapsed.total_seconds()
def generate_report_name(now: datetime.datetime=None, report_type: str='pdf',
                         date_format="%Y-%m-%d_%H-%M") -> str:
    """Generates report name based on the current date.

    :param now: timestamp to embed; defaults to UTC now
    :param report_type: file extension for the report
    :param date_format: strftime pattern for the embedded timestamp
    """
    when = datetime.datetime.utcnow() if now is None else now
    report_name = 'Books_report_for_{}.{}'.format(when.strftime(date_format),
                                                  report_type)
    logger.info("Report name generated: {}.".format(report_name))
    return report_name
def get_stock_dailybar(token,scode,s_datetime:datetime,e_datetime:datetime):
    """Fetch daily-bar history for a stock from the cninfo web API.

    :param token: access token. NOTE(review): immediately overwritten by a
        fresh ``gettoken`` call below, so the argument is effectively
        unused — confirm whether that is intended.
    :param scode: stock code
    :param s_datetime: start date (formatted YYYYMMDD)
    :param e_datetime: end date (formatted YYYYMMDD)
    :return: (True, DataFrame) on success (empty DataFrame when no
        records), (False, error message) otherwise
    """
    token = gettoken(client_id, client_secret)
    sdate = s_datetime.strftime('%Y%m%d')
    edate = e_datetime.strftime('%Y%m%d')
    # Stock daily-bar history API endpoint.
    url = "http://webapi.cninfo.com.cn/api/stock/p_stock2402"
    post_data = "scode=%s&sdate=%s&edate=%s&access_token=%s" % (scode,sdate,edate,token)
    post_data =post_data.encode()
    req = urllib.request.urlopen(url, post_data)
    content = req.read()
    responsedict = json.loads(content)
    resultcode = responsedict["resultcode"]
    print(responsedict["resultmsg"], responsedict["resultcode"])
    if responsedict["resultmsg"] == "success":
        if len(responsedict["records"]) >= 1:
            # gcf.print_list_nice(responsedict["records"])  # dump received records
            return True,DataFrame(responsedict["records"])
        else:
            return True,DataFrame()
    else:
        return False,responsedict["resultmsg"] + ',message code:' + responsedict["resultcode"]
def swaps_per_date(self, date: datetime, period: str) -> json:
    """Interbank rate index (UIIR) for a date/period from the NBU API.

    Endpoint:
    https://bank.gov.ua/NBUStatService/v1/statdirectory/uiir?period=...&date=YYYYMMDD
    Valid periods: OVERNIGHT / 1WEEK / 2WEEKS / 1MONTH / 3MONTHS
    (case-insensitive).

    :param date: date of interest
    :param period: period keyword, see above
    :return: decoded JSON with 'operdate' strings replaced by 'date' datetimes
    """

    def _date_object(obj: dict) -> dict:
        # The hook runs for every decoded JSON object; only translate those
        # that carry 'operdate'. The original deleted the key unconditionally
        # after a bare except, raising KeyError for objects without it.
        if 'operdate' in obj:
            try:
                obj['date'] = datetime.strptime(obj['operdate'], '%d.%m.%Y')
            except (TypeError, ValueError):
                pass  # malformed value: drop it, leave 'date' absent
            del obj['operdate']
        return obj

    params = {'period': period, 'date': date.strftime('%Y%m%d'), 'json': ''}
    return requests.get(self.url + 'uiir', params=params).json(object_hook=_date_object)
def auction_get_dates(year: datetime) -> set:
    """Scrape the NBU auction page for all auction dates in *year*.

    :param year: any datetime within the year of interest
    :return: set of datetime objects, one per listed auction date
    """
    year_str = year.strftime('%Y')
    url = 'https://www.bank.gov.ua/control/uk/auction/details'
    payload = {'year': year_str,
               }
    # Route through the configured proxy when one is in use.
    if proxy_is_used:
        responce_get = requests.get(url, headers=headers, timeout = 3, proxies=proxies, params=payload)
    else:
        responce_get = requests.get(url, headers=headers, params=payload)
    soup = BeautifulSoup(responce_get.text, "html.parser")
    # Every <option> of the date <select> is one auction date.
    options = soup.body.table.find('select',attrs={'name': 'date'}).find_all('option')
    return {datetime.strptime(opt['value'], '%d.%m.%Y') for opt in options}
def rate_currency_date(self, currency: str, date: datetime) -> dict:
    """Fetch the official NBU exchange rate for *currency* on *date*.

    :param currency: ISO currency code, e.g. 'USD'
    :param date: date of the rate
    :return: dict with currency/time/nbu_rate/source keys, or {} on error

    Fixed: typo in the error log message ('stattus_code').
    """
    params = {'valcode': currency, 'date': date.strftime('%Y%m%d'), 'json': ''}
    response = requests.get(self.url + 'exchange', params=params)
    if response.status_code != 200:
        logger.error('NBU site status_code= {}'.format(response.status_code))
        return {}
    try:
        payload = response.json()[0]
    except IndexError:
        # The API answers 200 with an empty list for unknown codes/dates.
        logger.error('JSON parsing error in NBU docs')
        return {}
    document = {
        'currency': payload['cc'],
        'time': datetime.strptime(payload['exchangedate'], '%d.%m.%Y'),
        'nbu_rate': payload['rate'],
        'source': 'nbu',
    }
    return document
def generate_sb(date: datetime.datetime, project: str,
                programme_block: str) -> dict:
    """Generate a Scheduling Block data object.

    Args:
        date (datetime.datetime): UTC date of the SBI
        project (str): Project Name
        programme_block (str): Programme

    Returns:
        dict with the SBI id (``SB-YYYYMMDD-<project>-NNNN``), project,
        and programme block.
    """
    stamp = date.strftime('%Y%m%d')
    sb_id = 'SB-{}-{}-{:04d}'.format(stamp, project, randint(0, 9999))
    return dict(id=sb_id, project=project, programme_block=programme_block)
def __init__(self, filename: str, lock_owner: str, lock_created: datetime) -> None:
    # Translated-message substitutions: short file name, lock owner,
    # and lock creation time formatted MM/DD/YYYY HH:MM:SS.
    values = [
        short_name(filename),
        lock_owner,
        lock_created.strftime("%m/%d/%Y %H:%M:%S"),
    ]
    # Warning-level notification for a Direct Edit attempt on a locked
    # file: a volatile bubble that is discarded once triggered.
    super().__init__(
        "DIRECT_EDIT_LOCKED",
        title=Translator.get("LOCKED", values),
        description=Translator.get("DIRECT_EDIT_LOCKED_FILE", values),
        level=Notification.LEVEL_WARNING,
        flags=(
            Notification.FLAG_VOLATILE
            | Notification.FLAG_BUBBLE
            | Notification.FLAG_DISCARD_ON_TRIGGER
            | Notification.FLAG_REMOVE_ON_DISCARD
        ),
    )
def mine(self, date: datetime):
    """
    If that date hasn't been scraped before, scrape it!

    Spins up a Scraper for *date*, collects job uuids from the day's
    summary page, scrapes and processes each job page, and records
    progress and any scraper warnings via ``self._rec``.
    """
    date_string = date.strftime('%d-%m-%Y')
    # Switch on the engine
    m = Scraper(date=date, session=self._session, server=self._server)
    # Been there, done that
    if date in self._miners:
        self._rec('{} has already been mined', date_string)
        m.close()
    else:
        # Go browse the web summary page for that day
        # and scrape off the job uuid request parameters.
        jobs = m.scrape_uuids()
        # I don't work on weekends
        if not jobs:
            self._rec('No jobs found for {}', date_string)
        else:
            for j in jobs:
                # Grab the job's web page, regex it and store
                # the collected fields in a sensible manner.
                # We don't pickle the data yet: instead, we
                # pickle multiple days at once before exit.
                soup = m._get_job(j)
                raw_data = m._scrape_job(soup)
                m.process_job(raw_data)
                # So wanna see results?
                pp = PrettyPrinter()
                pp.pprint(m.raw_data[0])  # Job details
                pp.pprint(m.raw_data[1])  # Price table
                [pp.pprint(d) for d in m.raw_data[2]]  # Addresses
            # We're never gonna scrape with a 100% success
            # rate, but let's do better next time!
            # TODO Hopefully remove this debug message later
            self._rec('Mined: {} successfully!', date_string)
            for message in m._warnings:
                self._rec(message)
def date_to_string(date: datetime) -> str:
    """Render *date* as 'MM.YYYY' (month and year only)."""
    month_year_fmt = '%m.%Y'
    return date.strftime(month_year_fmt)
def write_date_last_update_wallpaper(datetime_obj: dt.datetime):
    """Persist *datetime_obj* (formatted per DATE_FORMAT) as the
    wallpaper's last-update timestamp in the preferences file."""
    prefs = read_prefs()
    prefs[KEY_LAST_DATE_UPDATE_WALLPAPER] = datetime_obj.strftime(DATE_FORMAT)
    write_config(prefs)
def load_swarm_poynting_flux(dn0: datetime.datetime, sat_id):
    """Load a Swarm Poynting-flux .mat file into a DatasetUser.

    The file is located by globbing ``*<SAT>*<YYYYmmdd-HHMMSS>*.mat``
    under a fixed results directory; MATLAB datenums are converted to
    datetimes, and each field is registered as a dataset variable with
    plotting metadata.

    :param dn0: timestamp embedded in the target file name
    :param sat_id: satellite identifier (upper-cased for the glob)
    :return: populated DatasetUser
    """
    file_dir = pathlib.Path(
        '/home/lei/01-Work/01-Project/OY22-IonosphereElectrodynamics/Lei_20220707/results'
    )
    dstr = dn0.strftime('%Y%m%d-%H%M%S')
    # First match wins; IndexError if no file matches the pattern.
    file_path = list(file_dir.glob("*" + sat_id.upper() + "*" + dstr + '*.mat'))[0]
    matdata = sio.loadmat(file_path)
    ds = DatasetUser(visual='on')
    # Shared coordinate mapping for the time-dependent variables below.
    depend_0 = {
        'UT': 'SC_DATETIME',
        'GEO_LAT': 'SC_GEO_LAT',
        'GEO_LON': 'SC_GEO_LON',
        'AACGM_LAT': 'SC_AACGM_LAT',
        'AACGM_LON': 'SC_AACGM_LON',
        'AACGM_MLT': 'SC_AACGM_MLT'
    }
    var_name = 'SC_DATETIME'
    var_value: np.ndarray = matdata['tl']
    ntl = var_value.shape[0]
    # MATLAB datenum -> datetime: datenum 719529 corresponds to 1970-01-01.
    var_value = pd.to_datetime(var_value.flatten() - 719529, unit='D').to_numpy()
    var_value = [
        datetime.datetime.utcfromtimestamp(
            ((var_value[i] - np.datetime64('1970-01-01T00:00:00'))
             / np.timedelta64(1, 's'))) for i in range(ntl)
    ]
    var_value = np.array(var_value, dtype=datetime.datetime).reshape((ntl, 1))
    ut = var_value.flatten()
    var = ds.add_variable(var_name, value=var_value)
    var.visual.plot_config.style = '1P'

    var_name = 'SC_GEO_LAT'
    var_value: np.ndarray = matdata['glat']
    glat = var_value.flatten()
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.visual.plot_config.style = '1P'

    var_name = 'SC_GEO_LON'
    var_value: np.ndarray = matdata['glon']
    var_value = var_value.reshape((ntl, 1))
    glon = var_value.flatten()
    var = ds.add_variable(var_name, value=var_value)
    var.visual.plot_config.style = '1P'

    var_name = 'SC_GEO_R'
    var_value: np.ndarray = matdata['gR']
    var_value = var_value.reshape((ntl, 1))
    r = var_value.flatten()
    var = ds.add_variable(var_name, value=var_value)
    var.visual.plot_config.style = '1P'

    # Field-aligned Poynting flux (vertical component).
    var_name = 'S_FA_V'
    # NOTE: annotation fixed from the original's typo ``np.np.ndarray``.
    var_value: np.ndarray = matdata['Pvpara']
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.depends[0] = depend_0
    var.visual.plot_config.style = '1P'
    var.visual.axis[1].label = 'S'
    var.visual.axis[1].unit = r'W$\cdot$m$^{-3}$'
    var.visual.axis[2].label = r'S$^V$'

    # Field-aligned Poynting flux (horizontal component).
    var_name = 'S_FA_H'
    var_value: np.ndarray = matdata['Phpara']
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.depends[0] = depend_0
    var.visual.plot_config.style = '1P'
    var.visual.axis[1].label = 'S'
    var.visual.axis[1].unit = r'W$\cdot$m$^{-3}$'
    var.visual.axis[2].label = r'S$^H$'

    # Magnetic field perturbation components (nT).
    var_name = 'd_B_x'
    var_value: np.ndarray = matdata['Bx']
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.depends[0] = depend_0
    var.visual.plot_config.style = '1P'
    var.visual.axis[1].label = 'B'
    var.visual.axis[1].unit = 'nT'
    var.visual.axis[2].label = r'$\delta B_x$'

    var_name = 'd_B_y'
    var_value: np.ndarray = matdata['By']
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.depends[0] = depend_0
    var.visual.plot_config.style = '1P'
    var.visual.axis[2].label = r'$\delta B_y$'

    var_name = 'd_B_z'
    var_value: np.ndarray = matdata['Bz']
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.depends[0] = depend_0
    var.visual.plot_config.style = '1P'
    var.visual.axis[2].label = r'$\delta B_z$'

    # Data-quality and calibration flags.
    var_name = 'Q_FLAG'
    var_value: np.ndarray = matdata['tmpQ']
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.depends[0] = depend_0
    var.visual.plot_config.style = '1P'
    var.visual.axis[1].label = 'FLAG'
    var.visual.axis[2].label = r'Quality=1 OK'

    var_name = 'CALIB_FLAG'
    var_value: np.ndarray = matdata['tmpC']
    var_value = var_value.reshape((ntl, 1))
    var = ds.add_variable(var_name, value=var_value)
    var.depends[0] = depend_0
    var.visual.plot_config.style = '1P'
    var.visual.axis[1].label = 'FLAG'
    var.visual.axis[2].label = r'Calib=0 OK'

    # Geographic spherical -> AACGM (with MLT) along the spacecraft track;
    # 6371.2 km is the reference Earth radius used for normalization.
    cs = GEOCSpherical(coords={
        'lat': glat,
        'lon': glon,
        'r': r / 6371.2
    }, ut=ut)
    cs_new = cs.to_AACGM(append_mlt=True)
    var = ds.add_variable('SC_AACGM_LAT', value=cs_new['lat'])
    var = ds.add_variable('SC_AACGM_LON', value=cs_new['lon'])
    var = ds.add_variable('SC_AACGM_MLT',
                          value=cs_new['mlt'])
    var = ds.add_variable('SC_GEO_ALT', value=ds['SC_GEO_R'].value - 6371.2)
    return ds
def python_localized_datetime_to_human_iso(value: datetime.datetime) -> str:
    """Format a tz-aware datetime as ISO-8601 with a colon in the offset.

    strftime's %z yields '+HHMM'; index 29 is the fixed length of
    '%Y-%m-%dT%H:%M:%S.%f' plus sign and hours, so splicing a ':' there
    turns the suffix into '+HH:MM'.
    """
    raw = value.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
    return "{}:{}".format(raw[:29], raw[29:])
def test(dt: datetime): """ Log the datetime.""" print(dt, dt.astimezone()) print(dt.strftime('%Z%z'), dt.astimezone().strftime('%Z%z')) return dt
def strip_milliseconds(date: datetime_type) -> str:
    """Format *date* to whole seconds ('YYYY-MM-DD HH:MM:SS'),
    discarding any sub-second component."""
    seconds_fmt = "%Y-%m-%d %H:%M:%S"
    return date.strftime(seconds_fmt)
def convert_dttm(cls, target_type: str, dttm: datetime) -> str:
    """Render *dttm* as a SQL literal for the given MySQL target type.

    DATETIME/DATE columns get a STR_TO_DATE() wrapper so the value is a
    proper temporal type server-side; anything else becomes a plain
    quoted string.
    """
    literal = dttm.strftime("%Y-%m-%d %H:%M:%S")
    if target_type.upper() in ("DATETIME", "DATE"):
        return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format(literal)
    return "'{}'".format(literal)
def datetime_to_str(value: datetime.datetime) -> str:
    """Serialize *value* using the module-wide DATETIME_STR_FORMAT."""
    formatted = value.strftime(DATETIME_STR_FORMAT)
    return formatted
def datetime_str(dt: datetime) -> str:
    """Return *dt* as UTC ISO-8601 with microseconds and a trailing 'Z'.

    Aware datetimes are converted to UTC; naive ones are assumed UTC.
    """
    utc_dt = dt.astimezone(pytz.utc) if dt.tzinfo else pytz.utc.localize(dt)
    return utc_dt.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
def archive_table(_table, _archive_datetime: datetime):
    """Return the backtick-quoted archive table name: `<table>_<YYYYMM>`.

    :param _table: object exposing the base name via ``.table``
    :param _archive_datetime: month used as the archive suffix
    """
    suffix = _archive_datetime.strftime('%Y%m')
    return '`{}_{}`'.format(_table.table, suffix)
def datetime_to_rfc3339_string(value: datetime.datetime):
    """Format *value* per RFC3339_FORMAT after normalizing to naive UTC."""
    if value.tzinfo is not None:
        # Shift by the UTC offset and drop tzinfo so strftime emits no offset.
        value = value.replace(tzinfo=None) - value.utcoffset()
    return value.strftime(RFC3339_FORMAT)
def asorgtime(t: datetime) -> str:
    """Render only the clock time, org-mode style: 'HH:MM'."""
    clock_fmt = "%H:%M"
    return t.strftime(clock_fmt)
def get_date_time_str(date_time: DT.datetime) -> str:
    """Serialize *date_time* using the module-wide DATE_TIME_FORMAT."""
    formatted = date_time.strftime(DATE_TIME_FORMAT)
    return formatted
def get_push_events_commits_filename(date: datetime, file_format='parquet'):
    """Filename for one hour of push-event commits: 'YYYY-MM-DD-H.<fmt>'.

    Note the hour comes from ``date.hour`` and is NOT zero-padded.
    """
    pattern = f"%Y-%m-%d-{date.hour}.{file_format}"
    return date.strftime(pattern)
def insert_new_commit(self, commit_hash: str, commit_date: datetime):
    """Insert a (hash, formatted date) row via the generic row inserter."""
    formatted_date = commit_date.strftime(
        CommitTableHandler.__DATE_TO_STR_FORMAT)
    return self._insert_new_row(commit_hash=commit_hash,
                                commit_date=formatted_date)
def format_time(ts: datetime) -> str:
    """Render *ts* as 'YYYY-MM-DD HH:MM:SS'."""
    full_fmt = '%Y-%m-%d %H:%M:%S'
    return ts.strftime(full_fmt)
def format_datetime(date: datetime, /) -> str:
    """Format *date* with the locale pattern supplied by the Translator."""
    pattern = Translator.get("DATETIME_FORMAT")
    return date.strftime(pattern)
def arquivo_date(dt: datetime):
    """Compact Arquivo timestamp: 'YYYYMMDDHHMMSS'."""
    compact_fmt = "%Y%m%d%H%M%S"
    return dt.strftime(compact_fmt)
def _get_hourly_push_events_commits_path(self, date: datetime) -> str: return date.strftime( f"{self._github_events_commits_base}/%Y/%m/%d/%Y-%m-%d-{date.hour}.parquet" )
def date_to_russian_format(date: datetime) -> str:
    """Render *date* as 'DD.MM.YYYY' (Russian convention)."""
    russian_fmt = "%d.%m.%Y"
    return date.strftime(russian_fmt)
def to_datetime(v: datetime):
    """Format *v* with the project-wide DATETIME_FORMAT; empty string for falsy input."""
    if not v:
        return ""
    return v.strftime(settings.DATETIME_FORMAT)
def write_summary_html(filename : Path2,
                       options : Sequence[Tuple[str, str]],
                       unparsed_args : List[str],
                       cur_commit : str, cur_date : datetime.datetime,
                       weights_hash: str,
                       individual_stats : List[ReportStats],
                       combined_stats : ReportStats) -> None:
    """Write the top-level HTML summary page for a Proverbot report.

    Renders overall proof-completion stats, the run options, and a
    per-file table (sorted by descending proof count) that links to the
    per-file detail pages, then writes the document to *filename*.
    """
    def report_header(tag : Any, doc : Doc, text : Text) -> None:
        # Shared <head> boilerplate (css/js/title) for report pages.
        html_header(tag, doc, text,index_css, index_js, "Proverbot Report")
    doc, tag, text, line = Doc().ttl()
    with tag('html'):
        report_header(tag, doc, text)
        with tag('body'):
            with tag('h4'):
                text("{} files processed".format(len(individual_stats)))
            with tag('h5'):
                text("Commit: {}".format(cur_commit))
            with tag('h5'):
                text("Run on {}".format(cur_date.strftime("%Y-%m-%d %H:%M:%S.%f")))
            with tag('img', ('src', 'logo.png'), ('id', 'logo')):
                pass
            with tag('h2'):
                text("Proofs Completed: {}% ({}/{})"
                     .format(stringified_percent(combined_stats.num_proofs_completed,
                                                 combined_stats.num_proofs),
                             combined_stats.num_proofs_completed,
                             combined_stats.num_proofs))
            # Run options as a bullet list; skip 'filenames' and empty values.
            with tag('ul'):
                for k, v in options:
                    if k == 'filenames':
                        continue
                    elif not v:
                        continue
                    with tag('li'):
                        text("{}: {}".format(k, v))
            with tag('table'):
                with tag('tr', klass="header"):
                    line('th', 'Filename')
                    line('th', 'Number of Proofs in File')
                    line('th', '% Proofs Completed')
                    line('th', '% Proofs Incomplete')
                    line('th', '% Proofs Failed')
                    line('th', 'Details')
                # Largest files first; files with zero proofs are omitted.
                sorted_rows = sorted(individual_stats,
                                     key=lambda fresult:fresult.num_proofs,
                                     reverse=True)
                for fresult in sorted_rows:
                    if fresult.num_proofs == 0:
                        continue
                    with tag('tr'):
                        line('td', fresult.filename)
                        line('td', str(fresult.num_proofs))
                        line('td', stringified_percent(fresult.num_proofs_completed,
                                                       fresult.num_proofs))
                        # Incomplete = neither completed nor failed.
                        line('td', stringified_percent(fresult.num_proofs -
                                                       (fresult.num_proofs_completed +
                                                        fresult.num_proofs_failed),
                                                       fresult.num_proofs))
                        line('td', stringified_percent(fresult.num_proofs_failed,
                                                       fresult.num_proofs))
                        with tag('td'):
                            with tag('a', href=escape_filename(fresult.filename) + ".html"):
                                text("Details")
                # Aggregate "Total" row mirroring the per-file columns.
                with tag('tr'):
                    line('td', "Total")
                    line('td', str(combined_stats.num_proofs))
                    line('td', stringified_percent(combined_stats.num_proofs_completed,
                                                   combined_stats.num_proofs))
                    line('td', stringified_percent(combined_stats.num_proofs -
                                                   (combined_stats.num_proofs_completed +
                                                    combined_stats.num_proofs_failed),
                                                   combined_stats.num_proofs))
                    line('td', stringified_percent(combined_stats.num_proofs_failed,
                                                   combined_stats.num_proofs))
            # Provenance footer: training args, report invocation, weights.
            text(f'Trained as: {unparsed_args}')
            doc.stag('br')
            text(f"Reported as: {sys.argv}")
            doc.stag('br')
            text(f"Weights hash: {weights_hash}")
    with filename.open("w") as fout:
        fout.write(doc.getvalue())
def python_utc_datetime_to_sqlite_strftime_string(
        value: datetime.datetime) -> str:
    """Format *value* as SQLite strftime text with millisecond precision.

    Rounds microseconds to the nearest millisecond and carries overflow
    into the seconds field. Fixed: the original emitted an invalid
    '.1000' suffix (with no second carry) for microseconds >= 999500.

    :param value: datetime (assumed UTC) to format
    :return: 'YYYY-MM-DD HH:MM:SS.mmm'
    """
    millis = round(value.microsecond / 1000)
    if millis == 1000:
        # The millisecond rounded up past the second boundary: carry it.
        value += datetime.timedelta(seconds=1)
        millis = 0
    return value.strftime("%Y-%m-%d %H:%M:%S") + "." + str(millis).zfill(3)
def _fetch_air_quality_routine(self, day: datetime):
    """Populate the air quality of the provinces.

    Data is fetched from
    http://www.arpa.umbria.it/monitoraggi/aria/Default.aspx by replaying
    the ASP.NET postback (VIEWSTATE etc. captured from the live page),
    then scraping the two result tables and averaging each pollutant
    per province.

    :param day: The day of which the air quality wants to be known
        (instance of `~datetime`)
    """
    super()._fetch_air_quality_routine(day)
    date_fmt = day.strftime('%d/%m/%Y')
    # ASP.NET postback payload; the opaque fields below were captured from
    # the live page and are required for the server to accept the request.
    data = {
        '__EVENTTARGET': 'ctl00$Content$txtData',
        '__EVENTARGUMENT': '',
        '__LASTFOCUS': '',
        '__VIEWSTATE': '/wEPDwUKMTUzNjEyNDUzNw9kFgJmD2QWAgIBD2QWAmYPZBYEAgsPZBYEAgEPFgIeC18hSXRlbUNvdW50AgMWBmYPZBYEAgEPDxYCHgdWaXNpYmxlaGQWAmYPFQEIMDkvMDQvMThkAgIPFQEZJm5ic3A7PC9wPg0KPHA+Jm5ic3A7PC9wPmQCAQ9kFgQCAQ9kFgJmDxUBCDA1LzA1LzE5ZAICDxUBwgFOZWxsYSBnaW9ybmF0YSBvZGllcm5hIGNpIHNvbm8gc3RhdGUgZGVsbGUgZGlmZmljb2x0JmFncmF2ZTsgdGVjbmljaGUgaW4gbWVyaXRvIGFsbGEgcHViYmxpY2F6aW9uZSBhdXRvbWF0aWNhIGRlaSBkYXRpIGRpIHNhYmF0byA0LiBMJ2luY29udmVuaWVudGUgdmVyciZhZ3JhdmU7IHJpc29sdG8gYWwgcGkmdWdyYXZlOyBwcmVzdG8uPC9wPmQCAg9kFgQCAQ9kFgJmDxUBCDE5LzAyLzE5ZAICDxUBhwM8c3Ryb25nPk1hbnV0ZW56aW9uZSBzdHJ1bWVudGF6aW9uZSAyMDE5PC9zdHJvbmc+PGJyIC8+RGFsIDE4IGZlYmJyYWlvIGFsIHByaW1vIG1hcnpvIHNvbm8gcHJldmlzdGUgbGUgb3BlcmF6aW9uaSBkaSBtYW51dGVuemlvbmUgcGVyaW9kaWNoZSAoYW5udWFsaSkgZGVsbGEgc3RydW1lbnRhemlvbmUgaW5zdGFsbGF0YSBuZWxsYSByZXRlIGRpIG1vbml0b3JhZ2dpby4gUGVyIHF1ZXN0byBtb3Rpdm8gcG90cmViYmVybyB2ZXJpZmljYXJzaSBkZWxsZSBpbnRlcnJ1emlvbmkgbmVsIHJpbGV2YW1lbnRvIGRlaSBkYXRpIHJlbGF0aXZpIGFnbGkgc3RydW1lbnRpIGluIG1hbnV0ZW56aW9uZS4mbmJzcDs8L3A+DQo8cD4mbmJzcDs8L3A+DQo8cD4mbmJzcDs8L3A+DQo8cD4mbmJzcDs8L3A+ZAIDDw8WBB4LUG9zdEJhY2tVcmwFK2FyY2hpdmlvTm90aXppZS5hc3B4P2NvZGljZVBhZ2luYT1SUk0mem9uYT0fAWdkZAIPD2QWAmYPZBYCAgEPEA8WBh4NRGF0YVRleHRGaWVsZAUETm9tZR4ORGF0YVZhbHVlRmllbGQFAklkHgtfIURhdGFCb3VuZGdkEBUPGVBlcnVnaWEgLSBQYXJjbyBDb3J0b25lc2UcUGVydWdpYSAtIFBvbnRlIFNhbiBHaW92YW5uaRRQZXJ1Z2lhIC0gRm9udGl2ZWdnZSBDaXR0w6AgZGkgQ2FzdGVsbG8gLSBDLiBDYXN0ZWxsbxpHdWJiaW8gLSBQaWF6emEgNDAgTWFydGlyaRFNYWdpb25lIC0gTWFnaW9uZRZGb2xpZ25vIC0gUG9ydGEgUm9tYW5hEFRvcmdpYW5vIC0gQnJ1ZmEZU3BvbGV0byAtIFBpYXp6YSBWaXR0b3JpYRJUZXJuaSAtIEJvcmdvIFJpdm8PVGVybmkgLSBDYXJyYXJhEVRlcm5pIC0gTGUgR3JhemllD0FtZWxpYSAtIEFtZWxpYRNOYXJuaSAtIE5hcm5pIFNjYWxvE09ydmlldG8gLSBDaWNvbmlhIDIVDwMzXzEDM18yBDNfNjkDM183AzNfMwMzXzYDM180AzNfNQUzXzIwNQM3XzEDN18yAzdfMwM3XzUDN180AzdfNhQrAw9nZ2dnZ2dnZ2dnZ2dnZ2dkZGT1g28Bzs2KuJM0nGhoW/nLrR4W/HpnjtjYCY1FCtl6eA==',
        '__VIEWSTATEGENERATOR': 'A373F38E',
        '__PREVIOUSPAGE': '5rDzdOLdhSojgNkWU0aySKgUcCP-WXzqaXaRNPbAb-Ekcs1vVl_yJf9liwnKWXEk15jl_Z8YIAJ86zswapmkHfDz2MMg9vQnDDQypfObingUmLuVVTMztw73FN9-55lI0',
        '__EVENTVALIDATION': '/wEdABshO2HSLC4Irl9HO+xCVg8wb8C3weGBaOLrENr46Y99cTPW5fmNeTa451MZa8LXyblcbg/Uqmez9yXP+xSTfXC/S9OqRU0oWDv+cbRkqcKtAqcsJFHEnZTzh0X+kVeLa7e4rr9jBld/uVqJpfp464tKRYmvyX4i1bjLFIfxIkw0G+o0YQNlnq4u76x5pwotKnDgEO4xErwMzPYvPwScdqOGIUgWeFC3y966dlr8RsY+JYzWFz2lgCufNhmaoE94Y/QiRS7TDGhtA/xOb3OYxEB522qpZQfWwl21Nv1xVarGgMm6hUuJGOA6Q4Ko1E4M+sQ9CZ53jxit2DF58lu5QFtr6x1PlqI+jgkEbNYTNUujYRbbFs2N4TjG5zEZ4xduFBkrD27kcj09V7bJX/igStyEnNJs5SuXPSKM2cTNsffB6XcH17ma9zwqai6CNsf9Og0ZPzjdX2zFoASErgXLJvie8NzsH8t7duXHZk9hbS9Vs21a/4yX1BpSDSioiW1gxr+tUHjFeS1m0yjnOD9kwBYX4jCmBywb7GNFZX8+9J5ux+74SyM4niEhJdJF38T+LG4OdFP/T/wCCiwNou/IvjveW95PGaK16TIOdZz/XYSt3Q==',
        'ctl00$Content$txtData': date_fmt,
        'ctl00$Content$Grafico1$cboStazioni': '3_1',
        'ctl00$Content$Grafico1$cboInquinante': 'SO224H'
    }
    res = requests.post(
        'http://www.arpa.umbria.it/monitoraggi/aria/Default.aspx',
        data=data)
    if res.status_code == 200:
        soup = BeautifulSoup(res.text, 'html.parser')
        # Main stations table ...
        html_table = soup.select_one('#ctl00_Content_TabellaDati')
        extractor = Extractor(html_table)
        extractor.parse()
        table = extractor.return_list()[2:]  # skip the two header rows
        # ... plus the "other stations" table, appended to the same list.
        html_table = soup.select_one(
            '#ctl00_Content_TabellaDatiAltreStazioni')
        extractor = Extractor(html_table)
        extractor.parse()
        table.extend(extractor.return_list()[2:])
        for province in self.provinces:
            # Station names look like "<Province> - <Station>"; match on
            # the province part, case-insensitively.
            province_rows = [
                x for x in table
                if x[0].split(' - ')[0].lower() == province.name.lower()
            ]
            # Collect the parseable values per pollutant column.
            # NOTE(review): c6h6 reads column 7, the same column as pm10 —
            # confirm the intended benzene column index.
            so2 = [
                self.extract_float(x[1]) for x in province_rows
                if self.extract_float(x[1]) is not None
            ]
            no2 = [
                self.extract_float(x[3]) for x in province_rows
                if self.extract_float(x[3]) is not None
            ]
            co = [
                self.extract_float(x[4]) for x in province_rows
                if self.extract_float(x[4]) is not None
            ]
            pm10 = [
                self.extract_float(x[7]) for x in province_rows
                if self.extract_float(x[7]) is not None
            ]
            pm25 = [
                self.extract_float(x[9]) for x in province_rows
                if self.extract_float(x[9]) is not None
            ]
            o3 = [
                self.extract_float(x[5]) for x in province_rows
                if self.extract_float(x[5]) is not None
            ]
            c6h6 = [
                self.extract_float(x[7]) for x in province_rows
                if self.extract_float(x[7]) is not None
            ]
            # Average each pollutant over the province's stations.
            if len(so2) > 0:
                province.quality.so2 = round(mean(so2), 2)
            if len(no2) > 0:
                province.quality.no2 = round(mean(no2), 2)
            if len(co) > 0:
                province.quality.co = round(mean(co), 2)
            if len(pm10) > 0:
                province.quality.pm10 = round(mean(pm10), 2)
            if len(pm25) > 0:
                province.quality.pm25 = round(mean(pm25), 2)
            if len(o3) > 0:
                province.quality.o3 = round(mean(o3), 2)
            if len(c6h6) > 0:
                province.quality.c6h6 = round(mean(c6h6), 2)
    # Notify listeners that the fetch routine has finished.
    if self.on_quality_fetched is not None:
        self.on_quality_fetched(self)
def add_monthly_cost(self, dt: datetime, cost: Decimal) -> None:
    """Accumulate *cost* into the monthly bucket that *dt* falls in.

    The bucket key is derived from *dt* via the module-wide
    MONTHLY_KEY_FORMAT strftime pattern.
    """
    key = dt.strftime(MONTHLY_KEY_FORMAT)
    bucket = self.months[key]
    bucket['costs'] = bucket['costs'] + cost
def Search(
    query: Text,
    from_date: datetime.datetime = None,
    to_date: datetime.datetime = None,
    number_of_results: int = 100,
) -> pandas.DataFrame:
    """Run a twint tweet search and return the results as a DataFrame.

    Args:
        query: the search query.
        from_date: only include tweets posted at or after this datetime.
        to_date: only include tweets posted up to this datetime.
        number_of_results: maximum number of tweets to return.

    Returns:
        A dataframe of tweets as produced by twint's pandas storage.
        Columns include (among others): id, conversation_id, created_at,
        date, time, timezone, user_id, username, name, place, tweet,
        language, mentions, urls, photos, replies_count, retweets_count,
        likes_count, hashtags, cashtags, link, retweet, quote_url, video,
        thumbnail, near, geo, source, user_rt_id, user_rt, retweet_id,
        reply_to, retweet_date, translate, trans_src, trans_dest.
    """
    # twint spins up its own asyncio loop; patch any already-running loop
    # so this also works inside notebooks and other async environments.
    nest_asyncio.apply()

    config = twint.Config()
    config.Search = query
    config.Limit = number_of_results
    config.Pandas = True
    config.Hide_output = True
    if from_date is not None:
        config.Since = from_date.strftime('%Y-%m-%d %H:%M:%S')
    if to_date is not None:
        config.Until = to_date.strftime('%Y-%m-%d %H:%M:%S')

    twint.run.Search(config)
    return twint.storage.panda.Tweets_df
def get_monthly_limit(self, dt: datetime) -> Decimal:
    """Return the budget remaining for the month that contains *dt*.

    Computed as the month's budget minus the costs accumulated so far;
    the bucket key comes from the MONTHLY_KEY_FORMAT strftime pattern.
    """
    bucket = self.months[dt.strftime(MONTHLY_KEY_FORMAT)]
    return bucket['budget'] - bucket['costs']
def format_label_date(date: datetime.datetime, interval: str) -> str:
    """Format *date* for a chart axis label.

    Sub-day intervals ("hour", "minute") append the time-of-day;
    coarser intervals yield a date-only label.

    NOTE(review): "%-d" (day without zero-padding) is a glibc strftime
    extension and is not portable to Windows.
    """
    date_part = "%-d-%b-%Y"
    fmt = date_part + " %H:%M" if interval in ("hour", "minute") else date_part
    return date.strftime(fmt)
def get_history_pkl_name(dt: datetime):
    """Return the 'YYYY-MM' key used to name a history pickle for *dt*."""
    return dt.strftime('%Y-%m')
def format(self, datetime_value: datetime) -> str:
    """Render *datetime_value* as a string using the module-level
    DATETIME_FORMAT strftime pattern."""
    rendered = datetime_value.strftime(DATETIME_FORMAT)
    return rendered