def apidata(cls, StockCode: str, starttime: datetime, endtime: datetime) -> pd.DataFrame:
    '''
    Inputs: starttime, endtime
    Fetches daily fundamental indicators for all stocks in the given period from
    the tushare daily_basic endpoint (trading days taken from the SSE calendar).
    '''
    trade_date = fapi.trade_cal('SSE', starttime.strftime('%Y%m%d'), endtime.strftime('%Y%m%d'), '1')
    daily_basic = pd.DataFrame()
    for date in tqdm(trade_date["date"], desc='Updating...', ncols=80):
        df = None
        num = 0
        while df is None:
            try:
                df = api.daily_basic(trade_date=date)
            except Exception as e:
                if num < 5:
                    num += 1
                    print(e)
                    print('Error fetching daily fundamentals for {date} from tushare; retrying (attempt {num})'.format(date=date, num=num))
                else:
                    print('Failed to update daily data!')
                    raise e
        # DataFrame.append was removed in pandas 2.0; use concat instead
        daily_basic = pd.concat([daily_basic, df], ignore_index=True)
    # tushare returns these as percentages; convert to fractions
    for col in ("turnover_rate", "turnover_rate_f", "dv_ratio", "dv_ttm"):
        daily_basic[col] = daily_basic[col] / 100
    # share counts are in units of 10,000 shares, market caps in 10,000 CNY
    for col in ("total_share", "float_share", "free_share", "total_mv", "circ_mv"):
        daily_basic[col] = daily_basic[col] * 10000
    return daily_basic
def create_event(
        summary: str,
        start_time: datetime,
        end_time: datetime,
        guest_emails: List[str],
        creds_dir: pathlib.PosixPath,
        description: str = "Automatically created event",
        google_meet: str = "",
):
    if len(guest_emails) < 2:
        print("WARNING: You're creating an event with < 2 participants. "
              "This event might get automatically declined and deleted.")
    event = {
        "summary": summary,
        "location": google_meet,
        "description": description,
        "start": {
            "dateTime": start_time.strftime("%Y-%m-%dT%H:%M:%S"),
            "timeZone": "Europe/Berlin",
        },
        "end": {
            "dateTime": end_time.strftime("%Y-%m-%dT%H:%M:%S"),
            "timeZone": "Europe/Berlin",
        },
        "attendees": [{"email": email} for email in guest_emails],
    }
    created_event = (create_service(creds_dir=creds_dir).events().insert(
        calendarId="primary", body=event, sendUpdates="all").execute())
    logging.info(f"Event created: {created_event.get('htmlLink')}")
def get_company_bank_account_transactions(
        self, company_id: int, bank_account_id: int,
        from_date: datetime = None, to_date: datetime = None) -> list:
    """
    Get a list of transactions for a bank account

    :param company_id: The company that owns the bank account
    :param bank_account_id: The bank account
    :param from_date: Only get transactions from this date (inclusive)
    :param to_date: Only get transactions to this date (inclusive)
    :returns: The bank account transactions
    """
    params = {"CompanyId": company_id, "$orderby": "Date"}
    filters = ["BankAccountId eq %s" % bank_account_id]
    if from_date:
        # the API filter is exclusive (gt/lt), so widen by one day on each side
        # to make the range inclusive as documented
        from_date = from_date - datetime.timedelta(days=1)
        filters.append("Date gt DateTime'%s'" % from_date.strftime("%Y-%m-%d"))
    if to_date:
        to_date = to_date + datetime.timedelta(days=1)
        filters.append("Date lt DateTime'%s'" % to_date.strftime("%Y-%m-%d"))
    if filters:
        params["$filter"] = " and ".join(filters)
    records = list()
    while True:
        data = self._call("BankTransaction/Get", "get", params=params)
        records.extend(data["Results"])
        if len(records) == data["TotalResults"]:
            break
        params["$skip"] = len(records)
    return records
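# Illustrative sketch (not from the source): the $skip paging contract used above,
# exercised against a stand-in for self._call that serves 5 records in pages of 2.
# All _demo/_fake names are hypothetical.
def _fake_call(params: dict) -> dict:
    results = [{"Id": i} for i in range(5)]
    skip = params.get("$skip", 0)
    return {"Results": results[skip:skip + 2], "TotalResults": len(results)}

def _demo_skip_paging() -> list:
    params, records = {}, []
    while True:
        data = _fake_call(params)
        records.extend(data["Results"])
        if len(records) == data["TotalResults"]:
            break
        params["$skip"] = len(records)  # request the next page
    return records  # -> all 5 records, fetched across 3 calls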
def apidata(cls, StockCode: str, starttime: datetime, endtime: datetime) -> pd.DataFrame:
    '''
    Inputs: starttime, endtime
    Fetches quotes for all stocks in the given period from the tushare daily
    endpoint. The daily endpoint allows 500 calls per minute.
    '''
    trade_date = fapi.trade_cal('SSE', starttime.strftime('%Y%m%d'), endtime.strftime('%Y%m%d'), '1')
    price_df = pd.DataFrame()
    for date in tqdm(trade_date["date"], desc='Updating...', ncols=80):
        df = None
        num = 0
        while df is None:
            try:
                df = api.daily(trade_date=date)
            except Exception as e:
                if num < 5:
                    num += 1
                    print(e)
                    print('Error fetching quotes for {date} from tushare; retrying (attempt {num})'.format(date=date, num=num))
                else:
                    print('Failed to update daily quotes!')
                    raise e
        # DataFrame.append was removed in pandas 2.0; use concat instead
        price_df = pd.concat([price_df, df], ignore_index=True)
    price_df["amount"] = price_df["amount"] * 1000  # tushare reports turnover in thousands of CNY
    return price_df
def generate_a_report_file_for_code_coverage_per_repo(report_data: dict, week_start: datetime):
    # `report_data` (renamed from `input`, which shadows the builtin) carries the
    # coverage figures and the generation timestamp
    week_end = week_start + timedelta(days=4)
    start_as_str = week_start.strftime("%d_%m_%Y")
    end_as_str = week_end.strftime("%d_%m_%Y")
    output_filename = ("Code_coverage_report_per_repository_from_" + start_as_str +
                       "_to_" + end_as_str + "_generated_at_" +
                       report_data['timestamp_this_was_created'] + ".csv")
    # newline='' keeps csv from writing blank rows on Windows
    with open(output_filename, 'w', newline='') as csv_file:
        fieldnames = ['week_commencing', 'repository', 'coverage(%)']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        for repo_name, coverage in report_data['coverage_per_repo'].items():
            writer.writerow({
                'week_commencing': week_start.strftime("%Y/%m/%d"),
                'repository': repo_name.replace('pure-escapes_', ''),
                'coverage(%)': coverage
            })
def extract_data(date: datetime):
    """
    Extracts energy raw data from Tauron web service
    @param date: data date to download
    @returns: tuple for (energy_consumption, energy_production)
    """
    # download daily meter data
    emeter.get_data(meter_id=meter_id, date=date.strftime('%d.%m.%Y'))
    emeter.to_flat_file(file_name_pattern.format(
        folder='raw', date=date.strftime('%Y_%m_%d'), timestamp=ts),
        raw=True, mode='w')
    results = []
    for val, measure in [('chart', energy_consumption_measure_id),
                         ('OZE', energy_production_measure_id)]:
        result = TauronDataConverter.EnergyData(data=emeter.parse(val),
                                                sensor_id=sensor_id,
                                                measure_id=measure)
        results.append(result)
    # save raw files
    emeter.to_flat_file(file_name_pattern.format(
        folder='interim', date=date.strftime('%Y_%m_%d'), timestamp=ts),
        mode='w')
    return tuple(results)
def generate_all_reporting_data_for_specific_week(target_versions: list, start_date: datetime, end_date: datetime):
    j1 = JIRA_Fetcher()
    c4 = CircleCI_Fetcher()
    report_object = j1.get_a_list_of_DONE_tickets_within_a_period(start_date, end_date,
                                                                  project_name, target_versions)
    j1.create_data_as_csv_for_DONE_tickets(report_object, True)
    for version in target_versions:
        report_object = j1.get_breakdown_of_tickets_with_hours_booked2(start_date, end_date,
                                                                       project_name, version)
        j1.create_data_as_csv_for_logged_work_for2(report_object, True)
    config = c4.get_basic_configuration_file()
    config["start_date_as_str"] = start_date.strftime("%Y/%m/%d")
    config["end_date_as_str"] = end_date.strftime("%Y/%m/%d")
    report_object = c4.check_several_branches(config)
    c4.show(report_object)
    c4.create_reporting_file_for_a_period(report_object)
    all_repos = {
        # 'pure-escapes_booking-manager-service': 0,
        'pure-escapes_pdf-service': 0,
        'pure-escapes_events-service': 0,
        'pure-escapes_webapp-admin': 0,
        'pure-escapes_webapp-admin-api': 0,
        'pure-escapes_webapp-backend': 0,
        'pure-escapes_webapp-client-api': 0,
        'pure-escapes_webapp-frontend': 0
    }
    report_object = generate_coverage_for_all_repos(all_repos)
    generate_a_report_file_for_code_coverage_per_repo(report_object, start_date)
def dsp_detail(self, start_datetime: datetime, end_datetime: datetime):
    sql = "select * from `dsp` where `DateTime_c` between ? and ? order by `DateTime_c`"
    self.cursor.execute(sql, (start_datetime.strftime("%Y-%m-%d %H:%M:%S"),
                              end_datetime.strftime("%Y-%m-%d %H:%M:%S")))
    # no commit needed: this is a read-only SELECT
    rows = self.cursor.fetchall()
    return rows
def write_clock_v2(disp: weh002004a.WEH002004A, tm: datetime):
    add_1 = tm.strftime(':%S')
    add_2 = ' '
    add_3 = tm.strftime('/%d')
    add_4 = __week_name[tm.weekday()]
    st = tm.strftime('%H %M')
    line = [b'', b'', b'', b'']
    for i in range(0, 5):
        if st[i] == ' ':
            # the separator column between HH and MM carries seconds,
            # a blank, the day of month, and the weekday name
            line[0] = line[0] + add_1.encode() + b'\x20'
            line[1] = line[1] + add_2.encode() + b'\x20'
            line[2] = line[2] + add_3.encode() + b'\x20'
            line[3] = line[3] + add_4.encode() + b'\x20'
        else:
            num = ord(st[i]) - 48
            line[0] = line[0] + __big_digit[num][0] + b'\x20'
            line[1] = line[1] + __big_digit[num][1] + b'\x20'
            line[2] = line[2] + __big_digit[num][2] + b'\x20'
            line[3] = line[3] + __big_digit[num][3] + b'\x20'
    disp.write_bytes(line[0], 0)
    disp.write_bytes(line[1], 1)
    disp.write_bytes(line[2], 2)
    disp.write_bytes(line[3], 3)
def __init__(self, date: datetime, csv_path):
    """
    Register the cows that were present on the given day.
    The date is used as the key; RSSI data is loaded in the constructor.
    """
    self.csv_path = csv_path
    self.cow_list = []
    self.date = date.strftime("%Y/%m/%d")
    self.csv_path += date.strftime("%Y%m%d") + "/"
    self.__read_from_db(self.__get_cow_id_list())
def __init__(self, *, start_date: datetime, end_date: datetime,
             filters: Union[Dict, UserMetricsFilter] = None, report_format: str = "csv"):
    variables = {
        "sdate": start_date.strftime('%Y-%m-%d') if start_date is not None else None,
        "edate": end_date.strftime('%Y-%m-%d') if end_date is not None else None,
        "filters": filters,
        "format": report_format
    }
    super().__init__(self.query, variables=variables)
def __init__(self, *, start_date: datetime, end_date: datetime,
             filters: Union[Dict, UserMetricsFilter] = None, limit: int = None):
    variables = {
        "sdate": start_date.strftime('%Y-%m-%d') if start_date is not None else None,
        "edate": end_date.strftime('%Y-%m-%d') if end_date is not None else None,
        "filters": filters,
        "limit": limit
    }
    super().__init__(self.query, variables=variables)
def send_success_sms(message_key: str, user: User, other_user: User, amount: float,
                     reason: str, tx_time: datetime, balance: float):
    amount_dollars = rounded_dollars(amount)
    rounded_balance_dollars = rounded_dollars(balance)
    TokenProcessor.send_sms(user, message_key,
                            amount=amount_dollars,
                            token_name=default_token(user).symbol,
                            other_user=other_user.user_details(),
                            date=tx_time.strftime('%d/%m/%Y'),
                            reason=reason,
                            time=tx_time.strftime('%I:%M %p'),
                            balance=rounded_balance_dollars)
def get_date_and_age(date: datetime):
    today = datetime.datetime.utcnow()
    if date.year != today.year:
        age = today.year - date.year
        date = date.strftime("%b %d, %Y")
    else:
        date = date.strftime("%b %d")
        age = None
    return date, age
def get_existing_cow_list(date: datetime, filepath):
    """ Return the list of cows that were in pasture No. 1 on the given day """
    filepath = filepath + date.strftime("%Y-%m") + ".csv"
    with open(filepath) as f:
        reader = csv.reader(f)
        for row in reader:
            if (datetime.datetime.strptime(row[0], "%Y/%m/%d") == date):
                return row[1:]
    print("No cow list found for the given date", date.strftime("%Y/%m/%d"))
    sys.exit()
def fetch_posts(self, query: dict, start_date: datetime, end_date: datetime) -> list:
    """Fetch tweets from archive.org that match the given query for the given day."""
    ua = UserAgent()
    ua.set_max_size(100 * 1024 * 1024)
    ua.set_timeout(90)
    ua.set_timing([1, 2, 4, 8, 16, 32, 64, 128, 256, 512])

    end_date = end_date + datetime.timedelta(days=1)
    start_arg = start_date.strftime('%Y-%m-%d')
    end_arg = end_date.strftime('%Y-%m-%d')

    enc_query = urlencode({'q': query, 'date_from': start_arg, 'date_to': end_arg})
    url = "https://searchtweets.archivelab.org/export?" + enc_query
    log.debug("archive.org url: " + url)

    response = ua.get(url)
    if not response.is_success():
        raise McPostsArchiveTwitterDataException(
            "error fetching posts: " + response.decoded_content())

    decoded_content = response.decoded_content()
    # sometimes we get null characters, which choke the csv module
    decoded_content = decoded_content.replace('\x00', '')

    meta_tweets = []
    lines = decoded_content.splitlines()[1:]
    for row in csv.reader(lines, delimiter="\t"):
        fields = 'user_name user_screen_name lang text timestamp_ms url'.split(' ')
        meta_tweet = {}
        for i, field in enumerate(fields):
            meta_tweet[field] = row[i] if i < len(row) else ''
        if 'url' not in meta_tweet or meta_tweet['url'] == '':
            log.warning("meta_tweet '%s' does not have a url" % str(row))
            continue
        meta_tweet['tweet_id'] = get_tweet_id_from_url(meta_tweet['url'])
        meta_tweets.append(meta_tweet)

    add_tweets_to_meta_tweets(meta_tweets)
    return meta_tweets
def parse_path_format(self, fmt: str, path: str, dt: datetime) -> str:
    return (
        fix_path(fmt)
        .replace("${path}", path)
        .replace("${number}", str(self.sauron_number))
        .replace("${sauron}", str(self.sauron_name))
        .replace("${date}", dt.strftime("%Y-%m-%d"))
        .replace("${time}", dt.strftime("%H-%M-%S"))
        .replace("${datetime}", dt.strftime("%Y-%m-%d_%H-%M-%S"))
        .replace("${timestamp}", stamp(dt))
    )
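# Illustrative sketch (not from the source): the same substitution chain with
# fixed, hypothetical values; fix_path() and stamp() from the original are omitted.
from datetime import datetime as _dt

def _demo_path_format() -> str:
    dt = _dt(2021, 5, 4, 12, 30, 0)
    fmt = "${path}/${sauron}/${date}_${time}"
    return (fmt
            .replace("${path}", "data")
            .replace("${sauron}", "prod")
            .replace("${date}", dt.strftime("%Y-%m-%d"))
            .replace("${time}", dt.strftime("%H-%M-%S")))
    # -> "data/prod/2021-05-04_12-30-00"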
def apidata(self, StockCode: str, starttime: datetime, endtime: datetime) -> pd.DataFrame:
    '''
    Inputs: starttime, endtime
    Always fetches the open/closed calendar of each exchange for the given
    period from the tushare trade_cal endpoint.
    '''
    calendar_df = pd.DataFrame()
    for exchange in self.exchangeList:
        # tushare's trade_cal expects dates as YYYYMMDD, not YYYY-MM-DD
        df = api.trade_cal(exchange=exchange,
                           start_date=starttime.strftime('%Y%m%d'),
                           end_date=endtime.strftime('%Y%m%d'))
        # DataFrame.append was removed in pandas 2.0; use concat instead
        calendar_df = pd.concat([calendar_df, df], ignore_index=True)
    return calendar_df
def __init__(self, service: DataService, line: Domain.Transporter,
             date_from: datetime, date_to: datetime):
    self.service = service
    self.line = line
    self.date_from = date_from
    self.date_to = date_to
    self.title = self.line._name_ + " (" + date_from.strftime("%Y-%m-%d %H:%M:%S") + " - " \
        + date_to.strftime("%Y-%m-%d %H:%M:%S") + ")"
    self._fft = FftResult(np.array([]), np.array([]))
def fetch_posts_from_api(self, query: str, start_date: datetime, end_date: datetime) -> list:
    """Return posts from a csv that are within the given date range."""
    if self.mock_enabled:
        googler_json = self._get_mock_json(start_date, end_date)
    else:
        # disabling this for now because googler seems not to return results any more
        log.warning('google support disabled')
        return []

    global _last_google_request_epoch
    now = time.time()
    if now - _last_google_request_epoch < GOOGLE_REQUEST_DELAY:
        delay = GOOGLE_REQUEST_DELAY - (now - _last_google_request_epoch)
        log.info("waiting %d seconds to make google request..." % delay)
        time.sleep(delay)
    _last_google_request_epoch = time.time()

    start_query = "after:" + start_date.strftime("%Y-%m-%d")
    end_query = "before:" + (end_date + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
    full_query = "%s %s %s" % (query, start_query, end_query)

    if not self.mock_enabled:
        # guard keeps the live googler call from clobbering the mock json above
        googler_json = subprocess.check_output(["googler", "--json", "-n 100", full_query])

    links = decode_json(googler_json)
    posts = []
    for link in links:
        publish_date = start_date.strftime('%Y-%m-%d')
        domain = mediawords.util.url.get_url_distinctive_domain(link['url'])
        posts.append({
            'post_id': link['url'],
            'content': "%s %s %s" % (link['title'], link['abstract'], link['url']),
            'author': domain,
            'channel': domain,
            'publish_date': publish_date,
            'data': link
        })
    return posts
def get_historical_prices(self, stock_name: str, ct: datetime, pt: datetime) -> pd.DataFrame:
    """
    Fetches the daily historical price for the stock for a given time frame

    Parameters
    ----------
    stock_name : str
        The nse ticker id.
    ct : datetime
        The current date, which is the end of the time frame.
    pt : datetime
        The date from which to get the historical prices.

    Returns
    -------
    df : dataframe
        The dataset contains the date and historical price of the stock.
    """
    # change the timestamp to DD-MM-YYYY format
    ct = ct.strftime("%d-%m-%Y")
    pt = pt.strftime("%d-%m-%Y")
    head = {
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/87.0.4280.88 Safari/537.36 "
    }
    base_url = self.__config_details['base_url']
    session = requests.session()
    session.get(base_url, headers=head)
    session.get(base_url + self.__config_details['stock_details'] + stock_name, headers=head)  # to save cookies
    session.get(base_url + self.__config_details['stock_historical_data'] + stock_name, headers=head)
    url = (base_url + self.__config_details['stock_historical_data_download'] + stock_name +
           "&series=[%22EQ%22]&from=" + pt + "&to=" + ct + "&csv=true")
    webdata = session.get(url=url, headers=head)
    df = pd.read_csv(StringIO(webdata.text[3:]))
    # keep only Date and ltp; the trailing spaces in column names come from the NSE csv
    df = df.drop(['series ', 'OPEN ', 'HIGH ', 'LOW ', 'PREV. CLOSE ', 'close ', 'vwap ',
                  '52W H ', '52W L ', 'VOLUME ', 'VALUE ', 'No of trades '], axis=1)
    # convert the Date column to YYYY-mm-dd so it sorts chronologically
    df['Date '] = pd.to_datetime(df['Date ']).dt.strftime("%Y-%m-%d")
    df = df.sort_values(by=['Date '])
    return df
def get_historical_prices(self, epic, resolution: Resolution, from_date: datetime, to_date: datetime):
    from_date_formatted = from_date.strftime("%Y-%m-%d") + 'T00%3A00%3A00'
    to_date_formatted = to_date.strftime("%Y-%m-%d") + 'T00%3A00%3A00'
    url = [
        self.PRICES_URI, '/', epic,
        '?resolution=', resolution.name,
        '&from=', from_date_formatted,
        '&to=', to_date_formatted
    ]
    response = self.__get_response__(''.join(url), "3")
    if response is None:
        return self.error
    else:
        return prices_from_dict(json.loads(response))
def get_existing_cow_list(date: datetime):
    """ Return the list of cows that were in pasture No. 1 on the given day """
    path = os.path.abspath('./') + "/behavior_classification/" + date.strftime("%Y-%m") + ".csv"
    with open(path) as f:
        reader = csv.reader(f)
        for row in reader:
            if (datetime.datetime.strptime(row[0], "%Y/%m/%d") == date):
                return row[1:]
    print("No cow list found for the given date", date.strftime("%Y/%m/%d"))
    sys.exit()
def transactions(self, account_id: str, from_date: datetime = None, to_date: datetime = None) -> dict:
    """ Gets the transactions for an account."""
    if from_date and to_date and to_date < from_date:
        raise ValueError("The from_date must be before the to_date")
    params = {}
    if from_date:
        params["fromDate"] = from_date.strftime("%Y-%m-%d")
    if to_date:
        params["toDate"] = to_date.strftime("%Y-%m-%d")
    url = f"/za/pb/v1/accounts/{account_id}/transactions"
    return self.api_call(url, params=params)
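# Usage sketch (illustrative; `client` is a hypothetical instance of the class above
# and the account id is made up):
#   client.transactions("12345", from_date=datetime(2021, 1, 1), to_date=datetime(2021, 1, 31))
# would call /za/pb/v1/accounts/12345/transactions with
# params {"fromDate": "2021-01-01", "toDate": "2021-01-31"}.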
def exchange_success_sms(message_key: str, user: User, other_user: User, own_amount: float,
                         other_amount: float, tx_time: datetime, balance: float):
    rounded_own_amount_dollars = rounded_dollars(own_amount)
    rounded_other_amount_dollars = rounded_dollars(other_amount)
    rounded_balance_dollars = rounded_dollars(balance)
    TokenProcessor.send_sms(
        user, message_key,
        own_amount=rounded_own_amount_dollars,
        other_amount=rounded_other_amount_dollars,
        own_token_name=default_token(user).symbol,
        other_token_name=default_token(other_user).symbol,
        other_user=other_user.user_details(),
        date=tx_time.strftime('%d/%m/%Y'),
        time=tx_time.strftime('%I:%M %p'),
        balance=rounded_balance_dollars)
def get_data(self, time_stamp: datetime = None) -> dict:
    # default arguments are evaluated once at definition time, so default to
    # None and resolve "today" per call
    if time_stamp is None:
        time_stamp = datetime.today()
    data = {
        'ecuId': self.ecu_id,
        'filter': 'power',
        'date': time_stamp.strftime("%Y%m%d")
    }
    response = requests.post(self.apsystems_url, headers=self.headers, data=data)
    if response.status_code != 200:
        try:
            response.raise_for_status()
        except Exception as e:
            msg = ('Bad status code! Response content = {}. Exception = {}'
                   .format(response.content, e))
            _LOG.exception(msg)
            raise e.__class__(msg)
    ans = response.json()
    power_data = json.loads(ans['data']['power'])
    time_data = json.loads(ans['data']['time'])
    energy = 0
    ans = {}
    time_start_s = 0
    for i in range(len(time_data)):
        power = int(power_data[i])
        time_d = time_data[i]
        time_actual = pd.Timestamp(time_stamp.strftime("%Y-%m-%d") + " " + time_d)
        time_pass = time_actual.timestamp() - time_start_s
        if time_start_s == 0:
            time_pass = 300  # assume a 5-minute interval for the first sample
        time_start_s = time_actual.timestamp()
        energy += power * time_pass / 3600  # integrate W over seconds -> Wh
        ans[time_actual.round("5min")] = (power, energy)
    time_actual += pd.Timedelta(minutes=5)
    ans[time_actual.round("5min")] = (0, energy)
    return ans
def __init__(self, api_key: str, freq: int, location: str,
             start_date: datetime, end_date: datetime, max_retry: int = 5):
    self.__api_key = api_key
    self.__freq = freq
    self.__location_list = [location]
    self.__start_date = start_date.strftime('%d-%b-%Y')
    self.__end_date = end_date.strftime('%d-%b-%Y')
    self.__retry = 1
    self.__max_retry = max_retry
def get_subfolder_name(self, media_date: datetime) -> str:
    """
    Creates subfolder name. Depends on options.group_by
    :param media_date: timestamp
    :return subfolder name in choices [day/month/year]
    """
    if self.group_by == "day":
        subfolder_name = media_date.strftime("%Y-%m-%d")
    elif self.group_by == "month":
        subfolder_name = media_date.strftime("%Y-%b")
    else:
        subfolder_name = media_date.strftime("%Y")
    return subfolder_name
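# Expected grouping (illustrative sketch; assumes an English locale for %b):
def _demo_subfolder_names():
    from datetime import datetime
    d = datetime(2021, 7, 9)
    assert d.strftime("%Y-%m-%d") == "2021-07-09"  # group_by == "day"
    assert d.strftime("%Y-%b") == "2021-Jul"       # group_by == "month"
    assert d.strftime("%Y") == "2021"              # fallback: by year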
def get_account_ad_performance_for_single_day(ad_account: adaccount.AdAccount,
                                              single_date: datetime) -> adsinsights.AdsInsights:
    """Downloads the ad performance for an ad account for a given day
    https://developers.facebook.com/docs/marketing-api/insights

    Args:
        ad_account: An ad account to download.
        single_date: A single date as a datetime object

    Returns:
        A list containing dictionaries with the ad performance from the report
    """
    logging.info('download Facebook ad performance of act_{ad_account_id} on {single_date}'.format(
        ad_account_id=ad_account['account_id'],
        single_date=single_date.strftime('%Y-%m-%d')))
    ad_insights = ad_account.get_insights(
        # https://developers.facebook.com/docs/marketing-api/insights/fields
        fields=['date_start', 'ad_id', 'impressions', 'actions', 'spend', 'action_values'],
        # https://developers.facebook.com/docs/marketing-api/insights/parameters
        params={'action_attribution_windows': ['28d_click'],
                # https://developers.facebook.com/docs/marketing-api/insights/action-breakdowns
                'action_breakdowns': ['action_type'],
                # https://developers.facebook.com/docs/marketing-api/insights/breakdowns
                'breakdowns': ['impression_device'],
                'level': 'ad',
                'limit': 1000,
                'time_range': {'since': single_date.strftime('%Y-%m-%d'),
                               'until': single_date.strftime('%Y-%m-%d')},
                # By default only ACTIVE campaigns get considered.
                'filtering': [{
                    'field': 'ad.effective_status',
                    'operator': 'IN',
                    'value': ['ACTIVE', 'PAUSED', 'PENDING_REVIEW', 'DISAPPROVED',
                              'PREAPPROVED', 'PENDING_BILLING_INFO', 'CAMPAIGN_PAUSED',
                              'ARCHIVED', 'ADSET_PAUSED']}]})
    return ad_insights
def get_time_entries(self, dt_from: datetime, dt_to: datetime, uid):
    data = {
        'fromdate': dt_from.strftime(self.date_format),
        'fromtime': dt_from.strftime(self.time_format),
        'todate': dt_to.strftime(self.date_format),
        'totime': dt_to.strftime(self.time_format),
        'showDeleted': False
    }
    if uid:
        data['userId'] = uid
    params = urllib.parse.urlencode(data, doseq=True)
    entries = self.__get_json("time_entries.json?%s" % params, self.common_headers, None)
    return entries
def get_next_circadian_color(date: datetime = None) -> (datetime, CircadianColor):
    if date is None:
        date = datetime.datetime.now(LOCAL_TIMEZONE)
    current_color = get_current_circadian_color(date)
    current_color_idx = CIRCADIAN_COLORS_ASC.index(current_color)
    while True:
        if current_color_idx == len(CIRCADIAN_COLORS_ASC) - 1:
            current_color_idx = 0
            next_color = CIRCADIAN_COLORS_ASC[current_color_idx]
            next_date = next_color.trigger_date_function(date + datetime.timedelta(days=1))  # First event tomorrow
        else:
            current_color_idx += 1
            next_color = CIRCADIAN_COLORS_ASC[current_color_idx]
            next_date = next_color.trigger_date_function(date)
        logger.info("Testing next event (%s) after %s", next_color.name, current_color.name)
        if next_color.is_valid_for_date(date):
            break
    logger.info("Next event after %s is %s at %s",
                date.strftime('%Y/%m/%d %I:%M:%S %p'), next_color.name,
                next_date.strftime('%Y/%m/%d %I:%M:%S %p'))
    return next_date, next_color
def convert_datetime(dt: datetime) -> str:
    """Converts python datetime to MySQL datetime.

    Method converts given python datetime object to MySQL datetime format.

    Args:
        dt: Datetime object in default format.

    Returns:
        Datetime string in MySQL format.
    """
    return dt.strftime('%Y-%m-%d %H:%M:%S')  # Convert to MySQL datetime
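# e.g. convert_datetime(datetime(2021, 3, 2, 14, 5, 9)) == '2021-03-02 14:05:09',
# which MySQL accepts directly for DATETIME columns.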
def get_current_circadian_color(date: datetime = None) -> CircadianColor:
    if date is None:
        date = datetime.datetime.now(LOCAL_TIMEZONE)
    current_color = None
    for color in reversed(CIRCADIAN_COLORS_ASC):
        if color.trigger_date_function(date) < date and color.is_valid_for_date(date):
            current_color = color
            break
    # Note this won't happen so long as first color occurs at midnight
    if current_color is None:
        current_color = CIRCADIAN_COLORS_ASC[-1]
    logger.info("Current event at %s is %s since %s",
                date.strftime('%Y/%m/%d %I:%M:%S %p'), current_color.name,
                current_color.trigger_date_function(date).strftime('%Y/%m/%d %I:%M:%S %p'))
    return current_color
def fill_date_fields(template: Template, date: datetime = DateField.TODAY) -> None:
    """ Populate all date fields in the template.

    A 'date' field provides an easy way of putting the current date into a template.

    A date field uses built-in Python date formats, and should look like this:

        '{{ date }}'              - using default formatting
        '{{ date '%d, %b %Y' }}'  - using custom formatting

    See all supported format identifiers here http://strftime.org
    """
    def next_date_field():
        """ Return the next probable date field. """
        return first(fields(template.content, with_name_like='date'))

    field = next_date_field()
    while field is not None:
        # default date format, e.g. 'October 7, 2016'
        # (note: %-d is platform-dependent; glibc supports it, Windows does not)
        date_format = '%B %-d, %Y'
        if field.context is not None:
            # a date field can have a custom format
            custom_date_format = dequote(field.context).strip()
            if len(custom_date_format) > 0:
                # if found, we'll use that and let date.strftime handle it
                date_format = custom_date_format
        formatted_date = date.strftime(date_format)
        # populate the date field with the content; or blank if unresolved
        fill(field, formatted_date, template)
        field = next_date_field()
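# Sketch of the resulting substitutions (values illustrative, for 7 Oct 2016):
#   "{{ date }}"             -> "October 7, 2016"  (default '%B %-d, %Y')
#   "{{ date '%d, %b %Y' }}" -> "07, Oct 2016"     (custom format passed to strftime)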
def last_change_before(self, last_change: datetime):
    self.add_options({"before": last_change.strftime("%Y%m%d%H%M%S")})
def j2_filter_date(date: datetime):
    return date.strftime('%H:%M:%S.%f')
def last_change_after(self, last_change: datetime):
    self.add_options({"after": last_change.strftime("%Y%m%d%H%M%S")})
def format_datetime(dt: datetime):
    return dt.strftime('%Y-%m-%dT%H:%M:%S%z')
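# Note: for a naive datetime, %z renders as '' — e.g.
# format_datetime(datetime(2021, 1, 2, 3, 4, 5)) == '2021-01-02T03:04:05';
# an aware datetime appends its UTC offset, e.g. '+0000'.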
def __date_to_string(date: datetime):
    return date.strftime("%Y%m%d%H%M00 %z")