# Imports needed by the two tasks below (the FoiRequest import path is
# project-specific; adjust it to wherever the model lives in your app).
from datetime import timedelta

from django.conf import settings
from django.core.mail import send_mail
from django.db.models import Q
from django.template.loader import render_to_string
from django.utils import timezone, translation
from django.utils.translation import gettext as _

from froide.foirequest.models import FoiRequest


def make_private_public(*args, **kwargs):
    translation.activate(settings.LANGUAGE_CODE)
    num_days_after_due_date = settings.FROIDE_CONFIG.get(
        'make_public_num_days_after_due_date', 365)
    now = timezone.now()
    # Requests whose due date is at least this old get switched to public.
    due_date_cutoff = now - timedelta(days=num_days_after_due_date)
    for foirequest in FoiRequest.objects.filter(
            Q(visibility=0) | Q(visibility=1),
            is_foi=True,
            due_date__lte=due_date_cutoff):
        foirequest.visibility = 2
        foirequest.save()
NOTIFICATION_SENT_BEFORE_NUM_DAYS = 7  # the email text below assumes 7 days


def detect_private_will_be_made_public(*args, **kwargs):
    translation.activate(settings.LANGUAGE_CODE)
    num_days_after_due_date = settings.FROIDE_CONFIG.get(
        'make_public_num_days_after_due_date', 365)
    num_days_after_due_date -= NOTIFICATION_SENT_BEFORE_NUM_DAYS
    now = timezone.now()
    notification_cutoff = now - timedelta(days=num_days_after_due_date)
    # Match on the calendar day: an exact datetime equality filter
    # would almost never hit.
    for foirequest in FoiRequest.objects.filter(
            Q(visibility=0) | Q(visibility=1),
            is_foi=True,
            due_date__date=notification_cutoff.date()):
        send_mail(
            _("%(site_name)s: Reminder that your request is being "
              "made public in 7 days") % {
                "site_name": settings.SITE_NAME
            },
            render_to_string("foirequest/emails/became_public.txt", {
                "site_name": settings.SITE_NAME,
                "request": foirequest,
            }),
            settings.DEFAULT_FROM_EMAIL,
            [foirequest.user.email]
        )
In [1]: from datetime import timedelta

In [2]: a = timedelta(days=2, hours=6)

In [3]: a
Out[3]: datetime.timedelta(2, 21600)

In [4]: b = timedelta(hours=4.5)

In [5]: b
Out[5]: datetime.timedelta(0, 16200)

In [6]: c = a + b

In [7]: c
Out[7]: datetime.timedelta(2, 37800)

In [8]: 16200/4.5
Out[8]: 3600.0

In [9]: c.days
Out[9]: 2

In [10]: c.seconds
Out[10]: 37800

In [11]: c.seconds/3600
Out[11]: 10.5

In [12]: c.total_seconds()/3600
Out[12]: 58.5
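Note that a timedelta is normalized into days, seconds, and microseconds, so `.seconds` only covers the remainder past whole days; use `total_seconds()` for the full duration. A minimal sketch of the difference:

from datetime import timedelta

d = timedelta(days=1, hours=2)
print(d.seconds)          # 7200 -- only the 2-hour remainder
print(d.total_seconds())  # 93600.0 -- the whole span in seconds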
import re
import subprocess
from datetime import datetime, timedelta, timezone

from kubernetes import client, config


def cleanup_namespace_resources(namespace_pattern, cleanup_older_than,
                                k8s_context, kube_config_file):
    """Delete resources within target namespaces which have exceeded some AGE threshold."""
    # creation_timestamp is timezone-aware, so "now" must be too.
    now = datetime.now(timezone.utc)
    for ctx in k8s_context:
        print("Processing Kubernetes context {context}...".format(context=ctx))
        config.load_kube_config(context=ctx, config_file=kube_config_file)
        v1 = client.CoreV1Api()
        response = v1.list_namespace()
        for pattern in namespace_pattern:
            print("Checking cluster namespaces against pattern: {p}".format(p=pattern))
            regexp = re.compile(pattern)
            for ns in response.items:
                print("Namespace: {namespace}, creation_time: {createdAt}".format(
                    namespace=ns.metadata.name,
                    createdAt=ns.metadata.creation_timestamp))
                # Clean up all namespace resources which match the pattern and
                # whose creation time is older than the threshold.
                if regexp.search(ns.metadata.name) and (
                        ns.metadata.creation_timestamp <= now - timedelta(seconds=cleanup_older_than)):
                    print("Namespace [{namespace}] exceeds age of {age} seconds. "
                          "Cleaning up resources...".format(
                              namespace=ns.metadata.name, age=cleanup_older_than))
                    # Use the namespace *name* here; formatting the namespace
                    # object itself produced a broken kubectl command.
                    command = "kubectl delete all --all -n {namespace}".format(
                        namespace=ns.metadata.name)
                    process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
                    output, error = process.communicate()
                    if error:
                        print(error)
                    if output:
                        print(output)
                else:
                    print("Skipping Namespace {namespace}.".format(
                        namespace=ns.metadata.name))
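A hypothetical invocation, assuming throwaway namespaces prefixed with `ci-`, a single context named `dev-cluster`, and a one-day age threshold:

cleanup_namespace_resources(
    namespace_pattern=[r'^ci-'],        # regexes matched against namespace names
    cleanup_older_than=86400,           # age threshold in seconds (1 day)
    k8s_context=['dev-cluster'],        # kube contexts to process
    kube_config_file='~/.kube/config')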
from datetime import datetime as dt, timedelta


def seconds_ago(s, n):
    # Parse the timestamp string, shift it back n seconds, and
    # return it in the same "%Y-%m-%d %H:%M:%S" format.
    f = "%Y-%m-%d %H:%M:%S"
    s_date = dt.strptime(s, f) - timedelta(seconds=n)
    return str(s_date)
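For example:

>>> seconds_ago("2021-06-01 12:00:30", 90)
'2021-06-01 11:59:00'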
def update_time(self):
    return timedelta(hours=1)
def screensaver_time(self):
    return timedelta(days=2)
Much of the data we analyze carries timestamps; for instance, every record in a dataset may carry the timestamp of when it was generated, and we sometimes need to compute with those timestamps. Pandas makes this very convenient. In Python, time handling lives mainly in the datetime package, which provides three main objects:

datetime.datetime represents a point in time;
datetime.date represents a calendar day;
datetime.timedelta represents the difference between two datetime.datetime objects.

In [496]: from datetime import datetime, timedelta

In [497]: d1 = datetime(2017, 8, 1)

In [498]: d1
Out[498]: datetime.datetime(2017, 8, 1, 0, 0)

In [499]: delta = timedelta(days=10)

In [500]: d1 + delta
Out[500]: datetime.datetime(2017, 8, 11, 0, 0)

The code above briefly demonstrates datetime.datetime and datetime.timedelta. Next, let's create a simple time series and walk through some basic usage:

In [539]: from datetime import datetime

In [540]: dates = [datetime(2018, 1, 1), datetime(2018, 1, 2),
     ...:          datetime(2018, 1, 3), datetime(2018, 1, 4)]

In [541]: ts = Series(np.random.randn(4), index=dates)

In [542]: ts
Out[542]:
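The Out[542] display is elided above (the values are random), but a time-indexed Series supports date-based selection directly. A minimal sketch, assuming the usual pandas and NumPy imports:

import numpy as np
from pandas import Series
from datetime import datetime, timedelta

dates = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(4)]
ts = Series(np.random.randn(4), index=dates)

print(ts['2018-01-02'])         # select by date string
print(ts[ts.index > dates[1]])  # select by comparing timestamps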
In [22]: bang_d = loc_d.astimezone(timezone('Asia/Kolkata'))

In [23]: print(bang_d)
2012-12-21 21:00:00+05:30

In [24]: d = datetime(2013, 3, 10, 1, 45)

In [25]: loc_d = central.localize(d)

In [26]: print(loc_d)
2013-03-10 01:45:00-06:00

In [27]: from datetime import timedelta

In [28]: later = central.normalize(loc_d + timedelta(minutes=30))

In [29]: print(later)
2013-03-10 03:15:00-05:00

In [30]: print(loc_d)
2013-03-10 01:45:00-06:00

In [33]: import pytz

In [34]: utc_d = loc_d.astimezone(pytz.utc)

In [35]: print(utc_d)
2013-03-10 07:45:00+00:00

In [36]: later_utc = utc_d + timedelta(minutes=30)
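Doing the arithmetic in UTC sidesteps the DST adjustment entirely; the result only needs converting back to local time for display. Continuing the session above (assuming `central = pytz.timezone('US/Central')` from earlier):

In [37]: print(later_utc.astimezone(central))
2013-03-10 03:15:00-05:00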
import zulu
from datetime import timedelta  # timedelta is not a module; import it from datetime

now = zulu.now()
past = now - timedelta(days=30)  # days=30, not days - 30 (an undefined name)
print(past)
""") stocks = cursor.fetchall() for stock in stocks: symbol = stock['symbol'] stock_ids[symbol] = stock['id'] # print(stock_ids) for symbol in symbols: start_date = datetime(2020, 10, 25).date() end_date_range = datetime(2021, 1, 25).date() while start_date <end_date_range: start_date = start_date + timedelta(days=4) print(f"=== Fetching minute bars {start_date}-{end_date} for {symbol}") api = tradeapi.REST(config.API_KEY, config.SECRET_KEY, config.API_URL) minutes = api.polygon.historic_agg_v2(symbol, 1, 'minute', _from='start_date', to='end_date').df minutes = minutes.resample('1min').ffill() for index, row in minutes.iterrows(): cursor.execute(""" INSERT INTO stock_price_minute (stock_id, datetime, open, high, low, close, volume) VALUES (?, ?, ?, ?, ?, ?, ?) """,(stock_ids[symbol], index.tz_localize(None).isoformat(), row['open'], row['high'],low['low'],row['close'],row['volume'])) start_date = start_date + timedelta(days=7)
import random
from datetime import timedelta

from faker import Faker

fake = Faker()  # assumed setup; the original snippet relies on an existing `fake`


def create_profile_rows(num=1):
    output = []
    for x in range(num):
        intake_date = fake.date_between(start_date="-3y", end_date="today")
        # random.choices takes the population as one sequence (not separate
        # arguments) and returns a list, so unpack the single draw with [0].
        ins_status = random.choices(
            ('insured', 'uninsured', 'application pending'),
            weights=[1.5, 1, 0.5])[0]
        if ins_status != 'uninsured':
            hmo = random.choices((True, None), weights=[1.5, 0.5])[0]
            insurance = random.choices(
                ('Health Partners', 'Keystone First', 'Aetna Better Health',
                 'United Healthcare'))[0]
            ins_id = fake.credit_card_number(card_type=None)
            prior_auth_date = intake_date + timedelta(
                days=random.randint(90, 200))
        else:
            hmo = None
            insurance = None
            ins_id = None
            prior_auth_date = None
        hiv_status = random.choices((True, None), weights=[0.5, 1.5])[0]
        if hiv_status is True:
            hiv_care_info = fake.sentence(nb_words=6,
                                          variable_nb_words=True,
                                          ext_word_list=None)
            hiv_last_app = intake_date - timedelta(
                days=random.randint(30, 160))
            viral_load = random.choices(
                ('>1500', '>10000', '>100000', 'Undetectable'))[0]
            cd4 = random.choices((True, None))[0]
            # TODO: add language for cd4 test
        else:
            hiv_care_info = None
            hiv_last_app = None
            viral_load = None
            cd4 = None
        hcv_status = random.choices((True, None), weights=[0.5, 1.5])[0]
        if hcv_status is True:
            hcv_care_info = fake.sentence(nb_words=6,
                                          variable_nb_words=True,
                                          ext_word_list=None)
            hcv_last_app = intake_date - timedelta(
                days=random.randint(30, 160))
        cchange_status = random.choices(('waiting', 'active', 'notinvolved'))[0]
        output.append({
            "intake_date": intake_date,
            "first_name": fake.first_name(),
            "last_name": fake.last_name(),
            "dob": fake.date_of_birth(tzinfo=None, minimum_age=20,
                                      maximum_age=80),
            "ssn": fake.ssn(taxpayer_identification_number_type="SSN"),
            "state": 'PA',
            "city": 'Philadelphia',
            "address": fake.street_address(),
            "intersection": fake.street_name() + ' & ' + fake.street_name(),
            "phone_number": fake.phone_number(),
            "text_optin": random.choices((True, None), weights=[1.75, 0.25])[0],
            "vm_optin": random.choices((True, None), weights=[1.5, 0.5])[0],
            "state_id": random.choices((True, None), weights=[1.7, 0.3])[0],
            "insurance_status": ins_status,
            "hmo": hmo,
            "insurance": insurance,
            "insurance_id": ins_id,
            "prior_auth_date": prior_auth_date,
            "income_source": random.choices(
                ('Full Time', 'Part Time', 'Cash Assistance'))[0],
            "monthly_income": random.randint(300, 1900),
            "housing_status": random.choices(
                ('homeless', 'temporary housing', 'shelter', 'has home'))[0]
        })
    return output
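A quick smoke test of the generator:

rows = create_profile_rows(3)
print(len(rows), rows[0]["intake_date"], rows[0]["insurance_status"])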
def newsToday():
    for i, key in enumerate(news.keys()):
        # timedelta(days=0) leaves the date unchanged, i.e. today
        if news[key].date() == utc.localize(datetime.today() - timedelta(days=0)).date():
from datetime import datetime, timezone, timedelta

# Taiwan is UTC+8 (the original offset of 8:16 was a typo); the IANA
# zone name is 'Asia/Taipei'.
tw_tz = timezone(timedelta(hours=8), 'Asia/Taipei')

now = datetime.now(tw_tz)
today = now.strftime("%Y") + now.strftime("%m") + now.strftime("%d")

for i in range(10):  # an integer is not iterable; use range()
    print(today)
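A fixed `timezone` offset is fine for Taiwan, which observes no DST, but for zones that do, the standard-library zoneinfo module (Python 3.9+) is the safer choice:

from datetime import datetime
from zoneinfo import ZoneInfo

now = datetime.now(ZoneInfo('Asia/Taipei'))
print(now.strftime("%Y%m%d"))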
#!/usr/bin/env python
# encoding: utf8
import hashlib
import json
import os
from datetime import datetime, timedelta

from togglapi import api
from config import API_TOKEN, TIMEZONE

START_MONTH = '2015-03'


def get_month_start(time):
    return time.replace(day=1, hour=0, minute=0, second=0, microsecond=0)


def get_month_end(time):
    month_start = get_month_start(time)
    next_month_start = (month_start + timedelta(days=31)).replace(day=1)
    return next_month_start - timedelta(seconds=1)


def dump(dump_dir, year, month, verbose=False):
    time = datetime(year=year, month=month, day=1)
    month_start = get_month_start(time)
    month_end = get_month_end(time)
    month = '%d-%02d' % (year, month)
    if verbose:
        print(month + ':', month_start.isoformat(), 'to',
              month_end.isoformat(), '...')
    toggl = api.TogglAPI(API_TOKEN, TIMEZONE)
    data = toggl.get_time_entries(month_start.isoformat(),
                                  month_end.isoformat())
    content = json.dumps(data)
    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)
    filename = os.path.join(dump_dir, '%s.json' % month)

    def md5(string):
        return hashlib.md5(string.encode('utf8')).digest()

    if os.path.exists(filename) and md5(content) == md5(open(filename).read()):
        if verbose:
            print(filename, 'not changed.')
    else:
        open(filename, 'w').write(content)
        if verbose:
            print(filename, 'saved.')
    return data


def backup(backup_dir, verbose=False):
    time = datetime.now()
    while True:
        month = '%d-%02d' % (time.year, time.month)
        if month < START_MONTH:
            break
        dump(backup_dir, time.year, time.month, verbose=verbose)
        # Step to the last day of the previous month, then to its first day;
        # subtracting a flat 31 days from day 1 could skip a short month.
        prev_month = (time.replace(day=1) - timedelta(days=1)).replace(day=1)
        time = prev_month


if __name__ == '__main__':
    import sys
    if len(sys.argv) == 1:
        print('Usage: %s backup_dir [-v]' % sys.argv[0])
        sys.exit(1)
    verbose = len(sys.argv) > 2 and sys.argv[2] == '-v'
    backup_dir = sys.argv[1]
    backup(backup_dir, verbose=verbose)
from datetime import datetime, timedelta

# Define default_args to be passed in to operators
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime.now(),
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 3,
    'retry_delay': timedelta(minutes=5),
}

# Define the EMR settings
emr_settings = {
    "Applications": [
        {
            "Name": "Spark"
        }
    ],
    "EbsRootVolumeSize": 10,
    "Instances": {
        "Ec2SubnetId": "subnet-49403b67",
        "EmrManagedMasterSecurityGroup": "sg-0c9ef433da846c7bf",
        "EmrManagedSlaveSecurityGroup": "sg-0bf779a7d90a8d321",
        "InstanceGroups": [
            {
                "EbsConfiguration": {
                    "EbsBlockDeviceConfigs": [
import sys
import urllib.request
from datetime import timedelta
from zipfile import ZipFile

# sys.path.insert takes an index as its first argument
sys.path.insert(0, '~/Documents/git/Simple-Python-ETL/')

from airflow import DAG
from airflow.operators.python import PythonOperator

default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'email': ['*****@*****.**'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
    # 'wait_for_downstream': False,
    # 'dag': dag,
    # 'sla': timedelta(hours=2),
    # 'execution_timeout': timedelta(seconds=300),
    # 'on_failure_callback': some_function,
    # 'on_success_callback': some_other_function,
    # 'on_retry_callback': another_function,
    # 'sla_miss_callback': yet_another_function,
    # 'trigger_rule': 'all_success',
}
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from datetime import timedelta  # timedelta lives in datetime, not in a module of its own

# Setting up default arguments for DAG initialization
default_args = {
    'owner': 'airflow',
    'start_date': airflow.utils.dates.days_ago(2)
}

# Initializing our DAG
dag = DAG(dag_id='DAG_tmp',
          default_args=default_args,
          description='DAG for DSC, temporary',
          schedule_interval=timedelta(days=1))

# Defining Tasks
##############

# Reading data -- task_ids must be unique within a DAG, so each operator
# gets its own id rather than the duplicated 'Read data'.
read_data_1 = DummyOperator(task_id='read_data_1', dag=dag)
read_data_2 = DummyOperator(task_id='read_data_2', dag=dag)
read_data_3 = DummyOperator(task_id='read_data_3', dag=dag)
read_data_4 = DummyOperator(task_id='read_data_4', dag=dag)
read_data_5 = DummyOperator(task_id='read_data_5', dag=dag)
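The tasks above have no ordering yet; in Airflow, dependencies are declared with the bitshift operators. A minimal sketch (the downstream `process_data` task is hypothetical):

process_data = DummyOperator(task_id='process_data', dag=dag)

# Run all five reads first, then the processing step.
[read_data_1, read_data_2, read_data_3,
 read_data_4, read_data_5] >> process_data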
from datetime import timedelta


def time_ticks(x, pos):
    # Render an axis value in seconds as H:MM:SS, dropping any
    # fractional part.
    return str(timedelta(seconds=int(x))).split(".")[0]
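This (value, position) signature matches what Matplotlib expects of a tick formatter, so it can be attached to an axis directly. A minimal sketch, assuming an existing `ax` whose x-values are in seconds:

from matplotlib.ticker import FuncFormatter

ax.xaxis.set_major_formatter(FuncFormatter(time_ticks))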
cbar = fig.colorbar(cs, orientation='vertical')
cbar.ax.set_ylabel('$^oC$', fontsize=14)
ax.set_ylabel('Depth (m)', fontsize=14)

file = folder_fig + 'Temp_' + e.dataset_id
plt.savefig(file, bbox_inches='tight', pad_inches=0.1)

#%% Temp transect POM
time_matrix_pom = np.tile(time_pom, (z_matrix_pom.shape[1], 1)).T

kw = dict(levels=np.arange(6, 31, 3))
fig, ax = plt.subplots(figsize=(10, 3))
cs = plt.contourf(time_matrix_pom, z_matrix_pom, target_temp_pom,
                  cmap=cmocean.cm.thermal, **kw)
# contour takes 'colors', not 'color'
plt.contour(time_matrix_pom, z_matrix_pom, target_temp_pom, [26], colors='k')
plt.title('HWRF-MPIPOM Cycle ' + cycle + ' Along Track ' + e.dataset_id[0:4],
          fontsize=14)
plt.ylim(-70, 0)
plt.xlim(timeg[0], timeg[-1])
time_vec = [datetime(2020, 8, 2, 0), datetime(2020, 8, 2, 12),
            datetime(2020, 8, 3, 0), datetime(2020, 8, 3, 12)]
plt.xticks(time_vec)
xfmt = mdates.DateFormatter('%b-%d-%H')
ax.xaxis.set_major_formatter(xfmt)
cbar = fig.colorbar(cs, orientation='vertical')
cbar.ax.set_ylabel('$^oC$', fontsize=14)
ax.set_ylabel('Depth (m)', fontsize=14)

file = folder_fig + 'HWRF_POM_vs_RU33_' + storm_id + '_' + cycle
plt.savefig(file, bbox_inches='tight', pad_inches=0.1)

timeg[0] + timedelta(hours=6)