def get_field_parser_info(field_description):
    """Return parsing info for one column of a 1C database record file:
    the stored value length in bytes and a function that converts the raw
    byte slice into a Python value.
    """
    ftype = field_description.type
    if ftype == 'B':
        # Binary data is passed through untouched.
        return FieldParserInfo(field_description.length, lambda raw: raw)
    if ftype == 'L':
        # Boolean flag stored as a single byte.
        return FieldParserInfo(1, lambda raw: unpack('?', raw)[0])
    if ftype == 'N':
        # Packed decimal number; precision comes from the field description.
        return FieldParserInfo(
            field_description.length // 2 + 1,
            lambda raw: numeric_to_int(raw, field_description.precision))
    if ftype == 'NC':
        # Fixed-length UTF-16 string.
        return FieldParserInfo(field_description.length * 2,
                               lambda raw: raw.decode('utf-16'))
    if ftype == 'NVC':
        # Variable-length string (2 extra bytes hold the actual length).
        return FieldParserInfo(field_description.length * 2 + 2, nvc_to_string)
    if ftype == 'RV':
        # Row version: four 32-bit ints joined with dots.
        return FieldParserInfo(
            16, lambda raw: '.'.join(str(part) for part in unpack('4i', raw)))
    if ftype == 'NT':
        # Unlimited-length text: stored as a (block, length) reference.
        return FieldParserInfo(8, lambda raw: NText(*unpack('2I', raw)))
    if ftype == 'I':
        # Unlimited-length binary data: stored as a (block, length) reference.
        return FieldParserInfo(8, lambda raw: Image(*unpack('2I', raw)))
    if ftype == 'DT':
        # Date/time packed as BCD; an empty date has year 0000.
        return FieldParserInfo(
            7, lambda raw: None if raw[:2] == b'\x00\x00'
            else datetime.strptime(raw.hex(), '%Y%m%d%H%M%S'))
def get_donation_start():
    """Prompt until the user enters a valid donation start time (HH:MM).

    Stores the parsed time in the global ``don_start`` and returns it.
    """
    global don_start
    while True:
        raw = input("Enter your Start of donation (HH:MM):")
        try:
            # Only accept input that matches the HH:MM format.
            don_start = datetime.strptime(raw, "%H:%M")
        except ValueError:
            print(raw, "is not a valid time! HH:MM. ex: 13:10")
        else:
            return don_start
def get_donation_end():
    """Prompt until the user enters a valid donation end time (HH:MM)
    that is later than the previously entered global ``don_start``.

    Stores the parsed time in the global ``don_end`` and returns it.
    """
    global don_end
    is_valid = False
    while not is_valid:
        data = input("Enter your End of donation (HH:MM):")
        try:
            # Only accept input that matches the HH:MM format.
            don_end = datetime.strptime(data, "%H:%M")
            if don_start < don_end:
                is_valid = True
            else:
                # Fixed message typo: "thad" -> "than".
                print("Donation End have to be later than Donation Start! (Donation start:",
                      don_start.strftime("%H:%M"), "):")
        except ValueError:
            print(data, "is not a valid time! HH:MM. ex: 13:10")
    return don_end
def get_event_date():
    """Prompt until the user enters a valid event date (YYYY.MM.DD) that
    falls on a weekday and is more than 10 days in the future.

    Stores the parsed date in the global ``ev_date`` and returns it.
    """
    global ev_date
    while True:
        raw = input("Enter your Event date (YYYY.MM.DD):")
        try:
            # Only accept input that matches the YYYY.MM.DD format.
            ev_date = datetime.strptime(raw, "%Y.%m.%d")
        except ValueError:
            print(raw, "is not vaild date! Try again(YYYY.MM.DD): ex: 2010.10.10")
            continue
        # isoweekday(): Saturday == 6, Sunday == 7.
        if ev_date.isoweekday() in (6, 7):
            print("Event of date must not be on weekends")
        elif (ev_date.date() - datetime.now().date()).days <= 10:
            print("Your donation date have to be 10 days later from now")
        else:
            return ev_date
def getprices(theURL):
    """Download a CSV of daily quotes from *theURL* and return a list of
    ``[date, adjusted_close]`` rows (a ``datetime.date`` and a ``float``).

    The first line of the response (the CSV header) is skipped.

    Improvement: the original serialized the DictReader rows to JSON and
    immediately parsed them back — a pointless round-trip that is removed
    here; rows are consumed directly from the reader.
    """
    text = requests.get(theURL).text.split('\n')[1:]
    reader = csv.DictReader(text, fieldnames=("Date", "Open", "High", "Low",
                                              "Close", "Volume", "Adj Close"))
    theData = []
    for row in reader:
        # Only the date and the adjusted close are used downstream; the
        # remaining columns are parsed but intentionally ignored.
        theData.append([
            datetime.strptime(row["Date"], "%Y-%m-%d").date(),
            float(row["Adj Close"]),
        ])
    return theData
def list_donor_data(pure_data, sorted_by_index, page_number):
    """Print donor rows sorted by one column, paging the output.

    :param pure_data: iterable of donor rows (indexable sequences); the
        columns used here are name (0, 1), weight (2), birth date string
        ``YYYY.MM.DD`` (4) and a free-text field (10)
    :param sorted_by_index: column index to sort the listing by
    :param page_number: number of rows to show per page
    """
    row_counter = 0
    for row in sorted(pure_data, key=itemgetter(sorted_by_index)):
        row_counter += 1
        print("\n", "-" * 40, "\n")
        print("{}, {}".format(row[1], row[0]))
        print("{} kg".format(row[2]))
        # Approximate age in whole years from the birth-date column.
        year = datetime.strptime(row[4], "%Y.%m.%d")
        today = datetime.now()
        age = ((today - year).days // 365)
        print("{} - {} years old".format(row[4], age))
        print("{}".format(row[10]))
        if row_counter % page_number == 0:
            answer = input("\nNext page? Press y to continue: ")
            if answer == 'y':
                os.system("CLS")
                # Fixed: the original used `==` (a no-op comparison) where
                # an assignment resetting the page counter was intended.
                row_counter = 0
def _get_new_data(self, page_url, soup):
    """Extract the data of interest from *soup* for *page_url*.

    Returns a dict: for a news list page, a mapping {title: image URL};
    for an article page, the article's title / author / post_at /
    comment_count / content; an empty dict for anything else.
    """
    page = "http://wallstreetcn.com/news?status=published&type=news&order=-created_at&limit=30&page="
    pattern = re.compile(r'http://wallstreetcn\.com/node/\d+')
    # A list page is `page` followed by a 1- or 2-digit page number.
    if page_url[:-1] == page or page_url[:-2] == page:
        img_nodes = soup.find_all('img', class_="lazy img")
        title_nodes = soup.find_all('a', class_="title")
        img = [node['data-original'] for node in img_nodes]
        title = [node.get_text().strip() for node in title_nodes]
        # Map each headline to its lazy-loaded image URL (zip replaces the
        # original's equivalent dict(map(lambda x, y: [x, y], ...))).
        new_data = dict(zip(title, img))
    elif pattern.match(page_url):
        new_data = {}
        title_node = soup.find('h1', class_="article-title")
        content_node = soup.find('div', class_="article-content")
        author_node = soup.find('span', class_="item author").find("a")
        time_node = soup.find('span', class_="item time")
        comment_count_node = soup.find('span', class_="wscn-cm-counter")
        new_data['title'] = title_node.get_text().strip()
        new_data['author'] = author_node.get_text()
        new_data['post_at'] = datetime.strptime(time_node.get_text(),
                                                '%Y年%m月%d日 %H:%M:%S')
        # Articles without comments have no counter node at all.
        if comment_count_node is not None:
            new_data['comment_count'] = comment_count_node.get_text()
        else:
            new_data['comment_count'] = 0
        new_data['content'] = content_node.get_text()
    else:
        new_data = {}
    # Removed: an unreachable `pass` after the return statement.
    return new_data
def date_from_string(date):
    """Parse a git-log style ``Date:`` line into an aware datetime.

    :param date: any text containing ``Date: <weekday> <month> <day>
        <H:M:S> <year> <utc-offset>``; everything after the first
        ``Date:`` marker is parsed.
    :return: timezone-aware :class:`datetime.datetime`
    :raises ValueError: if the text after ``Date:`` does not match
    :raises IndexError: if ``Date:`` does not occur in *date*
    """
    # Split on whitespace and re-join to normalize arbitrary spacing; the
    # original rebuilt the same list with an identity comprehension.
    fields = date.split("Date:")[1].split()
    return datetime.strptime(" ".join(fields), "%a %b %d %X %Y %z")
def parse_pdf(self, path_to_file):
    """Parse the first page of an insurance-policy PDF and fill the GUI
    entry fields with the extracted values.

    Words are located by their (top, x0/x1) coordinates on the page; the
    numeric windows below are tuned to the fixed layout of the policy
    form.  Fields on the left half have x1 < 450, on the right x1 > 450.
    """
    pdf = pdfplumber.open(path_to_file)
    page = pdf.pages[0]
    rows = page.extract_words()
    # Accumulators for every field read off the policy page.
    zamjena = ""
    is_br_polisa = ""
    reg_oznaka_vozila = ""
    ugovornik_prvi_red = ""
    ugovornik_drugi_red = ""
    ime_naziv_ugovornik = ""
    jmbg_pib_ugovornik = ""
    grad_ugovornik = ""
    ulica_broj_ugovornik = ""
    br_tel_ugovornik = ""
    osiguranik_prvi_red = ""
    osiguranik_drugi_red = ""
    ime_naziv_osiguranik = ""
    jmbg_pib_osiguranik = ""
    grad_osiguranik = ""
    ulica_broj_osiguranik = ""
    br_tel_osiguranik = ""
    vrsta = ""
    marka_tip = ""
    broj_sasije = ""
    broj_motora = ""
    namjena = ""
    godina_proizvodnje = 0
    snaga_kw = ""
    zapremina_ccm = ""
    broj_mjesta = ""
    nosivost_kg = ""
    datum_i_vrijeme = ""
    datum_od = ""
    datum_do = ""
    vrijeme_od = ""
    vrijeme_do = ""
    porez = ""
    za_naplatu = ""
    sklopljeno_u = ""
    sklopljeno_dana = ""
    for row in rows:
        print(row)
        text = row['text']
        text_str = text
        # if 'cid' in text_str.lower():
        # text_str = text_str.strip('(')
        # text_str = text_str.strip(')')
        # ascii_num = text_str.split(':')[-1]
        # ascii_num = int(ascii_num)
        # text = chr(ascii_num)
        # 66 = 'B' in ascii
        # Words the extractor could not decode come out as "(cid:NN)".
        if 'cid' in text_str.lower():
            text = 'N/A'
        # x0 - left position of the text in the PDF
        x0 = float(row['x0'])
        # x1 - right position of the text in the PDF (the original comment
        # said "left" here — a copy-paste slip)
        x1 = float(row['x1'])
        # top - top position of the text in the PDF
        top = float(row['top'])
        if 33 < top < 37 and x1 > 450:
            print("zamjena: " + text)
            zamjena = text
        if 53 < top < 56 and x1 > 450:
            print("is_br_polisa: " + text)
            is_br_polisa = text
        if 94 < top < 97:
            print("reg_oznaka_vozila: " + text)
            reg_oznaka_vozila = text
        # Policyholder (ugovornik) rows are concatenated word by word.
        if 137.544 < top < 140.415:
            ugovornik_prvi_red += text + ' '
        if 157 < top < 162:
            ugovornik_drugi_red += text + ' '
        # Insured person (osiguranik) rows.
        if 191.02 < top < 194:
            osiguranik_prvi_red += text + ' '
        if 210 < top < 215:
            osiguranik_drugi_red += text + ' '
        # Vehicle data: left column / right column pairs.
        if 244 < top < 248 and x1 < 450:
            vrsta += text + ' '
        if 247 < top < 249 and x1 > 450:
            godina_proizvodnje = text
        if 266 < top < 268 and x1 < 450:
            marka_tip += text + ' '
        if 266 < top < 268 and x1 > 450:
            snaga_kw = text
        if 285 < top < 288 and x1 < 450:
            broj_sasije = text
        if 285 < top < 288 and x1 > 450:
            zapremina_ccm = text
        if 303 < top < 306 and x1 < 450:
            broj_motora = text
        if 303 < top < 306 and x1 > 450:
            broj_mjesta = text
        if 321 < top < 324 and x1 < 450:
            namjena += text + ' '
        if 321 < top < 324 and x1 > 450:
            nosivost_kg = text
        if 354 < top < 358:
            datum_i_vrijeme += text + ' '
        if 584 < top < 590 and x1 < 450:
            porez = text
        if 584 < top < 590 and x1 > 450:
            za_naplatu = text
        if 755 < top < 760 and x1 < 300:
            sklopljeno_u = text
        # NOTE(review): this condition duplicates the one above verbatim
        # and re-assigns the same value.
        if 755 < top < 760 and x1 < 300:
            sklopljeno_u = text
        if 755 < top < 760 and x1 > 300:
            sklopljeno_dana = text
    pdf.close()
    if not reg_oznaka_vozila:
        # No registration plate found -> the chosen file is not a policy.
        messagebox.showerror("Greska", "Došlo je do greške!\nDatoteka koju ste izabrali nije polisa!")
    # Parse the policyholder (ugovornik).
    # First row: "<name ...> <jmbg/pib> <extra>"; the id is second-to-last.
    ugovornik_prvi_red_podaci = ugovornik_prvi_red.split(' ')
    duzina_liste = len(ugovornik_prvi_red_podaci)
    jmbg_pib_ugovornik = ugovornik_prvi_red_podaci[duzina_liste - 2]
    i = 0
    for podatak in ugovornik_prvi_red_podaci:
        if i < duzina_liste - 2:
            ime_naziv_ugovornik += podatak + " "
        i += 1
    print("ime_naziv_ugovornik: " + ime_naziv_ugovornik)
    print("jmbg_pib_ugovornik: " + jmbg_pib_ugovornik)
    # Second row: "<city> <street and number> [<phone>]".
    ugovornik_drugi_red_podaci = ugovornik_drugi_red.split(' ')
    duzina_liste = len(ugovornik_drugi_red_podaci)
    if ugovornik_drugi_red.__contains__('067') or ugovornik_drugi_red.__contains__(
            '068') or ugovornik_drugi_red.__contains__('069'):
        # has a phone number (Montenegrin mobile prefixes 067/068/069)
        i = 0
        for podatak in ugovornik_drugi_red_podaci:
            if i == 0:
                grad_ugovornik = podatak
            elif 0 < i < duzina_liste - 2:
                ulica_broj_ugovornik += podatak + " "
            else:
                br_tel_ugovornik += podatak
            i += 1
    else:
        # no phone number
        i = 0
        for podatak in ugovornik_drugi_red_podaci:
            if i == 0:
                grad_ugovornik = podatak
            else:
                ulica_broj_ugovornik += podatak + " "
            i += 1
    print("grad_ugovornik: " + grad_ugovornik)
    print("ulica_broj_ugovornik: " + ulica_broj_ugovornik)
    print("br_tel_ugovornik: " + br_tel_ugovornik)
    ##################################################
    # Parse the insured person (osiguranika) — same layout as above.
    osiguranik_prvi_red_podaci = osiguranik_prvi_red.split(' ')
    duzina_liste = len(osiguranik_prvi_red_podaci)
    jmbg_pib_osiguranik = osiguranik_prvi_red_podaci[duzina_liste - 2]
    i = 0
    for podatak in osiguranik_prvi_red_podaci:
        if i < duzina_liste - 2:
            ime_naziv_osiguranik += podatak + " "
        i += 1
    print("ime_naziv_osiguranik: " + ime_naziv_osiguranik)
    print("jmbg_pib_osiguranik: " + jmbg_pib_osiguranik)
    osiguranik_drugi_red_podaci = osiguranik_drugi_red.split(' ')
    duzina_liste = len(osiguranik_drugi_red_podaci)
    if osiguranik_drugi_red.__contains__('067') or osiguranik_drugi_red.__contains__(
            '068') or osiguranik_drugi_red.__contains__('069'):
        # has a phone number
        i = 0
        for podatak in osiguranik_drugi_red_podaci:
            if i == 0:
                grad_osiguranik = podatak
            elif 0 < i < duzina_liste - 2:
                ulica_broj_osiguranik += podatak + " "
            else:
                br_tel_osiguranik += podatak
            i += 1
    else:
        # no phone number
        i = 0
        for podatak in osiguranik_drugi_red_podaci:
            if i == 0:
                grad_osiguranik = podatak
            else:
                ulica_broj_osiguranik += podatak + " "
            i += 1
    print("grad_osiguranik: " + grad_osiguranik)
    print("ulica_broj_osiguranik: " + ulica_broj_osiguranik)
    print("br_tel_osiguranik: " + br_tel_osiguranik)
    ##################################################
    # Vehicle data.
    print("vrsta:" + vrsta)
    print("marka_tip:" + marka_tip)
    print("godina_proizvodnje: " + godina_proizvodnje)
    print("snaga_kw: " + snaga_kw)
    print("broj_sasije: " + broj_sasije)
    print("zapremina_ccm: " + zapremina_ccm)
    print("broj_motora: " + broj_motora)
    print("broj_mjesta: " + broj_mjesta)
    print("namjena: " + namjena)
    print("nosivost_kg: " + nosivost_kg)
    # Coverage period: "<date from> <time from> <date to> <time to>".
    dv = datum_i_vrijeme.rstrip()
    datum_i_vrijeme_niz = dv.split(" ")
    datum_od = datum_i_vrijeme_niz[0]
    vrijeme_od = datum_i_vrijeme_niz[1]
    datum_do = datum_i_vrijeme_niz[2]
    vrijeme_do = datum_i_vrijeme_niz[3]
    # Re-format the dates from dd.mm.YYYY to ISO YYYY-mm-dd.
    date_object_datum_od = datetime.strptime(datum_od, '%d.%m.%Y')
    datum_od = date_object_datum_od.strftime('%Y-%m-%d')
    date_object_datum_do = datetime.strptime(datum_do, '%d.%m.%Y')
    datum_do = date_object_datum_do.strftime('%Y-%m-%d')
    date_object_sklopljeno_dana = datetime.strptime(sklopljeno_dana, '%d.%m.%Y')
    sklopljeno_dana = date_object_sklopljeno_dana.strftime('%Y-%m-%d')
    print("datum_od: " + datum_od)
    print("vrijeme_od: " + vrijeme_od)
    print("datum_do: " + datum_do)
    print("vrijeme_do: " + vrijeme_do)
    print("porez: " + porez)
    print("za_naplatu: " + za_naplatu)
    print("sklopljeno_u: " + sklopljeno_u)
    print("sklopljeno_dana: " + sklopljeno_dana)
    # print("vrsta:" + vrsta.rstrip())  # rstrip removes the trailing space from the string
    # polisa_broj = input('Unesite broj polise!')
    # Push every extracted value into the form's entry widgets.
    self.empty_all_fields()
    self.zamjena_entry.insert(0, zamjena)
    self.is_br_polise_entry.insert(0, is_br_polisa)
    self.reg_oznaka_vozila_entry.insert(0, reg_oznaka_vozila)
    self.ugovornik_ime_naziv_entry.insert(0, ime_naziv_ugovornik)
    self.ugovornik_jmbg_pib_entry.insert(0, jmbg_pib_ugovornik)
    self.ugovornik_grad_entry.insert(0, grad_ugovornik)
    self.ugovornik_ulica_broj_entry.insert(0, ulica_broj_ugovornik)
    self.ugovornik_br_tel_entry.insert(0, br_tel_ugovornik)
    self.osiguranik_ime_naziv_entry.insert(0, ime_naziv_osiguranik)
    self.osiguranik_jmbg_pib_entry.insert(0, jmbg_pib_osiguranik)
    self.osiguranik_grad_entry.insert(0, grad_osiguranik)
    self.osiguranik_ulica_broj_entry.insert(0, ulica_broj_osiguranik)
    self.osiguranik_br_tel_entry.insert(0, br_tel_osiguranik)
    self.vrsta_entry.insert(0, vrsta)
    self.marka_tip_entry.insert(0, marka_tip)
    self.broj_sasije_entry.insert(0, broj_sasije)
    self.broj_motora_entry.insert(0, broj_motora)
    self.namjena_entry.insert(0, namjena)
    self.godina_proizvodnje_entry.insert(0, godina_proizvodnje)
    self.snaga_kw_entry.insert(0, snaga_kw)
    self.zapremina_ccm_entry.insert(0, zapremina_ccm)
    self.broj_mjesta_entry.insert(0, broj_mjesta)
    self.nosivost_kg_entry.insert(0, nosivost_kg)
    self.datum_od_entry.insert(0, datum_od)
    self.datum_do_entry.insert(0, datum_do)
    self.vrijeme_od_entry.insert(0, vrijeme_od)
    self.vrijeme_do_entry.insert(0, vrijeme_do)
    self.porez_entry.insert(0, porez)
    self.za_naplatu_entry.insert(0, za_naplatu)
    self.sklopljeno_u_entry.insert(0, sklopljeno_u)
    self.sklopljeno_dana_entry.insert(0, sklopljeno_dana)
if ticket in result_df['ticket'].values: continue print('-' * 80) print(ticket) try: df_ticket_data = df_adj_close[['date', ticket]] except: print('failed to find ticket: ' + ticket) continue df_ticket_data = df_ticket_data[df_ticket_data[ticket] > 0.] df_ticket_data = df_ticket_data.reindex() date_data = df_ticket_data.date try: min_year = datetime.strptime(date_data.min(), '%Y-%M-%d').year except: print(ticket + ' has no date data!') continue max_year = datetime.strptime(date_data.max(), '%Y-%M-%d').year if min_year >= max_year: print('%s not processed because of min_year >= max_year' % (ticket)) continue returns = np.empty((MAX_ITER, )) i = 0 failed = 0 while i < MAX_ITER: data = get_data_random_dates(df_ticket_data, min_year, max_year) bah = BuyAndHoldInvestmentStrategy()
# Fragment of a loop over timestamps `dt` for a given `year_val`.  It keeps
# a 4-month June-September window (`count_months`) and maintains the two
# preceding day-stamps in `past_dates`.
if dt != (year_val + '-06-01T00:00:00') and count_months == 0:
    # Outside the window for this year: skip the timestamp.
    continue
elif dt == (year_val + '-06-01T00:00:00'):
    # Window opens on 1 June; seed the two preceding dates in late May.
    count_months = 4
    past_dates = [year_val + '-05-29T00:00:00', year_val + '-05-30T00:00:00']
    # past_dates=[year_val+'-05-28',year_val+'-05-29']
elif dt == (year_val + '-07-01T00:00:00') or dt == (year_val + '-08-01T00:00:00') or dt == (year_val + '-09-01T00:00:00'):
    count_months -= 1
elif dt == (year_val + '-10-01T00:00:00'):
    # Past the window; stop processing this year.
    break
if dt != (year_val + '-06-01T00:00:00') and count_months != 0:
    # Slide the two-date window forward by one day: drop the older date
    # and compute the next calendar day from the newer one.
    past_dates[0] = past_dates[1]
    prev_date = str(past_dates[0]).replace('T', '')
    #print(prev_date)
    next_date = datetime.strptime(prev_date, '%Y-%m-%d%H:%M:%S')
    next_date = next_date + timedelta(1)
    next_date = datetime.strftime(next_date, '%Y-%m-%d')
    next_date = next_date + 'T00:00:00'
    past_dates[1] = next_date
# Fixed grid point recorded for every retained timestamp.
lat = 5.0
longitude = 65.0
print(dt)
dict_grid[dt] = {}
list_values.append(dt)
list_values_prev1.append(past_dates[0])
list_values_prev2.append(past_dates[1])
if dt == '2013-06-01T00:00:00':
    # First retained timestamp of the first year opens the header row.
    list_header.append('time')
def fetch_arbitrage_data(self, dateofanalysis):
    """Return the arbitrage documents analysed on a given day.

    :param dateofanalysis: date string in ``YYYY-MM-DD`` format
    :return: cursor over matching documents in ``arbitragecollection``
    """
    analysis_day = datetime.strptime(dateofanalysis, '%Y-%m-%d')
    return arbitragecollection.find({'dateOfAnalysis': analysis_day})
# Fixed: import from the public `datetime` package, not the private
# `_datetime` C-accelerator module.
from datetime import datetime, timedelta
import calendar


def prev_month(date):
    """Return *date* shifted back by the length of the previous month.

    Note: this script's definition of "a month ago" subtracts the number
    of days in the previous month, so e.g. 31 March maps to 3 March.
    """
    year = date.year
    month = date.month - 1
    if date.month == 1:
        # January rolls back to December of the previous year.
        year = date.year - 1
        month = 12
    return date - timedelta(days=calendar.monthrange(year, month)[1])


print('Yesterday was {}'.format(
    (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')))
print('Tomorrow will be {}'.format(
    (datetime.now() + timedelta(days=1)).strftime('%Y-%m-%d')))
print('Month ago was {}'.format(
    prev_month(datetime.now()).strftime('%Y-%m-%d')))

# Example: parsing a timestamp with fractional seconds.
dt = datetime.strptime('01/01/17 12:10:03.234567', '%d/%m/%y %H:%M:%S.%f')
# Fixed: import from the public `datetime` package, not the private
# `_datetime` C-accelerator module.
from datetime import datetime

z = '22/12/2012 '

# Convert to a date.  Fixed: the raw string carries a trailing space,
# which makes strptime raise ValueError ("unconverted data remains"),
# so strip it before parsing.
f = datetime.strptime(z.strip(), '%d/%m/%Y')

# Print in a chosen format.
print(f.strftime('%d/%m/%Y'))
# https://blog.alura.com.br/lidando-com-datas-e-horarios-no-python/

# Print without leading zeros.  Fixed: the original referenced the
# undefined names `l2` and `time` (NameError) and used the
# Windows-only %#d flag; these portable equivalents print the same thing.
print('{}/{}/{}'.format(f.day, f.month, f.year))
print(f.strftime('%Y %m %d').replace(' 0', ' '))
from collections import deque
# NOTE(review): `_datetime` is the private C-accelerator module; the
# public spelling is `from datetime import ...` — confirm before changing.
from _datetime import datetime, timedelta

# Robot specs arrive as one ';'-separated line of "name-seconds" pairs.
data = input().split(';')
robots = deque([])
products = deque([])
# The assembly line's start time, e.g. "08:00:00".
start_time = datetime.strptime(input(), '%H:%M:%S')
time_add = timedelta(seconds=1)
# The first product enters the line one second after the start time.
product_time = start_time + time_add
robot = {}
# Adding robots as dictionary to the robots deque
for el in data:
    robot = {}
    name, time = el.split('-')
    time = int(time)
    robot['name'] = name
    robot['processing_time'] = time
    # Every robot is initially free at the first product tick.
    robot['available_at'] = product_time
    robots.append(robot)
product = input()
# Adding products to the products deque
while product != 'End':
    products.append(product)
    product = input()
# Looping through product line and robots to calculate processing time for each product
while len(products) > 0:
    current_product = products.popleft()
    # NOTE(review): the rest of this processing loop is not present in
    # this excerpt — the snippet is truncated here.
# NOTE(review): `signup_id` is interpolated straight into the SQL text —
# SQL-injection risk; prefer a parameterized query, e.g.
# cursor.execute('select * from signup where signup_id=%s', (signup_id,)).
cursor.execute('select * from signup where signup_id="{signup_id}"'.format(signup_id=signup_id))
for old_state in cursor:
    print(old_state)
    signup_data = old_state['signup_data']
    print(signup_data)
    # Regexes locating the offer_expiry value inside the serialized blob.
    url_regex = r'offer_expiry": "(.*)", "idd_consultation'
    before_regex = r'(.*"offer_expiry": ")'
    after_regex = r'(", "idd_consultation.*)'
    results = re.findall(url_regex, signup_data)
    # Assumes exactly one offer_expiry match per row — IndexError otherwise.
    ddate = results[0]
    print(ddate)
    #_date_time_read_formatting = "%Y-%m-%dT%H:%M:%S+00:00"
    datetime_date = datetime.strptime(ddate, "%Y-%m-%dT%H:%M:%S+00:00")
    print("Existing expiry: " + str(datetime_date))
    d = date.today()
    print("Todays date: " + str(d))
    # Move the expiry date to yesterday while keeping the original
    # time-of-day from the stored expiry.
    new_expiry = d - relativedelta(days=1)
    new_offer_expiry_date = date(new_expiry.year, new_expiry.month, new_expiry.day)
    new_offer_expiry_time = time(datetime_date.hour, datetime_date.minute, datetime_date.second)
    #print("New expiry: " + str(new_offer_expiry))
    #print("New expiry: " + str(new_offer_expiry_time))
    # Rebuild the ISO-8601 "+00:00" string the blob format expects.
    nedate = str(new_offer_expiry_date)
    netime = str(new_offer_expiry_time)
    ne_expiry_string = (nedate + "T" + netime + "+00:00")
cursor.execute('select * from signup where signup_id="{signup_id}"'.format(signup_id=signup_id))
def measure_curr_application_time(process):
    """Return how long *process* has been running, in whole minutes.

    :param process: WMI-style process object whose ``CreationDate`` is a
        ``YYYYMMDDHHMMSS.ffffff±UUU`` string; everything after the first
        ``.`` is discarded before parsing.
    """
    process_creation_date = datetime.strptime(process.CreationDate.split(".")[0],
                                              "%Y%m%d%H%M%S")
    delta = datetime.now() - process_creation_date
    # Fixed: the original used `delta.seconds`, which ignores `delta.days`
    # and therefore wraps every 24 hours; total_seconds() counts the full
    # elapsed time.
    return int(delta.total_seconds() / 60)
import os
# Fixed: import from the public `datetime` package, not the private
# `_datetime` C-accelerator module.
from datetime import datetime, timedelta
import sys
import glob
import numpy as np

# Radar grid dimensions (columns x rows).
#NX = 27
#NY = 29
NX = 36
NY = 37

dir_input = "/dados/radar/saoroque/ppi/level1_tamanduatei/2015/01"
dir_output = "/dados/radar/saoroque/ppi/level1_tamanduatei_txt/2015/01"

# Time range of radar volumes to convert from binary .dat to .txt.
start = datetime.strptime("201501312310", "%Y%m%d%H%M")
end = datetime.strptime("201501312350", "%Y%m%d%H%M")

datehour = start
while datehour <= end:
    # All .dat files belonging to the current hour.
    pattern1 = datetime.strftime(datehour, "*%Y%m%d_%H*.dat")
    files = glob.glob(os.path.join(dir_input, pattern1))
    nfiles = len(files)
    for file in sorted(files):
        filename = os.path.basename(file)
        data = np.fromfile(file.strip(), dtype=np.float32).reshape((NY, NX), order='C')
        # 255 marks "no data" in the radar files; write it out as -99.
        np.place(data, data==255, -99)
        #np.place(data, data<0, 0.0)
        txt_file = os.path.join(dir_output, filename.replace(".dat", ".txt"))
        np.savetxt(txt_file, data, fmt='%03d')
    # Fixed: this increment was commented out, so the loop never advanced
    # and spun forever on the same hour.
    datehour = datehour + timedelta(hours=1)
def US11():
    """GEDCOM user story US11: flag individuals in a bigamous relationship.

    Reads spouse/family/death records from the shared SQLite connection
    ``conn`` and prints an error for every individual whose two marriage
    intervals overlap without an intervening divorce or spouse death.

    NOTE(review): the checks below index ``marr_date``/``div_date`` with
    positions from ``enumerate(set(...))`` — set iteration order does not
    correspond to insertion order, so ``p`` may not line up with the list
    entries for that individual; verify against the original data flow.
    """
    HID = []
    WID = []
    HName = []   # NOTE(review): collected but never used below
    WName = []   # NOTE(review): collected but never used below
    div_date = []
    marr_date = []
    SP_ID = []
    check_list = []
    #conn = sqlite3.connect('DATA.db')
    cur = conn.cursor()
    # Individuals with more than one spouse reference (comma-separated).
    cur.execute("SELECT SPOUSE FROM INDIVIDUAL WHERE LENGTH(SPOUSE)>5")
    rows = cur.fetchall()
    for spid in rows:
        SP_ID = list(spid)
        SP_ID = SP_ID[0].split(',')
        for spid in SP_ID:
            cur.execute(
                "SELECT MARRIED,DIVORCED,HUSBAND_ID,HUSBAND_NAME,WIFE_ID,WIFE_NAME FROM FAMILY WHERE ID=?", (spid, ))
            rows1 = cur.fetchall()
            for m, d, hid, hnm, wid, wnm in rows1:
                # Strip everything but alphanumerics/spaces (and '@' for ids).
                marr_date.append(re.sub(r'[^0-9a-zA-Z ]', '', str(m)))
                div_date.append(re.sub(r'[^0-9a-zA-Z ]', '', str(d)))
                HID.append(re.sub(r'[^@0-9a-zA-Z ]', '', str(hid)))
                HName.append(re.sub(r'[^0-9a-zA-Z ]', '', str(hnm)))
                WID.append(re.sub(r'[^@0-9a-zA-Z]', '', str(wid)))
                WName.append(re.sub(r'[^0-9a-zA-Z ]', '', str(wnm)))
    # A duplicated husband id means this husband appears in two families.
    if len(HID) != len(set(HID)):
        for p, id in enumerate(set(HID)):
            div = div_date[p]
            if div != "NA":
                # do something when there is divorce date for the individual
                print(div)
                mdt1 = datetime.strptime(str(marr_date[p]), "%d %b %Y")
                mdt1 = datetime.date(mdt1)
                mdt2 = datetime.strptime(str(marr_date[p + 1]), "%d %b %Y")
                mdt2 = datetime.date(mdt2)
                div = datetime.strptime(str(div), "%d %b %Y")
                div = datetime.date(div)
                # Order the two marriages chronologically.
                if mdt1 < mdt2:
                    end = mdt2
                    start = mdt1
                else:
                    end = mdt1
                    start = mdt2
                # Divorce inside the interval -> not bigamous.
                if start < div < end:
                    continue
                else:
                    check_list.append(id)
                    print("ERROR: US11: ID-", id, "is in a bigamous relationship!")
            else:
                mdt1 = datetime.strptime(str(marr_date[p]), "%d %b %Y")
                mdt1 = datetime.date(mdt1)
                mdt2 = datetime.strptime(str(marr_date[p + 1]), "%d %b %Y")
                mdt2 = datetime.date(mdt2)
                if mdt1 < mdt2:
                    pos = p
                    end = mdt2
                    start = mdt1
                else:
                    pos = p + 1
                    end = mdt1
                    start = mdt2
                # No divorce recorded: check whether the first wife died
                # before the second marriage started.
                cur.execute("SELECT DEATH FROM INDIVIDUAL WHERE ID=?", (WID[pos], ))
                rr = cur.fetchall()
                for d in rr:
                    d = re.sub(r'[^0-9a-zA-Z ]', '', str(d))
                    if d != "NA":
                        d = datetime.strptime(str(d), "%d %b %Y")
                        d = datetime.date(d)
                        if start < d < end:
                            continue
                        else:
                            check_list.append(id)
                            print("ERROR: US11: ID-", id, "has bigamous relationship!")
                    else:
                        print("ERROR: Insufficient data - The individual", id, " may or may not have a bigamous relationship")
    # Symmetric check: a duplicated wife id means she appears in two families.
    if len(WID) != len(set(WID)):
        for p, id in enumerate(set(WID)):
            div = div_date[p]
            if div != "NA":
                print(div)
                # do something when there is divorce date for the individual
                mdt1 = datetime.strptime(str(marr_date[p]), "%d %b %Y")
                mdt1 = datetime.date(mdt1)
                mdt2 = datetime.strptime(str(marr_date[p + 1]), "%d %b %Y")
                mdt2 = datetime.date(mdt2)
                div = datetime.strptime(str(div), "%d %b %Y")
                div = datetime.date(div)
                if mdt1 < mdt2:
                    end = mdt2
                    start = mdt1
                else:
                    end = mdt1
                    start = mdt2
                if start < div < end:
                    continue
                else:
                    check_list.append(id)
                    print("ERROR: US11: ID-", id, "is in a bigamous relationship!")
            else:
                mdt1 = datetime.strptime(str(marr_date[p]), "%d %b %Y")
                mdt1 = datetime.date(mdt1)
                mdt2 = datetime.strptime(str(marr_date[p + 1]), "%d %b %Y")
                mdt2 = datetime.date(mdt2)
                if mdt1 < mdt2:
                    pos = p
                    end = mdt2
                    start = mdt1
                else:
                    pos = p + 1
                    end = mdt1
                    start = mdt2
                # No divorce recorded: check the husband's death date.
                cur.execute("SELECT DEATH FROM INDIVIDUAL WHERE ID=?", (HID[pos], ))
                rr = cur.fetchall()
                for d in rr:
                    d = re.sub(r'[^0-9a-zA-Z ]', '', str(d))
                    if d != "NA":
                        d = datetime.strptime(str(d), "%d %b %Y")
                        d = datetime.date(d)
                        if start < d < end:
                            continue
                            # NOTE(review): unreachable — follows `continue`.
                            print("No individual is in a bigamous relationship!")
                        else:
                            check_list.append(id)
                            print("ERROR: US11: ID-", id, "has bigamous relationship!")
                    else:
                        print("ERROR: Insufficient data - The individual", id, "may or may not have a bigamous relationship")
import os
# listdir/stat/time were used by earlier exploration code; kept so other
# parts of this exercise keep working.
from os import path, listdir, stat
# Fixed: import from the public `datetime` package, not the private
# `_datetime` C-accelerator module.
from datetime import datetime
import time

# Directory served by the file-server exercise.
FILES_PATH = os.path.join(
    r'D:\GitHub\netology_django\request-handling\file_server', 'files')

# Example: parse an ISO-formatted date string back into a datetime.
mt = '2019-01-02'
print(datetime.strptime(mt, '%Y-%m-%d'))
def undoExample():
    """
    An example for doing multiple undo operations.

    This is a bit more difficult than in Lab2-4 due to the fact that there
    are now several controllers, and each of them can perform operations
    that require undo support.  A single UndoController is shared by all
    of them so operations can be undone in reverse order regardless of
    which controller performed them.  Follow the code below and figure
    out how it works!
    """
    undoController = UndoController()
    ''' Start client Controller '''
    clientRepo = Repository()
    clientValidator = ClientValidator()
    clientController = ClientController(undoController, clientValidator, clientRepo)
    ''' Start car Controller '''
    carRepo = Repository()
    carValidator = CarValidator()
    carController = CarController(undoController, carValidator, carRepo)
    ''' Start rental Controller '''
    rentRepo = Repository()
    rentValidator = RentalValidator()
    # The rental controller needs the other repos to resolve clients/cars.
    rentController = RentalController(undoController, rentValidator, rentRepo, carRepo, clientRepo)
    print("---> Initial state of repositories")
    print(clientRepo)
    print(carRepo)
    print(rentRepo)
    ''' We add a client, a new car as well as a rental '''
    clientController.create(103, "1900102035588", "Dale")
    carController.create(201, "CJ 02 ZZZ", "Dacia", "Sandero")
    rentStart = datetime.strptime("2015-11-26", "%Y-%m-%d")
    rentEnd = datetime.strptime("2015-11-30", "%Y-%m-%d")
    rentController.createRental(301, clientRepo.find(103), carRepo.find(201), rentStart, rentEnd)
    print("---> We added a client, a new car and a rental")
    print(clientRepo)
    print(carRepo)
    print(rentRepo)
    ''' Now undo the performed operations, one by one '''
    # Each undo() reverts the most recent operation, across controllers.
    undoController.undo()
    print("---> After 1 undo")
    print(clientRepo)
    print(carRepo)
    print(rentRepo)
    undoController.undo()
    print("---> After 2 undos")
    print(clientRepo)
    print(carRepo)
    print(rentRepo)
    undoController.undo()
    print("---> After 3 undos")
    print(clientRepo)
    print(carRepo)
    print(rentRepo)
import csv
# Fixed: import from the public `datetime` package, not the private
# `_datetime` C-accelerator module.
from datetime import datetime

# Each CSV row stores one date split over the first three columns:
# day, month, year.  Print the earliest date in the file.
with open('date_file.csv', 'r', encoding='utf-8') as data:
    reader = csv.reader(data)
    header = next(reader)  # skip the header row
    # Iterate the reader directly (the original wrapped it in list() and
    # appended in a loop) and parse each row into a datetime.
    dates = [datetime.strptime(f"{row[0]}-{row[1]}-{row[2]}", "%d-%m-%Y")
             for row in reader]

print(min(dates).date())
# Per-user aggregation counters for the timeline analysis below.
date_last_tweet = datetime.min
num_mentions = 0
url_counter = 0
place_counter = 0
turkish_counter = 0
mobile_counter = 0
fav_counter = 0
photo_counter = 0
gif_counter = 0
withheld = False
followers = 0
following = 1
hashtag_counter = 0
geo_counter = 0
try:
    # Twitter's created_at format, e.g. "Mon Apr 16 21:30:00 +0000 2018";
    # the +0000 offset is matched literally, so the result is naive UTC.
    created_at = datetime.strptime(user['created_at'], "%a %b %d %H:%M:%S +0000 %Y")
    twitter_age = relativedelta(datetime.now(), created_at).years
    users.update_one({'id': user['id']}, {'$set': {'twitter_age': twitter_age}})
except Exception as e:
    # Best-effort: a malformed record only logs, it does not abort the run.
    print(e)
# Histograms of tweet activity by month/week/hour.
months = defaultdict(int)
weeks = defaultdict(int)
hours = defaultdict(int)
tweets = timeline.find({'user.id': user['id']}, projection)
for tweet in tweets:
    try:
        if tweet['retweeted'] == 'true':
            retweeted_counter += 1
        num_of_terms += len(re.findall(r'\w+', tweet['text']))
        # NOTE(review): snippet truncated here — the `except` clause for
        # this try block is not visible in this excerpt.
def read_to(self, dataset, input_filepath, configs, appending):
    """Read one lidar measurement text file into the netCDF *dataset*.

    :param dataset: open netCDF4 dataset to initialize or append to
    :param input_filepath: path of the raw measurement file
    :param configs: dict providing ``['parameters']['n_gates']`` and
        ``['parameters']['range_gates']``
    :param appending: False to create dimensions/variables from scratch,
        True to append the new rays to the existing variables

    Fixes vs. the original: in the appending branch the ``pitch`` and
    ``roll`` variables were written from ``_yaw`` and ``INTENSITY`` from
    ``_doppler`` (copy-paste bugs); and the ``del`` stride now uses
    ``int(nr_gates) + 1``, consistent with the slice above it.
    """
    # read required parameters from config
    nr_gates = configs['parameters']['n_gates']
    range_gates = configs['parameters']['range_gates']

    # for every file to read open and process it
    with open(input_filepath, 'r') as file:
        # read in header
        header = self.read_header(file)
        # skip next lines until the '****' separator
        line = file.readline()
        while '****' not in line:
            line = file.readline()
        # read rest of file containing measurement data
        raw_file = file.readlines()
    # file closed

    # check variables in header
    assert float(nr_gates) == header['Number of gates']
    assert float(range_gates) == header['Range gate length (m)']

    # split the info line for every ray from the rest of the data
    # measured_info contains timestamp, azimut, elevation, pitch and roll
    # i.e. 20.084231 39.81 -0.00 -0.20 0.10
    # all lines n*n_gates with 0 <= n < number of rays
    measured_info_s = raw_file[::int(nr_gates) + 1]

    # convert the measured info from list of str to numpy array
    measured_info = np.empty((len(measured_info_s), 5))
    for i, line in enumerate(measured_info_s):
        measured_info[i, :] = np.fromstring(line, dtype='f4', sep=' ')

    # convert the time stamp from decimal hour of day to epoch double format
    header_day = header['Start time'].split(' ')[0]
    measured_info[:, 0] = decimaltime2sec(measured_info[:, 0], header_day)

    # the rest is the actual data with range gate number, doppler,
    # intensity and backscatter
    # i.e. 0 -0.2173 1.135933 7.655162E-6
    #      1 -0.2173 1.127027 7.154400E-6
    del raw_file[::int(nr_gates) + 1]
    measured_data_s = [x.lstrip().rstrip() for x in raw_file]

    # convert measured data from list of str to numpy array
    measured_data = np.empty((len(measured_data_s), 4))
    for i, line in enumerate(measured_data_s):
        measured_data[i, :4] = np.fromstring(line, dtype='f4', sep=' ')

    # Dimensions
    n_rays = int(measured_info.shape[0])
    n_gates = int(header['Number of gates'])

    # Initialize the data set if not appending to existing data
    if not appending:
        # create the dimensions
        dataset.createDimension('range', header['Number of gates'])
        # the time dimension must be without limits (None) to append later
        dataset.createDimension('time', None)

        # create the coordinate variables
        # range: see header of measurement file —
        # Center of gate = (range gate + 0.5) * Gate length
        gate_length = header['Range gate length (m)']
        _range_dist = (measured_data[0:nr_gates, 0] + 0.5) * gate_length
        range_dist = dataset.createVariable('range', 'f4', ('range', ))
        range_dist.units = 'm'
        range_dist.long_name = 'range_gate_distance_from_lidar'
        range_dist[:] = _range_dist
        range_dist.comment = 'distance to center of probe volume'

        # time: store the campaign start (first measurement) timestamp in
        # the comment; timestamps are seconds since campaign start
        start_time = datetime.utcfromtimestamp(
            measured_info[0, 0]).isoformat() + 'Z'
        _time = measured_info[:, 0] - measured_info[0, 0]
        time = dataset.createVariable('time', 'f4', ('time', ))
        time.units = 's'
        time.long_name = 'time stamp'
        time[:] = _time
        time.comment = 'seconds since campaign start at ' + start_time

        # create the data variables
        # TODO: get the scan type from data
        scan_type = dataset.createVariable('scan_type', 'i', 'time')
        scan_type.units = 'none'
        scan_type.long_name = 'scan_type_of_the_measurement'
        scan_type[:] = np.ones(
            (n_rays, 1)) * get_scan_type(header['Filename'])

        # TODO: define scan ID (variable is created but never filled here)
        scan_id = dataset.createVariable('scan_id', 'i', 'time')
        scan_id.units = 'none'
        scan_id.long_name = 'scan_id_of_the_measurement'

        scan_cycle = dataset.createVariable('scan_cycle', 'i', 'time')
        scan_cycle.units = 'none'
        scan_cycle.long_name = 'scan_cycle_number'
        scan_cycle[:] = np.ones((n_rays, 1))

        # create the beam steering variables
        # azimuth
        _azimuth = measured_info[:, 1]
        azimuth_angle = dataset.createVariable('azimuth_angle', 'f4', 'time')
        azimuth_angle.units = 'degrees'
        azimuth_angle.long_name = 'azimuth_angle_of_lidar_beam'
        azimuth_angle[:] = _azimuth
        azimuth_angle.comment = 'clock-wise angle from north'
        azimuth_angle.accuracy = ''
        azimuth_angle.accuracy_info = 'max resolution 0.00072 degrees'

        # elevation
        _elevation = measured_info[:, 2]
        elevation_angle = dataset.createVariable('elevation_angle', 'f4', 'time')
        elevation_angle.units = 'degrees'
        elevation_angle.long_name = 'elevation_angle_of_lidar beam'
        elevation_angle[:] = _elevation
        elevation_angle.comment = 'upwards angle from horizontal'
        elevation_angle.accuracy = ''
        elevation_angle.accuracy_info = 'max resolution 0.00144 degrees'

        # yaw, pitch, roll — yaw is not available from the instrument
        _yaw = np.zeros(measured_info[:, 3].shape)
        yaw = dataset.createVariable('yaw', 'f4', 'time')
        yaw.units = 'degrees'
        yaw.long_name = 'lidar_yaw_angle'
        yaw[:] = _yaw
        yaw.comment = 'The home position is configured in a way that 0 ' \
                      'azimuth corresponds to north.'
        yaw.accuracy = ''

        _pitch = measured_info[:, 3]
        pitch = dataset.createVariable('pitch', 'f4', 'time')
        pitch.units = 'degrees'
        pitch.long_name = 'lidar_pitch_angle'
        pitch[:] = _pitch
        pitch.comment = ''
        pitch.accuracy = ''
        pitch.accuracy_info = 'No information on pitch accuracy available.'

        _roll = measured_info[:, 4]
        roll = dataset.createVariable('roll', 'f4', 'time')
        roll.units = 'degrees'
        roll.long_name = 'lidar_roll_angle'
        roll[:] = _roll
        roll.comment = ''
        roll.accuracy = ''
        roll.accuracy_info = 'No information on roll accuracy available.'

        # measurement variables
        # Doppler velocity
        DOPPLER = dataset.createVariable('VEL', 'f4', ('time', 'range'))
        DOPPLER.units = 'm.s-1'
        DOPPLER.long_name = 'doppler'
        DOPPLER[:, :] = measured_data[:, 1].reshape(n_rays, n_gates)

        INTENSITY = dataset.createVariable('INTENSITY', 'f4', ('time', 'range'))
        INTENSITY.units = ''
        INTENSITY.long_name = 'intensity'
        INTENSITY.comment = 'snr + 1'
        INTENSITY[:] = measured_data[:, 2].reshape(n_rays, n_gates)

        BACKSCATTER = dataset.createVariable('BACKSCATTER', 'f4', ('time', 'range'))
        BACKSCATTER.units = 'm-1.s-1'
        BACKSCATTER.long_name = 'backscatter'
        BACKSCATTER[:] = measured_data[:, 3].reshape(n_rays, n_gates)
    else:
        # get current number of stored measurements
        n_times = len(dataset.dimensions['time'])

        # time: recover the campaign start from the stored comment so new
        # timestamps stay relative to it
        _start_time = dataset.variables['time'].comment
        start_time = datetime.strptime(_start_time[-27:],
                                       "%Y-%m-%dT%H:%M:%S.%fZ")
        _time = measured_info[:, 0] - start_time.timestamp()
        dataset.variables['time'][n_times:] = _time

        # scan type
        _scan_type = np.ones(
            (n_rays, 1)) * get_scan_type(header['Filename'])
        dataset.variables['scan_type'][n_times:] = _scan_type

        # scan cycle: one higher than the last stored cycle
        _last_scan_cycle = dataset.variables['scan_cycle'][n_times - 1]
        _scan_cycle = np.ones((n_rays, 1)) * (_last_scan_cycle + 1)
        dataset.variables['scan_cycle'][n_times:] = _scan_cycle

        # azimuth
        _azimuth = measured_info[:, 1]
        dataset.variables['azimuth_angle'][n_times:] = _azimuth

        # elevation
        _elevation = measured_info[:, 2]
        dataset.variables['elevation_angle'][n_times:] = _elevation

        # yaw is not available
        _yaw = np.zeros(measured_info[:, 3].shape)
        dataset.variables['yaw'][n_times:] = _yaw

        # pitch — fixed: the original appended _yaw here
        _pitch = measured_info[:, 3]
        dataset.variables['pitch'][n_times:] = _pitch

        # roll — fixed: the original appended _yaw here
        _roll = measured_info[:, 4]
        dataset.variables['roll'][n_times:] = _roll

        # doppler
        _doppler = measured_data[:, 1].reshape(n_rays, n_gates)
        dataset.variables['VEL'][n_times:] = _doppler

        # intensity — fixed: the original appended _doppler here
        _intensity = measured_data[:, 2].reshape(n_rays, n_gates)
        dataset.variables['INTENSITY'][n_times:] = _intensity

        # backscatter
        _backscatter = measured_data[:, 3].reshape(n_rays, n_gates)
        dataset.variables['BACKSCATTER'][n_times:] = _backscatter
# NOTE(review): '_datetime' is the private C accelerator module; the
# conventional spelling is 'from datetime import datetime' — confirm intent.
from _datetime import datetime

filename = '2018-03-19_15:18:43.516286.csv'
# gets information just from the name of the .csv file:
# the first 19 characters are the 'YYYY-MM-DD_HH:MM:SS' timestamp prefix.
date = filename[:19]
date_obj = datetime.strptime(date, '%Y-%m-%d_%H:%M:%S')
day_num = date_obj.day
month_num = date_obj.month
time_of_day = date_obj.time()
day_of_week = date_obj.isoweekday()
# isoweekday(): Monday = 1 ... Sunday = 7
# (the previous "Sunday = 0 & Saturday = 6" note described a different API)


def make_subset(item):
    # Project *item* (a mapping) down to the fields used by inventory queries.
    # Raises KeyError if any of the listed keys is missing.
    return {
        key: item[key]
        for key in [
            'name',
            'size',
            'category',
            'price',
            'quantity',
        ]
    }


class InventoryQueries:
    # Query helper wrapping an inventory collection.
    def __init__(self, inventory):
        # inventory: the collection queried by the methods below.
        self.inventory = inventory

    # NOTE(review): this method's body is truncated in this chunk.
    def get_available_inventory(self):
def load_data(file, version):
    """Load a click-log CSV and split it into click and purchase events.

    Parameters:
        file: path of the input CSV without the '.csv' extension.
        version: 15 -> columns (UserId, ItemId, TimeStr, ActionType), buys
                 coded as ActionType == 2;
                 13 -> columns (UserId, ItemId, ActionType, TimeStr), buys
                 coded as ActionType == 1.

    Returns:
        (data, buys): two DataFrames with SessionId and Time columns;
        `data` holds clicks (ActionType dropped), `buys` the purchases.
    """
    if (version == 15):
        #load csv
        data = pd.read_csv(file + '.csv',
                           sep=',',
                           header=0,
                           usecols=[0, 1, 5, 6],
                           dtype={
                               0: np.int32,
                               1: np.int32,
                               5: str,
                               6: np.int32
                           })
        #specify header names
        data.columns = ['UserId', 'ItemId', 'TimeStr', 'ActionType']
        buy_key = 2
    elif (version == 13):
        #load csv( user_id,brand_id,action_type,action_time )
        data = pd.read_csv(file + '.csv',
                           sep=',',
                           header=0,
                           usecols=[0, 1, 2, 3],
                           dtype={
                               0: np.int32,
                               1: np.int32,
                               3: str,
                               2: np.int32
                           })
        #specify header names
        data.columns = ['UserId', 'ItemId', 'ActionType', 'TimeStr']
        buy_key = 1
    # Keep only clicks (0) and buys.
    data = data[data.ActionType.isin([0, buy_key])]  #click+buy
    # A session is one (UserId, TimeStr) group; the group id becomes SessionId.
    # NOTE(review): `.grouper.group_info` is a pandas internal — verify it still
    # exists in the pandas version this project pins.
    data['SessionId'] = data.groupby([data.UserId, data.TimeStr
                                      ]).grouper.group_info[0]
    # Running counter inside each session, used below to make timestamps unique.
    data['ActionNum'] = data.groupby([data.UserId, data.TimeStr]).cumcount()
    #convert time string to timestamp and remove the original column
    # (the event order within a session is encoded as +ActionNum seconds;
    #  the year 2015 is hard-coded for both dataset versions).
    if (version == 15):
        data['Time'] = data.apply(lambda x: (datetime.strptime(
            x['TimeStr'] + '-2015 00:00:00.000', '%m%d-%Y %H:%M:%S.%f'
        ) + timedelta(seconds=x['ActionNum'])).timestamp(),
                                  axis=1)
    elif (version == 13):
        data['Time'] = data.apply(lambda x: (datetime.strptime(
            x['TimeStr'] + '-2015 00:00:00.000', '%m-%d-%Y %H:%M:%S.%f'
        ) + timedelta(seconds=x['ActionNum'])).timestamp(),
                                  axis=1)
    del (data['ActionNum'])
    del (data['TimeStr'])
    data.sort_values(['SessionId', 'Time'], inplace=True)
    #output summary statistics before the click/buy split
    data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
    data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
    buys = data[data.ActionType == buy_key]
    data = data[data.ActionType == 0]
    del (data['ActionType'])
    print(
        'Loaded data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'
        .format(len(data), data.SessionId.nunique(), data.ItemId.nunique(),
                data_start.date().isoformat(), data_end.date().isoformat()))
    return data, buys
def transform(df):
    """Derive bike trips ("routes") from raw position samples and map them onto
    the Cologne street network.

    Parameters:
        df: DataFrame with columns 'number' (bike id), 'time'
            ('YYYY-MM-DD HH:MM:SS[.ffffff]' strings), 'lng' and 'lat'.

    Returns:
        list of route dicts with bikeid, date, start/end time and coordinates,
        the OSM node path under 'route' and street names under 'streetlist'.
    """
    import pandas  # NOTE(review): unused here — df is already a DataFrame
    from _datetime import datetime  # '_datetime' is the C accelerator for 'datetime'
    listofbikes = df["number"].unique()
    # Split 'time' into date, full timestamp and time-of-day columns;
    # fractional seconds are stripped before parsing.
    df["date"] = df["time"].apply(lambda t: datetime.strptime(
        t.split(".")[0], "%Y-%m-%d %H:%M:%S").date())
    df["timestamp"] = df["time"].apply(
        lambda t: datetime.strptime(t.split(".")[0], "%Y-%m-%d %H:%M:%S"))
    df["time"] = df["time"].apply(lambda t: datetime.strptime(
        t.split(".")[0].split(" ")[1], "%H:%M:%S").time())
    routes = []
    for bike in listofbikes:
        bike_df = df.loc[df['number'] == bike]
        bike_doc = bike_df.to_dict(orient='records')
        try:
            oldlng = bike_doc[0]["lng"]
            oldlat = bike_doc[0]["lat"]
            laststamp = bike_doc[0]['timestamp']
        except:
            # No samples for this bike: skip it.
            continue
        for row in bike_doc:
            currentstamp = row["timestamp"]
            # Check if bike has moved; a position change closes one route.
            if oldlng != row["lng"] or oldlat != row["lat"]:
                route = {}
                route["bikeid"] = bike
                route["date"] = str(row["date"])
                route["starttime"] = laststamp
                route["endtime"] = currentstamp
                route["startlng"] = oldlng
                route["startlat"] = oldlat
                route["endlng"] = row["lng"]
                route["endlat"] = row["lat"]
                routes.append(route)
                oldlng = row["lng"]
                oldlat = row["lat"]
                laststamp = currentstamp
    import networkx as nx
    import osmnx as ox
    ox.config(use_cache=True, log_console=True)
    # Download (or load from cache) the bikeable street graph of Cologne.
    G = ox.graph_from_place('Cologne', network_type='bike')
    printroutes = []
    ins = len(routes)
    cnt = 0
    for route in routes:
        try:
            # Snap start/end coordinates to the nearest graph nodes and store
            # the shortest path between them on the route.
            orig_node = ox.get_nearest_node(
                G, (route["startlat"], route["startlng"]))
            dest_node = ox.get_nearest_node(G,
                                            (route["endlat"], route["endlng"]))
            route1 = nx.shortest_path(G, orig_node, dest_node, weight='length')
            printroutes.append(route1)
            # cnt += 1
            # if (cnt % 100) == 0:
            #     print(str(cnt) + " | " + str(ins))
            route["route"] = route1
        except:
            # NOTE(review): bare except silently drops routes that cannot be
            # snapped or routed — consider narrowing the exception type.
            continue
    # ox.plot_graph_routes(G, listofroutes, fig_height=20, node_size=1, route_linewidth=1,
    #                      route_alpha=0.4, orig_dest_node_size=10, orig_dest_node_color='r',
    #                      node_alpha=0.2, save=True, filename="graphbikes", file_format="png")
    streetlist = []
    for route in routes:
        try:
            streets = []
            lastnode = route["route"][0]
            lastname = "empty"
            # Walk consecutive node pairs of the path and collect edge names.
            for node in route["route"]:
                if node == lastnode:
                    continue
                for d in G.get_edge_data(lastnode, node).values():
                    if 'name' in d:
                        # 'name' may also be a list on multi-named edges;
                        # only plain strings are recorded here.
                        if type(d['name']
                                ) == str and d['name'] not in lastname:
                            streetlist.append(d['name'])
                            streets.append(d['name'])
                            lastname = d['name']
                lastnode = node
            route["streetlist"] = streets
        except:
            continue
    import numpy
    for row in routes:
        # NOTE(review): removing from `routes` while iterating it can skip the
        # following element — verify all routes without a 'route' key are dropped.
        if 'route' not in row.keys():
            routes.remove(row)
            continue
        for key in row:
            # numpy ints are not JSON-serializable; cast them to plain int.
            if isinstance(row[key], numpy.int64):
                row[key] = int(row[key])
    return routes
from _datetime import datetime import re client = MongoClient("mongodb://localhost") twitterdb = client.twitterdb cursor = twitterdb.twitter_search.find({ "$text": { "$search": "austrian etihad klm lufthansa qatar singaporeair flysaa turkishairlines" } }) twitter_search = ["text", "created_at"] twitter_df1 = pd.DataFrame(list(cursor), columns=twitter_search) remove_ms = lambda x: re.sub("\+\d+\s", "", x) mk_dt = lambda x: datetime.strptime(remove_ms(x), "%a %b %d %H:%M:%S %Y") my_form = lambda x: "{:%Y-%m-%d}".format(mk_dt(x)) twitter_df1.created_at = twitter_df1.created_at.apply(my_form) twitter_df1['Airline'] = pd.np.where( twitter_df1.text.str.contains("austria", case=False), "Austrian Airlines", pd.np.where( twitter_df1.text.str.contains("etihad", case=False), "Etihad Airways", pd.np.where( twitter_df1.text.str.contains("klm", case=False), "KLM", pd.np.where( twitter_df1.text.str.contains("lufthansa", case=False), "Lufthansa", pd.np.where( twitter_df1.text.str.contains("qatar", case=False), "Qatar Airways", pd.np.where(
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# NOTE(review): '_datetime' is the private C accelerator; conventionally
# this is 'from datetime import datetime'.
from _datetime import datetime
from math import sin, cos, sqrt, atan2, radians

# Read data
test = pd.read_csv("E:/MSc/Sem 02/Machine Learning/Kaggle/test.csv")
train = pd.read_csv("E:/MSc/Sem 02/Machine Learning/Kaggle/train.csv")

# Pre-processing the Train data set
# Calculating the true time duration by subtracting pickup_time from drop_time.
# NOTE(review): `.seconds` only returns the seconds component of the timedelta
# (0..86399) and silently drops whole days; `.total_seconds()` is likely the
# intended value — TODO confirm trips never span days.
train['true_duration'] = 0
for i in range(0, len(train)):
    train.at[i, 'true_duration'] = (
        datetime.strptime(train['drop_time'][i], '%m/%d/%Y %H:%M') -
        datetime.strptime(train['pickup_time'][i], '%m/%d/%Y %H:%M')).seconds

# Calculating the distance travelled using the Pickup latitude, Pickup
# longitude, Drop latitude and Drop longitude (haversine formula; the
# remaining haversine steps continue past this excerpt).
R = 6373  # Radius of earth in km
for j in range(0, len(train)):
    # Convert the four coordinate columns to radians, row by row.
    train.at[j, 'lat1'] = radians(train['pick_lat'][j])
    train.at[j, 'lon1'] = radians(train['pick_lon'][j])
    train.at[j, 'lat2'] = radians(train['drop_lat'][j])
    train.at[j, 'lon2'] = radians(train['drop_lon'][j])
train['dlon'] = train['lon2'] - train['lon1']
train['dlat'] = train['lat2'] - train['lat1']
train['c'] = 0
def string_to_date(string):
    """Parse a dotted date string into a ``date``.

    The four-digit-year form ``"DD.MM.YYYY"`` is tried first; on a
    ``ValueError`` the two-digit-year form ``"DD.MM.YY"`` is attempted
    (a second failure propagates the ``ValueError`` to the caller).
    """
    try:
        parsed = datetime.strptime(string, "%d.%m.%Y")
    except ValueError:
        parsed = datetime.strptime(string, "%d.%m.%y")
    return parsed.date()
def date_in_term(date: datetime, term: dict):
    """Return True when *date* falls inside the term, inclusive of both ends.

    *term* maps "start" and "end" to '%m/%d/%Y' strings; only the calendar
    date of *date* is considered, its time of day is ignored.
    """
    fmt = "%m/%d/%Y"
    term_start = datetime.strptime(term.get("start"), fmt).date()
    term_end = datetime.strptime(term.get("end"), fmt).date()
    return term_start <= date.date() <= term_end
def to_multi_graph(self):
    """Build a MultiGraph from this object's graph dictionary.

    The graph is sorted chronologically by parsing each key with
    ``self._format_out`` before it is returned.
    """
    result = MultiGraph(self._database_name,
                        graphs_source=self._mg_dictionary,
                        directed=self._directed)
    result.sort_by(lambda key: datetime.strptime(key, self._format_out))
    return result
print( ' '.join([str(i),'out of',str(len(tickerSet)),'-',tickerSet[i],'options found, set ,',str(k)])) contractData = pd.Series(scrape.text.split('\n'))[pd.Series(['OptionsGet' in x for x in scrape.text.split('\n')])].iloc[0] try: contracts = re.search('View By Expiration: (.*)Call', contractData).group(1) except: contracts = re.search('View By Expiration: (.*)Put', contractData).group(1) contracts = pd.DataFrame([x.split(' ') for x in contracts.split(' | ')], columns = ['month','year']) contracts['link'] = [list(month_abbr).index(x) for x in contracts.month.tolist()] contracts['expiry'] = [contracts.year.astype(str)[x] + '-' + '0' + str(contracts.link[x]) if len(str(contracts.link[x])) == 1 else \ contracts.year.astype(str)[x] + '-' + str(contracts.link[x]) for x in range(contracts.shape[0])] contracts['contract'] = [ x.replace('-','') for x in contracts.expiry] expDate = datetime.strptime(pd.unique(re.findall('OptionsExpire at close (.*?)Strike', contractData))[0].split(', ')[1], '%d %B %Y') ccy = re.search('Currency in (.*).', contractData).group(1) for ii in range(len(contracts)): print(ii) if ii != 0: testPop = [] while len(testPop) == 0: pullData = requests.Request('GET', url + tickerSet[i] + '&m=' + contracts.expiry[ii]) pullData = pullData.prepare() dataText, track = tryRequest(funct = pullData, typ = 'sendRaise', track = track) scrape = BeautifulSoup(dataText,'lxml') testPop = tickerData = re.split('<table',str(scrape.findAll("table"))) optionData = pd.Series(tickerData)[pd.Series([(('"yfnc_tablehead1"' in x) & ('</td></tr></table></td></tr></table>' not in x) & ('</th></tr></table></td></tr></table>' not in x)) | \
def statisticsExample():
    """
    An example for the creation of statistics.
    Several cars, clients and rentals are created and then a statistics is
    calculated over them. Follow the code below and figure out how it works!
    """
    undoController = UndoController()

    # Client controller, populated with three sample clients.
    clientRepo = Repository()
    clientValidator = ClientValidator()
    clientController = ClientController(undoController, clientValidator,
                                        clientRepo)
    for client_id, cnp, client_name in ((100, "1820203556699", "Aaron"),
                                        (101, "2750102885566", "Bob"),
                                        (102, "1820604536579", "Carol")):
        clientController.create(client_id, cnp, client_name)
    # We name the instances so it's easier to create some test values later.
    aaron, bob, carol = (clientRepo.find(cid) for cid in (100, 101, 102))

    # Car controller, populated with four sample Audis.
    carRepo = Repository()
    carValidator = CarValidator()
    carController = CarController(undoController, carValidator, carRepo)
    for car_id, plate, model in ((200, "CJ 01 AAA", "A3"),
                                 (201, "CJ 01 BBB", "A4"),
                                 (202, "CJ 01 CCC", "A5"),
                                 (203, "CJ 01 DDD", "A6")):
        carController.create(car_id, plate, "Audi", model)
    audiA3, audiA4, audiA5, audiA6 = (carRepo.find(cid)
                                      for cid in (200, 201, 202, 203))

    # Rental controller, filled with sample rentals over late 2015.
    rentRepo = Repository()
    rentValidator = RentalValidator()
    rentController = RentalController(undoController, rentValidator, rentRepo,
                                      carRepo, clientRepo)

    def day(text):
        # Parse an ISO-style "YYYY-MM-DD" string into a datetime.
        return datetime.strptime(text, "%Y-%m-%d")

    sample_rentals = (
        (300, aaron, audiA3, "2015-11-20", "2015-11-22"),
        (301, carol, audiA5, "2015-11-24", "2015-11-25"),
        (302, carol, audiA6, "2015-12-10", "2015-12-12"),
        (303, aaron, audiA4, "2015-11-21", "2015-11-25"),
        (304, aaron, audiA3, "2015-11-24", "2015-11-27"),
        (305, bob, audiA5, "2015-11-26", "2015-11-27"),
        (306, carol, audiA6, "2015-12-15", "2015-12-20"),
        (307, bob, audiA4, "2015-12-01", "2015-12-10"),
        (308, carol, audiA4, "2015-12-11", "2015-12-15"),
        (309, aaron, audiA5, "2015-11-28", "2015-12-02"),
    )
    for rental_id, client, car, start, end in sample_rentals:
        rentController.createRental(rental_id, client, car, day(start),
                                    day(end))

    # Print the statistic: cars ordered by how often they were rented.
    for cr in rentController.mostRentedCars():
        print(cr)
def all(self, request, *args, **kwargs):
    """
    Returns all available events
    Or Return Matched events if following params are passed:
    1. campus: Campus of the event (repeatable query param)
    2. start_at: start datetime (API-wide DATETIME_FORMAT)
    3. end_at: end datetime (API-wide DATETIME_FORMAT)
    4. preferences: list of food preferences (slugs, repeatable)

    The response is paginated when a paginator is configured.
    """
    queryset = self.queryset
    campus_ids = request.query_params.getlist('campus', None)
    preference_slugs = request.query_params.getlist('preferences', None)
    # Parse the optional time filters with the project-wide datetime format;
    # each stays None when its query param is absent or empty.
    start_at = datetime.strptime(
        request.query_params.get('start_at'), settings.
        REST_FRAMEWORK['DATETIME_FORMAT']) if request.query_params.get(
            'start_at', None) else None
    end_at = datetime.strptime(request.query_params.get('end_at'),
                               settings.REST_FRAMEWORK['DATETIME_FORMAT']
                               ) if request.query_params.get(
                                   'end_at', None) else None
    if campus_ids or preference_slugs or start_at or end_at:
        # Get Matched Events — build one AND-combined Q from the given filters.
        query_params = Q()
        if campus_ids:
            query_params = query_params & Q(
                location__campus_id__in=campus_ids)
        if preference_slugs:
            query_params = query_params & Q(
                preferences__food_preference__slug__in=preference_slugs)
        if start_at and end_at:
            # Both Start and End time are specified
            # Filter Events which lie between the time period
            query_params = query_params & Q(start_at__gte=start_at) & Q(
                end_at__lte=end_at)
        elif start_at:
            # Start at is specified
            # Return events which start on the same date after this time
            query_params = query_params & Q(
                start_at__contains=start_at.date()) & Q(
                    start_at__gte=start_at)
        elif end_at:
            # End at is specified
            # Return events which end on the same date before this time
            query_params = query_params & Q(
                end_at__contains=end_at.date()) & Q(end_at__lte=end_at)
        if self.user_id:
            # Exclude the events User has joined or created
            queryset = queryset.exclude(users__user_id=self.user_id)
        # distinct() is required: the preference join can duplicate rows.
        queryset = queryset.filter(query_params).distinct()
    else:
        # Get All Events
        queryset = self.queryset
    page = self.paginate_queryset(queryset)
    if page is not None:
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
    serializer = self.get_serializer(queryset, many=True)
    return Response(serializer.data)
def check_endingtime(end_datetime):
    """Return True if *end_datetime* lies strictly in the past.

    Both the current time and *end_datetime* are truncated to whole minutes
    (seconds and microseconds dropped via a '%Y-%m-%d %H:%M' round-trip)
    before the comparison.
    """
    fmt = '%Y-%m-%d %H:%M'
    now_minute = datetime.strptime(datetime.now().strftime(fmt), fmt)
    end_minute = datetime.strptime(end_datetime.strftime(fmt), fmt)
    return end_minute < now_minute
def test_convert_to_date(self):
    """Common.convert_to_date should parse a 'DD.MM.YYYY' string into a date."""
    expected = datetime.strptime("19.11.2016", "%d.%m.%Y").date()
    actual = Common.convert_to_date("19.11.2016")
    self.assertEqual(actual, expected, "date convert should be equal")
name = "sukanya"


def calculator(year):
    """Print a greeting for the module-level `name` with the age computed as
    the current calendar year minus the given birth *year*."""
    current_year = datetime.now().year
    age = current_year - year
    print("hellooooo %s your age is: %d" % (name, age))


# Prompt until a valid mm/dd/yyyy date is entered, then report the age.
loop = "f"
while loop == "f":
    dob = input("enter date in mm/dd/yyyy")
    # Quick shape check before parsing. BUG FIX: the old pattern made the
    # slashes optional ('/?') and allowed 2-digit years, so inputs like
    # '12251990' passed the regex and crashed the tuple unpack below.
    pattern = re.match(r'[0-9]{1,2}/[0-9]{1,2}/[0-9]{4}$', dob)
    if pattern is not None:
        month, day, year = map(int, dob.split('/'))
        # Coarse range check; year <= 2015 is this script's cut-off.
        if (day <= 31 and month <= 12 and year <= 2015):
            try:
                # BUG FIX: '%y' (two-digit year) raised an unhandled
                # ValueError on 4-digit years like 1990; '%Y' matches the
                # mm/dd/yyyy format the prompt asks for. strptime also
                # rejects impossible dates such as 02/31 that the coarse
                # check above lets through.
                d = datetime.strptime(dob, "%m/%d/%Y")
            except ValueError:
                print("enter the date in correct format mm/dd/yyyy")
                continue
            print(d)
            calculator(year)
            loop = "t"
        else:
            print("enter the date in correct format mm/dd/yyyy")
            loop = "f"
    else:
        print("enter the date in correct format")
        loop = "f"
def statisticsExample():
    """
    An example for the creation of statistics.
    Several cars, clients and rentals are created and then a statistics is
    calculated over them. Follow the code below and figure out how it works!
    """
    undoController = UndoController()

    # Client controller, populated with three sample clients.
    clientRepo = Repository()
    clientValidator = ClientValidator()
    clientController = ClientController(undoController, clientValidator,
                                        clientRepo)
    for client_id, cnp, client_name in ((100, "1820203556699", "Aaron"),
                                        (101, "2750102885566", "Bob"),
                                        (102, "1820604536579", "Carol")):
        clientController.create(client_id, cnp, client_name)
    # We name the instances so it's easier to create some test values later.
    aaron, bob, carol = (clientRepo.find(cid) for cid in (100, 101, 102))

    # Car controller, populated with four sample Audis.
    carRepo = Repository()
    carValidator = CarValidator()
    carController = CarController(undoController, carValidator, carRepo)
    for car_id, plate, model in ((200, "CJ 01 AAA", "A3"),
                                 (201, "CJ 01 BBB", "A4"),
                                 (202, "CJ 01 CCC", "A5"),
                                 (203, "CJ 01 DDD", "A6")):
        carController.create(car_id, plate, "Audi", model)
    audiA3, audiA4, audiA5, audiA6 = (carRepo.find(cid)
                                      for cid in (200, 201, 202, 203))

    # Rental controller, filled with sample rentals over late 2015.
    rentRepo = Repository()
    rentValidator = RentalValidator()
    rentController = RentalController(undoController, rentValidator, rentRepo,
                                      carRepo, clientRepo)

    def day(text):
        # Parse an ISO-style "YYYY-MM-DD" string into a datetime.
        return datetime.strptime(text, "%Y-%m-%d")

    sample_rentals = (
        (300, aaron, audiA3, "2015-11-20", "2015-11-22"),
        (301, carol, audiA5, "2015-11-24", "2015-11-25"),
        (302, carol, audiA6, "2015-12-10", "2015-12-12"),
        (303, aaron, audiA4, "2015-11-21", "2015-11-25"),
        (304, aaron, audiA3, "2015-11-24", "2015-11-27"),
        (305, bob, audiA5, "2015-11-26", "2015-11-27"),
        (306, carol, audiA6, "2015-12-15", "2015-12-20"),
        (307, bob, audiA4, "2015-12-01", "2015-12-10"),
        (308, carol, audiA4, "2015-12-11", "2015-12-15"),
        (309, aaron, audiA5, "2015-11-28", "2015-12-02"),
    )
    for rental_id, client, car, start, end in sample_rentals:
        rentController.createRental(rental_id, client, car, day(start),
                                    day(end))

    # Print the statistic: cars ordered by how often they were rented.
    for cr in rentController.mostRentedCars():
        print(cr)
def color_cells(row_start, row_count, column):
    """Colour the cells of *column* by time of day: green when the hour lies
    in 8..18 (inclusive), yellow otherwise.

    Iterates rows in range(row_start, row_count). Relies on module-level
    `sheet`, `green_fill` and `yellow_fill` defined elsewhere in this file.
    """
    for i in range(row_start, row_count):
        time = sheet.cell(row=i, column=column).value
        print(i, time)
        try:
            if 8 <= time.hour <= 18:
                sheet.cell(row=i, column=column).fill = green_fill
            else:
                sheet.cell(row=i, column=column).fill = yellow_fill
        except:
            # Cell values without an .hour attribute (non-datetime) are skipped.
            continue


# 2019 US Eastern daylight-saving window (UTC-4 instead of UTC-5):
# Begins March 10, 2AM Ends November 3rd, 2AM. -4 instead of -5.
edt_start_str = "2019-03-10 02:00:00"
edt_start_obj = datetime.strptime(edt_start_str, '%Y-%m-%d %H:%M:%S')
edt_end_str = "2019-11-3 02:00:00"
edt_end_obj = datetime.strptime(edt_end_str, '%Y-%m-%d %H:%M:%S')


def convert_edt(row_start, row_count, column):
    # Shift timestamps that fall inside the 2019 EDT window back one hour.
    # NOTE(review): this definition is truncated in this chunk — the trailing
    # `except:` below has no body here.
    print(column)
    for i in range(row_start, row_count):
        try:
            time = sheet.cell(row=i, column=column).value
            if edt_start_obj <= time <= edt_end_obj:
                edt_time = time - timedelta(hours=1)
                sheet.cell(row=i, column=column).value = edt_time
            else:
                print("")
        except:
def activate(self, date_str, date_bool = False):
    """Activate this job: set its date and derive its job id from *date_str*.

    When *date_bool* equals False, today's date is used; otherwise
    *date_str* is parsed with the '%m/%d/%Y' format. The job id is always
    generated from the raw *date_str*.
    """
    if date_bool == False:
        self.date = date.today()
    else:
        self.date = datetime.strptime(date_str, '%m/%d/%Y')
    self.job_id = self.generateId(date_str)
def test_convert_to_time(self):
    """Common.convert_to_time should parse an 'HH:MM' string into a time."""
    expected = datetime.strptime("15:46", "%H:%M").time()
    actual = Common.convert_to_time("15:46")
    self.assertEqual(actual, expected, "time convert should be equal")
# Calcula o total investido total = 0 for j in titulos: total = total + j[i][1] percentagem = list() for j in titulos: percentagem.append(floor(j[i][1]/total*100*100)/100) percentagens.append([titulos[0][i][0],percentagem]) titulos = list() for i in range(0,len(percentagens[0][1])): datas = list() valores = list() for j in percentagens: date_object = datetime.strptime(j[0], '%Y/%m/%d') datas.append(date_object) valores.append(j[1][i]) titulos.append([datas, valores]) for i in titulos: plt.plot(i[0], i[1], '.-') plt.hold(True) plt.show()