def format_date(self, date):
    # Get rid of eg. 2008-06-26T08:30:00
    self.date = date
    if "T" in self.date:
        self.date = date.split("T")[0]
    elif self.date == "Varies by indicator":
        return ''
    elif "00:00:00" in self.date:
        # strip the trailing space left over from the split
        self.date = date.split('00:00:00')[0].strip()
    elif self.date == "Every 5-10 minutes":
        return ''
    try:
        if ", " in self.date:
            self.valid_date = datetime.strptime(self.date, "%B %d, %Y")
        elif "/" in self.date:
            self.valid_date = datetime.strptime(self.date, "%Y/%m/%d")
        elif len(date) == 4:
            self.valid_date = datetime.strptime(self.date, "%Y")
        else:
            self.valid_date = datetime.strptime(self.date, '%Y-%m-%d')
        return str(self.valid_date.date())
    except ValueError:
        return ''
def format_date(date):
    new_date = {}
    new_date['year'] = int(date.split('-')[0])
    new_date['month'] = int(date.split('-')[1])
    new_date['day'] = int(date.split('-')[2])
    return new_date
def waterdata_func(site_num, begin_year, begin_mon, begin_date, end_year, end_mon, end_date):
    import urllib
    #site_num = '08078000'
    begin_date = str(datetime(begin_year, begin_mon, begin_date).date())
    end_d = str(datetime(end_year, end_mon, end_date).date())
    url = 'http://waterdata.usgs.gov/nwis/dv?cb_00060=on&cb_00065=on&format=rdb&period=&begin_date=' + begin_date + '&end_date=' + end_d + '&site_no=' + site_num
    data = urllib.urlopen(url)
    discharge = []
    dates = []
    for lines in data.readlines()[30:]:
        da = lines.split()
        discharge.append(float(da[3]))
        date = da[2]
        year = int(date.split('-')[0])
        month = int(date.split('-')[1])
        day = int(date.split('-')[2])
        dates.append(datetime(year, month, day))
    discharges = 0.028316 * np.array(discharge)  # converting to cubic meters per second
    data_date = np.array(dates)
    return discharges, data_date
def main(self):
    # Creation date
    date = str(datetime.today()).split('.')[0]
    day = date.split(' ')[0]
    time = 'T' + date.split(' ')[1] + 'Z'
    # print day + time

    # Root element
    zabbix = Element('zabbix_export')
    # Version
    version = SubElement(zabbix, 'version')
    version.text = '2.0'
    # Date
    date = SubElement(zabbix, 'date')
    date.text = day + time
    # Groups
    groups = SubElement(zabbix, 'groups')
    group = SubElement(groups, 'group')
    groupname = SubElement(group, 'name')
    groupname.text = 'AAA'
    # Templates
    templates = SubElement(zabbix, 'templates')
    template = SubElement(templates, 'template')
    templatechild = SubElement(template, 'template')
    templatechild.text = 'BBB'
    #templatechild.set = ('discription', 'BBB')
    templatename = SubElement(template, 'name')
    templatename.text = 'CCC'
    templategroups = SubElement(template, 'groups')
    templategroup = SubElement(templategroups, 'group')
    templategroupname = SubElement(templategroup, 'name')
    templategroupname.text = 'AAA'
    # while True:
    #     templategroups = SubElement(template, 'groups')
    #     templategroup = SubElement(templategroups, 'group')
    #     templategroupname = SubElement(templategroup, 'name')
    #     templategroupname.text = 'AAA'

    # Write the XML file
    #tree = ElementTree(zabbix)
    #tree.write(xmlfile, 'UTF-8', 'True')  # writes without line breaks
    f = open('test.xml', 'w')
    tree = minidom.parseString(tostring(zabbix)).toprettyxml()
    print(tree)
    f.write(tree)
    f.flush()
    f.close()
def github_date(date):
    if date is not None:
        date = date.split("T")[0]
        year, month, day = [int(p) for p in date.split("-")]
    else:
        gm = time.gmtime()
        year = gm.tm_year
        month = gm.tm_mon
        day = gm.tm_mday
    return year, month, day
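# Illustrative usage of github_date (a sketch; assumes the ISO-8601 timestamps
# returned by the GitHub API, e.g. "2013-05-07T16:10:52Z"):
# github_date("2013-05-07T16:10:52Z")  ->  (2013, 5, 7)
# github_date(None)                    ->  today's UTC date as (year, month, day)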
def valid_date(date):
    if len(date.split('/')) == 3:
        try:
            year, month, day = [int(x) for x in date.split('/')]
            return 1987 <= year <= int(
                datetime.today().year) and 0 < month <= 12 and 0 < day <= 31
        except ValueError:
            return False
    return False
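# Illustrative usage of valid_date (a sketch; assumes "YYYY/MM/DD" strings,
# matching the split('/') above):
# valid_date('2005/07/14')  ->  True
# valid_date('1980/01/01')  ->  False  (year before 1987)
# valid_date('2005-07-14')  ->  False  (wrong separator)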
def format_date(self, date):
    """
    Formats the date from STELA mode to MASTER2009
    YYYYMMDDT.HHH -> yyyy mm dd hh
    """
    d = datetime.now()
    d = d.strftime("%Y %m %d %H")
    if re.search("T", date):
        d = " ".join([date.split("T")[0].replace("-", " "),
                      date.split("T")[1].split(":")[0]])
    return d
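# Illustrative usage of format_date (a sketch; based on the split("-")/split(":")
# calls above, an ISO-style "YYYY-MM-DDTHH:MM:SS" input is assumed):
# format_date(self, "2009-03-15T12:00:00")  ->  "2009 03 15 12"
# Inputs without a "T" fall through to the current date and hour.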
def getUnixTime(dateTime):
    dateTime = str(dateTime)
    date = dateTime.split(' ')[0]
    time = dateTime.split(' ')[1]
    dt = datetime(int(date.split('-')[0]),
                  int(date.split('-')[1]),
                  int(date.split('-')[2]),
                  int(time.split(':')[0]),
                  int(time.split(':')[1]))
    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
    return int(timestamp)
def day(article_text):
    article = article_text
    if len(list(datefinder.find_dates(article))) > 0:
        date = str(list(datefinder.find_dates(article))[0])
        date = date.split()
        date = date[0]
        year, month, day = date.split('-')
        day_name = datetime.date(int(year), int(month), int(day))
        return day_name.strftime("%A")
    return "Monday"
def get_unix_time(dt):
    dt = str(dt)
    date = dt.split(' ')[0]
    time = dt.split(' ')[1]
    timestamp = datetime(
        int(date.split('-')[0]),
        int(date.split('-')[1]),
        int(date.split('-')[2]),
        int(time.split(':')[0]),
        int(time.split(':')[1])).replace(tzinfo=timezone.utc).timestamp()
    return int(timestamp)
def reverse_datetime(date):
    tmp = date.split(" ")
    date = tmp[0]
    time = tmp[1]
    d = int(date.split("-")[0])
    m = int(date.split("-")[1])
    y = int(date.split("-")[2])
    h = int(time.split(":")[0])
    mi = int(time.split(":")[1])
    date = datetime(y, m, d, h, mi, 0, 0)
    return date
def __init__(self, date=None):
    if date is not None:
        self.day = int(date.split()[1].replace(',', ''))
        self.month = date.split()[0]
        self.year = int(date.split()[2])
    else:
        now = datetime.now(EST()).date()
        self.today = now.strftime("%B %d, %Y")
        self.day = int(self.today.split()[1].replace(',', ''))
        self.month = self.today.split()[0]
        self.year = int(self.today.split()[2])
def update_graph_scatter(df, column, per):
    years = list(set(df["YEAR"]))
    n = len(years)
    data = []
    if per == 'DAY':
        for i in range(n):
            trace = go.Scatter(
                x=df[df["YEAR"] == years[i]][per].apply(
                    lambda date: date.split('-')[1]),
                y=df[df["YEAR"] == years[i]][column],
                name=str(years[i]),
                mode='lines+markers+text',
                #text=df[df["YEAR"] == years[i]][column],
                #textposition='middle center',
                textfont=dict(size=10),
                marker=dict(size=3, color=colors[i], line=dict(width=1)))
            data.append(trace)
        layout = go.Layout(
            dict(
                title=df[df["YEAR"] == years[0]][per].apply(
                    lambda date: date.split('-')[0]).tolist()[0],
                legend_bgcolor="rgb(255,255,255)",
                plot_bgcolor="rgb(255,255,255)",
                paper_bgcolor="rgb(255,255,255)",
                showlegend=True,
                margin=dict(t=50),
            ))
    else:
        for i in range(n):
            trace = go.Scatter(
                x=df[df["YEAR"] == years[i]][per],
                y=df[df["YEAR"] == years[i]][column],
                name=str(years[i]),
                mode='lines+markers+text',
                text=df[df["YEAR"] == years[i]][column],
                #textposition='middle center',
                textfont=dict(size=10),
                marker=dict(size=3, color=colors[i], line=dict(width=1)))
            data.append(trace)
        layout = go.Layout(
            dict(
                title="",
                legend_bgcolor="rgb(255,255,255)",
                plot_bgcolor="rgb(255,255,255)",
                paper_bgcolor="rgb(255,255,255)",
                showlegend=True,
                margin=dict(t=50),
            ))
    return {"data": data, "layout": layout}
def getPrices():
    session = HTMLSession()
    r = session.get(
        'https://elen.nu/timpriser-pa-el-for-elomrade-se3-stockholm')
    datetable = r.html.find('tr')
    for tr in datetable:
        date = tr.find('td', first=True)
        if date is not None:
            date = date.text
            year = date.split('-')[0]
            month = date.split('-')[1]
            splitday = date.split('-')[2]
            day = splitday.split(' ')[0]
            time = date.split(" ")[1]
            hour = time.split(":")[0]
            priceList = tr.find('td')
            counter = 0
            for price in priceList:
                counter = counter + 1
                if counter % 2 == 0:
                    amount = price.text
                    priceString = amount.split(" ")[0]
                    priceFloat = float(priceString)
                    alreadyExists = HourPrice.query.filter_by(
                        year=year, month=month, day=day, hour=hour).all()
                    if len(alreadyExists) == 0:
                        hourPrice = HourPrice(year=year, month=month, day=day,
                                              hour=hour, price=priceFloat)
                        db.session.add(hourPrice)
                        db.session.commit()
    print("Getting hourly prices")
    averagetable = r.html.find(".elspot-area-price")[2].text
    priceAverageString = averagetable.split(" ")[0]
    priceAverageFloat1 = float(priceAverageString.split(",")[0])
    priceAverageFloat2 = float(priceAverageString.split(",")[1]) / 100
    priceAverage = priceAverageFloat1 + priceAverageFloat2
    alreadyExists2 = AveragePrice.query.filter_by(year=year, month=month,
                                                  day=day).all()
    if len(alreadyExists2) == 0:
        averagePrice = AveragePrice(year=year, month=month, day=day,
                                    priceAverage=priceAverage)
        db.session.add(averagePrice)
        db.session.commit()
    print("Getting daily averages")
def search_by_month_and_day(phone_book, date):
    flag = False
    for key, value in phone_book.items():
        if len(value) == 1:
            pass
        else:
            if value[1].split('/')[0] == date.split('/')[0] and value[1].split(
                    '/')[1] == date.split('/')[1]:
                flag = True
                print(key, *value)
    if not flag:
        print('Sorry, nothing was found')
def extract(ground_truth, horizon, sou, version, model, mc, dates):
    df = pd.DataFrame()
    for date in dates[1:]:
        if date == dates[-1] and not os.path.exists("result/prediction/" + model + "/" + '_'.join([ground_truth, date, horizon, version + ".csv"])):
            date = '-'.join([date.split('-')[0], "06-30" if date.split('-')[1] <= "06" else "12-31"])
            print(date)
        if not os.path.exists("result/prediction/" + model + "/" + '_'.join([ground_truth, date, horizon, version + ".csv"])):
            print("Missing prediction in " + '_'.join([ground_truth, date, horizon, version + ".csv"]))
            continue
        df = pd.concat([df, pd.read_csv("result/prediction/" + model + "/" + '_'.join([ground_truth, date, horizon, version + ".csv"]), index_col=0)], axis=0)
    return df.loc[dates[0]:dates[-1], :]
def getDailyURLS(date):
    month = date.split('-')[0]
    day = date.split('-')[1]
    year = date.split('-')[2]
    url = 'https://www.basketball-reference.com/boxscores/?month=%s&day=%s&year=%s' % (month, day, year)
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'lxml')

    # Find which teams are playing #
    teamtables = soup.findAll('table', {'class': 'teams'})
    teamlist = []
    for tbl in teamtables:
        tbody = tbl.find('tbody')
        trlist = []
        for tr in tbody.findAll('tr'):
            trlist.append(tr)
        for row in trlist:
            for a in row.findAll('a'):
                teamlist.append(a.text)
    teamlist[:] = (value for value in teamlist if value != 'Final')

    with open('/home/Jon2Anderson/nba/boxScoreScrape/teamnames/nbateamnames.json') as json_file:
        teamnamedict = json.load(json_file)

    teamsthatplayed = [teamnamedict.get(item, item) for item in teamlist]
    listofteams = teamsthatplayed
    gamelist = []
    while len(teamsthatplayed) > 1:
        gamelist.append(teamsthatplayed[0:2])
        teamsthatplayed.pop(0)
        teamsthatplayed.pop(0)

    data = soup.findAll('p', {'class': 'links'})
    i = 0
    urls = []
    for game in data:
        first = str(data[i])
        start = "href=\""
        end = "\">Box"
        url = first[first.find(start) + len(start):first.rfind(end)]
        base = "https://www.basketball-reference.com"
        url = base + url
        urls.append(url)
        i = i + 1

    tuplelist = []
    for i in range(len(urls)):
        tuplelist.append((urls[i], gamelist[i]))
    return tuplelist
def scrapeVipMarket5():
    # UTF-8 support
    reload(sys)
    sys.setdefaultencoding('utf-8')

    now = datetime.now()
    down = Downloader('http://www.vipmarket5.mk/search/')
    content = down.get_content()
    html = unicode(content)
    linkovi = xpath.search(html, '//tr[@class="frame_content"]')
    ads = []
    for l in linkovi:
        link = "http://www.vipmarket5.mk" + xpath.get(l, '//div[@style="width:365px; height:90%; margin-top:10px;"]/b/a/@href')
        title = xpath.get(l, '//div[@style="width:365px; height:90%; margin-top:10px;"]/b/a')
        imageUrl = xpath.get(l, '//div[@style="overflow:hidden; width:150px; height: 146px; margin: 5px;"]/a/img/@src')
        download = Downloader(link)
        cont = download.get_content()
        cont = unicode(cont)
        description = xpath.get(cont, '//div[@class="feature"]/p').strip()
        if description == "":
            description = "/"
        # NOTE: this site has no categories
        category = "/"
        subcategory = "/"
        price = xpath.get(l, '//div[@style="margin-top:5px; margin-left:10px;height:155px; overflow:hidden;"]/h4/a')
        if price == u"Цена:По договор":
            value = "/"
            currency = "/"
        else:
            price = price.split(":")
            price = price[1]
            price = price.split(" ")
            value = price[0]
            if price[1] == "€":
                currency = "EUR"
            elif price[1] == "ден.":
                currency = "MKD"
        date = xpath.get(l, '//b[@style="font-weight:bold;"]')
        date = date.split(": ")
        date = date[1]
        date = date.split(".")
        date = date[2] + "-" + date[1] + "-" + date[0]
        country = u"Македонија"
        region = xpath.get(cont, '//div[@style="float:left; width: 140px; overflow:hidden; font-family: Tahoma,Geneva,sans-serif; font-weight:bold"]')
        if region == "":
            region = "/"
        ad = Ad(link, title, imageUrl, description, category, subcategory, value, currency, region, date, country)
        ads.append(ad)
    return adsToJson(ads)

#print scrapeVipMarket5()
def get_date_from_afvaltype(self, data, afvalnaam):
    try:
        date = data["datum"]
        day = date.split()[1]
        month = MONTH_TO_NUMBER[date.split()[2]]
        year = str(datetime.today().year
                   if datetime.today().month <= int(month)
                   else datetime.today().year + 1)
        return year + "-" + month + "-" + day
    except Exception as exc:
        _LOGGER.warning(
            "Something went wrong while splitting data: %r. This probably means that trash type %r is not supported on your location",
            exc, afvalnaam)
        return ""
def getAllFromEsumXML(tree, uid):
    list_tags = ['Authors', 'ArticleIds']
    doc_summary = tree.find('DocumentSummarySet').find('DocumentSummary')
    rec = {}
    columns = ["uid"]
    values = [uid]
    for child in doc_summary:
        if child.tag == 'Authors':
            authorIDs = extractAuthors(child, uid)
        elif child.tag == 'ArticleIds':
            artcolumns, artvalues = extractArticleIds(child)
            for i in range(len(artcolumns)):
                if child.text is not None and "\n" not in child.text and child.text != "null":
                    rec[artcolumns[i]] = artvalues[i]
                    columns.append(artcolumns[i])
                    values.append(artvalues[i].replace("'", ""))
        elif child.text is not None and "\n" not in child.text and child.text != "null":
            rec[child.tag] = child.text
            columns.append(child.tag)
            values.append(child.text.replace("'", "").replace('"', ""))
            if child.tag == "FullJournalName":
                journalid = check_insert_select("id", "journals", ("name",),
                                                (child.text.replace("'", "").replace('"', ""),))[0][0]
                columns.append("journalid")
                values.append(journalid)
    values.append(values[columns.index("PubDate")])
    columns.append("PubDateString")
    #logging.debug(columns, "columns")
    #logging.debug(values, "values")
    try:
        check_insert("uid", "papers", tuple(columns), tuple(values))
    except psycopg2.DataError:
        if values[columns.index("PubDate")].find("-") != -1:
            date = values[columns.index("PubDate")][0:values[columns.index("PubDate")].index("-")]
        else:
            date = values[columns.index("PubDate")]
        if len(date.split()) == 1:
            date = date + " Jan"
        if len(date.split()) == 2:
            date = date + " 01"
        try:
            date = datetime.strptime(date, '%Y %b %d').strftime('%Y %m %d')
        except ValueError:
            date = datetime.strptime(date.split()[0] + " Jan 01", '%Y %b %d').strftime('%Y %m %d')
        values[columns.index("PubDate")] = date
        check_insert("uid", "papers", tuple(columns), tuple(values))
    return rec
def parse_date(date):
    hora = date.split('_')[1]
    date = date.split('_')[0].split('-')
    year = date[0]
    month = date[1]
    day = date[2]
    if month[0] == '0':
        month = month[1]
    if day[0] == '0':
        day = day[1]
    final_date = year + '-' + month + '-' + day + '_' + hora
    return final_date
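# Illustrative usage of parse_date (a sketch; assumes a "YYYY-MM-DD_<time>"
# string, matching the split('_') and split('-') above):
# parse_date('2021-03-07_1430')  ->  '2021-3-7_1430'   (leading zeros removed)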
def fix_date(date):
    # Get rid of eg. 2008-06-26T08:30:00
    if "T" in date:
        date = date.split("T")[0]
    elif date == "Varies by indicator":
        return ''
    elif "00:00:00" in date:
        # strip the trailing space left over from the split
        date = date.split('00:00:00')[0].strip()
    try:
        valid_date = time.strptime(date, '%Y-%m-%d')
        return date
    except ValueError:
        #print 'Invalid date!', date
        return ''
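# Illustrative usage of fix_date (a sketch; assumes `time` is imported at module level):
# fix_date('2008-06-26T08:30:00')  ->  '2008-06-26'
# fix_date('Varies by indicator')  ->  ''
# fix_date('not a date')           ->  ''   (strptime fails, so '' is returned)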
def validateDate(date):
    vd = dateType(date)
    if vd > 0:
        if vd == 1:
            return date
        elif vd == 2:
            ds = date.split('-')
            return ds[2] + '-' + ds[1] + '-' + ds[0]
        else:
            ds = date.split('-')
            months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                      'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
            return ds[2] + '-' + str(months.index(ds[0]) + 1) + '-' + ds[1]
    else:
        print('Wrong date. Input date in one of possible formats: YYYY-MM-DD, DD-MM-YYYY or MMM-DD-YYYY (eg 2014-12-22, 22-12-2014 or Dec-22-2014)')
        return ''
def reschedule_termly_polls(date):
    # enable script in case it's disabled
    Script.objects.filter(slug='emis_school_administrative').update(enabled=True)
    # first destroy all existing script progress for head teachers in the annual script
    ScriptProgress.objects.filter(connection__contact__groups__name__iexact='head teachers',
                                  script__slug='emis_school_administrative').delete()
    reporters = EmisReporter.objects.filter(groups__name__iexact='head teachers')
    # Schedule annual messages
    d = datetime.datetime.now()
    dl = date.split('-')
    new_time = datetime.datetime(int(dl[0]), int(dl[1]), int(dl[2]),
                                 d.hour, d.minute, d.second, d.microsecond)
    for reporter in reporters:
        connection = reporter.default_connection
        sp = ScriptProgress.objects.create(connection=connection,
                                           script=Script.objects.get(slug='emis_school_administrative'))
        sp.set_time(new_time)
    # now the SMCs' termly scripts
    Script.objects.filter(slug='emis_smc_termly').update(enabled=True)
    # first destroy all existing script progress for SMCs in their termly script
    ScriptProgress.objects.filter(connection__contact__groups__name__iexact='smc',
                                  script__slug='emis_smc_termly').delete()
    reporters = EmisReporter.objects.filter(groups__name__iexact='smc')
    for reporter in reporters:
        connection = reporter.default_connection
        sp = ScriptProgress.objects.create(connection=connection,
                                           script=Script.objects.get(slug='emis_smc_termly'))
        sp.set_time(new_time)
def diurnalCompute(emails, PLOT=False):
    '''Diurnal plot of all emails, with years on the x axis and time of day
    on the y axis.
    Input must be a list of emails of class HeaderParser, with the key 'Date'
    containing the date.
    Outputs the (year, month, day), (hour, minute, second).
    '''
    xday = []
    ytime = []
    #for i in range(len(headers)):
    for email in emails:
        try:
            date = email['Date']
            _temp = date.split(',')
            _temp[0] = _temp[0].strip()
            email = ', '.join(_temp)
            timestamp = mktime(parsedate(date))
            mailstamp = datetime.fromtimestamp(timestamp)
            xday.append(mailstamp)
            # Time the email arrived.
            # Note that year, month and day are not important here.
            y = datetime(2010, 10, 14, mailstamp.hour, mailstamp.minute,
                         mailstamp.second)
            ytime.append(y)
        except Exception as e:
            print(email)
            print(e)
            #return headers[i]
    #figure(figsize=(20,8))
    #plot_date(xday, ytime, '.', alpha=.7)
    #xticks(rotation=30)
    if PLOT:
        diurnalPlot(xday, ytime)
    return xday, ytime
def get_flights_mobile(request):
    date = request.GET.get("date")
    if not date:
        py_date = datetime.now()
    else:
        YYMMDD = date.split("-")
        py_date = datetime.now().replace(year=int(YYMMDD[0]),
                                         month=int(YYMMDD[1]),
                                         day=int(YYMMDD[2]))
    dest = request.GET.get("dest")
    if not dest:
        dest = ".*"
    limit = request.GET.get("limit")
    if not limit:
        limit = 5
    limit = int(limit)  # query-string values arrive as strings; slicing needs an int
    flights = Flight.objects.filter(destination__iregex=dest,
                                    flight_date__gte=py_date).order_by('flight_date')[:limit]
    print("HERE")
    flights_dict = {}
    flights_arr = []
    for f in flights:
        flights_arr.append({
            "flight_number": f.flight_number,
            "flight_date": f.flight_date,
            "destination": f.destination,
            "status": f.status
        })
    flights_dict['flights'] = flights_arr
    flights_dict['date'] = py_date
    return JsonResponse(flights_dict)
def get_id_period(self, date):
    from_iso_dt, to_iso_dt = util.inc_dt(date.strftime(util.ISO8601_DATE),
                                         util.ISO8601_DATE, self.PERIOD_TYPE)
    from_dt = util.get_dt(from_iso_dt, util.ISO8601_DATE)
    to_dt = util.get_dt(to_iso_dt, util.ISO8601_DATE)
    response = self.br.open(self.search_url)

    fields = {}
    date = date.strftime(self.request_date_format)
    date_parts = date.split('/')
    #fields[self.date_from_field['day']] = date_parts[0]
    fields[self.date_field_from['month']] = date_parts[1]
    fields[self.date_field_from['year']] = date_parts[2]
    util.setup_form(self.br, self.search_form, fields)
    if self.DEBUG:
        print(self.br.form)
    response = util.submit_form(self.br)

    final_result = []
    if response:
        html = response.read()
        url = response.geturl()
        result = scrapemark.scrape(self.scrape_ids, html, url)
        if result and result.get('records'):
            self.clean_ids(result['records'])
            final_result.extend(result['records'])

    if final_result:
        return final_result, from_dt, to_dt
    else:
        return [], None, None  # monthly scraper - so empty result is always invalid
def delete_tweets_from_archive_until_year(filename_archive, tweets_year, tweets_month):
    global APP_API

    FILE_PATH = APP_PATH + '/' + filename_archive
    if not os.path.exists(FILE_PATH):
        print('TWEETS: The twitter archive at ' + FILE_PATH + ' does not exist.')
        return
    if not is_api_configured():
        return

    # get list of ids to destroy from zip file
    result_array = tweets_extract_ids_from_zipfile(filename_archive, tweets_year, tweets_month)
    tweet_ids = result_array[0]
    num_to_delete = result_array[1]

    # sort in reversed order
    tweet_ids_sorted = sorted(tweet_ids.keys(), reverse=True)

    print("TWEETS: There are %d tweets to delete." % num_to_delete)
    estimated_time_needed = estimated_time_of_arrival(num_to_delete)
    print("TWEETS: Deletion will take an estimated %s to finish." % estimated_time_needed)
    continue_deleting = query_yes_no("TWEETS: REALLY DELETE ALL TWEETS NOW?", default="no")
    if not continue_deleting:
        print("TWEETS: Aborted deleting.")
        return

    begin = False
    num_deleted = 0
    for date in tweet_ids_sorted:
        year, month = date.split("/")  # not really used
        num_to_delete_month = len(tweet_ids[date])
        num_deleted_month = 0
        print("TWEETS: Deleting %s tweets of: %s" % (str(num_to_delete_month), date))
        for tid in tweet_ids[date]:
            if begin or last == 0 or tid == last:
                begin = True
                error_counter = 0
                while True:
                    try:
                        APP_API.DestroyStatus(tid)
                        num_deleted += 1
                        num_deleted_month += 1
                        print("TWEETS: %d DELETED %d/%d of %d/%d (MONTH/TOTAL)" % (tid, num_deleted_month, num_deleted, num_to_delete_month, num_to_delete))
                        break
                    except twitter.error.TwitterError as e:
                        try:
                            message = e.message[0]['message']
                            retry = False
                        except:
                            message = repr(e.message)
                            retry = True
                        print("TWEETS: %d ERROR %s %d/%d of %d/%d (MONTH/TOTAL)" % (tid, message, num_deleted_month, num_deleted, num_to_delete_month, num_to_delete))
                        error_counter += 1
                        if error_counter > 5:
                            print("TWEETS: Too many errors, aborting!")
                            exit(1)
                        if not retry:
                            break  # exit endless while loop
def calendar_change_date():
    date = request.form['new_date']
    date_chunks = date.split(' ')
    date = date_chunks[1] + ' ' + date_chunks[2] + ' ' + date_chunks[3]
    new_date = datetime.datetime.strptime(date, "%b %d %Y")
    # Send them to the new day
    return redirect(url_for('.home', date=new_date,
                            teacherset=request.form['teacherset']))
def reshape_date(date):
    # Normalize relative times such as "N시간전" (N hours ago),
    # "N분전" (N minutes ago) and "N초전" (N seconds ago)
    if re.search('전', date):
        if re.search('시간', date):
            date = datetime.now() - timedelta(hours=int(date.split("시간")[0]))
            date = date.strftime("%Y-%m-%d")
        elif re.search('분', date):
            date = datetime.now() - timedelta(minutes=int(date.split("분")[0]))
            date = date.strftime("%Y-%m-%d")
        elif re.search('초', date):
            date = datetime.now() - timedelta(seconds=int(date.split("초")[0]))
            date = date.strftime("%Y-%m-%d")
    else:
        date = datetime.strptime(date, "%Y.%m.%d")
        date = date.strftime("%Y-%m-%d")
    return date
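# Illustrative usage of reshape_date (a sketch; the relative forms are Korean,
# e.g. "3시간전" = "3 hours ago"):
# reshape_date('3시간전')      ->  the date 3 hours ago, formatted "YYYY-MM-DD"
# reshape_date('2021.03.07')   ->  '2021-03-07'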
def parse_details(self, response):
    # print 'parse_details'
    url = response.url
    title = response.xpath(
        '//div[@id="art_header"]/h1[@class="title"]/text()').extract_first()
    texts = response.xpath(
        '//div[@id="art_start"]/p/text() | //div[@id="art_start"]/div/text() | //div[@id="art_start"]/a/b/text() | //div[@id="art_start"]/text()'
    ).extract()
    article = u''.join(texts)
    date = response.xpath(
        '//div[@class="art_author"]/text()').extract_first()
    (day, month, year) = date.split()[0:3]
    #print day, type(month), year
    articleDate = makeStdDate(day, month, year)
    # print '>1>', articleDate, strToday, (articleDate == strToday)
    # if (articleDate == strToday):
    yield {
        'url': url,
        'title': title,
        'text': article,
        'date': strToday  #articleDate
    }
def check_humandate(date):
    """
    'humandate' must be a human-readable date with a 3-letter month and
    4-digit year. Examples include 'Feb 18-20, 2025' and
    'Feb 18 and 20, 2025'. It may be in languages other than English, but
    the month name should be kept short to aid formatting of the main
    Software Carpentry web site.
    """
    if ',' not in date:
        return False
    month_dates, year = date.split(',')

    # The first three characters of month_dates (the month) must contain no spaces
    month = month_dates[:3]
    if any(char == ' ' for char in month):
        return False

    # But the fourth character must be a space ("February" is illegal)
    if month_dates[3] != ' ':
        return False

    # year contains *only* numbers
    try:
        int(year)
    except ValueError:
        return False

    return True
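# Illustrative usage of check_humandate (a sketch; examples follow the docstring):
# check_humandate('Feb 18-20, 2025')    ->  True
# check_humandate('February 18, 2025')  ->  False  (month longer than 3 letters)
# check_humandate('Feb 18-20 2025')     ->  False  (missing comma)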
def convert_str_datetime(date: str, pattern: str):
    if pd.isnull(date):
        return DateTimeUtils.datetime_default
    if "T" in date:
        return datetime.strptime(date.split("T")[0], pattern)
    else:
        return datetime.strptime(date, pattern)
def get_date(self, date):
    """
    Returns a date in Bhav Website compatible format
    """
    date_list = date.split('-')
    date_list[1] = str(int(date_list[1]))
    date_list[2] = str(int(date_list[2]))
    return date_list[0] + '-' + date_list[1] + '-' + date_list[2]
def dateToDatetime(date):
    if date:
        date = date.split('/')
        date = datetime.datetime(int(date[2]), int(date[0]), int(date[1]))
        current_tz = timezone.get_current_timezone()
        date = date.astimezone(current_tz)
        return date
def is_date(date):
    '''
    Takes a date string as input.
    Returns True if the date is valid, False otherwise.
    Example date: "1995-04-30"
    '''
    # Split the date on "-"; split() returns a list.
    # For a valid date the list looks like ["YYYY", "MM", "DD"].
    date_splitted = date.split("-")
    # First of all, the list must have exactly 3 elements
    if len(date_splitted) == 3:
        # Unpack the elements of date_splitted into year, month and day
        year, month, day = date_splitted
        # The year must be 4 characters long, month and day 2 characters each,
        # and the month and day values must be in a sensible range
        if len(year) == 4 and len(month) == 2 and len(
                day) == 2 and int(month) < 13 and int(month) > 0 and int(
                    day) > 0 and int(day) < 32:
            # The date is valid
            return True
        else:
            # Otherwise the date is not valid
            return False
    else:
        # Otherwise the date is not valid
        return False
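# Illustrative usage of is_date (a sketch; assumes "YYYY-MM-DD" strings as in
# the comments above):
# is_date('1995-04-30')  ->  True
# is_date('1995-13-30')  ->  False  (month out of range)
# is_date('95-04-30')    ->  False  (year must be 4 digits)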
def store_holiday():
    d = datetime.strptime('0001-1-1', '%Y-%m-%d')
    r = requests.get("https://isthemarketopen.com/")
    soup = BeautifulSoup(r.content, "html.parser")  # use the public response attribute
    find = soup.find_all("font")
    date = ""
    for link in find:
        if "clos" in link.text.lower():
            date = link.text.split("-")[1][1:].lower()
    year = date.split(",")[1][1:]
    day = date[date.index(",") - 2:date.index(",")]
    # if "jan" in date:
    #
    # elif "feb" in date:
    #
    # elif "april" in date:
    #
    # elif "may" in date:
    #
    # elif "june" in date:
    #
    if "july" in date.lower():
        d = datetime.strptime(str(year + "-" + str(7) + "-" + day), '%Y-%m-%d')
    # elif "aug" in date:
    #
    # elif "sep" in date:
    #
    # elif "oct" in date:
    #
    # elif "nov" in date:
    #
    # elif "dec" in date:
    print(d.strftime("%Y-%m-%d"))
def checkquery(self, location=None, date=None, islog=False):
    datesplit = date.split(',')
    if len(datesplit) <= 1:
        period = timedelta(days=1)
        datef0 = datetime.strptime(datesplit[0], "%Y-%m-%d")
        datef1 = datef0 + period
        datef0str, datef1str = datef0.strftime(
            '%Y-%m-%d %X'), datef1.strftime('%Y-%m-%d %X')
    else:
        period = timedelta(days=1)
        datef0 = datetime.strptime(datesplit[0], "%Y-%m-%d")
        datef1 = datetime.strptime(datesplit[1], "%Y-%m-%d") + period
        datef0str, datef1str = datef0.strftime(
            '%Y-%m-%d %X'), datef1.strftime('%Y-%m-%d %X')
    timeperiod = (datef0str, datef1str)
    query = {
        'siteName': location,
        'log_time': {
            '$gte': timeperiod[0],
            '$lte': timeperiod[1]
        }
    }
    try:
        if islog is False:
            self.airdata.find(query)[0]
            find_result = self.airdata.find(query)
        else:
            self.datalog.find(query)[0]
            find_result = self.datalog.find(query)
    except IndexError:
        find_result = None
    finally:
        self.client.close()
    # the return of this method is a mongodb cursor object or a None object
    return find_result
def gedcomDateToUnixTimestamp(date):
    dateArray = date.split(' ')
    month = MONTHS[dateArray[1]]
    day = dateArray[0]
    year = dateArray[2]
    timeString = '{0}/{1}/{2}'.format(day, month, year)
    return time.mktime(datetime.strptime(timeString, '%d/%m/%Y').timetuple())
def join_files(data_path, out_path='./'):
    """
    Joins all the Detector Health Detail files in the directory into one big
    csv and dataframe.
    Args:
        data_path (str) = path to data directory
        out_path (str) = path to the directory where you want to save the csv
    """
    fnames = os.listdir(data_path)

    # Count the rows
    r = 0
    start_path = os.getcwd()
    os.chdir(data_path)  # change cwd to data dir
    for name in fnames:
        with open(name, 'r') as f:
            next(f)  # skip header
            for line in f:
                r += 1

    # Initialize the output df
    temp = pd.read_csv(fnames[0], sep='\t')
    h = list(temp.columns)  # header labels
    df = pd.DataFrame(index=np.arange(0, r), columns=['Date'] + h)

    # Read all the files and combine into one dataframe
    i = 0
    # TODO Find a faster way to do this loop.
    for name in fnames:
        date = name.split('_health')[0]  # YYYY_MM_DD
        with open(name, 'r') as f:
            next(f)  # skip header
            for line in f:
                df.iloc[i, :] = [date] + line.split('\t')
                i += 1

    os.chdir(start_path)  # change back to original dir
    df.to_csv(out_path + date.split('_')[0] + '_joined_health_detail.csv')
    return df
def isWeekday(self, date):
    dates = date.split('-')
    if len(dates) == 3:
        # if calendar.weekday(int(dates[0]), int(dates[1]), int(dates[2])) < 5:
        if self.isDayOfWeek(date, 5) == 0 and self.isDayOfWeek(date, 6) == 0:
            return 1
    return 0
def statistical_temperature_query_by_initial_date(date):
    date_parts = date.split('-')
    start_date = dt.date(int(date_parts[0]), int(date_parts[1]),
                         int(date_parts[2]))
    session = Session(engine)
    station_temperature = session.query(
        func.max(Measurement.tobs).label("highest_temperature"),
        func.avg(Measurement.tobs).label("average_temperature"),
        func.min(Measurement.tobs).label("lowest_temperature")).filter(
            Measurement.date >= start_date)
    results = station_temperature.one()  # execute the query before closing the session
    session.close()
    highest_temperature = results.highest_temperature
    lowest_temperature = results.lowest_temperature
    average_temperature = results.average_temperature
    temperature_list = []  # avoid shadowing the built-in `list`
    temperature_list.append({"Highest temperature": highest_temperature})
    temperature_list.append({"Lowest temperature": lowest_temperature})
    temperature_list.append({"Average temperature": average_temperature})
    return jsonify(temperature_list)
def check_valid_date(date):
    """
    Checks that the date has the correct format
    :param date:
    :return: month, day
    :rtype: tuple
    """
    if not isinstance(date, str):
        # raise ValueError("date is not a string")
        return False
    split = date.split("-")
    m = int(split[0])
    d = int(split[1])
    if m < 1 or m > 12:
        raise ValueError(
            "Error in {}: the month must be between 1 and 12".format(date))
        # return False
    maxday = Season.month_day[m]
    if d < 1 or d > maxday:
        raise ValueError(
            "Error in {}: in month {} the day must be at most {}".format(
                date, m, maxday))
        # return False
    return m, d
def export_users_csv_date(request):
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="Custom Report.csv"'
    date = request.POST.get('date')
    tobepassed = ""
    global datel
    if date is not None:
        date = date.split('/')
        tobepassed += date[2] + "-" + date[0] + "-" + date[1]
        datel.append(tobepassed)
    print(datel)
    writer = csv.writer(response)
    writer.writerow(['Entry No.', 'Name', 'Entry Time', 'Entry Date', 'Exit Time', 'Phone', 'Email', 'Address', 'Purpose', 'Identity', 'If Other then Specify', 'Reference ID', 'Aadhar', 'Section to be Visited', 'Image Name As Taken on Device', 'Reference Person Name', 'Reference Person Contact'])
    print("here")
    passv = ""
    if len(datel) != 0:
        passv += datel[len(datel) - 1]
    users = visitor.objects.filter(dateofentry=passv).values_list()
    for user in users:
        writer.writerow(user)
    if request.method == "GET":
        while len(datel) != 0:
            datel.pop(0)
    return response
def Single_Day_Inquriy_Obs(self, date):
    """ Takes argument date in the format Jan 01 2021 """
    date = date.split()
    new_date = date[0] + date[1] + date[2]
    results = self.collectiondayhist.find({"Date": new_date})
    High = self.Preferences.HighTemp()
    Low = self.Preferences.LowTemp()
    Rainfall = self.Preferences.Rainunit()
    Snowfall = self.Preferences.Snowunit()
    TempUnit = self.Preferences.UnitT()
    PreUnit = self.Preferences.UnitP()
    for x in results:
        High = str(x[High]) + TempUnit
        Low = str(x[Low]) + TempUnit
        if str(x[Rainfall]) == "Trace amount":
            Rain = "Trace amount"
        if str(x[Rainfall]) != "Trace amount":
            Rain = str(x[Rainfall]) + " " + PreUnit
        if str(x[Snowfall]) == "Trace amount":
            Snow = "Trace amount"
        if str(x[Snowfall]) != "Trace amount":
            Snow = str(x[Snowfall]) + " " + PreUnit
    WDate = date[0] + " " + date[1] + " " + date[2]
    Webreturn = {
        "Date": WDate,
        "High": High,
        "Low": Low,
        "Rainfall": Rain,
        "Snowfall": Snow
    }
    return Webreturn
def day_menu():
    options = {'1': ('Today', Menu.aut_menu)}
    # add 4 prior days to options
    for i in range(1, 5):
        formdate = Menu.DATE + timedelta(days=-i)
        options[str(i + 1)] = (formdate.strftime('%m/%d/%Y'), Menu.ini_menu)
    options['6'] = ('Archive', Menu.ini_menu)
    options['9'] = ('Back', Menu.back)
    print('###############')
    print('# {haz} -{sec}- #'.format(
        haz='Winter' if Menu.obs['haz'][0] == '2' else 'Severe',
        sec=Menu.obs['sec'][0]))
    print('# ' + Menu.DATE.strftime('%m/%d/%Y') + ' #')
    print('###############')
    print()
    print('Select Day')
    sel = Menu.show_menu(options)
    # if archive day selected, get date
    if sel == '6':
        Menu.archive = True
        print('\nEnter the Archive date (mm/dd/yyyy):')
        date = input('\n>> ')
        date = date.split('/')
        Menu.obs['day'] = datetime.datetime(int(date[2]), int(date[0]),
                                            int(date[1]))
    else:
        Menu.obs['day'] = Menu.DATE + timedelta(days=-(int(sel) - 1))
    return options[sel][1]()
def get_day_number(date):
    days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    date = date.split("-")
    month = int(date[1])
    res = sum(days[:month - 1])
    res = res + int(date[2])
    return res
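# Illustrative usage of get_day_number (a sketch; assumes "YYYY-MM-DD" input and
# the fixed 28-day February above, i.e. leap years are not handled):
# get_day_number('2021-01-31')  ->  31
# get_day_number('2021-03-01')  ->  60   (31 + 28 + 1)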
def Single_Day_Inquiry_Hourly_Obs(self, date):
    """ Takes argument date in the format Jan 02 2021 1 PM """
    date = date.split()
    Date = date[0] + date[1] + date[2]
    time = self.time_convert(date[3], date[4])
    Precipitation = self.Preferences.Precipunit()
    Temp = self.Preferences.Temp()
    results = self.collectionhourhist.find({"Date": Date, "Time": time})
    TempUnit = self.Preferences.UnitT()
    PreUnit = self.Preferences.UnitP()
    for x in results:
        Temp = str(x[Temp]) + TempUnit
        Precip = str(x[Precipitation])
        if str(x[Precipitation]) == "No precipitation":
            Precip = "No precipitation"
        if str(x[Precipitation]) == "Trace amount":
            Precip = "Trace amount"
        if str(x[Precipitation]) != "No precipitation" and str(
                x[Precipitation]) != "Trace amount":
            Precip = str(x[Precipitation]) + " " + PreUnit
    time_per = date[0] + " " + date[1] + " " + date[2] + " " + time
    Webreturn = {"Date": time_per, "Temp": Temp, "Precipitation": Precip}
    return Webreturn
def date_to_timestamp(date):
    date = date.split('/')
    if len(date) == 3:
        if len(date[2].strip()) == 2:
            date[2] = '19' + date[2].strip()
        return time.mktime((int(date[2]), int(date[1]), int(date[0]),
                            0, 0, 0, 0, 0, -1))
    else:
        return False
def calendar_change_date_teacher():
    date = request.form['new_date']
    current_teacher = request.form['current_teacher']
    date_chunks = date.split(' ')
    date = date_chunks[1] + ' ' + date_chunks[2] + ' ' + date_chunks[3]
    new_date = datetime.datetime.strptime(date, "%b %d %Y").date()
    # Send them to the new day
    return redirect(url_for('.filter_teachers', date=new_date,
                            current_teacher=current_teacher))
def split_datetime_range(date):
    if isinstance(date, (tuple, list)):
        d0, d1 = date
    elif isinstance(date, (str, unicode)) and date.find('~') >= 0:
        d0, d1 = date.split('~')
    else:
        d0, d1 = date, date
    return d0, d1
def date_time(date):
    db_date = date.split(" ")
    hours = db_date[1].split(".")
    hours = hours[0]
    if hours > "00:00:01" and hours < "05:59:59":
        current_date = datetime.strptime(date, "%Y-%m-%d %H:%M:%S.%f") + timedelta(days=-1)
        current_date.strftime("%Y-%m-%d %H:%M:%S.%f")
        return current_date
    return date
def parse_date(date=None, default=None):
    if date is None:
        if default is None:
            default = datetime.today()
        return default
    year, month, day = [int(x) for x in date.split('-')]
    return datetime(year=year, month=month, day=day)
def get_date(date):
    # Returns the date as a [year, month, day] list of ints
    split_date = date.split("-")
    return_date = []
    if len(split_date) > 0:
        year = int(split_date[0])
        month = int(split_date[1])
        day = int(split_date[2].split(" ")[0])
        return_date = [year, month, day]
    return return_date
def getDateStats(dt):
    if "T" in dt:
        date, time = dt.split("T")
    else:
        date, time = dt.split(" ")
    time = time.split(".")[0]
    yr, m, d = map(int, date.split("-"))
    h, minute, s = map(int, map(float, time.split(":")))
    return (yr, m, d, h, minute)
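# Illustrative usage of getDateStats (a sketch; accepts "T"- or space-separated
# datetimes and drops fractional seconds):
# getDateStats('2021-03-07T14:30:15.123')  ->  (2021, 3, 7, 14, 30)
# getDateStats('2021-03-07 14:30:15')      ->  (2021, 3, 7, 14, 30)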
def get_archive_date():
    '''
    Get date of latest archive files on USAspending.gov
    '''
    url = 'https://apps.usaspending.gov/DownloadCenter/AgencyArchive'
    d = pq(url)
    date = d('#ResultsTable').find('tr').eq(1).find('td').eq(3).text()
    date = date.split('/')
    date = date[2] + date[0] + date[1]
    return date
def is_valid_date(date):
    # expects mm/dd/yyyy
    try:
        date = date.split('/')
        if (int(date[0]) > 0 and int(date[0]) <= 12) and (int(date[1]) > 0 and int(date[1]) <= 31) and (int(date[2]) >= 1900 and int(date[2]) <= 2300):
            return '/'.join(date)
        else:
            return ''
    except:
        wrong_date.append(date)
        return ''
def get_current_date(self):
    curDate = str(datetime.datetime.utcnow() + datetime.timedelta(hours=-8))
    date = curDate.split(' ')
    date = date[0]
    date = date.split('-')
    to_return = {}
    to_return['year'] = date[0]
    to_return['month'] = date[1]
    to_return['day'] = date[2]
    return to_return