def recuperer(request, pk):
    ref = ReferenceDossier.objects.all().count()
    data = Calldata.objects.get(id=pk)
    num = ref + 1
    today = timestring.Date(datetime.date.today())
    reference = 'ARCEP/' + str(today.year) + '/' + str(today.month) + '/' + str(num)
    plainte = Plainte()
    plainte.nom = data.nom
    plainte.prenoms = data.prenoms
    plainte.profession = data.profession
    plainte.nationalite = data.nationnalite
    plainte.contact = data.contact
    plainte.adresse = data.adresse
    plainte.email = data.mail
    plainte.date_entree = today.date
    plainte.canal_id = 1
    plainte.operateur_id = data.operateur_id
    plainte.categorie_id = data.categorie_plainte_id
    plainte.objet = data.objet_appel
    plainte.date_constat = data.date_constat
    plainte.reference = reference
    plainte.etat_dossier = 'Non affecté'
    plainte.annee = today.year
    plainte.mois = today.month
    plainte.save()
    data.recuperer = True
    data.save()
    ReferenceDossier.objects.create(numero=num, libelle=reference)
    return redirect(charger_ligne)
def parse_article(self, response):
    # e.g. "May 21, 2019, 02.47 PM IST"
    date_time_str = str(" ".join(
        response.xpath('//div[contains(@class, "publish_on")]/text()')
        .extract()).strip().split(":")[-1].strip())
    date_time_str = date_time_str[:-4]  # drop the trailing " IST"
    publish_date = timestring.Date(str(date_time_str))
    current_date = timestring.Date(date.today())
    date_7_days_ago = date.today() - timedelta(days=7)
    date_7_days_ago = timestring.Date(str(date_7_days_ago))
    if date_7_days_ago < publish_date <= current_date:
        i = EcotimesItem()
        i["title"] = " ".join(response.xpath(
            '/html/body/section[2]/div[5]/div[1]/div/section/div[1]/article/h1/text()'
        ).extract()).strip()
        i["description"] = " ".join(
            list(map(lambda x: x.strip(),
                     response.xpath('//div[@class="artText"]//text()').extract())))
        i["published_date"] = " ".join(
            response.xpath('//div[contains(@class, "publish_on")]/text()')
            .extract()).strip().split(":")[-1].strip()
        i["created_data"] = datetime.datetime.now()
        i["url"] = response.url
        i["source"] = self.name
        yield i
def input_bus_to_HC(string):
    time_dict_list = input_time_function(string)
    input_time = str(timestring.Date((string.split(","))[0]))
    input_time = datetime.datetime.strptime(input_time, '%Y-%m-%d %H:%M:%S')
    next_buses = ""
    for time_dict in time_dict_list:
        time_before_2hrs = input_time - datetime.timedelta(hours=1.5)  # window start, 1.5 hours before the input time
        time_in_2hrs = input_time + datetime.timedelta(hours=1.5)  # window end, 1.5 hours after the input time
        bus_time = str(timestring.Date(time_dict['LeaveBrynMawr']))  # formats to today's time as a string
        bus_time = datetime.datetime.strptime(bus_time, '%Y-%m-%d %H:%M:%S')  # back to a datetime for comparison
        if time_before_2hrs <= bus_time <= time_in_2hrs:
            next_buses += str(time_dict['LeaveBrynMawr']) + "\n"
    if next_buses != "":
        return ("Buses from Bryn Mawr before & after %s:\n" % (string) + next_buses)
    else:
        return "Sorry! There are no buses at this time.\n Please input a different time!"
def _get_related_jobs(self, most_recent_scans, project_name, version_name):
    later_than = min([s['createdAt'] for s in most_recent_scans])
    later_than = timestring.Date(later_than).date
    pv_name = "{} {}".format(project_name, version_name)
    scan_loc_name = most_recent_scans[0]['scan_details']['name']
    scan_ids = [mrs['scan_details']['scanSourceId'] for mrs in most_recent_scans]
    jobs = CodeLocationStatusChecker.jobs
    jobs_with_job_descriptions = list(filter(lambda j: 'jobEntityDescription' in j, jobs))
    pv_jobs = list(filter(
        lambda j: pv_name in j.get('jobEntityDescription', []) and
        timestring.Date(j['createdAt']).date > later_than,
        jobs_with_job_descriptions))
    pv_job_types = set([pj['jobSpec']['jobType'] for pj in pv_jobs])
    s_jobs = list(filter(
        lambda j: scan_loc_name in j.get('jobEntityDescription', []) or
        j['jobSpec']['entityKey']['entityId'] in scan_ids,
        jobs))
    # TODO: Filter again for later_than? or pre-filter for later_than?
    s_job_types = set([sj['jobSpec']['jobType'] for sj in s_jobs])
    most_recent_pv_jobs = self._get_most_recent_jobs(pv_job_types, pv_jobs)
    most_recent_s_jobs = self._get_most_recent_jobs(s_job_types, s_jobs)
    combined_recent_jobs = most_recent_pv_jobs + most_recent_s_jobs
    # de-dup; see https://stackoverflow.com/questions/11092511/python-list-of-unique-dictionaries
    combined_recent_jobs = list({j['id']: j for j in combined_recent_jobs}.values())
    return combined_recent_jobs
def process_messages(messages):
    for message in messages:
        key = {'_id': str(message['event_id']) + "_" + str(message['sportsbook_id'])}
        data = {}
        if message['alert_type_id'] == 1:  # Line
            data = message["details"]
            data["date"] = timestring.Date(message["created_date"]).date
            lineCol.update(key, data, upsert=True)
        elif message['alert_type_id'] == 2:  # Percent
            data = message["details"]
            data["date"] = timestring.Date(message["created_date"]).date
            percentCol.update(key, data, upsert=True)
        elif message['alert_type_id'] == 3:  # Bets
            key = {'_id': str(message['event_id'])}
            data = message["details"]
            data["date"] = timestring.Date(message["created_date"]).date
            betCol.update(key, data, upsert=True)
        elif message['alert_type_id'] == 4:  # Game
            key = {'_id': message['event_id']}
            data['home_score'] = message['details']['home_score']
            data['visitor_score'] = message['details']['visitor_score']
            data['period_short'] = message['details']['period_short']
            data['period_time'] = message['details']['period_time']
            eventCol.update(key, data, upsert=True)
def to_date(filename):
    if ".qsf" in filename:
        return timestring.Date("1 1 1970")
    try:
        date = filename[len(survey_name) + 1:-4].replace(".", ":").replace("_", " ")
        return timestring.Date(date)
    except Exception:
        print("Failed to parse ", filename)
        return None
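# A hedged usage sketch for to_date (not from the original source): it assumes
# a module-level `survey_name` global, strips "<survey_name>_" from the front
# and the 4-character extension from the end, and turns "12.30_2016"-style
# stamps back into "12:30 2016" before handing them to timestring.
survey_name = "survey"  # hypothetical value
print(to_date("survey_May 5 2016.csv"))  # a timestring.Date for May 5, 2016
print(to_date("survey.qsf"))             # the sentinel date, Jan 1 1970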
def get_date_from_timestring(date):
    import timestring
    parsed = timestring.Date(date)
    return parsed.year, parsed.month, parsed.day, parsed.hour
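# A minimal demonstration of get_date_from_timestring; the input below is
# hypothetical. timestring.Date exposes year/month/day/hour attributes, and
# the helper just unpacks them into a tuple.
year, month, day, hour = get_date_from_timestring("2016-01-01 13:45")
print(year, month, day, hour)  # expected: 2016 1 1 13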
def load(self, data):
    # renamed the parameter from `dict`, which shadowed the builtin
    self.id = data['id']
    self.name = data['name']
    self.email = data['email']
    self.user_id = data['user_id']
    self.enabled = data['enabled']
    self.expiration = timestring.Date(data['expiration'])
    self.project_id = data['project_id']
    self.created_at = timestring.Date(data['created_at'])
    self.history = History(self, data['history'])
def search(self, args, offset=0, limit=0, order=None, count=False):
    DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
    for doms in args:
        if doms[0] == 'date_order' and doms[1] == '=':
            # localize the datetime to Asia/Jakarta
            localtz = pytz.timezone('Asia/Jakarta')
            dt_local_str = pytz.utc.localize(
                datetime.datetime.strptime(doms[2], DATETIME_FORMAT)
            ).astimezone(localtz).strftime(DATETIME_FORMAT)
            dt_local = timestring.Date(dt_local_str)
            awal = str(dt_local.date.date()) + ' ' + '00:00:00'   # start of the local day
            akhir = str(dt_local.date.date()) + ' ' + '23:59:00'  # end of the local day
            # convert back to UTC
            awal_utc_str = localtz.localize(
                datetime.datetime.strptime(awal, DATETIME_FORMAT)
            ).astimezone(pytz.utc).strftime(DATETIME_FORMAT)
            akhir_utc_str = localtz.localize(
                datetime.datetime.strptime(akhir, DATETIME_FORMAT)
            ).astimezone(pytz.utc).strftime(DATETIME_FORMAT)
            # rewrite the '=' filter into a >=/<= range covering the whole local day;
            # the appended term never matches this branch, so mutating args mid-loop is safe
            doms[1] = '>='
            doms[2] = awal_utc_str
            args.append(['date_order', '<=', akhir_utc_str])
    return super(hs_sale_order, self).search(args, offset, limit, order, count)
def add_task(id):
    if timestring.Date(request.form.get("date")) < datetime.date(datetime.now()):
        return redirect(url_for("customer_page_with_error", id=id))
    t = Task(datetime.now(),
             timestring.Date(request.form.get("date")).date,
             False,
             request.form.get("task_comment"),
             current_user.firstname + " " + current_user.surename,
             Customer.query.get(id).name,
             id,
             current_user.id)
    db.session().add(t)
    db.session().commit()
    return redirect(url_for("customer_page", id=id))
def _get_current_borders(current):
    deadzone = Settings.main['config']['timeslots']['deadzone']
    borders = None
    schedule = Settings.main['schedule']
    for item in schedule.keys():
        if ts.Date(item).weekday == current.weekday:
            borders = (ts.Date(schedule[item]['start']) + deadzone,
                       ts.Date(schedule[item]['end']) - deadzone)
    if borders is None:
        raise ServiceNotWorking()
    return borders
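# A sketch of the Settings layout _get_current_borders appears to assume
# (all values hypothetical): schedule keys are weekday names that timestring
# can parse, each slot carries start/end times, and deadzone is a number of
# seconds trimmed off both ends via timestring's Date +/- int arithmetic.
main = {
    'config': {'timeslots': {'deadzone': 600}},                 # 10 minutes
    'schedule': {'monday': {'start': '9:00', 'end': '18:00'}},
}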
def dateNormalizer(payload):
    """SQLite's DateTime column type only accepts Python datetime objects,
    so return the JSON payload with its date fields normalized to datetime."""
    import timestring
    payload['BirthDate'] = timestring.Date(payload['BirthDate']).date
    payload['HireDate'] = timestring.Date(payload['HireDate']).date
    return payload
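# Example call for dateNormalizer with a hypothetical payload; both fields
# come back as Python datetime objects via timestring.Date(...).date.
payload = {'BirthDate': 'Dec 8, 1948', 'HireDate': '1992-05-01'}
payload = dateNormalizer(payload)
print(type(payload['BirthDate']))  # <class 'datetime.datetime'>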
def setReminder(date, time, name):
    """Parse the date and time into a datetime object and run new_reminder on it."""
    d = timestring.Date(date)
    t = timestring.Date(time)
    dt = t.replace(day=d.day, month=d.month, year=d.year)
    status = new_reminder(dt.date, name)
    if status == 0:
        print("New Reminder:\n"
              "{0}: {1}".format(dt.date.strftime("%m/%d/%Y %H:%M"), name))
    else:
        print("Error occurred")
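# A hedged example of setReminder; assumes new_reminder (defined elsewhere)
# returns 0 on success. The date and the time are parsed separately, then
# merged into one datetime before scheduling.
setReminder("12/25/2021", "9:30", "wrap presents")
# on success prints something like:
#   New Reminder:
#   12/25/2021 09:30: wrap presents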
def setUp(self):
    """Pre-test setup"""
    stats = {
        timestring.Date("1/1/2016 1:00:00"): {"s1": 121212},
        timestring.Date("1/1/2016 1:01:00"): {"s2": 232323, "_fun": "baddata"},
    }
    out = StringIO.StringIO()
    output_csv(out, stats)
    self.outstr = out.getvalue()
    out.close()
def _order_late(order: Order) -> bool:
    now = ts.Date('now')
    day = ts.Date('0:00')  # midnight today
    if order.status_id != 1:
        return False
    if ts.Date(order.created_at) < day:
        return True
    end = ts.Date(str(timeslots.get_timeslot(order.timeslot_id).time_end))
    if now > end:
        return True
    return False
async def remind(self, ctx, *, text: str = None):
    if text is None:
        await self.bot.say(
            ':warning: `Invalid Time Format`\n**Example:** `1h2m` (1 hour, 2 minutes)')
        return
    curr = ti.time()
    time_result = await self.get_time(text)
    if time_result:
        time = time_result[0]
        text = time_result[1]
    if time_result is False:
        time = 1
        try:
            epoch = int(timestring.Date(text).to_unixtime())
            assert epoch >= curr  # the parsed date must not be in the past
            text = re.sub(TIMESTRING_RE, '', text)
        except Exception:
            await self.bot.say(
                ':warning: `Invalid Time Format`\n**Example:** `1h2m` (1 hour, 2 minutes)')
            return
    elif time <= 0:
        await self.bot.say(':warning: AFAIK, time can\'t be negative.')
        return
    else:
        epoch = int(curr) + time
    text = text if text != '' else None
    sql = 'INSERT INTO `reminders` (`user`, `time`, `message`) VALUES (%s, %s, %s)'
    self.cursor.execute(sql, (ctx.message.author.id, epoch, text))
    self.cursor.commit()
    await self.bot.say(
        ':white_check_mark: Reminder set for `{0}` seconds.'.format(time))
def to_json(po_folder):
    """
    Extract the content and other information from the generated HTML files
    and convert them into a single data JSON file.
    """
    json_ary = []
    for html in os.listdir(po_folder):
        dic = {}
        with open(os.path.join(po_folder, html)) as html_file:
            data = html_file.read()
        soup = BeautifulSoup(data, 'lxml')
        link = '/post/' + html.split('.')[0]
        title = soup.find('h3')['id']
        content = soup.find('body').contents
        str_content = ''
        for item in content:
            str_content += str(item)
        dic['link'] = link
        dic['title'] = title
        dic['content'] = str_content
        dic['date'] = '-'.join(html.split('-')[0:3])
        json_ary.append(dic)
    json_ary.sort(key=lambda item: timestring.Date(item['date']), reverse=True)
    with open('static/data.json', 'w') as out_json:
        json.dump(json_ary, out_json)
def parse(self, response):
    # iterate over entries
    for entry in response.css('div').css('li.archive-item-component'):
        # retrieve info for the current post
        item = ScrapyItem()
        item['source'] = 'wired'
        temp_string = entry.css('time::text').extract_first()
        item['brief'] = entry.css('a').css('p.archive-item-component__desc::text').extract_first()
        item['url'] = entry.css('a::attr(href)').extract_first()
        item['title'] = entry.css('a').css('h2::text').extract_first()
        # record when we scraped it
        now = datetime.datetime.now()
        item['tstamp'] = now.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # convert the publish time into ISO 8601
        temp = timestring.Date(temp_string).date
        item['date'] = temp.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        yield item
def parse(self, response):
    news = response.css('div.sr')
    titles = news.css('h4')
    briefs = news.css('p')
    # iterate over entries
    for j in range(len(titles)):
        # retrieve info for the current post
        item = ScrapyItem()
        item['source'] = 'esa'
        temp_string = briefs[j].css('::text').extract_first()
        if briefs[j].css('b::text').extract_first() != '...':
            continue
        item['brief'] = briefs[j].css('::text').extract()[1]
        item['url'] = titles[j].css('a::attr(href)').extract_first()
        item['title'] = titles[j].css('a::text').extract_first()
        # record when we scraped it
        now = datetime.datetime.now()
        item['tstamp'] = now.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # convert the publish time into ISO 8601
        temp = timestring.Date(temp_string).date
        item['date'] = temp.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        yield item
def stocks_list(request):
    # sort the stocks in the database and sync them to Firebase
    if request.method == "POST":
        form_A = forms.AddNewStock(request.POST)
        form_B = forms.goForIT()
        if form_A.is_valid():
            form_A.save()
            return redirect('stocks:stocks')
    else:
        form_A = forms.AddNewStock()
        form_B = forms.goForIT()
    stocks = Stock.objects.all().order_by('stock_name')
    arr = list(stocks.values())
    for stock in arr:
        for s in stock:
            if isinstance(stock[s], datetime.datetime):
                date = timestring.Date(str(stock[s])).date
                stock[s] = datetime.datetime.strftime(date, '%m-%d-%y')
        firebase_db.child('Stocks').child(stock['id']).set(stock)
    return render(request, "stocks/stocks_list.html", {
        'Stocks': stocks,
        'form_A': form_A,
        'form_B': form_B
    })
def parse(self, response):
    for entry in response.css('li.archive-item-component'):
        item = ScrapyItem()
        url_temp = entry.css('a::attr(href)').extract_first()
        item['url'] = 'https://www.wired.com' + url_temp
        item['source'] = 'wired'
        temp_string = entry.css('time::text').extract_first()
        item['brief'] = entry.css('a').css('p.archive-item-component__desc::text').extract_first()
        item['title'] = entry.css('a').css('h2::text').extract_first()
        # record when we scraped it
        now = datetime.datetime.now()
        item['tstamp'] = now.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # convert the publish time into ISO 8601
        temp = timestring.Date(temp_string).date
        item['date'] = temp.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # request the article page
        request = scrapy.Request(item['url'], callback=self.parse_article)
        request.meta['item'] = item
        yield request
    # go to the next page if it exists
    next_url = response.css('li.pagination-component__caret--right').css('a::attr(href)').extract_first()
    if next_url:
        yield scrapy.Request('https://www.wired.com' + next_url)
def render(cv: CurriculumVitae, baseFolder: str, params={}, resources={}):
    with open("Templates/" + baseFolder + "/main.tex", "r", encoding="utf-8") as file:
        templateString = file.read()
    cvDict = {}
    headerVars = ["name", "address", "github", "linkedin", "email", "phone",
                  "birthday", "homepage"]
    cvDict["firstname"] = text_clean(cv.header.name.split(' ')[0])
    cvDict["surname"] = text_clean(' '.join(cv.header.name.split(' ')[1:]))
    cvDict["lastname"] = text_clean(cv.header.name.split(' ')[-1])
    for var in headerVars:
        cvDict[var] = text_clean(getattr(cv.header, var))
    if cv.header.birthday is not None:
        cvDict["birthday"] = timestring.Date(cv.header.birthday).date
    cvDict["work_array"] = CvRenderCheetahTemplate.extract_item(cv, Models.CvWorkExperienceItem)
    cvDict["education_array"] = CvRenderCheetahTemplate.extract_item(cv, Models.CvEducationalExperienceItem)
    cvDict["academic_array"] = CvRenderCheetahTemplate.extract_item(cv, Models.CvAcademicProjectItem)
    cvDict["language_array"] = CvRenderCheetahTemplate.extract_item(cv, Models.CvLanguageItem)
    cvDict["project_array"] = CvRenderCheetahTemplate.extract_item(cv, Models.CvImplementationProjectItem)
    cvDict["achievement_array"] = CvRenderCheetahTemplate.extract_item(cv, Models.CvAchievementItem)
    cvDict["skill_dict"] = CvRenderCheetahTemplate.extract_skills(cv)
    cvDict["params"] = params
    cvDict["break_into_items"] = CvRenderCheetahTemplate.break_into_items
    cvDict["format_skill"] = CvRenderCheetahTemplate.format_skill
    cvDict["format_datetime"] = format_datetime
    for key in resources:
        resources[key] = text_clean(resources[key])
    cvDict["resources"] = resources
    template = Template(templateString, cvDict)
    return str(template)
def test_date(self):
    schema = valideer.parse({"date": "date"})
    for date in ('jan 15th 2015', 'tomorrow at 10:30', 'last tuesday'):
        self.assertEqual(schema.validate(dict(date=date))['date'],
                         timestring.Date(date))
    self.assertRaises(error, schema.validate, dict(date="never"))
def parse(self, response):
    # iterate over entries
    for entry in response.css('li.wd_item'):
        # retrieve info for the current post
        item = ScrapyItem()
        item['source'] = 'lockheed_martin'
        temp_string = entry.css('div.wd_date::text').extract_first()
        item['brief'] = entry.css('div').css('p::text').extract_first()
        item['url'] = entry.css('a::attr(href)').extract_first()
        item['title'] = entry.css('div').css('a::text').extract_first()
        # record when we scraped it
        now = datetime.datetime.now()
        item['tstamp'] = now.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # convert the publish time into ISO 8601
        temp = timestring.Date(temp_string).date
        item['date'] = temp.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        yield item
def parse(self, response):
    # iterate over entries
    for entry in response.css('div.element2'):
        # retrieve info for the current post
        item = ScrapyItem()
        item['source'] = 'NYTimes'
        temp_string = entry.css('span.dateline::text').extract_first()
        item['brief'] = entry.css('p.summary').extract_first()
        item['url'] = entry.css('a::attr(href)').extract_first()
        item['title'] = entry.css('a::text').extract_first()
        # record when we scraped it
        now = datetime.datetime.now()
        item['tstamp'] = now.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # convert the publish time into ISO 8601
        temp = timestring.Date(temp_string).date
        item['date'] = temp.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        yield item
def parse(self, response):
    # iterate over entries
    for entry in response.css('div.listing-wide__inner'):
        # retrieve info for the current post
        item = ScrapyItem()
        item['source'] = 'scientificamerican'
        temp_string = entry.css('div.t_meta::text').extract_first().split(' — ')[0]
        item['brief'] = entry.css('p::text').extract_first()
        item['url'] = entry.css('h2').css('a::attr(href)').extract_first()
        item['title'] = entry.css('h2').css('a::text').extract_first()
        # record when we scraped it
        now = datetime.datetime.now()
        item['tstamp'] = now.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # convert the publish time into ISO 8601
        temp = timestring.Date(temp_string).date
        item['date'] = temp.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
        # request the article page
        request = scrapy.Request(item['url'], callback=self.parse_article)
        request.meta['item'] = item
        yield request
    # follow pagination if present
    paginate = response.css('div.pagination__right')
    if paginate.css('a'):
        next_link = ('https://www.scientificamerican.com/search/' +
                     paginate.css('a::attr(href)').extract_first())
        yield scrapy.Request(next_link)
def bus_to_HC():
    """Get relevant bus times according to the current time."""
    time_dict_list = relevant_csv()
    next_buses = "Bus from Bryn Mawr:\n"
    for time_dict in time_dict_list:
        time_now = datetime.datetime.now()  # current time
        time_in_2hrs = datetime.datetime.now() + datetime.timedelta(hours=2.0)  # end of the two-hour window
        bus_time = str(timestring.Date(time_dict['LeaveBrynMawr']))  # formats to today's time as a string
        bus_time = datetime.datetime.strptime(bus_time, '%Y-%m-%d %H:%M:%S')  # back to a datetime for comparison
        if time_now <= bus_time <= time_in_2hrs:
            next_buses += str(time_dict['LeaveBrynMawr']) + "\n"
    if next_buses != "Bus from Bryn Mawr:\n":
        return next_buses
    else:
        return "Sorry! There are no buses at this time. Please check back later!"
def upload_feedback(db):
    """Upload feedback"""
    upload = request.files.get("feedback")
    reader = csv.DictReader(codecs.iterdecode(upload.file, "utf-8"))
    for fb in reader:
        # check to see if we already have it
        db.execute(
            """
            select id from post P
            where P.onyen = %(onyen)s and P.key = %(key)s""",
            fb,
        )
        fb["time"] = timestring.Date(fb["time"]).date
        existing = db.fetchone()
        if not existing:
            db.execute(
                """
                insert into post (time, onyen, key, ip)
                values (%(time)s, %(onyen)s, %(key)s, '')
                returning id""",
                fb,
            )
            fb["postid"] = db.fetchone()[0]
        else:
            fb["postid"] = existing[0]
        db.execute(
            """
            insert into feedback (postid, score, msg)
            values (%(postid)s, %(score)s, %(msg)s)
            on conflict (postid) do update
            set score = %(score)s, msg = %(msg)s""",
            fb,
        )
    return {}
async def remind(self, ctx, when: str, *, text: str = None):
    match = self.remind_regex.findall(when)
    if len(match) == 0:
        await self.bot.say(':warning: `Invalid Time Format`\n**Example:** `1d2h (1 day, 2 hours)`')
        return
    utc = datetime.utcnow()
    utc_ = int(utc.strftime("%s"))
    idk = True
    try:
        # each regex match alternates between an amount and a unit, e.g. ('1', 'd');
        # accumulate amount * unit-in-seconds onto the current unix time
        for x in match:
            for s in x:
                if idk:
                    succ = s
                    idk = False
                else:
                    hah = int(succ) * self.TimeUnits[s]
                    time = int(hah) + int(utc_)
                    idk = True
    except Exception:
        # fall back to parsing the whole string as an absolute date
        try:
            time = int(timestring.Date(when).to_unixtime())
            if utc_ >= time:
                await self.bot.say(':warning: `Invalid Time Format`\n**Example:** `1d2h (1 day, 2 hours)`')
                return
        except Exception:
            await self.bot.say(':warning: `Invalid Time Format`\n**Example:** `1d2h (1 day, 2 hours)`')
            return
    sql = 'INSERT INTO `reminders` (`user`, `time`, `message`) VALUES (%s, %s, %s)'
    self.cursor.execute(sql, (ctx.message.author.id, time, text))
    self.connection.commit()
    await self.bot.say(':white_check_mark: Reminder set.')
def get_year(soup):
    try:
        tag, code = marc_tags['year']
        datafield = soup.find_all(tag=tag)[0]
        return timestring.Date(datafield.find_all(code=code)[0].string).year
    except Exception:
        return None