def cast(self, time, retro=False):
    '''force time to be in workhours'''
    if self.isworktime(time):
        return time  # ok
    if retro:
        if not self.isworkday(time) or time.time() < self.start:
            return datetimef(self.prevworkday(time.date()), self.end)
        # only remaining case is time > self.end on a work day
        return datetimef(time.date(), self.end)
    else:
        if not self.isworkday(time) or time.time() > self.end:
            return datetimef(self.nextworkday(time.date()), self.start)
        # only remaining case is time < self.start on a work day
        return datetimef(time.date(), self.start)
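# A minimal usage sketch for cast, assuming cast is available as a plain
# function and attaching it to a hypothetical calendar class. The
# Mon-Fri 09:00-17:00 calendar and the datetimef helper below are
# assumptions for illustration, not part of the original code.
from datetime import datetime, time as dtime, timedelta

def datetimef(d, t):
    # assumed helper: combine a date and a time into a datetime
    return datetime.combine(d, t)

class WorkCalendar:
    start, end = dtime(9, 0), dtime(17, 0)

    def isworkday(self, t):
        return t.weekday() < 5  # Mon..Fri

    def isworktime(self, t):
        return self.isworkday(t) and self.start <= t.time() <= self.end

    def nextworkday(self, d):
        d += timedelta(days=1)
        while d.weekday() >= 5:
            d += timedelta(days=1)
        return d

    def prevworkday(self, d):
        d -= timedelta(days=1)
        while d.weekday() >= 5:
            d -= timedelta(days=1)
        return d

WorkCalendar.cast = cast  # attach the method defined above for the demo

cal = WorkCalendar()
print(cal.cast(datetime(2023, 4, 8, 12, 0)))              # Saturday noon -> Monday 09:00
print(cal.cast(datetime(2023, 4, 8, 12, 0), retro=True))  # Saturday noon -> Friday 17:00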
def _generate_smbg(self, d):
    """Generate timestamps and smbg values from a non-uniform pool of
    potential timestamps."""
    readings = []
    i = 0
    while i < self.readings_per_day:
        hour = random.choice(HOURS)
        timestamp = dt(d.year, d.month, d.day, hour,
                       random.choice(SIXTY), random.choice(SIXTY),
                       random.choice(MICRO))
        # Collect Dexcom readings from the same date and hour to anchor the value.
        near = []
        for reading in self.dexcom:
            t = reading['deviceTime']
            if t.date() == d and t.hour == hour:
                near.append(reading)
        jump = random.randint(-26, 26)
        try:
            value = random.choice(near)['value'] + jump
            readings.append({'value': value, 'deviceTime': timestamp})
        except IndexError:
            # Raised when no near-enough timestamp exists: the data starts at
            # datetime.now(), which may be mid-afternoon, while this method
            # still tries to generate some morning timestamps.
            pass
        i += 1
    return readings
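# The generator above leans on module-level names that are not shown in the
# snippet: HOURS, SIXTY, MICRO, the dt alias, and self.dexcom. The pools
# below are assumptions for illustration only; the docstring calls the pool
# non-uniform, so HOURS over-represents waking hours here as a guess.
import random
from datetime import datetime as dt

HOURS = [7, 8, 8, 9, 11, 12, 12, 13, 17, 18, 18, 19, 21, 22]  # weighted hour pool
SIXTY = range(60)                 # minute/second choices
MICRO = range(0, 1000000, 1000)   # microsecond choices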
def test_create_report_error_people_seen(self):
    region = 'CC'
    trail_name = 'test_trail'
    trailhead = create_trail_and_trailhead(name=trail_name, region=region,
                                           coordinates=fake.word(), filters=None)
    time = datetime.now()
    path = reverse('reports_trailhead', args=(region, trailhead.trail.id, trailhead.id,))
    response = self.client.post(path, {
        'trail': trailhead.trail.id,
        'trailhead': trailhead.id,
        'date_hiked': time.date(),
        'day_hiked': 'Th',
        'trail_begin': time.time(),
        'trail_end': time.time(),
        'pkg_location': 'P',
        'pkg_estimate_begin': 29,
        'pkg_estimate_end': 34,
        'cars_seen': 34,
        'people_seen': -344,  # invalid: negative count should fail validation
    })
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed('reports.html')
    self.assertContains(response, 'Ensure this value is greater than or equal to 0')
def get_time_events(self, time, **kwargs):
    all_day = kwargs.get('all_day')
    delta = kwargs.get('delta')
    if all_day:
        # All-day events that fall on the given date.
        return [event for event in self.event_list
                if time.date() == event.start_time.date() and event.all_day]
    elif delta:
        # Timed events starting within [time, time + delta), paired with duration.
        return [[event, event.get_duration()] for event in self.event_list
                if time <= event.start_time < time + delta and not event.all_day]
    else:
        return []
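# A minimal usage sketch for get_time_events, assuming a hypothetical Event
# type with start_time, all_day, and get_duration(), and a container exposing
# event_list; none of these definitions come from the original code.
from datetime import datetime, timedelta

class Event:
    def __init__(self, start_time, end_time, all_day=False):
        self.start_time = start_time
        self.end_time = end_time
        self.all_day = all_day

    def get_duration(self):
        return self.end_time - self.start_time

class Calendar:
    def __init__(self, events):
        self.event_list = events

Calendar.get_time_events = get_time_events  # attach the method defined above

cal = Calendar([
    Event(datetime(2023, 5, 1, 9), datetime(2023, 5, 1, 10)),
    Event(datetime(2023, 5, 1, 0), datetime(2023, 5, 2, 0), all_day=True),
])
print(cal.get_time_events(datetime(2023, 5, 1), all_day=True))                 # the all-day event
print(cal.get_time_events(datetime(2023, 5, 1, 8), delta=timedelta(hours=4)))  # [[timed event, 1:00:00]]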
def test_create_report_view(self):
    region = 'CC'
    trail_name = 'test_trail'
    trailhead = create_trail_and_trailhead(name=trail_name, region=region,
                                           coordinates=fake.word(), filters=None)
    time = datetime.now()
    path = reverse('reports_trailhead', args=(region, trailhead.trail.id, trailhead.id,))
    post_response = self.client.post(path, {
        'trail': trailhead.trail.id,
        'trailhead': trailhead.id,
        'date_hiked': time.date(),
        'day_hiked': 'Th',
        'trail_begin': time.time(),
        'trail_end': time.time(),
        'car_type': 'Suv',
        'weather_type': 'S',
        'temperature': 'C',
        'bathroom_status': 'C',
        'bathroom_type': 'FP',
        'access': 'FS',
        'access_distance': 5.0,
        'access_condition': 'P+',
        'pkg_location': 'P',
        'pkg_estimate_begin': 29,
        'pkg_estimate_end': 34,
        'cars_seen': 34,
        'people_seen': 344,
        'horses_seen': False,
        'dogs_seen': True,
    })
    self.assertRedirects(post_response, path)
    # path was already reversed with its args above.
    get_response = self.client.get(path)
    reports = get_response.context['reports_list']
    self.assertEqual(get_response.status_code, 200)
    self.assertTemplateUsed('reports.html')
    self.assertContains(get_response, 'Reports (1)')
    self.assertEqual(len(reports), 1)
    self.assertEqual(reports[0].trail.name, 'test_trail')
def prepare_data(self):
    if self._data is not None:
        return self._data

    time_offset = timedelta(days=SAMPLE_DATE_OFFSET)
    time_bias = timedelta(days=SAMPLE_DATE_BIAS)
    self._limit_sample_start = self.date - time_offset - time_bias
    self._limit_sample_end = self._limit_sample_start + time_offset

    # Aggregate volume/price/count bounds over the sample window.
    sql = ("SELECT "
           "MIN(vol) AS vol_min, AVG(vol) AS vol_avg, MAX(vol) AS vol_max, "
           "MIN(close) AS price_min, AVG(close) AS price_avg, MAX(close) AS price_max, "
           "MIN(count) AS count_min, MAX(count) AS count_max "
           "FROM {0} "
           "WHERE `code`='{1}' AND `time`>='{2}' AND `time`<='{3}'").format(
               TABLE_NAME_5MIN, self.code,
               self._limit_sample_start, self._limit_sample_end)
    rs = self.db.execute(sql)
    self._vol_min, self._vol_avg, self._vol_max, \
        self._price_min, self._price_avg, self._price_max, \
        self._count_min, self._count_max = rs.fetchone()
    if self._count_min is None:
        raise RuntimeError("Cannot fetch sample data for stock {} at {}".format(
            self.code, self.date))

    shifted_date = self._get_shifted_startdate()
    if shifted_date is None:
        return

    # Daily totals, used for normalization.
    rs = self.db.execute(
        "SELECT `date`, ROUND((`traded_market_value`/`close`)) AS total_vol "
        "FROM {0} "
        "WHERE `code`='{1}' AND `date`>='{2}' AND `date`<='{3}' "
        "ORDER BY `date` ASC".format(
            TABLE_NAME_DAILY, self.code, shifted_date, self.date))
    daily_df = pd.DataFrame(rs.fetchall())
    daily_df.columns = ['date', 'total_vol']
    self._daily_df = daily_df.set_index(['date'], drop=True)

    # 5-minute bars from the shifted start date up to (and including) self.date.
    rs = self.db.execute(
        "SELECT * "
        "FROM {0} "
        "WHERE `code`='{1}' AND `time`>='{2}' AND `time`<='{3}' "
        "ORDER BY time ASC".format(
            TABLE_NAME_5MIN, self.code, shifted_date,
            self.date + timedelta(days=1)))
    df = pd.DataFrame(rs.fetchall())
    df.columns = ['code', 'time', 'open', 'high', 'low', 'close',
                  'vol', 'amount', 'count']
    df = df.set_index(['time'], drop=True)
    df = df.drop(labels='code', axis=1)
    df['date'] = [time.date() for time in df.index.tolist()]
    self._data = df
    return self._data
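# A side note on the final transformation above: with a pandas DatetimeIndex,
# the per-row date can be taken in one vectorized step instead of a Python
# list comprehension. A minimal sketch (column names here are illustrative):
import pandas as pd

frame = pd.DataFrame({'close': [10.0, 10.5, 11.0]},
                     index=pd.to_datetime(['2021-03-01 09:35',
                                           '2021-03-01 09:40',
                                           '2021-03-02 09:35']))
frame['date'] = frame.index.date  # same result as [t.date() for t in frame.index.tolist()]
print(frame)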
def mark_attendance_from_checkin(checkin, employee, time):
    att_date = time.date()
    in_time = ''
    out_time = ''
    # Parameterized query avoids SQL injection via the employee/date values.
    checkins = frappe.db.sql(
        """select name, time from `tabEmployee Checkin`
           where employee = %s and date(time) = %s
           order by time""",
        (employee, att_date), as_dict=True)
    if checkins:
        if len(checkins) >= 2:
            in_time = checkins[0].time
            out_time = checkins[-1].time
        elif len(checkins) == 1:
            in_time = checkins[0].time
    # Mark Present only when both an in and an out punch exist.
    status = 'Present' if in_time and out_time else 'Absent'
    att = frappe.db.exists('Attendance', {
        'employee': employee,
        'attendance_date': att_date,
        'docstatus': 0
    })
    if not att:
        att = frappe.new_doc("Attendance")
        att.employee = employee
        att.attendance_date = att_date
        att.shift = 'G'
        att.status = status
        att.in_time = in_time
        att.out_time = out_time
        att.save(ignore_permissions=True)
        frappe.db.commit()
        return att.name
    else:
        if in_time:
            frappe.db.set_value("Attendance", att, 'in_time', in_time)
        if out_time:
            frappe.db.set_value("Attendance", att, 'out_time', out_time)
        frappe.db.set_value("Attendance", att, 'shift', 'G')
        frappe.db.set_value("Attendance", att, 'status', status)
        return att
def timesFix():
    # Sort the tweet timestamps into per-state time buckets, so the result
    # resembles e.g. "Texas, 10PM -> number of tweets in that window",
    # one series per state. Three output files (15 min, 30 min, 1 hour).
    with open("data/usefulTweets.json") as tw:
        tweetsUsed = json.load(tw)

    masterNorm = []
    masterTotal = []
    tweetsUsed.reverse()
    for tweet in tweetsUsed:
        if tweet["location"] in TOPFIVESTATES:
            time = tweet["created_at"]
            try:
                tweet["created_at"] = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
            except TypeError:
                pass  # already parsed
            masterTotal.append(tweet)
        if tweet["location"] in NORMTOPFIVESTATES:
            time = tweet["created_at"]
            try:
                tweet["created_at"] = datetime.strptime(time, '%Y-%m-%d %H:%M:%S')
            except TypeError:
                pass  # already parsed
            masterNorm.append(tweet)

    # Trim masterNorm to the window 2018-02-27 .. 2018-03-03.
    masterNormTime = [t["created_at"] for t in masterNorm]
    indexBeg = 0
    indexEnd = 0
    for i, time in enumerate(masterNormTime):
        if time.date() == datetime(2018, 2, 27).date() and not indexBeg:
            indexBeg = i
        if time.date() == datetime(2018, 3, 3).date() and not indexEnd:
            indexEnd = i
    masterNorm = masterNorm[indexBeg:indexEnd]

    startTime = masterNorm[0]["created_at"].date()
    endTime = masterNorm[-1]["created_at"].date() + timedelta(days=1)
    loop15Minutes = list(rrule(MINUTELY, interval=15, dtstart=startTime, until=endTime))
    loop30Minutes = list(rrule(MINUTELY, interval=30, dtstart=startTime, until=endTime))
    loopHourly = list(rrule(HOURLY, interval=1, dtstart=startTime, until=endTime))
    indexLoop15 = [t.strftime('%Y-%m-%d %H:%M:%S') for t in loop15Minutes]
    indexLoop30 = [t.strftime('%Y-%m-%d %H:%M:%S') for t in loop30Minutes]
    indexLoop60 = [t.strftime('%Y-%m-%d %H:%M:%S') for t in loopHourly]

    def countBuckets(bucketStarts):
        # Count tweets per state per bucket; tweets are consumed in order,
        # so each tweet lands in exactly one bucket. The final element only
        # closes the last bucket.
        counts = {key: [0] * len(bucketStarts) for key in NORMTOPFIVESTATES}
        index = 0
        for counter in range(len(bucketStarts) - 1):
            while index < len(masterNorm):
                twTime = masterNorm[index]["created_at"]
                twLocation = masterNorm[index]["location"]
                if bucketStarts[counter] <= twTime <= bucketStarts[counter + 1]:
                    counts[twLocation][counter] += 1
                    index += 1
                else:
                    break
            # Per-million-population normalization, currently disabled:
            # for state, items in counts.items():
            #     items[counter] = (items[counter] / STATES_POPULATION[state]) * 1000000
            #     counts[state] = items
        return counts

    states15N = countBuckets(loop15Minutes)
    states30N = countBuckets(loop30Minutes)
    states60N = countBuckets(loopHourly)

    # Reshape the bucketed counts into long-format tables to help with R
    # autocorrelation / temporal analysis.
    states15N["Times"] = indexLoop15
    states30N["Times"] = indexLoop30
    states60N["Times"] = indexLoop60
    newDict15 = {"Location": [], "Tweets": [], "Times": []}
    newDict30 = {"Location": [], "Tweets": [], "Times": []}
    newDict60 = {"Location": [], "Tweets": [], "Times": []}
    for keys, items in states15N.items():
        if not keys == "Times":
            for idx, count in enumerate(items):
                newDict15["Location"].append(keys)
                newDict15["Tweets"].append(count)
                newDict15["Times"].append(states15N["Times"][idx])
    for keys, items in states30N.items():
        if not keys == "Times":
            for idx, count in enumerate(items):
                newDict30["Location"].append(keys)
                newDict30["Tweets"].append(count)
                newDict30["Times"].append(states30N["Times"][idx])
    for keys, items in states60N.items():
        if not keys == "Times":
            for idx, count in enumerate(items):
                newDict60["Location"].append(keys)
                newDict60["Tweets"].append(count)
                newDict60["Times"].append(states60N["Times"][idx])
    tweets15 = DataFrame(newDict15)
    tweets30 = DataFrame(newDict30)
    tweets60 = DataFrame(newDict60)
    tweets15.to_csv("data/tweets15Days.csv")
    tweets30.to_csv("data/tweets30Days.csv")
    tweets60.to_csv("data/tweets60Days.csv")
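# The hand-rolled bucketing loops above could also be expressed with a pandas
# groupby/Grouper; a minimal sketch of the same idea, assuming each tweet is a
# dict with a parsed "created_at" and a "location" string. This is an
# alternative technique, not the method the script actually uses.
import pandas as pd

tweets = [
    {"created_at": pd.Timestamp("2018-02-27 10:03:00"), "location": "Texas"},
    {"created_at": pd.Timestamp("2018-02-27 10:12:00"), "location": "Texas"},
    {"created_at": pd.Timestamp("2018-02-27 10:40:00"), "location": "Florida"},
]
df = pd.DataFrame(tweets)

# One row per (bucket, state): the same long format the CSVs above contain.
counts = (df.groupby([pd.Grouper(key="created_at", freq="15min"), "location"])
            .size()
            .rename("Tweets")
            .reset_index())
print(counts)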