def get_posts_per_day(self):
    """ Gets average count of posts per day for the last 7 days """
    today = date.today()
    ppd = cache.get(CACHE_KEY_PPD + str(today))
    if ppd is not None:  # a cached average of 0.0 is still a valid hit
        return ppd
    posts_per_days = []
    for i in POSTS_PER_DAY_RANGE:
        day_end = today - timedelta(i + 1)
        day_start = today - timedelta(i + 2)
        day_time_start = timezone.make_aware(
            datetime.combine(day_start, dtime()),
            timezone.get_current_timezone())
        day_time_end = timezone.make_aware(
            datetime.combine(day_end, dtime()),
            timezone.get_current_timezone())
        posts_per_days.append(float(self.filter(
            pub_time__lte=day_time_end,
            pub_time__gte=day_time_start).count()))
    ppd = sum(posts_per_days) / len(posts_per_days)
    cache.set(CACHE_KEY_PPD + str(today), ppd)
    return ppd
def make_people(self):
    """ Make mock data from the Person and Child demo models.

    Follows the format:
        Person()
            first_name (CharField)
            last_name (CharField)
            last_modifed (DateField)
            birth_date (DateTimeField)
            hammer_time (TimeField)
        Child()
            parent (ForeignKey(Person))
            first_name (CharField)
            last_name (CharField)
            age (IntegerField)
            color (CharField)
    """
    people = (
        ('John', 'Doe', (
            ('Will', 'Doe', 5, 'R'),
            ('James', 'Doe', 8, ''),
            ('Robert', 'Doe', 3, 'G'),
        ),
            date.today() - timedelta(seconds=self.day * 5),
            datetime.today() - timedelta(seconds=self.day),
            dtime(hour=12)),
        ('Maria', 'Smith', (
            ('Susan', 'Smith', 1, 'Y'),
            ('Karen', 'Smith', 4, 'B'),
        ),
            date.today() - timedelta(seconds=self.day * 10),
            datetime.today() - timedelta(seconds=self.day * 30),
            dtime(hour=16)),
        ('Donald', 'King', (
            ('Charles', 'King', None, ''),
            ('Helen', 'King', 7, 'G'),
            ('Mark', 'King', 2, 'Y'),
            ('Karen', 'King', 4, 'R'),
            ('Larry', 'King', 5, 'R'),
            ('Lisa', 'King', 3, 'R'),
        ),
            datetime.today() - timedelta(seconds=self.day * 15),
            datetime.today() - timedelta(seconds=self.day * 60),
            dtime(hour=20)),
        ('Paul', 'Nelson', (),
            date.today() - timedelta(seconds=self.day * 20),
            datetime.today() - timedelta(seconds=self.day * 90),
            dtime(hour=22)),
    )
    for first, last, cn, lm, bd, ht in people:
        person = Person(
            first_name=first,
            last_name=last,
            last_modifed=lm,   # DateField
            birth_date=bd,     # DateTimeField
            hammer_time=ht)    # TimeField
        person.save()
        for child_first, child_last, age, color in cn:
            child = Child(
                parent=person,
                first_name=child_first,
                last_name=child_last,
                age=age,
                color=color
            )
            child.save()
def test_timezones(self):
    due0 = dtime(2013, 11, 2, 5, 3, 3, tzinfo=pytz.utc)
    start0 = dtime(2013, 11, 1, 20, 20, 15, tzinfo=pytz.utc)
    finish0 = dtime(2013, 11, 3, 5, 9, 3, tzinfo=pytz.utc)
    action0 = Action(self.text, due=due0, start=start0, finish=finish0)
    self.assertEqual(action0.dt('due', 'utc').day, 2)
    self.assertEqual(action0.dt('start_time', 'utc').hour, 20)
    self.assertEqual(action0.dt('finish_time', 'utc').hour, 5)
    self.assertEqual(action0.dt('due', 'local').day, 1)
    self.assertEqual(action0.dt('start_time', 'local').hour, 13)
    self.assertEqual(action0.dt('finish_time', 'local').hour, 22)
def get_totals_interval(self, data, start=dtime(5, 0, 0),
                        finish=dtime(16, 0, 0), interval=timedelta(hours=3)):
    """
    :param data : expected to be filtered for a single station already
    :param start : datetime.time object beginning for first (morning) interval
    :param finish: datetime.time object beginning for second (evening) interval
    :param interval: length of interval
    :return: start and finish entrance totals, keyed by date
    """
    totals = {}
    dates = set(data[:, 6].tolist())
    for day in dates:
        same_day = data[np.where(data[:, 6] == day)]
        filtered_morning = self.filter_data_interval(same_day, start, interval)
        filtered_evening = self.filter_data_interval(same_day, finish, interval)
        in_total_morn = np.sum(filtered_morning[:, -2].astype(int))
        in_total_eve = np.sum(filtered_evening[:, -2].astype(int))
        # collect the per-day totals so they can be returned as promised
        totals[day] = (in_total_morn, in_total_eve)
    return totals
def WaitUntilTime(waitdays, hour, minute, message):
    startStep()
    whenStart = datetime.combine(datetime.today() + timedelta(days=waitdays),
                                 dtime(hour, minute))
    userMessage(message + "@" + whenStart.isoformat(' '))
    while datetime.now() < whenStart:
        time.sleep(updateInterval / speedUp)
    endStep()
def schedule_trip(trip, scheduler, api):
    previous = datetime.now() - timedelta(days=1)
    prior_venue = None
    for i in trip.checkins:
        c = i.getCheckinTime(trip.offset)
        if prior_venue:
            while (previous + prior_venue.category.transit_time > c):
                c += prior_venue.category.transit_time
        previous = c
        prior_venue = i
        scheduler.enterabs(c, 1, api.checkins.add, ({'venueId': i.id},))
    if trip.checkouts:
        for i in trip.checkouts:
            c = i.getCheckoutTime(trip.offset)
            if prior_venue:
                while (previous + prior_venue.category.transit_time > c):
                    c += prior_venue.category.transit_time
            previous = c
            prior_venue = i
            scheduler.enterabs(c, 1, api.checkins.add, ({'venueId': i.id},))
    # Compute the midnight after our last checkout
    d = previous.date() + timedelta(days=1)
    t = dtime(0)
    midnight = datetime.combine(d, t)
    # Schedule an event so the queue isn't empty until the next day
    scheduler.enterabs(midnight, 1, nothing, ())
    return scheduler
def _collectSendData(self):
    """collecting data from Entries and sending this voc to Controller"""
    data = {}
    data["artist"] = self._elements["eArtist"].get()
    data["track"] = self._elements["eTrack"].get()
    year = int(self._elements["eYear"].get())
    month = int(self._elements["eMonth"].get())
    day = int(self._elements["eDay"].get())
    hour = int(self._elements["eHour"].get())
    minute = int(self._elements["eMinute"].get())
    dtimeObj = dtime(year, month, day, hour, minute).timestamp()
    data["timestamp"] = math.floor(dtimeObj)
    isAuthentificated = controller.Scrobbler.hasUser()
    if not isAuthentificated:
        controller.Scrobbler.authentificateUser()
        self._createQuestionFrame()
    else:
        if data["artist"] != "" and data["track"] != "":
            controller.Scrobbler.scrobble(data)
def test_filter_datetime_lte_filter(self):
    """ Test filtering 'DateTime' field types using simple ORM
    filtering (i.e. filter_type='lte')
    """
    people_report = self.make_people_report()
    # DateField
    ff = FilterField.objects.create(
        report=people_report,
        field='last_modifed',
        filter_type='lte',
        filter_value=str(date.today() - timedelta(seconds=self.day * 10)),
    )
    generate_url = reverse('generate_report', args=[people_report.id])
    response = self.client.get(generate_url)
    # filters the 4 people down to 3
    self.assertEquals(len(response.data['data']), 3)
    # TimeField
    ff.field = 'hammer_time'
    ff.filter_value = str(dtime(hour=13))
    ff.save()
    response = self.client.get(generate_url)
    self.assertEquals(len(response.data['data']), 1)
    # DateTimeField
    ff.field = 'birth_date'
    ff.filter_value = str(datetime.today() - timedelta(seconds=self.day * 40))
    ff.save()
    response = self.client.get(generate_url)
    self.assertEquals(len(response.data['data']), 2)
def format_date(dt, format, assume_utc=False, as_utc=False):
    ''' Return a date formatted as a string using a subset of Qt's formatting codes '''
    if not format:
        format = 'dd MMM yyyy'
    if not isinstance(dt, datetime):
        dt = datetime.combine(dt, dtime())
    if hasattr(dt, 'tzinfo'):
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=_utc_tz if assume_utc else _local_tz)
        dt = dt.astimezone(_utc_tz if as_utc else _local_tz)
    if format == 'iso':
        return isoformat(dt, assume_utc=assume_utc, as_utc=as_utc)
    if dt == UNDEFINED_DATE:
        return ''
    strf = partial(strftime, t=dt.timetuple())
    repl_func = partial(fd_repl_func, dt, strf, 'ap' in format.lower())
    return re.sub(
        '(s{1,2})|(m{1,2})|(h{1,2})|(ap)|(AP)|(d{1,4}|M{1,4}|(?:yyyy|yy))',
        repl_func, format)
def next_day(weekday, hour=0, minute=0, second=0):
    now = datetime.now()
    d_days = (weekday - now.weekday()) % 7
    day = now + timedelta(days=d_days)
    if d_days == 0 and dtime(hour, minute, second) < now.time():
        day += timedelta(days=7)
    return day.replace(hour=hour, minute=minute, second=second, microsecond=0)
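A quick hedged sanity check for next_day above; it assumes the same imports the snippet relies on, and that weekday follows datetime.weekday() numbering (Monday == 0):

from datetime import datetime, timedelta
from datetime import time as dtime

# Find the next Monday at 09:00. If today is Monday and it is already
# past 09:00, the call rolls over to the following Monday.
next_monday_nine = next_day(0, hour=9)
print(next_monday_nine)  # e.g. 2024-01-08 09:00:00 (depends on today)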
def test_filter_datetime_range(self):
    """ Test filtering 'DateTime' field types using a range filter.
    Each FilterField accepts 2 values of the respective field type to
    create the range.

    Ex. Filter a TimeField for 'user logins between 10am - 1pm':
        filter_type='range',
        filter_value("HH:MM") = "10:00"
        filter_value2("HH:MM") = "13:00"
    """
    people_report = self.make_people_report()
    # DateField
    ff = FilterField.objects.create(
        report=people_report,
        field='last_modifed',
        filter_type='range',
        filter_value=str(date.today() - timedelta(seconds=self.day * 7)),
        filter_value2=str(date.today()),
    )
    generate_url = reverse('generate_report', args=[people_report.id])
    response = self.client.get(generate_url)
    self.assertEquals(len(response.data['data']), 1)
    # TimeField
    ff.field = 'hammer_time'
    ff.filter_value = str(dtime(hour=10))
    ff.filter_value2 = str(dtime(hour=13))
    ff.save()
    response = self.client.get(generate_url)
    self.assertEquals(len(response.data['data']), 1)
    # DateTimeField
    ff.field = 'birth_date'
    ff.filter_value = str(datetime.now() - timedelta(seconds=self.day * 50))
    ff.filter_value2 = str(datetime.now() - timedelta(seconds=self.day * 70))
    ff.save()
    response = self.client.get(generate_url)
    self.assertEquals(len(response.data['data']), 1)
def send_mail(subject, username, body, force=False):
    """Send the mail only once per day."""
    now = datetime.now()
    if force or dtime(0, 0) <= now.time() <= dtime(0, 10):
        sender = '*****@*****.**'
        receivers = ['*****@*****.**' % username, '*****@*****.**' % ADMIN]
        message = """\
From: %s
To: %s
Subject: %s

%s
""" % (sender, ", ".join(receivers), subject, body)
        try:
            smtpObj = smtplib.SMTP('localhost')
            smtpObj.sendmail(sender, receivers, message)
            sys.stdout.write("Successfully sent email to %s\n" % username)
            sys.stdout.flush()
        except smtplib.SMTPException:
            sys.stderr.write("Error: unable to send email to %s\n" % username)
            sys.stdout.flush()
def __repr__(self):
    start_time = self.start_time
    if not self.start_time or self.start_time == "":
        start_time = dtime(0)
    game_time = datetime.combine(self.date, start_time).isoformat()
    team_prefix = "vs"
    location_prefix = "@"
    if self.site == "away":
        team_prefix = "at"
        location_prefix = "in"
    return "<%s %s %s %s %s %s>" % (self.__class__.__module__, game_time,
                                    team_prefix, self.opponent,
                                    location_prefix, self.location)
def worktime_diffference(start, end, debug=None):
    # work out the overlap between the argument period and the support working hours
    if debug:
        print('Start of period: ', start.strftime('%d/%m/%Y %H:%M'))
        print('End of period : ', end.strftime('%d/%m/%Y %H:%M'))
        print('awareness', start.tzinfo, end.tzinfo)
    day = timedelta(days=1)
    hour = timedelta(hours=1)
    if start.time() < dtime(SHIFT_TIMES['start']):
        start = start.replace(hour=SHIFT_TIMES['start']).replace(minute=0)
    if debug:
        print(start.time(), dtime(SHIFT_TIMES['end']),
              start.time() > dtime(SHIFT_TIMES['end']))
    if start.time() > dtime(SHIFT_TIMES['end']):
        start = start.replace(hour=SHIFT_TIMES['start']).replace(minute=0) + timedelta(days=1)
    if end.time() < dtime(SHIFT_TIMES['start']):
        end = end.replace(hour=SHIFT_TIMES['end']).replace(minute=0) + timedelta(days=-1)
    if end.time() > dtime(SHIFT_TIMES['end']):
        end = end.replace(hour=SHIFT_TIMES['end']).replace(minute=0)
    if debug:
        print('adjusted times:', start, end)
    if start.date() != end.date():
        delta_days = (end - start) // day  # whole days via floor division (e.g. 17 // 3 == 5)
        transposed_end = end - timedelta(days=delta_days)
        result = transposed_end - start + delta_days * timedelta(hours=SHIFT_TIMES['workhours'])
        if debug:
            print('delta days:', str(delta_days))
            print('transposed end: ', transposed_end.strftime('%d/%m/%Y %H:%M'))
        if transposed_end.date() != start.date():
            result += timedelta(hours=-SHIFT_TIMES['non workhours'])
            if debug:
                print('transposed end', transposed_end, 'start', start, 'result:', result)
    else:
        result = end - start
        if debug:
            print('end', end, 'start', start, 'result:', result)
    return round(result / hour, 2)
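A hedged usage sketch for the function above. SHIFT_TIMES lives outside this snippet, so the table here (a 08:00-18:00 shift) is purely an assumption for illustration:

from datetime import datetime, timedelta
from datetime import time as dtime

# Hypothetical shift table -- the real SHIFT_TIMES is defined elsewhere.
SHIFT_TIMES = {
    'start': 8,            # shift opens at 08:00
    'end': 18,             # shift closes at 18:00
    'workhours': 10,       # hours worked per day (18 - 8)
    'non workhours': 14,   # hours outside the shift (24 - 10)
}

# A period spanning two working days, 01/03 16:00 -> 02/03 10:00,
# gives 2h on day one plus 2h on day two = 4.0 support hours.
print(worktime_diffference(datetime(2021, 3, 1, 16, 0),
                           datetime(2021, 3, 2, 10, 0)))  # 4.0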
def setObsDate(self, shift=0):
    # detect utc offset
    diff = round((datetime.now() - datetime.utcnow()).total_seconds())
    self.shift = shift
    self.today = date.today()
    # midnight of today
    utcmidnight = (
        datetime.combine(date.today(), dtime(0, 0, 0))
        + timedelta(seconds=int(diff))
        + timedelta(seconds=int(shift))
    )
    self.obs.date = str(utcmidnight)
def show_day(year, month, day):
    d = date(year, month, day)
    unix_lower = int(datetime.combine(d, dtime(0, 0, 0)).strftime('%s'))
    unix_upper = int(datetime.combine(d, dtime(23, 59, 59)).strftime('%s'))
    events = []
    min_lat, min_lon = 400., 400.
    max_lat, max_lon = 0., 0.
    for key in EVENTS:
        event = EVENTS[key]
        if event['server_time'] >= unix_lower and event['server_time'] <= unix_upper:
            events.append(event)
            min_lat = min(min_lat, event['lat'])
            min_lon = min(min_lon, event['lon'])
            max_lat = max(max_lat, event['lat'])
            max_lon = max(max_lon, event['lon'])
    if len(events):
        center_lat = (max_lat + min_lat) / 2
        center_lon = (max_lon + min_lon) / 2
    else:
        center_lat = 50.1
        center_lon = 8.6
    return {'date': d, 'events': events, 'lat': center_lat, 'lon': center_lon}
def set_string_format(self, class_, format):
    """Specify the format used to convert a string into an instance of
    the class. class_ can be:
    - unicode : the format is the encoding
    - date, datetime : format = the format string as defined in strftime
    """
    if class_ is unicode:
        # test encoding ; will raise LookupError if invalid
        unicode('a').encode(format)
        # create the conversion function bytestring -> unicode string
        def _from_string(us):
            return unicode(us, format)
        self.from_string[unicode] = _from_string
    elif class_ is date:
        # test date format
        d = date(1994, 10, 7)
        t = time.strptime(d.strftime(format), format)
        if not t[:3] == d.timetuple()[:3]:
            raise TimeFormatError, '%s is not a valid date format' % format
        else:
            # create the conversion function string -> date
            def _from_string(ds):
                return date(*time.strptime(ds, format)[:3])
            self.from_string[date] = _from_string
    elif class_ is datetime:
        # test datetime format
        dt = datetime(1994, 10, 7, 8, 30, 15)
        t = time.strptime(dt.strftime(format), format)
        if not t[:6] == dt.timetuple()[:6]:
            raise TimeFormatError, '%s is not a valid datetime format' \
                % format
        else:
            # create the conversion function string -> datetime
            def _from_string(dts):
                return datetime(*time.strptime(dts, format)[:6])
            self.from_string[datetime] = _from_string
    elif class_ is dtime:
        # test time format
        dt = dtime(8, 30, 15)
        t = time.strptime(dt.strftime(format), format)
        if not t[3:6] == (dt.hour, dt.minute, dt.second):
            raise TimeFormatError, '%s is not a valid datetime.time format' \
                % format
        else:
            # create the conversion function string -> dtime
            def _from_string(dts):
                return dtime(*time.strptime(dts, format)[3:6])
            self.from_string[dtime] = _from_string
    else:
        raise ValueError, "Can't specify a format for class %s" % class_
def timefstr(date_list, timeformat):
    """
    converts a time (as a string) to a datetime object; the date is today

    removes the "used" element of the list

    :returns: datetime object
    """
    time_start = time.strptime(date_list[0], timeformat)
    time_start = dtime(*time_start[3:5])
    day_start = date.today()
    dtstart = datetime.combine(day_start, time_start)
    date_list.pop(0)
    return dtstart
def _draw_time(draw_time_str):
    """The lottery vendor's timezone math is wonky. We've seen a draw time
    of 25:00 for games whose timezone is -5 and whose local draw time is
    20:00. To fix that, I'm turning 25:00 into 01:00 EST"""
    hour, minute = int(draw_time_str[:2]), int(draw_time_str[2:])
    # adjust hours in case they give me more than 59 minutes
    extra_hours = minute // 60
    # remove hours from the minutes
    minute = minute % 60
    # wrap into a valid 0-23 hour after folding in the extra hours
    hour = (hour + extra_hours) % 24
    return dtime(hour, minute)
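A small hedged check of the normalisation above, assuming the standard library time class is bound to dtime as elsewhere in these snippets:

from datetime import time as dtime

# "2500" -> 25:00 wraps to 01:00, matching the docstring's example.
print(_draw_time("2500"))   # 01:00:00
# "1275" -> 75 minutes folds into an extra hour: 13:15.
print(_draw_time("1275"))   # 13:15:00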
def get_localized_time(datetime=None, long_format=False, time_only=False):
    """Display a date/time in a user-friendly way.

    It should be localized to the user's preferred language.

    Note that you can specify both long_format and time_only as True
    (or any other value that can be converted to a boolean True value),
    but time_only then wins: the long_format value is ignored.

    You can also use datetime.datetime or datetime.date instead of
    Plone's DateTime. In case of datetime.datetime everything works
    the same, in case of datetime.date the long_format parameter is
    ignored and on time_only an empty string is returned.

    :param datetime: [required] Date/time to show.
    :type datetime: DateTime, datetime or date
    :param long_format: When true, show long date format. When false
        (default), show the short date format.
    :type long_format: boolean
    :param time_only: When true, show only the time, when false
        (default), show the date.
    :type time_only: boolean
    :returns: Localized time
    :rtype: string
    :raises: ValueError
    :Example: :ref:`portal_get_localized_time_example`
    """
    tool = get_tool(name='translation_service')
    request = getRequest()
    # isinstance won't work because of date -> datetime inheritance
    if type(datetime) is date:
        if time_only:
            return ''
        datetime = dtime(datetime.year, datetime.month, datetime.day)
        long_format = False
    return tool.ulocalized_time(
        datetime,
        long_format,
        time_only,
        domain='plonelocales',
        request=request,
    )
def timefstr(dtime_list, timeformat):
    """
    converts a time (as a string) to a datetime object; the date is today

    removes the "used" element of the list

    :returns: datetime object
    """
    if len(dtime_list) == 0:
        raise ValueError()
    time_start = datetime.strptime(dtime_list[0], timeformat)
    time_start = dtime(*time_start.timetuple()[3:5])
    day_start = date.today()
    dtstart = datetime.combine(day_start, time_start)
    dtime_list.pop(0)
    return dtstart
def plotbyavgtime(datetimes):
    ndays = len(set(d.date() for d in datetimes))
    # bucket each time object
    datetimes = map(buckettime, datetimes)
    counts = {}
    for k, g in it.groupby(datetimes):
        mykey = k.time()
        mysum, mycnt = counts.get(mykey, (0, 0))
        counts[mykey] = (mysum + len(list(g)), mycnt + 1)
    ts = []
    # plot can only handle date objects, not time objects
    hack = dt.now()
    for h in xrange(24):
        for m in xrange(0, 60, 10):
            t = dtime(h, m)
            mysum, mycnt = counts.get(t, (0, 0))
            assert mycnt <= ndays
            ts.append((dt.combine(hack, t), mysum / float(ndays)))
    print argmax(ts, lambda x: x[1])
    timefmt = DateFormatter('%I:%M%p')
    fig, ax = plt.subplots()
    ax.plot([t[0] for t in ts], [t[1] for t in ts], '-')
    # format the ticks
    ax.xaxis.set_major_locator(HourLocator())
    ax.xaxis.set_major_formatter(timefmt)
    ax.xaxis.set_minor_locator(MinuteLocator(interval=10))
    ax.autoscale_view()
    # format the coords message box
    ax.fmt_xdata = DateFormatter('%I:%M%p')
    ax.grid(True)
    fig.autofmt_xdate()
    ax.set_ylabel('avg msgs/day at time')
    ax.set_title('GChat Activity Breakdown by Time of Day')
    return fig
def _getData(textString):
    currStringList = textString.split("\n")[0].split("=>")
    data = {}
    data["artist"] = currStringList[0]
    data["track"] = currStringList[1]
    dateList = currStringList[2].split("/")
    year = int(dateList[0])
    month = int(dateList[1])
    day = int(dateList[2])
    timeList = currStringList[3].split(":")
    hour = int(timeList[0])
    minute = int(timeList[1])
    dtimeObj = dtime(year, month, day, hour, minute).timestamp()
    data["timestamp"] = math.floor(dtimeObj)
    return data
def format_date(dt, format, assume_utc=False, as_utc=False):
    ''' Return a date formatted as a string using a subset of Qt's formatting codes '''
    if not format:
        format = 'dd MMM yyyy'
    if not isinstance(dt, datetime):
        dt = datetime.combine(dt, dtime())
    if hasattr(dt, 'tzinfo'):
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=_utc_tz if assume_utc else _local_tz)
        dt = dt.astimezone(_utc_tz if as_utc else _local_tz)
    if format == 'iso':
        return isoformat(dt, assume_utc=assume_utc, as_utc=as_utc)
    if dt == UNDEFINED_DATE:
        return ''
    repl_func = partial(fd_repl_func, dt, 'ap' in format.lower())
    return re.sub(
        '(s{1,2})|(m{1,2})|(h{1,2})|(ap)|(AP)|(d{1,4}|M{1,4}|(?:yyyy|yy))',
        repl_func, format)
def get_path(self, cycle, domain=None, fhr=0):
    '''
    Get the path of a product for a given cycle, domain, and forecast hour.
    @param cycle Datetime object representing the cycle
    @param domain Domain number, if applicable
    @param fhr Forecast hour, if applicable
    @return The path to the file, if it exists
    @raise AmbiguousProductException If more than one file matches the
           <self.file_pattern> for the given cycle
    @raise MissingProductException If no files match the
           <self.file_pattern> for the given cycle
    '''
    # for legacy code, convert cycle in seconds since epoch to a datetime
    if not isinstance(cycle, dtime):
        tm = time.gmtime(cycle)
        cycle = dtime(year=tm.tm_year, month=tm.tm_mon, day=tm.tm_mday,
                      hour=tm.tm_hour, minute=tm.tm_min)
    # set additional keys to interpolate
    kwargs = {}
    if domain is not None:
        kwargs['dom'] = domain
    if self.storm_id is not None:
        kwargs['storm_id'] = self.storm_id
        kwargs['storm_id_lc'] = self.storm_id.lower()
        kwargs['storm_id_uc'] = self.storm_id.upper()
    path = os.path.join(self.topdir, self.file_pattern)
    # resolve the path
    pathPattern = self._conf.timestrinterp(self.name, path, fhr, cycle, **kwargs)
    pathGlob = glob.glob(pathPattern)
    if len(pathGlob) == 1:
        return pathGlob[0]
    elif len(pathGlob) == 0:
        raise MissingProductException(pathPattern)
    else:
        raise AmbiguousProductException(
            "Pattern {} matched more than one file, "
            "namely {}".format(pathPattern, pathGlob))
def get(self):
    mymidnight = datetime.combine(datetime.today() - timedelta(hours=timezone),
                                  dtime(6, 0, 0, 0))
    mymidnight_timestamp = (mymidnight - datetime(1970, 1, 1)).total_seconds()
    bucket_name = os.environ.get(
        'BUCKET_NAME', app_identity.get_default_gcs_bucket_name())
    bucket = '/' + bucket_name + '/Users'
    index_file = cloudstorage.open(bucket + '/current_index')
    index = int(index_file.readline())
    for x in range(0, index + 1):
        self.response.write('in for loop')
        #filename = '/withingsapp.appspot.com/Tokens/' + str(x)
        filename = bucket + '/' + str(x) + '/' + 'token'
        if self.FileExists(bucket + '/' + str(x), 'token'):
            #(True):#(self.FileExists('/withingsapp.appspot.com/Tokens/', str(x))):
            with cloudstorage.open(filename) as cloudstorage_file:
                refresh_token = cloudstorage_file.readline()
                access_token = cloudstorage_file.readline()
                cloudstorage_file.close()
            startdate = mymidnight_timestamp
            enddate = int(time.time())  # timestamp
            url = GET_MEASURE + 'action=getmeas&startdate=' + str(startdate) + '&enddate=' + str(enddate) + '&meastype=1' + '&access_token='
            url = url + access_token
            self.response.write('accesstoken:')
            self.response.write(len(access_token))
            measure_req = urllib2.urlopen(url)
            measure_read = measure_req.read()
            #self.response.write(measure_read)
            #filename = '/withingsapp.appspot.com/Weight/Raw/' + str(x)
            filename = bucket + '/' + str(x) + '/Dailyrecords/' + str((datetime.today() - timedelta(hours=timezone)).strftime('%Y%m%d')) + '/weight.json'
            write_retry_params = cloudstorage.RetryParams(backoff_factor=1.1)
            with cloudstorage.open(
                    filename, 'w', content_type='text/plain',
                    options={'x-goog-meta-foo': 'foo',
                             'x-goog-meta-bar': 'bar'},
                    retry_params=write_retry_params) as cloudstorage_file:
                cloudstorage_file.write(measure_read)
                cloudstorage_file.close()
        else:
            self.response.write('tamader')
def clean_date_for_sort(dt, fmt=None):
    ''' Return dt with fields not shown in the format set to a default '''
    if not fmt:
        fmt = 'yyMd'
    if not isinstance(dt, datetime):
        dt = datetime.combine(dt, dtime())
    if hasattr(dt, 'tzinfo'):
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=_local_tz)
        dt = as_local_time(dt)
    if fmt == 'iso':
        fmt = 'yyMdhms'
    tt = {'year': UNDEFINED_DATE.year, 'mon': UNDEFINED_DATE.month,
          'day': UNDEFINED_DATE.day, 'hour': UNDEFINED_DATE.hour,
          'min': UNDEFINED_DATE.minute, 'sec': UNDEFINED_DATE.second}
    repl_func = partial(cd_repl_func, tt, dt)
    # the substitution result is discarded; cd_repl_func fills tt as a side effect
    re.sub('(s{1,2})|(m{1,2})|(h{1,2})|(d{1,4}|M{1,4}|(?:yyyy|yy))',
           repl_func, fmt)
    return dt.replace(year=tt['year'], month=tt['mon'], day=tt['day'],
                      hour=tt['hour'], minute=tt['min'], second=tt['sec'],
                      microsecond=0)
def date_to_numeric(date):
    #date = '07/06/2017 07:46:39'
    #date = 'Wed Nov  4 12:04:28 2011'
    #date = '※ 轉錄者: smallwo (71.212.4.14), 11/30/2016 14:40:18'
    temp = re.split(' ', date)
    if (len(temp[len(temp) - 1]) != 4 or date == 'None'):
        return 0
    if (re.search('※', date)):
        #print(date)
        # the regex below extracts the "m/d/Y H:M:S" portion of the text
        date = str(
            re.findall(r"[0-9]*/[0-9]*/[0-9]* [[0-9]*:[0-9]*:[0-9]*]*", date))
        date = date.replace("['", '')
        date = date.replace("']", '')
        date = dtime.strptime(date, '%m/%d/%Y %H:%M:%S')
    else:
        date = date.replace('  ', ' ')
        # regex to capture the weekday and month names
        regex = re.compile('(?P<week>[a-zA-Z]+)\s+(?P<month>[a-zA-Z]+)')
        m = regex.search(date)
        month = m.group('month')
        week = m.group('week')
        date = str(re.findall(r" [0-9]* [[0-9]*:[0-9]*:[0-9]* [0-9]*]*", date))
        date = date.replace("['", '')
        date = date.replace("']", '')
        date = week + ' ' + month + date
        if (month == 'July'):
            # month abbreviations are special-cased here: July needs the
            # full month name. %a is the weekday, %B the full month name,
            # %d the day, %H hours, %M minutes, %S seconds, %Y the year
            date = dtime.strptime(date, '%a %B %d %H:%M:%S %Y')
        else:
            # %b is the abbreviated month name
            date = dtime.strptime(date, '%a %b %d %H:%M:%S %Y')
    # convert every date to seconds since the epoch for easy comparison
    value = (date - dtime(1970, 1, 1)).total_seconds()
    return value
def date2datetime(dt):
    return dtime(dt.year, dt.month, dt.day)
def ts2t(value):
    hour, value = divmod(value, 3600)
    minute, value = divmod(value, 60)
    second, value = divmod(value, 1)
    return dtime(*map(int, [hour, minute, second, value * 1000000]))
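A brief hedged sketch of ts2t in use, assuming dtime is datetime.time as in the snippet; the fractional remainder becomes microseconds:

from datetime import time as dtime

# 3661.5 seconds past midnight -> 1h, 1m, 1s and 0.5s of microseconds
print(ts2t(3661.5))  # 01:01:01.500000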
import asyncio
from ircbot.plugin import BotPlugin
from datetime import datetime, timedelta
from datetime import time as dtime
import itertools
import functools
import aiohttp

CENTRAL = "Brussels-Central"
CHAPELLE = "Brussels-Chapelle/Brussels-Kapellekerk"

TRAIN_TIMES = [
    (dtime( 7, 15), dtime( 8, 15), CENTRAL,  "S11778"),  # 07:51
    (dtime( 8, 15), dtime( 9, 15), CENTRAL,  "S11779"),  # 08:51
    (dtime( 9, 15), dtime(10, 15), CENTRAL,  "S11780"),  # 09:52
    (dtime(10, 15), dtime(11, 15), CENTRAL,  "S11781"),  # 10:52
    (dtime(11, 15), dtime(12, 15), CENTRAL,  "S11782"),  # 11:52
    (dtime(15, 15), dtime(16, 15), CHAPELLE, "S11765"),  # 16:06
    (dtime(16, 15), dtime(17, 15), CHAPELLE, "S11766"),  # 17:06
    (dtime(17, 15), dtime(18, 15), CHAPELLE, "S11767"),  # 18:05
    (dtime(18, 15), dtime(19, 20), CHAPELLE, "S11768"),  # 19:05
]

RULES = {
    'train_morning': [
        {"hour": [9], "minute": [40, 50], "weekday": [0, 1, 2, 3, 4]},
    ],
    'train_evening': [
        {"hour": [17], "minute": [49, 59], "weekday": [0, 1, 2, 3, 4]},
    ],
def due_date(self):
    meta = self.event.tickets_event_meta
    if self.confirm_time:
        return datetime.combine(
            (self.confirm_time + timedelta(days=meta.due_days)).date(),
            dtime(23, 59, 59)).replace(tzinfo=timezone.tzlocal())
    else:
        return None
def cr_dynamo_event(state, module, client, clientstreams, event_source,
                    function_name, source_params):
    found = True
    streams = client.list_event_source_mappings(
        FunctionName=function_name)['EventSourceMappings']
    targetStream = None
    UUID = None
    eventObj = None
    for stream in streams:
        streamSource = stream['EventSourceArn']
        if event_source in streamSource:
            targetStream = streamSource
            UUID = stream['UUID']
            eventObj = stream
            break
    if state == 'absent':
        # delete
        if targetStream:  # if already missing, skip
            try:
                client.delete_event_source_mapping(UUID=UUID)
            except ClientError as e:
                module.fail_json(
                    msg="[E] dynamo trigger DELETE failed {0} - {1}".format(
                        event_source, e.response['Error']['Message']))
    else:
        # add
        params = eventObjConform(module, source_params)
        enabled = params['enabled']
        batch_size = params['batch_size']
        starting_position = params['starting_position']
        MaximumBatchingWindowInSeconds = params['MaximumBatchingWindowInSeconds']
        ParallelizationFactor = params['ParallelizationFactor']
        DestinationConfig = params['DestinationConfig']
        MaximumRecordAgeInSeconds = params['MaximumRecordAgeInSeconds']
        BisectBatchOnFunctionError = params['BisectBatchOnFunctionError']
        MaximumRetryAttempts = params['MaximumRetryAttempts']
        if not targetStream:
            table = event_source.split("/")[-1]
            targetStream = getTableStream(state, module, clientstreams, table)
        if eventObj:
            if MaximumBatchingWindowInSeconds != eventObj['MaximumBatchingWindowInSeconds']:
                eventObj.update({
                    "MaximumBatchingWindowInSeconds": MaximumBatchingWindowInSeconds
                })
                found = False
            if BisectBatchOnFunctionError != eventObj['BisectBatchOnFunctionError']:
                eventObj.update(
                    {"BisectBatchOnFunctionError": BisectBatchOnFunctionError})
                found = False
            if not found:
                try:
                    client.update_event_source_mapping(**eventObj)
                except ClientError as e:
                    module.fail_json(
                        msg="[E] dynamo trigger UPDATE failed {0} - {1}".format(
                            event_source, e.response['Error']['Message']))
        else:
            try:
                if 'StartingPositionTimestamp' in params:
                    StartingPositionTimestamp = params['StartingPositionTimestamp']
                    if StartingPositionTimestamp == 0 or StartingPositionTimestamp == '0':
                        year = dtime.today().year
                        StartingPositionTimestamp = dtime(year, 1, 1)
                    else:
                        StartingPositionTimestamp = dtime.utcfromtimestamp(
                            StartingPositionTimestamp)
                else:
                    year = dtime.today().year
                    StartingPositionTimestamp = dtime(year, 1, 1)
                params_obj = {
                    "EventSourceArn": targetStream,
                    "FunctionName": function_name,
                    "Enabled": enabled,
                    "BatchSize": batch_size,
                    "MaximumBatchingWindowInSeconds": MaximumBatchingWindowInSeconds,
                    "ParallelizationFactor": ParallelizationFactor,
                    "StartingPosition": starting_position,
                    "DestinationConfig": DestinationConfig,
                    "MaximumRecordAgeInSeconds": MaximumRecordAgeInSeconds,
                    "BisectBatchOnFunctionError": BisectBatchOnFunctionError,
                    "MaximumRetryAttempts": MaximumRetryAttempts
                }
                if starting_position == "AT_TIMESTAMP":
                    params_obj.update({
                        "StartingPositionTimestamp": StartingPositionTimestamp
                    })
                client.create_event_source_mapping(**params_obj)
                found = False
            except ClientError as e:
                module.fail_json(
                    msg="[E] dynamo trigger CREATE failed {0} - {1}".format(
                        event_source, e.response['Error']['Message']))
    return [event_source], False if found else True
import re
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import netCDF4 as nc4

from field_types import MetField, SoilField
from field_types import get_met_field, get_soil_field

#
# Set global options
#
#START_TIME = dtime(year=2006, month=9, day=10, hour=1, minute=0)
START_TIME = dtime(year=2006, month=5, day=26, hour=6, minute=0)
DURATION = tdelta(days=120)
INPUT_FREQUENCY = tdelta(hours=3)
INPUT_FREQUENCY = tdelta(hours=1)

# specify the number of grid points in the input data
NUM_LATS = 2881
NUM_LONS = 5760

# Prefix of input file to use (c1440_NR. to use full res input file)
G5NR_FILE_PREFIX = "c1440_NR."

# path where input data reside
G5NR_DATA_TOPDIR = os.getcwd()

#
# Globals
def main():
    ###################################--system imports--####################################
    import os, sys
    import argparse
    import time
    import logging
    if sys.version_info < (3, 5):
        exit("This script is written for python3.5 and above please upgrade python!")
    start_time = time.time()
    current_time = start_time

    ### Add base directory to path
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(BASE_DIR)

    ##################################--global functions--###################################
    def dynamic_import(abs_module_path, class_name):
        module = import_module(".".join([abs_module_path, class_name]))
        target_class = getattr(module, class_name)
        return target_class

    def report_time(prev_time, final=False):
        current = time.time()
        diff = current - prev_time
        seconds = diff % 60
        minutes = int((diff - seconds) / 60)
        mtext = "minute"
        if minutes != 1:
            mtext += "s"
        if final:
            logger.info("--- Time summary {} {} {} seconds---".format(
                minutes, mtext, seconds))
        else:
            logger.info("--- process finished in {} {} {} seconds---\n".format(
                minutes, mtext, seconds))
        return current

    def write_missing(missing):
        '''Write missing genomes to file'''
        with open("{outdir}/FlexTaxD.missing".format(outdir=args.outdir), "w") as of:
            for gen in missing:
                print(gen["genome_id"], end="\n", file=of)
        return

    #########################################################################################
    ##################################--error functions--####################################
    class InputError(Exception):
        """InputError"""

        def __init__(self, message, expression=""):
            self.expression = expression
            self.message = message

    #########################################################################################

    '''
    Supported programs
    '''
    programs = ["kraken2", "krakenuniq", "ganon"]

    parser = argparse.ArgumentParser()
    basic = parser.add_argument_group('basic', 'Basic commands')
    basic.add_argument(
        '-o', '--outdir', metavar="", default=".",
        help="Output directory (same directory as custom_taxonomy_databases dump)")
    basic.add_argument('-db', '--database', '--db', metavar="", type=str,
                       default=".ctdb",
                       help="Custom taxonomy sqlite3 database file")

    ### Download options, process local directory and potentially download files
    download_opts = parser.add_argument_group('download_opts',
                                              "Download and file handling")
    download_opts.add_argument(
        '-p', '--processes', metavar="", type=int, default=8,
        help="Use multiple cores for downloading genomes and kraken if -kp is not set")
    download_opts.add_argument('--download', action='store_true',
                               help="Download additional sequences")
    download_opts.add_argument('-r', '--representative', action='store_true',
                               help="Download GTDB representative genomes")
    download_opts.add_argument(
        '--rep_path', metavar="URL", default=latest_genome_reps,
        help="Specify GTDB representative version URL full path")
    download_opts.add_argument(
        '--force_download', action='store_true',
        help="Download sequences from genbank if not in refseq "
             "(WARNING: might include genome withdrawals)")
    download_opts.add_argument('--genomes_path', metavar="", default=None,
                               help='path to genomes')

    ### Kraken options; not needed for the public version -- this script is
    ### made to export names and nodes.dmp files
    classifier_opts = parser.add_argument_group('classifier_opts',
                                                "Classifier options")
    classifier_opts.add_argument('--create_db', action='store_true',
                                 help="Start create db after loading databases")
    classifier_opts.add_argument(
        '--dbprogram', metavar="", default="kraken2", choices=programs,
        help="Select one of the supported programs [" + ", ".join(programs) + "]")
    classifier_opts.add_argument('--db_name', metavar="", default=None,
                                 help="database directory (fullpath)")
    classifier_opts.add_argument(
        '--params', metavar="", default="",
        help="Add extra params to create command (supports kraken*)")
    classifier_opts.add_argument(
        '--test', action='store_true',
        help="test database structure, only use 100 seqs")
    classifier_opts.add_argument('--keep', action='store_true',
                                 help="Keep temporary files")
    classifier_opts.add_argument(
        '--skip', metavar="", default="",
        help="Do not include genomes within this taxonomy (child tree) in the "
             "database (works for kraken), can be a file ending with txt and "
             "genome ids one per row")
    classifier_opts.add_argument(
        '-kp', '--build_processes', metavar="", type=int, default=None,
        help="Use a different number of cores for kraken classification")

    debugopts = parser.add_argument_group("Logging and debug options")
    #debugopts.add_argument('--tmpdir', metavar='', default="/tmp/FlexTaxD", help="Specify reference directory")
    debugopts.add_argument('--logs', metavar='', default="logs/",
                           help="Specify log directory")
    debugopts.add_argument('--verbose', action='store_const', const=logging.INFO,
                           help="Verbose output")
    debugopts.add_argument('--debug', action='store_const', const=logging.DEBUG,
                           help="Debug output")
    debugopts.add_argument('--supress', action='store_const', const=logging.ERROR,
                           default=logging.WARNING, help="Supress warnings")
    parser.add_argument("--version", action='store_true', help=argparse.SUPPRESS)

    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()

    if not os.path.exists(args.database):
        raise FileNotFoundError(
            "No database file could be found, please provide a FlexTaxD database to run FlexTaxD!")
    if not os.path.exists(args.genomes_path):
        raise FileNotFoundError(
            "Directory {path} does not exist".format(path=args.genomes_path))

    if args.version:
        print("{name}: version {version}".format(name=__pkgname__,
                                                 version=__version__))
        print("Maintainer group: {maintaner} ({email})".format(
            maintaner=__maintainer__, email=", ".join(__email__)))
        print("Github: {github}".format(github=__github__))
        exit()

    '''Log file and verbose options'''
    logval = args.supress
    if args.debug:
        logval = args.debug
    elif args.verbose:
        logval = args.verbose

    from datetime import date
    from datetime import time as dtime
    t = dtime()
    today = date.today()
    logpath = args.logs + "FlexTaxD-create-" + today.strftime("%b-%d-%Y") + "{}.log"
    if os.path.exists(logpath):
        logpath = logpath.format("-{:%H:%M}".format(t))
    else:
        logpath = logpath.format("")

    logging.basicConfig(
        level=logval,
        format="%(asctime)s %(module)s [%(levelname)-5.5s] %(message)s",
        handlers=[logging.FileHandler(logpath), logging.StreamHandler()])
    logger = logging.getLogger(__name__)
    logger.info("FlexTaxD-create logging initiated!")
    logger.debug("Supported formats: {formats}".format(formats=programs))

    '''
    Process data
    '''
    if args.outdir:
        if not os.path.exists(args.outdir):
            os.system("mkdir -p {outdir}".format(outdir=args.outdir))
    skip = False
    if os.path.exists("{db_path}/library/library.fna".format(db_path=args.db_name)) or \
            os.path.exists("{db_path}/.tmp0.fasta".format(db_path=args.db_name)):
        ans = input("Database library file already exists, "
                    "(u)se library, (o)verwrite, (c)ancel? (u,o,c): ")
        if ans in ["o", "O"]:
            logger.info("Overwrite current build progress")
            shutil.rmtree("{db_path}".format(db_path=args.db_name))
        elif ans.strip() in ["u", "U"]:
            logger.info("Resume database build")
            skip = True
        else:
            exit("Cancel execution!")

    ''' 1. Process genome_path directory'''
    if not skip:
        process_directory = dynamic_import("modules", "ProcessDirectory")
        logger.info("Processing files; create kraken seq.map")
        process_directory_obj = process_directory(args.database)
        genomes, missing = process_directory_obj.process_folder(args.genomes_path)

    ''' 2. Download missing files'''
    if args.download or args.representative:
        download = dynamic_import("modules", "DownloadGenomes")
        download_obj = download(args.processes, outdir=args.outdir,
                                force=args.force_download,
                                download_path=args.genomes_path)
        new_genome_path, missing = download_obj.run(
            missing, representative=args.representative, url=args.rep_path)
        if not new_genome_path:
            still_missing = missing
            if len(still_missing) > 0:
                print("Not able to download: {nr}".format(nr=len(still_missing)))
        else:
            new_genomes, missing = process_directory_obj.process_folder(new_genome_path)
            genomes += new_genomes
    else:
        if len(missing) > 0:
            logger.info("Genome annotations with no matching source: {nr}".format(
                nr=len(missing)))
            write_missing(missing)

    ''' 3. Add genomes to database'''
    if args.db_name:
        if args.dbprogram.startswith("kraken"):
            logger.info("Loading module: CreateKrakenDatabase")
            classifier = dynamic_import("modules", "CreateKrakenDatabase")
        else:
            logger.info("Loading module: CreateGanonDB")
            classifier = dynamic_import("modules", "CreateGanonDB")
        limit = 0
        if args.test:
            limit = 10
        '''Use the genome -> path dictionary to build database'''
        if not skip:
            logger.info("Get genomes from input directory!")
            genomes = process_directory_obj.get_genome_path_dict()
        else:
            genomes = False
        if args.skip:
            if args.skip.endswith(".txt"):
                args.skip = read_skip_file(args.skip)
                logger.info(
                    "File passed to skip, {n} genomes and {x} taxids added to skiplist".format(
                        n=len(args.skip["genome_id"]), x=len(args.skip["tax_id"])))
        classifierDB = classifier(
            args.database,
            args.db_name,
            genomes,
            args.outdir,
            create_db=args.create_db,
            limit=limit,
            dbprogram=args.dbprogram,
            params=args.params,
            skip=args.skip,
            processes=args.processes,
            build_processes=args.build_processes,
            debug=args.debug,
            verbose=args.verbose,
        )
        report_time(current_time)
        if not skip:
            classifierDB.create_library_from_files()
            logger.info("Genome folder preprocessing completed!")

    ''' 4. Create database'''
    if args.create_db:
        report_time(current_time)
        logger.info("Create database")
        try:
            classifierDB.create_database(args.outdir, args.keep)
        except UnboundLocalError as e:
            logger.error("#Error: No database name was given!")
            logger.error("#UnboundLocalError " + str(e))
            exit()

    logger.debug(report_time(start_time, final=True))
from datetime import datetime, time as dtime, timedelta
from pymongo import MongoClient, collection
import os
import pandas as pd
import numpy as np

MONGODB_HOST = os.environ.get("MONGODB_HOST", "172.16.11.81")
STRATEGY = os.environ.get("STRATEGY_COL", "HENGQIN.strategy")


def readCalendar():
    with open("calendar.csv") as f:
        return f.read().split("\n")


OPEN_TIME = dtime(9)
CLOSE_TIME = dtime(15)


def expectedTime():
    calendar = np.array(readCalendar()[:-1])
    now = datetime.now()
    i = calendar.searchsorted(now.strftime("%Y-%m-%d"), side="right") - 1
    date = calendar[i]
    endDt = datetime.strptime(date, "%Y-%m-%d").replace(hour=16)
    return min([now, endDt])


def isTradeTime():
    calendar = readCalendar()
    now = datetime.now()
    date = now.strftime("%Y-%m-%d")
def find_listener(self, listeners, listener_collection):
    # check if the bell ringer's selected time is valid (currently a bell
    # ringer can only select one school)
    # Mon-Fri 8-9pm is limited to UT (University of Toronto) listeners;
    # Saturday and Sunday slots are only matched to Western University listeners.
    # All other times can be matched to either school.
    western_invalid_slots = set([3, 8, 13, 18, 23, 28, 33])
    toronto_invalid_slots = set([26, 27, 27, 29, 30, 31, 32, 33, 34])
    bell_ringer_avail_set = set(self.availability[:])
    if self.university == "西安大略大学 Western University":
        # check if the bell ringer's availability is a subset of western's invalid slots
        if bell_ringer_avail_set.issubset(western_invalid_slots):
            return -2
    if self.university == "多伦多大学 University of Toronto":
        # check if the bell ringer's availability is a subset of u toronto's invalid slots
        if bell_ringer_avail_set.issubset(toronto_invalid_slots):
            return -2
    # +++++++++Don't match any pairs within 3 hours of application time +++++++++++++
    # Offset Num        : |   1   |    2    |    3    |    4    |    5    | Day + 1  |
    # Start Match after : |  9am  |   10am  |   8pm   |   9pm   |   10pm  | Next Day |
    # Application Time  : |  <6am | 6am-7am | 7am-5pm | 5pm-6pm | 6pm-7pm |   >7pm   |
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    offset_num = 1
    start_date = self.application_time.date()
    if self.application_time.time() > dtime(19, 0):
        # If submitted application after 7pm, then go to the next day
        start_date = start_date + timedelta(days=1)
    elif self.application_time.time() > dtime(18, 0):
        # If submitted application after 6pm, then start matching after 10pm
        offset_num = 5
    elif self.application_time.time() > dtime(17, 0):
        # If submitted application after 5pm, then start matching after 9pm
        offset_num = 4
    elif self.application_time.time() > dtime(7, 0):
        # If submitted application after 7am, then start matching after 8pm
        offset_num = 3
    elif self.application_time.time() > dtime(6, 0):
        # If submitted application after 6am, then start matching after 10am
        offset_num = 2
    else:
        # If submitted application before or eq to 6am, then start matching after 9am
        offset_num = 1
    start_weekday = start_date.isoweekday()
    start_time_slot = (start_weekday - 1) * NUM_SLOTS_IN_ONE_DAY + offset_num
    # We start looking for matched date after this start_time_slot
    # Reorder availability list so that the first element is the next
    # potential time slot after start_weekend
    reordered_availability = self.availability[:]
    for time_slot in self.availability:
        if time_slot >= start_time_slot:
            break
        reordered_availability.pop(0)
        reordered_availability.append(time_slot)
    # Match a listener with the same availability
    continue_finding_listeners = True  # Flag indicates if need to loop through listeners again
    loop_number = 0  # How many times it has looped
    while continue_finding_listeners:
        continue_finding_listeners = False  # Dont loop again if no listener's time_slot matches
        for time in reordered_availability:
            for listener in listeners:
                # check the listener's university
                # (match will be faster if the listeners are split into two groups)
                if listener.university != self.university:
                    continue_finding_listeners = True
                    continue
                if time in listener.availability:
                    # Calculate matched date = start_date + date diff
                    matched_weekday = (time - 1) // NUM_SLOTS_IN_ONE_DAY + 1
                    delta_days = matched_weekday - start_weekday
                    if delta_days < 0:
                        delta_days += 7
                    delta_days += 7 * loop_number
                    matched_date = start_date + timedelta(days=delta_days)
                    # Check if the listener is available (if listener is already busy on this day)
                    if (str(time) in listener.avail_after
                            and matched_date <= listener.avail_after[str(time)]):
                        continue_finding_listeners = True
                        continue
                    # Update matched listener's avail_after
                    listener.avail_after[str(time)] = matched_date
                    if not config.DISABLE_FREEZING:
                        listener_collection.update(
                            {'_id': listener.db_id},
                            {"$set": {"avail_after." + str(time):
                                      datetime.combine(matched_date, dtime(0, 0))}})
                    return (listener, matched_date, convert_enum_to_availabilty(time))
        loop_number += 1
        # Don't match after 2 weeks from the first available bell ringer time_slot
        if loop_number >= 2:
            break
    return -1
def waitTillStartTime(self, gap_time=30):
    while not (self.start_at
               < dtime(hour=ddtime.now().hour, minute=ddtime.now().minute)
               < self.end_at):
        self.sleep(gap_time)
def dtime_for_line(line):
    y, m, d = [int(x) for x in line.split()[0].split("/")]
    hr, mn = [int(x) for x in line.split()[1].split(":")]
    return dtime(year=y, month=m, day=d, hour=hr, minute=mn)
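A quick hedged sketch of dtime_for_line on a log-style line; here dtime appears to be datetime.datetime, since it takes year/month/day keywords:

from datetime import datetime as dtime

# Parses a "YYYY/MM/DD HH:MM ..." prefix; the rest of the line is ignored.
print(dtime_for_line("2017/06/07 07:46 some log message"))
# 2017-06-07 07:46:00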
]

colors = [
    "red",
    #'magenta',
    "green",
]

line_width = 2.0

#extents = [10, -90, 40, -80]
extents = [20, -95, 40, -80]

#test
#start_date = dtime(year=2006, month=9, day=8, hour=7)
#end_date = dtime(year=2006, month=9, day=10, hour=6)
#interval = tdelta(hours=3)
start_date = dtime(year=2006, month=9, day=7, hour=18)
#end_date = dtime(year=2006, month=9, day=13, hour=1)  # bsnr last
#end_date = dtime(year=2006, month=9, day=12, hour=3)  # g5nr ends 9/12@3z
end_date = dtime(year=2006, month=9, day=11, hour=22)  # ...and has gap @ 9/11@23z
interval = tdelta(hours=1)

# Set the "initialization" dates, which will be used to calculate the
# absolute dates.
# get_geos5trk_track_data() will automatically set start_date to the first
# date in the 'atcf'
#paths[0].start_date = ...
#paths[1].start_date = dtime(year=2006, month=9, day=4, hour=0)  # set for gfdltrk too

# how frequently to mark the line showing the track
TIME_MARKER_INTERVAL = 6
gc.enable()

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler('analysis_manager.log')
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

if __name__ == '__main__':
    try:
        # ===Parameters setting===
        # set date to calculate statistics for yesterday
        start_date = datetime.combine(date.today() - timedelta(days=1),
                                      dtime(14, 0))
        end_date = datetime.combine(date.today(), dtime(14, 0))
        logger.info('============================================')
        logger.info("Start date: {}; end date: {}.".format(
            (start_date + timedelta(days=1)).strftime('%Y-%m-%d'),
            end_date.strftime('%Y-%m-%d')))
        find_ip_address = "115.146.85.107/"
        save_ip_address = ""
        db_name = "backup"
        politician_tweet_collection_name = "restfulTweets"
        politician_info_collection_name = "politicianInfo"
        hashtag_collection_name = "restfulByHashtag"
        f_tools = functional_tools.FunctionalTools()
        # ===Politician information and tweets manipulation===
        except (ConnectionError, ReadTimeout):
            sys.stdout.write("{} Connection error, freezing for 15 seconds\n".format(
                formatedDate()))
            self.sleep(15)
        except ConnectionResetError:  # never tested
            sys.stdout.write("{} Connection aborted, freezing for {} seconds\n".format(
                formatedDate(), 4 * 2 ** self._ConnectionResetErrors))
            self.sleep(4 * 2 ** self._ConnectionResetErrors)
        except KeyboardInterrupt:
            sys.stdout.write("{} Exit message received\n".format(formatedDate()))
            return
        except LoginError:
            raise
        except Exception as e:
            sys.stdout.write("{} Exception : {} : {}\n".format(
                formatedDate(), type(e).__name__, e))
        else:
            pass
        finally:
            self.saveUnfollowQueue()
            self.start()


if __name__ == "__main__":
    bot = InstaBotWithAutoMod(
        start_at=dtime(hour=23, minute=7),
        tags=("draw", "drawing"),
        end_at=dtime(hour=23, minute=12),
    )
    bot.login("username", "password")
    bot.start()
def TimeOfDay(dt):
    tod = dtime(dt.hour, dt.minute, dt.second, dt.microsecond)
    return tod
def get_divided_hours(self, contract, date_from_payslip, date_to_payslip,
                      hi_cutstr, hf_cutstr):
    # 1. =====> convert the payslip dates to the user's timezone
    employee_tz = pytz.timezone(contract.employee_id.company_id.partner_id.tz)
    country_emp_id = contract.employee_id.company_id.country_id.id
    date_from_payslip_c = employee_tz.localize(
        fields.Datetime.from_string(date_from_payslip))
    date_to_aux = fields.Datetime.from_string(date_to_payslip)
    date_to_payslip_c = employee_tz.localize(
        datetime.combine(date_to_aux.date(), dtime(hour=23, minute=59, second=59)))
    holidays_ids = self.env['hr.holidays'].get_holidays_ids(
        date_from_payslip_c, date_to_payslip_c, country_emp_id)
    # 2. query
    # 2.1. =====> convert the payslip dates to UTC
    date_from_payslip_c = date_from_payslip_c.astimezone(utc_cero)
    date_to_payslip_c = date_to_payslip_c.astimezone(utc_cero)
    # 2.2. strip the timezone
    date_from_payslip = datetime.combine(date_from_payslip_c.date(),
                                         date_from_payslip_c.time())
    date_to_payslip = datetime.combine(date_to_payslip_c.date(),
                                       date_to_payslip_c.time())
    # 2.3. query
    user_employee = contract.employee_id.user_id
    ph_consulta1 = self.env['account.analytic.line'].search(
        ['&', '&', '&',
         ('user_id', '=', user_employee.id),
         ('date_from', '<', str(date_from_payslip)),
         '&',
         ('date_to', '>', str(date_from_payslip)),
         ('date_to', '<', str(date_to_payslip)),
         ('state', '=', 'aprobado')],
        order='date_from')
    ph_consulta2 = self.env['account.analytic.line'].search(
        ['&', '&',
         ('date_from', '>=', str(date_from_payslip)),
         ('date_to', '<=', str(date_to_payslip)),
         '&',
         ('user_id', '=', user_employee.id),
         ('state', '=', 'aprobado')],
        order='date_from')
    ph_consulta3 = self.env['account.analytic.line'].search(
        ['&', '&', '&',
         ('user_id', '=', user_employee.id),
         ('date_to', '>', str(date_to_payslip)),
         '&',
         ('date_from', '>', str(date_from_payslip)),
         ('date_from', '<', str(date_to_payslip)),
         ('state', '=', 'aprobado')],
        order='date_from')
    # 2.4. split the timesheet chunks
    ph_consulta1 = self.cut_analyticline_withrange(
        ph_consulta1, date_from_payslip, date_to_payslip, employee_tz)
    ph_consulta2 = self.cut_analyticline_withrange(
        ph_consulta2, date_from_payslip, date_to_payslip, employee_tz)
    ph_consulta3 = self.cut_analyticline_withrange(
        ph_consulta3, date_from_payslip, date_to_payslip, employee_tz)
    # 2.5. merge
    partes_horas = ph_consulta1 + ph_consulta2 + ph_consulta3
    # 3. computation
    hi_cut = dtime(hour=int(hi_cutstr[:2]), minute=int(hi_cutstr[-2:]))
    hf_cut = dtime(hour=int(hf_cutstr[:2]), minute=int(hf_cutstr[-2:]))
    h_ordinarias = [0, 0]
    h_dominicales = [0, 0]
    e_ordinarias = [0, 0]
    e_dominicales = [0, 0]
    for registro_horas in partes_horas:
        fecha = registro_horas[0].date()
        corte_i = datetime.combine(fecha, hi_cut)
        corte_f = datetime.combine(fecha, hf_cut)
        holiday_obj = holidays_ids.filtered(
            lambda r: r.date == str(registro_horas[0].date()))
        if (registro_horas[0].weekday() == 6) or holiday_obj:
            if registro_horas[0] < corte_i:
                h_dominicales[1] += (
                    (corte_i - registro_horas[0]).total_seconds() / 3600)
                if registro_horas[1] <= corte_f:
                    h_dominicales[0] += (
                        (registro_horas[1] - corte_i).total_seconds() / 3600)
                else:
                    h_dominicales[0] += (
                        (corte_f - corte_i).total_seconds() / 3600)
                    h_dominicales[1] += (
                        (registro_horas[1] - corte_f).total_seconds() / 3600)
            else:
                if corte_f < registro_horas[1]:
                    h_dominicales[0] += (
                        (corte_f - registro_horas[0]).total_seconds() / 3600)
                    h_dominicales[1] += (
                        (registro_horas[1] - corte_f).total_seconds() / 3600)
                else:
                    h_dominicales[0] += (
                        (registro_horas[1] - registro_horas[0]).total_seconds() / 3600)
        else:
            if registro_horas[0] < corte_i:
                h_ordinarias[1] += (
                    (corte_i - registro_horas[0]).total_seconds() / 3600)
                if registro_horas[1] <= corte_f:
                    h_ordinarias[0] += (
                        (registro_horas[1] - corte_i).total_seconds() / 3600)
                else:
                    h_ordinarias[0] += (
                        (corte_f - corte_i).total_seconds() / 3600)
                    h_ordinarias[1] += (
                        (registro_horas[1] - corte_f).total_seconds() / 3600)
            else:
                if corte_f < registro_horas[1]:
                    h_ordinarias[0] += (
                        (corte_f - registro_horas[0]).total_seconds() / 3600)
                    h_ordinarias[1] += (
                        (registro_horas[1] - corte_f).total_seconds() / 3600)
                else:
                    h_ordinarias[0] += (
                        (registro_horas[1] - registro_horas[0]).total_seconds() / 3600)
    return [h_ordinarias, h_dominicales, e_ordinarias, e_dominicales]
def combine_dt(date, time):
    return dtime(date.year, date.month, date.day,
                 time.hour, time.minute, time.second)
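A brief hedged sketch of combine_dt, assuming dtime is datetime.datetime here, since it is built from both date and time parts:

from datetime import datetime as dtime, date, time

print(combine_dt(date(2020, 5, 1), time(13, 30, 0)))
# 2020-05-01 13:30:00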
ftp = None
try:
    ftp = ftplib.FTP(ftp_host, user=ftp_user)
    break
except ftplib.error_temp:
    print 'Error establishing FTP connection. Will retry in 5 mins'
    time.sleep(300)

if ftp is None:
    print 'Unable to establish FTP connection. Giving up'
    sys.exit(1)

already_there = []
if os.path.exists(db_file):
    print "Loading 'database' of downloaded datasets"
    already_there = pickle.load(open(db_file, 'rb'))

ymd = dtime(year=year, month=month, day=day)
for dataset in datasets:
    if dataset.startswith("inst"):
        tag = "inst"
    elif dataset.startswith("tavg"):
        tag = "tavg"
    elif dataset.startswith("const"):
        tag = "const"
    else:
        print 'unknown tag'
        sys.exit(3)
    out_dir = os.path.join(output_directory, dataset)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
import json

import six

from .exceptions import (ORMError, InvalidOperation, ColumnError,
                         MissingColumn, InvalidColumnValue, RestrictError)
from .util import (_numeric_keygen, _string_keygen, _many_to_one_keygen,
                   _boolean_keygen, dt2ts, ts2dt, t2ts, ts2t, session,
                   _connect, STRING_INDEX_KEYGENS_STR)

NULL = object()
MODELS = {}
MODELS_REFERENCED = {}
_NUMERIC = (0, 0.0, _Decimal('0'), datetime(1970, 1, 1), date(1970, 1, 1),
            dtime(0, 0, 0))
NO_ACTION_DEFAULT = object()
SKIP_ON_DELETE = object()
ON_DELETE = ('no action', 'restrict', 'cascade', 'set null', 'set default')

six.string_types_ex = six.string_types
if six.PY3:
    six.string_types_ex += (bytes, )


def is_numeric(allowed):
    return any(isinstance(i, allowed) for i in _NUMERIC)


def is_string(allowed):
    allowed = (allowed, ) if isinstance(allowed, type) else allowed
    return any(
def _search_log_for_datetime(log_path, dt, word_count, reverse=False):
    """
    Find the start position of the line containing the datetime nearest to dt.

    :param log_path: the absolute path to the log file
    :param dt: the datetime to be searched
    :param word_count: the number of "words" that represents the datetime in the log
    :param reverse: if False, finds the line with the largest datetime less than dt
                    if True, finds the line with the smallest datetime larger than dt
    """
    ONE_KILOBYTE = 1024
    with open(log_path, 'r') as log:
        lbound = 0
        ubound = os.path.getsize(log_path)
        # the algorithm does not guarantee that one of lbound or ubound is
        # changed in each iteration. We use search_area to keep track of the
        # block size (ubound - lbound) to see if it changes. It is initialized
        # to 0 in order to declare it before use (note 0 is an impossible case)
        search_area = 0
        while ubound > lbound:
            # Do a linear search when the size gets small to avoid the issue
            # of keeping track of the previous line.
            if ubound - lbound < ONE_KILOBYTE:
                return _linear_search_log_for_time(log_path, dt, word_count,
                                                   lbound, ubound, reverse)
            mid = (lbound + ubound) // 2
            log.seek(mid)
            log.readline()  # skip to the end of the line, we will process the next one
            # some lines may not begin with a valid datetime (e.g. exception
            # messages that span multiple lines), hence we loop until we find
            # a line with a valid datetime
            is_valid_dt = False
            while not is_valid_dt:
                if log.tell() == ubound:
                    # we have reached the end of the search block.
                    # This should rarely happen unless there is a really long
                    # line in the log. Do a linear search because we cannot
                    # make further progress
                    return _linear_search_log_for_time(log_path, dt, word_count,
                                                       lbound, ubound, reverse)
                line_start = log.tell()
                line = log.readline()
                timestr = ' '.join(line.split()[:word_count])
                try:
                    log_dt = parser.parse(timestr)  # TODO(tlan) exception management
                    # if the parse does not work, the default time returned is
                    # dtime(0, 0, 0); we ignore this time so it is not included
                    # in the calculation. If a test starts or ends with this
                    # time, this will return a superset of the relevant log
                    # lines of the tests. There may be a chance this affects
                    # validation, but the chance is low enough that it is acceptable
                    if log_dt.time() != dtime(0, 0, 0):
                        is_valid_dt = True
                except:
                    is_valid_dt = False
            # the following conditional block updates the bounds. The line that
            # was just processed is kept within the bounds, because it still
            # may be the line that we are looking for.
            if log_dt > dt:
                ubound = log.tell()
            elif log_dt < dt:
                lbound = line_start
            else:
                # here we have found a matching time, but we must continue
                # searching because there may be multiple lines with matching times!
                if not reverse:
                    ubound = log.tell()
                else:
                    lbound = line_start
            # perform linear search when further progress cannot be made.
            if ubound - lbound == search_area:
                return _linear_search_log_for_time(log_path, dt, word_count,
                                                   lbound, ubound, reverse)
            search_area = ubound - lbound
import json
import warnings

import six

from .exceptions import (ORMError, InvalidOperation, ColumnError,
                         MissingColumn, InvalidColumnValue, RestrictError)
from .util import (_numeric_keygen, _string_keygen, _many_to_one_keygen,
                   _boolean_keygen, dt2ts, ts2dt, t2ts, ts2t, session,
                   _connect)
from django.contrib.gis.geos import Point as GeoPoint
from doordash.driver.routing.route import Route
from rest_framework.utils.encoders import JSONEncoder

NULL = object()
MODELS = {}
_NUMERIC = (0, 0.0, _Decimal('0'), datetime(1970, 1, 1), date(1970, 1, 1),
            dtime(0, 0, 0))
USE_LUA = True
NO_ACTION_DEFAULT = object()
SKIP_ON_DELETE = object()
ON_DELETE = ('no action', 'restrict', 'cascade')


def _restrict(entity, attr, refs):
    name = entity.__class__.__name__
    raise RestrictError(
        "Cannot delete entity %s with pk %s, %i foreign references from %s.%s exist" % (
            name, getattr(entity, entity._pkey), len(refs), name, attr))


def _on_delete(ent):
    '''
    This function handles all on_delete semantics defined on OneToMany columns.
def getDiffTime(self, timeStart):
    # total_seconds() is used rather than .seconds, which silently drops whole days
    timeDiff = int((datetime.now() - timeStart).total_seconds())
    hours, remainder = divmod(timeDiff, 3600)
    minutes, seconds = divmod(remainder, 60)
    # note: dtime cannot represent elapsed times of 24 hours or more
    return dtime(hour=hours, minute=minutes, second=seconds)
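# A quick sanity check of the divmod arithmetic above (values chosen for illustration;
# passing None for self works because getDiffTime never touches it):
start = datetime.now() - timedelta(hours=2, minutes=5, seconds=30)
# 2h05m30s is 7530 seconds: divmod(7530, 3600) -> (2, 330); divmod(330, 60) -> (5, 30)
print(getDiffTime(None, start))  # approximately 02:05:30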
def from_block(self, block):
    # blocks are a one-character flag followed by HHMMSS;
    # a leading '!' appears to mark a missing/unset time
    if block[0] == '!':
        return None
    return dtime(int(block[1:3]), int(block[3:5]), int(block[5:7]))
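# Example inputs for from_block, assuming the flag-plus-HHMMSS layout read off the
# slicing above (the 'T' flag character here is made up for illustration):
print(from_block(None, 'T083015'))  # 08:30:15
print(from_block(None, '!------'))  # None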
def parse_time(self, timeStr): timeList = timeStr.split(":") return dtime(hour=int(timeList[0]), minute=int(timeList[1]), second=int(timeList[2]))
def main(db: DBHelper):
    """The entry point"""
    updates_offset = None  # track last_update_id to use it as offset
    while True:  # infinitely listen to new updates (as long as the script is running)
        try:
            # =============================== Handling Schedule ==============================================
            # Order =      0          1            2            3          4          5          6
            weekdays = ("monday", "tuesday", "wednesday", "thursday", "friday", "saturday", "sunday")
            study_days = (5, 6, 0, 1, 2)
            today = datetime.today().weekday()  # What is today?
            now_in_egypt = (datetime.utcnow() + timedelta(hours=2)).time()  # Cairo time = UTC+2
            before_eight = dtime(7, 59, 45)  # 15 seconds before 8:00 AM
            after_eight = dtime(8, 0, 15)    # 15 seconds after 8:00 AM
            # if the current time is in range (07:59:45 |08:00| 08:00:15) in the morning
            if time_in_range(before_eight, after_eight, now_in_egypt):
                if today in study_days:
                    schedule = db.get_schedule_of(weekdays[today])  # get schedule of today
                    # ================== formatting the message to send
                    msg_schedule_part = ""
                    for idx, entry in enumerate(schedule):
                        msg_schedule_part += str(idx + 1) + '. ' + entry[1] + ' at ' + entry[0] + '\n'
                    msg = "Good morning, \n" \
                          "today is {0} and the schedule is: \n\n" \
                          "{1}".format(weekdays[today].title(), msg_schedule_part)
                    users = db.get_users()  # get list of all users
                    for user in users:
                        if user.active:
                            log.info(f"Sending today's schedule to: {user}")
                            send_message(user.chat_id, msg)  # send today's schedule
                            time.sleep(0.5)  # sleep for .5 second before sending to the next user
            # =============================== Handling Announcements =========================================
            # last_check: a nonlocal var could be used to only check for future announcements every
            # 2 hours, for lower resource consumption.
            # For each announcement:
            #   if ann.done is empty, send it to each active user and mark ann.done = "once"
            #     (the original notes also mention skipping cancelled announcements,
            #      which is not checked here)
            #   if ann.done == "once" and the announcement is less than 24 hours away,
            #     send a reminder and mark ann.done = "twice"
            anns = db.get_announcements()
            for ann in anns:
                if ann.done == "":
                    users = db.get_users()  # get list of all users
                    for user in users:
                        if user.active:
                            log.info(f"Sending announcement: {ann} to: {user}")
                            send_message(user.chat_id, ann.description)
                            time.sleep(0.5)  # sleep for .5 second before sending to the next user
                    ann.done = "once"
                    db.update_announcement(ann.id)
                elif ann.done == "once":
                    # ann.time is stored as "YYYY-MM-DD HH:MM"
                    ann_time = datetime.strptime(ann.time, "%Y-%m-%d %H:%M")
                    if ann_time - datetime.now() < timedelta(hours=24):
                        users = db.get_users()
                        for user in users:
                            if user.active:
                                log.info(f"Sending reminder for announcement: {ann} to: {user}")
                                send_message(user.chat_id, ann.description)
                                time.sleep(0.5)
                        ann.done = "twice"
                        db.update_announcement(ann.id)
            # =============================== Handling incoming messages =====================================
            log.info("getting updates...")
            updates = get_updates(updates_offset)  # get new updates after last handled one
            if "result" in updates:  # to prevent KeyError exception
                if len(updates["result"]) > 0:  # make sure updates list is longer than 0
                    updates_offset = last_update_id(updates) + 1  # to remove handled updates
                    handle_updates(updates["result"], db)  # handle new (unhandled) updates
                else:
                    log.info('no updates to be handled')
            time.sleep(0.5)  # delay the loop a .5 second
        except KeyboardInterrupt:  # exit on Ctrl-C
            log.info("\nquitting...")
            exit(0)
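# time_in_range is used above but not shown in this snippet. A minimal sketch of the
# usual recipe (assuming the (start, end, x) argument order seen in the call above,
# with wrap-around past midnight handled in the else branch):
def time_in_range(start, end, x):
    """Return True if x is in the inclusive range [start, end]."""
    if start <= end:
        return start <= x <= end
    else:  # the range crosses midnight, e.g. 23:50 -> 00:10
        return start <= x or x <= end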
def _from_string(dts):
    # strptime returns a struct_time; fields [3:6] are (tm_hour, tm_min, tm_sec).
    # `format` is expected to be defined in the enclosing scope.
    return dtime(*time.strptime(dts, format)[3:6])
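# For example (the value of `format` is an assumption; the snippet does not show
# where it is defined):
format = "%H:%M:%S"
# time.strptime("08:30:15", "%H:%M:%S")[3:6] -> (8, 30, 15)
print(_from_string("08:30:15"))  # 08:30:15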
def due_date(self):
    if self.confirm_time:
        due_day = (self.confirm_time + timedelta(days=self.meta.due_days)).date()
        # due at the last second of the due day, in the local timezone
        return datetime.combine(due_day, dtime(23, 59, 59)).replace(tzinfo=tzlocal())
    return None
def ts2t(value):
    '''Converts a number of seconds since midnight into a datetime.time object.'''
    hour, value = divmod(value, 3600)
    minute, value = divmod(value, 60)
    second, value = divmod(value, 1)
    # the remaining fraction of a second becomes microseconds
    return dtime(*map(int, [hour, minute, second, value * 1000000]))
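# A worked example of the divmod chain above, for illustration:
# 45296.5 seconds = 12h 34m 56.5s:
#   divmod(45296.5, 3600) -> (12.0, 2096.5)
#   divmod(2096.5, 60)    -> (34.0, 56.5)
#   divmod(56.5, 1)       -> (56.0, 0.5), and 0.5 * 1000000 = 500000 microseconds
print(ts2t(45296.5))  # 12:34:56.500000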
slack_client = SlackClient(SLACK_BOT_TOKEN)  # instantiate Slack & Twilio clients
channel = "C46UVV43H"  # TODO: get channel ID from API

# Google related information
gmaps = googlemaps.Client(key=GOOGLE_API_KEY)  # Instantiate gmaps

# Citymapper API related information
CM_ENDPOINT = "https://developer.citymapper.com/api/1/traveltime/?"
requests_headers = {
    'user-agent': 'curl/7.47.0',
    'Content-Type': 'application/json'
}

# Timer related information
starting_hour = dtime(6, 0)
ending_hour = dtime(6, 25)

# Destination related information for the timer task
departure_addr = 'Ekino'
arrival_addr = 'Jouy en josas'


########################
#
# Google Maps related methods
#
########################


def get_addr_from_coordonates(coordinates):
    '''
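# starting_hour/ending_hour above presumably gate when the timer task runs. A minimal
# sketch of such a check (in_timer_window is a made-up name for illustration; the real
# module may do this differently):
from datetime import datetime

def in_timer_window(now=None):
    '''Return True between starting_hour and ending_hour (06:00-06:25).'''
    now = now or datetime.now().time()
    return starting_hour <= now <= ending_hour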
def balances(self, account=None, date=None, hash=None, from_date=None,
             to_date=None, pre_booked=False):
    if account:
        account = self.__session().merge(account)
        if date:
            # get balance of a specific date: start from the nearest stored balance
            # and add/subtract the transactions between the two dates.
            # Note: `==` in the SQL below is SQLite's alias for `=`.
            sql = text("""
                SELECT base_value + IFNULL(delta_value, 0), currency FROM (
                    SELECT
                        b.value AS base_value,
                        b.sign_delta_date * SUM(t.value) AS delta_value,
                        b.currency
                    FROM (
                        SELECT
                            id, account_id, date, target_date, value, currency,
                            ABS(target_date - date) AS delta_date,
                            ((target_date >= date) * 2 - 1) AS sign_delta_date
                        FROM balances
                        JOIN (SELECT :target_date AS target_date)
                        ON account_id=:account_id
                        ORDER BY delta_date ASC, id DESC
                        LIMIT 1
                    ) AS b
                    LEFT OUTER JOIN transactions AS t
                    ON t.account_id = b.account_id AND (
                        (b.sign_delta_date == 1 AND t.date > b.date AND t.date <= b.target_date) OR
                        (b.sign_delta_date == -1 AND t.date <= b.date AND t.date > b.target_date)
                    ));""")
            # the target timestamp is the last second of the requested day
            d = int(time.mktime(datetime.combine(
                date, dtime(hour=23, minute=59, second=59, tzinfo=None)).timetuple()))
            b = self.__session().execute(sql, {
                'target_date': d,
                'account_id': account.id
            }).fetchall()[0]
            return Balance(account=account, date=date, value=b[0], currency=b[1])
        elif from_date and to_date:
            # get daily balances of a date range (one query per day)
            d = from_date
            delta = timedelta(days=1)
            balances = []
            while d <= to_date:
                balances.append(self.balances(account=account, date=d))
                d += delta
            return balances
        elif pre_booked:
            # get pre-booked balance
            return self.balances(account=account, date=account.transactions[0].date)
        else:
            # get most recent balance available
            return self.__session().query(Balance) \
                .filter(Balance.account_id == account.id) \
                .order_by(Balance.date.desc()) \
                .limit(1).first()
    elif hash:
        return self.__session().query(Balance) \
            .filter(Balance.hash == hash) \
            .order_by(Balance.date.desc()) \
            .limit(1).first()
    else:
        raise ValueError("No account, date, date range or hash given.")
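# Example usage of balances(), assuming an object `ledger` exposing this method and
# an `acct` entity attached to the same session (names here are illustrative only):
#   ledger.balances(account=acct)                               # most recent balance
#   ledger.balances(account=acct, date=date(2017, 3, 1))        # end-of-day balance
#   ledger.balances(account=acct, from_date=date(2017, 3, 1),
#                   to_date=date(2017, 3, 31))                  # one Balance per day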