def test_clock_update_next_time(self):
    # Set the current time
    now = datetime.datetime.combine(
        self.trading_date,
        datetime.time(23, 59, 58, tzinfo=tz.tzlocal())
    )
    self.clock_engine.reset_now(now)
    # Before triggering, register the moment event
    moment = datetime.time(23, 59, 59, tzinfo=tz.tzlocal())
    cmh = ClockMomentHandler(self.clock_engine, 'test', moment)
    # Confirm it has not triggered yet
    self.assertFalse(cmh.is_active())
    # Move the clock to the trigger time
    now = datetime.datetime.combine(
        self.trading_date,
        datetime.time(23, 59, 59, tzinfo=tz.tzlocal())
    )
    self.clock_engine.reset_now(now)
    # Confirm it triggered
    self.assertTrue(cmh.is_active())
    # Update the next trigger time
    cmh.update_next_time()
    # Confirm it is no longer triggered
    self.assertFalse(cmh.is_active())
def test_normalize_tz(self):
    rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='US/Eastern')
    result = rng.normalize()
    expected = date_range('1/1/2000', periods=10, freq='D', tz='US/Eastern')
    self.assert_(result.equals(expected))
    self.assert_(result.is_normalized)
    self.assert_(not rng.is_normalized)

    rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC')
    result = rng.normalize()
    expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')
    self.assert_(result.equals(expected))
    self.assert_(result.is_normalized)
    self.assert_(not rng.is_normalized)

    from dateutil.tz import tzlocal
    rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal())
    result = rng.normalize()
    expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())
    self.assert_(result.equals(expected))
    self.assert_(result.is_normalized)
    self.assert_(not rng.is_normalized)
def pretty_date(timestamp):
    """ return a human readable date for the UTC time tuple specified """
    lt = timestamp.replace(tzinfo=tzutc()).astimezone(tzlocal())
    lnow = datetime.now(tzlocal())
    tdiff = lnow - lt
    time = lt.strftime("%I:%M%p")
    time = time.lstrip('0').replace(':00', '')
    if tdiff < timedelta(days=1) and lt.day == lnow.day:
        return time
    yesterday = lnow - timedelta(days=1)
    if tdiff < timedelta(days=2) and lt.day == yesterday.day:
        return 'yesterday at ' + time
    if tdiff < timedelta(days=7):
        return lt.strftime('%A at ') + time
    if lt.year == lnow.year:
        return lt.strftime('%a, %b %d at ') + time
    return lt.strftime('%a, %b %d %Y at ') + time
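# A hedged usage sketch for pretty_date above: it expects a naive UTC datetime
# and renders it in the local zone (illustrative values; assumes the function's
# own imports -- datetime, timedelta, tzutc, tzlocal -- are in scope).
from datetime import datetime, timedelta

five_minutes_ago = datetime.utcnow() - timedelta(minutes=5)
print(pretty_date(five_minutes_ago))   # e.g. "3:41PM" for a same-day timestamp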
def get_all_pull_requests(r):
    '''Gets all pull requests for the repo r, regardless of whether they are
    open or closed'''
    pull_requests = {}
    for p in r.get_pulls(state='all'):
        created_at_ist = p.created_at.replace(tzinfo=tzutc()).astimezone(tzlocal()).ctime()
        if p.closed_at is not None:
            closed_at_ist = p.closed_at.replace(tzinfo=tzutc()).astimezone(tzlocal()).ctime()
        else:
            closed_at_ist = None
        pull_requests[p.id] = {
            'user': p.user.name,
            'username': p.user.login,
            'created_at': created_at_ist,
            'closed_at': closed_at_ist,
            'additions': p.additions,
            'deletions': p.deletions,
            'changed_files': p.changed_files,
            'review_comments': p.review_comments,
            'merged': p.is_merged()
        }
        if p.user.name is None or p.user.name.strip() == '':
            pull_requests[p.id]['user'] = p.user.login
    return pull_requests
def test_normalize_tz(self):
    rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
    result = rng.normalize()
    expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
    self.assertTrue(result.equals(expected))
    self.assertTrue(result.is_normalized)
    self.assertFalse(rng.is_normalized)

    rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
    result = rng.normalize()
    expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
    self.assertTrue(result.equals(expected))
    self.assertTrue(result.is_normalized)
    self.assertFalse(rng.is_normalized)

    from dateutil.tz import tzlocal
    rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
    result = rng.normalize()
    expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
    self.assertTrue(result.equals(expected))
    self.assertTrue(result.is_normalized)
    self.assertFalse(rng.is_normalized)
def parse_timestamp(value):
    """Parse a timestamp into a datetime object.

    Supported formats:

        * iso8601
        * rfc822
        * epoch (value is an integer)

    This will return a ``datetime.datetime`` object.

    """
    if isinstance(value, (int, float)):
        # Possibly an epoch time.
        return datetime.datetime.fromtimestamp(value, tzlocal())
    else:
        try:
            return datetime.datetime.fromtimestamp(float(value), tzlocal())
        except (TypeError, ValueError):
            pass
    try:
        # In certain cases, a timestamp marked with GMT can be parsed into a
        # different time zone, so here we provide a context which will
        # enforce that GMT == UTC.
        return dateutil.parser.parse(value, tzinfos={'GMT': tzutc()})
    except (TypeError, ValueError) as e:
        raise ValueError('Invalid timestamp "%s": %s' % (value, e))
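# Hedged usage sketch for parse_timestamp: one call per format named in the
# docstring, assuming the function and its imports (datetime, dateutil.parser,
# tzlocal, tzutc) are available as above.
print(parse_timestamp(0))                                 # epoch -> 1970-01-01 in the local zone
print(parse_timestamp('2014-03-01T12:00:00Z'))            # iso8601 -> aware datetime (UTC)
print(parse_timestamp('Sat, 01 Mar 2014 12:00:00 GMT'))   # rfc822, with GMT pinned to UTC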
def mark_month(self, calendar_window):
    """ Marks every date that has an entry this month. """
    # Prepare first and last date in this month, a bit awkward:
    # Get current month from the calendar widget (the widget's month is zero-based):
    year, month, day = calendar_window.get_date()
    start_date = datetime(year, month + 1, 1, tzinfo=tzlocal())
    tmp_date = start_date + timedelta(days=31)
    end_date = datetime(tmp_date.year, tmp_date.month, 1, tzinfo=tzlocal()) - timedelta(days=1)
    # Freeze to reduce flicker:
    calendar_window.freeze()
    calendar_window.clear_marks()
    # Find all entries and mark them (a plain loop; a lazy map() would never
    # run its side effects on Python 3):
    for calendar in self.cache.load_all():
        for entry in calendar.entries:
            for e in entry.get_events_starting_between(start_date, end_date):
                calendar_window.mark_day(e.time.day)
    calendar_window.thaw()
def test_normalize_tz(self):
    rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='US/Eastern')
    result = rng.normalize()
    expected = date_range('1/1/2000', periods=10, freq='D', tz='US/Eastern')
    tm.assert_index_equal(result, expected)
    assert result.is_normalized
    assert not rng.is_normalized

    rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC')
    result = rng.normalize()
    expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')
    tm.assert_index_equal(result, expected)
    assert result.is_normalized
    assert not rng.is_normalized

    rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal())
    result = rng.normalize()
    expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())
    tm.assert_index_equal(result, expected)
    assert result.is_normalized
    assert not rng.is_normalized
def test_timezone_aware_parser(self):
    """
    Test the timezone_aware_parser method with different string formats
    """
    # test case 1: string with timezone info
    tz_string = '2009/05/13 19:19:30 -0400'
    result = load_datetime_tz(tz_string)
    assert result.tzinfo == tzoffset(None, -14400)

    # test case 2: string with timezone info in a different format
    tz_string = '2004-04-09T21:39:00-08:00'
    result = load_datetime_tz(tz_string)
    assert result.tzinfo == tzoffset(None, -28800)

    # test case 3: string without timezone info,
    # expecting tzlocal() as timezone
    tz_string = str(datetime.now())
    result = load_datetime_tz(tz_string)
    assert result.tzinfo == tzlocal()

    # test case 4: string with a wrong timezone format,
    # expecting tzlocal() as timezone
    tz_string = '16:08:12 05/08/03 AEST'
    result = load_datetime_tz(tz_string)
    assert result.tzinfo == tzlocal()
def getRealTimeProduct():
    try:
        currentfetchTime = datetime.datetime.now(tzlocal())
        lastFetchTime = getLastFetchTime()
        lastFetchTime = parser.parse(lastFetchTime)
        print lastFetchTime, currentfetchTime
        conn = pyodbc.connect('DSN=Newproject')
        cursor = conn.cursor()
        es = Elasticsearch(['http://52.4.182.180/es/'], connection_class=RC)
        fetchTime = datetime.datetime.now(tzlocal())
        # print sql_cmd
        cursor.execute(sql_cmd, [lastFetchTime, currentfetchTime])
        columns = [column[0] for column in cursor.description]
        rows = cursor.fetchall()
        for row in rows:
            # print rows
            result = dict(zip(columns, row))
            result['ORD_DATE'] = result['ORD_DATE'].replace(tzinfo=to_zone)
            res = es.index(index="pez", doc_type='MS_PE_KORDER_TODAY', body=result)
            # print result
            print(res)
            # results.append(dict(zip(columns, row)))
        updateLastFetchTime(currentfetchTime)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
def should_run(self, now, last_run):
    if last_run == datetime.min:
        return True
    local_now = now + tzlocal().utcoffset(now)
    local_last_run = last_run + tzlocal().utcoffset(last_run)
    return local_now.hour == self.hour and local_last_run.date() != local_now.date()
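# A minimal sketch of the scheduling rule above: fire once per local calendar
# day at a fixed hour. Times are naive UTC and are shifted into local wall time
# with tzlocal().utcoffset(), exactly as should_run does (names here are
# illustrative, not part of the original class).
from datetime import datetime
from dateutil.tz import tzlocal

def same_local_day(now_utc, last_run_utc):
    local_now = now_utc + tzlocal().utcoffset(now_utc)
    local_last = last_run_utc + tzlocal().utcoffset(last_run_utc)
    return local_now.date() == local_last.date()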
def test_read(self):
    for it in TestDatabase.read_db('todo'):
        if it.summary == 'only summary':
            self.assertIsNone(it.dtstart)
            self.assertIsNone(it.status)
            self.assertIsNone(it.completed_at)
            self.assertFalse(it.is_completed)
        elif it.summary == 'with dtstart':
            self.assertEqual(datetime(2016, 1, 1, hour=1, tzinfo=tzlocal()), it.dtstart)
            self.assertIsNone(it.status)
            self.assertIsNone(it.completed_at)
            self.assertFalse(it.is_completed)
        elif it.summary == 'in process':
            self.assertIsNone(it.dtstart)
            self.assertEqual('IN-PROCESS', it.status)
            self.assertIsNone(it.completed_at)
            self.assertFalse(it.is_completed)
        elif it.summary == 'completed':
            self.assertIsNone(it.dtstart)
            self.assertEqual('COMPLETED', it.status)
            self.assertEqual(datetime(2016, 1, 1, tzinfo=tzlocal()), it.completed_at)
            self.assertTrue(it.is_completed)
        elif it.summary == 'cancelled':
            self.assertIsNone(it.dtstart)
            self.assertEqual('CANCELLED', it.status)
            self.assertIsNone(it.completed_at)
            self.assertTrue(it.is_completed)
        else:
            self.fail('unknown item')
def _run(self):
    total_searched_streams = set()
    while True:
        try:
            event_batch = self.queue.get(False)
            for event in event_batch.events:
                revised_event = event.copy()
                if event.get('timestamp') is not None:
                    revised_event['timestamp'] = \
                        datetime.fromtimestamp(event['timestamp'] / 1000.0, tzlocal())
                if event.get('ingestionTime') is not None:
                    revised_event['ingestionTime'] = \
                        datetime.fromtimestamp(event['ingestionTime'] / 1000.0, tzlocal())
                print_stdout(self.output_format.format(**revised_event))
            if event_batch.searched_log_streams:
                for searched_log_stream in event_batch.searched_log_streams:
                    total_searched_streams.add(searched_log_stream.get('logStreamName'))
                print_stderr("Searching for more data...")
        except Queue.Empty:
            if self.stop_flag.is_set():
                print_stderr('{0} log streams searched in log group {1}:'.format(
                    len(total_searched_streams), event_batch.log_group_name))
                print_stderr(', '.join(sorted(total_searched_streams)))
                logger.debug('Renderer is leaving...')
                break
            else:
                self.stop_flag.wait(0.1)
def getLocalTimeZone( ):
    if 'time_zone' in g.userVariables:
        return pytz.timezone( g.userVariables[ 'time_zone' ] )
    elif tz.tzlocal( ) is None:
        return pytz.timezone( 'US/Eastern' )
    else:
        return tz.tzlocal( )
def test_three_args_with_tzinfo(self):
    timefmt = 'YYYYMMDD'
    d = '20150514'
    assertEqual(self.factory.get(d, timefmt, tzinfo=tz.tzlocal()),
                datetime(2015, 5, 14, tzinfo=tz.tzlocal()))
def on_notice(self, source, target, message):
    if source.nick.lower() in ("duality", "jawsper", "lock-o-matic"):
        if message in ("We are open", "We are closed"):
            space_open = message == "We are open"
            self.set_space_status(
                "1" if space_open else "0",
                datetime.now().replace(tzinfo=tzlocal()),
                "Lock-O-Matic"
            )
            return
        result = re.search("^(.+) entered the space", message)
        if result:
            nick = result.group(1)
            self.__led_welcome(nick)
        elif "TkkrLab" in message:
            result = re.search(":\s+(?P<status>[a-z]+)\s*@\s*(?P<datetime>.*)$", message)
            if result:
                status_bool = result.group("status") == "open"
                status_time = dateutil.parser.parse(
                    result.group("datetime"), dayfirst=True
                ).replace(tzinfo=tzlocal())
                space_open = self.status.open
                space_time = self.status.time
                if (space_open != status_bool or not space_time
                        or abs((space_time - status_time).total_seconds()) > 100):
                    logging.info("Space status too different from Lock-O-Matic status, updating own status")
                    self.set_space_status("1" if status_bool else "0", status_time, "Lock-O-Matic")
def find_next_show():
    try:
        r = requests.get(u'http://theradio.cc/?feed=eo-events')
    except:
        return None, None
    cal = icalendar.Calendar.from_ical(r.text)
    next_event = None
    delta_min = None
    time_min = None
    for event in cal.walk('vevent'):
        count, dt = get_reoccurences(event)
        time = event.get('dtstart').dt.astimezone(tz.tzlocal())
        current_time = datetime.datetime.now(tz.tzlocal())
        while count > 0:
            if time > current_time:
                delta = time - current_time
                if not delta_min:
                    delta_min = delta
                    next_event = event
                    time_min = time
                elif delta < delta_min:
                    delta_min = delta
                    next_event = event
                    time_min = time
            time += dt
            count -= 1
    return next_event, time_min
def printPrettyDate(T):
    """ Format output of a timestamp. If a small amount of time has elapsed
    between *T_now* and *T*, then return the interval. **TODO:** This should
    be localized based on the HTTP request.

    :param T: Timestamp.
    :type T: datetime.datetime
    :rtype: str
    """
    T = T.astimezone(tz.tzlocal())
    T_elapsed = datetime.datetime.now(tz=tz.tzlocal()) - T
    if T_elapsed < datetime.timedelta(seconds=30):
        return "just now"
    elif T_elapsed < datetime.timedelta(minutes=1):
        return "%s seconds ago" % (T_elapsed.seconds)
    elif T_elapsed < datetime.timedelta(hours=1):
        # Integer division keeps the counts whole on both Python 2 and 3.
        return "%s minute%s ago" % (T_elapsed.seconds // 60,
                                    '' if T_elapsed.seconds // 60 == 1 else 's')
    elif T_elapsed < datetime.timedelta(hours=24):
        return "%s hour%s ago" % (T_elapsed.seconds // 3600,
                                  '' if T_elapsed.seconds // 3600 == 1 else 's')
    elif T_elapsed < datetime.timedelta(days=7):
        return "%s day%s ago" % (T_elapsed.days, '' if T_elapsed.days == 1 else 's')
    else:
        return "%s %s %s" % (T.day, monthOf[T.month - 1], T.year)
def get_times(service, cals):
    ftl = []  # Free Time List
    btl = []  # Busy Time List
    time_min = flask.session["begin_date"]
    time_max = arrow.get(flask.session["end_date"]).replace(hours=+24, seconds=-1).isoformat()

    # Iterate through selected calendars
    for cal in cals:
        # Get the items from google
        cal_items = service.events().list(calendarId=cal, timeMin=time_min,
                                          timeMax=time_max, singleEvents=True).execute()['items']
        for item in cal_items:
            # All-day events carry a "date" instead of a "dateTime"
            try:
                t_start = item["start"]["dateTime"]
            except KeyError:
                t_start = arrow.get(item["start"]["date"], "YYYY-MM-DD").replace(
                    tzinfo=tz.tzlocal()).isoformat()
            try:
                t_end = item["end"]["dateTime"]
            except KeyError:
                t_end = arrow.get(item["end"]["date"], "YYYY-MM-DD").replace(
                    tzinfo=tz.tzlocal()).isoformat()
            item_range = {"start": t_start, "end": t_end, "desc": item["summary"]}
            if "transparency" in item and item["transparency"] == "transparent":
                ftl.append(item_range)
            else:
                btl.append(item_range)
    return ftl, btl
def get_busy_dict_11_test():
    """ Tests sequential one day events during the interval. """
    begin_date = arrow.get().replace(
        tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0,
        day=16, month=11, year=2015)
    end_date = arrow.get().replace(
        tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0,
        day=20, month=11, year=2015)
    events = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},
               'end': {'dateTime': '2015-11-16T10:00:00-08:00'}},
              {'start': {'dateTime': '2015-11-16T10:00:00-08:00'},
               'end': {'dateTime': '2015-11-16T11:00:00-08:00'}}]
    busy = {'2015-11-16T09:00:00-08:00':
                {'start': {'dateTime': '2015-11-16T09:00:00-08:00'},
                 'end': {'dateTime': '2015-11-16T10:00:00-08:00'}},
            '2015-11-16T10:00:00-08:00':
                {'start': {'dateTime': '2015-11-17T10:00:00-08:00'},
                 'end': {'dateTime': '2015-11-17T11:00:00-08:00'}}}
    busy_test = get_busy_dict(events, begin_date, end_date)
    for event in busy_test:
        assert event in busy
def get_free_times_10_test():
    """ Tests a complex busy schedule. """
    begin_date = arrow.get().replace(
        tzinfo=tz.tzlocal(), hour=9, minute=0, second=0, microsecond=0,
        day=16, month=11, year=2015)
    end_date = arrow.get().replace(
        tzinfo=tz.tzlocal(), hour=17, minute=0, second=0, microsecond=0,
        day=20, month=11, year=2015)
    busy = [{'start': {'dateTime': '2015-11-16T09:00:00-08:00'},
             'end': {'dateTime': '2015-11-18T11:00:00-08:00'}},
            {'start': {'dateTime': '2015-11-19T09:00:00-08:00'},
             'end': {'dateTime': '2015-11-19T13:00:00-08:00'}},
            {'start': {'dateTime': '2015-11-19T15:00:00-08:00'},
             'end': {'dateTime': '2015-11-19T16:00:00-08:00'}},
            {'start': {'dateTime': '2015-11-20T16:00:00-08:00'},
             'end': {'dateTime': '2015-11-20T17:00:00-08:00'}}]
    free = [('2015-11-18T11:00:00-08:00', '2015-11-18T17:00:00-08:00'),
            ('2015-11-19T13:00:00-08:00', '2015-11-19T15:00:00-08:00'),
            ('2015-11-19T16:00:00-08:00', '2015-11-20T16:00:00-08:00')]
    assert free == get_free_times(busy, begin_date, end_date)
def _add_programme(self, channelName, programmeNode, doCommit=False):
    title_node = programmeNode.find("./title")
    subtitle_node = programmeNode.find("./sub-title")
    description_node = programmeNode.find("desc")
    episodenum_node = programmeNode.find("episode-num")
    serie = False
    categories = []
    for category in programmeNode.findall("./category"):
        category_text = category.text.title()
        if category_text == 'serie'.title():
            serie = True
        else:
            # Only add the category when it is not 'Serie', as series are handled in a dedicated field
            categories.append(category_text)
    title = title_node.text
    subtitle = subtitle_node.text if subtitle_node is not None else ''
    description = description_node.text if description_node is not None else ''
    episode_number = episodenum_node.text if episodenum_node is not None else ''
    # astimezone handles cases when EPG dates are in UTC; it also handles offsets like +0200 nicely
    start_time = parse(programmeNode.attrib['start']).astimezone(tz=tzlocal())
    stop_time = parse(programmeNode.attrib['stop']).astimezone(tz=tzlocal())
    duration = timestamp(stop_time) - timestamp(start_time)
    channel = self._channel_from_name(channelName)
    prg = Programme(channel, title, subtitle, description, start_time, stop_time,
                    duration, episode_num=episode_number, series=serie,
                    categories=categories)
    prg.add(doCommit)
def register_clock_moment_makeup(self, makeup):
    begin = datetime.datetime.combine(
        self.trade_date,
        datetime.time(23, 59, 59, tzinfo=tz.tzlocal())
    )
    time.time = mock.Mock(return_value=begin.timestamp())
    # Register a moment event whose time has already passed
    moment = datetime.time(0, 0, 0, tzinfo=tz.tzlocal())
    self.clock_engine.register_moment('test', moment, makeup=makeup)
    self.test_active = False

    def clock(event):
        # Record the makeup (re-issued) event
        if event.data.clock_event == 'test':
            self.test_active = True

    self.main_engine.event_engine.register(ClockEngine.EventType, clock)
    # Start the event engine
    self.main_engine.event_engine.start()
    self.clock_engine.tock()
    time.sleep(0.1)
    self.main_engine.event_engine.stop()
    # Confirm whether the makeup event fired
    self.assertEqual(self.test_active, makeup)
def onActiveTraceChanged(self):
    """Display trace creation/finalized date/time when a trace is selected"""
    nodes = self.traceView.selectedNodes()
    dataNode = self.model.getFirstDataNode(nodes[0]) if nodes else None
    if dataNode:
        description = dataNode.content.traceCollection.description
        traceCreation = description.get("traceCreation")
        traceFinalized = description.get("traceFinalized")
        if traceCreation:
            traceCreationLocal = traceCreation.astimezone(tzlocal())  # use local time
            self.createdDateLabel.setText(traceCreationLocal.strftime('%Y-%m-%d'))
            self.createdTimeLabel.setText(traceCreationLocal.strftime('%H:%M:%S'))
        else:
            self.createdDateLabel.setText('')
            self.createdTimeLabel.setText('')
        if traceFinalized:
            traceFinalizedLocal = traceFinalized.astimezone(tzlocal())  # use local time
            self.finalizedDateLabel.setText(traceFinalizedLocal.strftime('%Y-%m-%d'))
            self.finalizedTimeLabel.setText(traceFinalizedLocal.strftime('%H:%M:%S'))
        else:
            self.finalizedDateLabel.setText('')
            self.finalizedTimeLabel.setText('')
    else:
        self.createdDateLabel.setText('')
        self.createdTimeLabel.setText('')
        self.finalizedDateLabel.setText('')
        self.finalizedTimeLabel.setText('')
def test_clock_update_next_time(self):
    # Set the current time
    now = datetime.datetime.combine(
        self.trade_date,
        datetime.time(23, 59, 58, tzinfo=tz.tzlocal())
    )
    time.time = mock.Mock(return_value=now.timestamp())
    # Before triggering, register the moment event
    moment = datetime.time(23, 59, 59, tzinfo=tz.tzlocal())
    cmh = ClockMomentHandler(self.clock_engine, 'test', moment)
    # Confirm it has not triggered yet
    self.assertFalse(cmh.is_active())
    # Move the mocked system time to the trigger time
    now = datetime.datetime.combine(
        self.trade_date,
        datetime.time(23, 59, 59, tzinfo=tz.tzlocal())
    )
    time.time = mock.Mock(return_value=now.timestamp())
    # Confirm it triggered
    self.assertTrue(cmh.is_active())
    # Update the next trigger time
    cmh.update_next_time()
    # Confirm it is no longer triggered
    self.assertFalse(cmh.is_active())
def get_delta_days_for_trending(request, collection):
    todays_date = datetime.now(tz.tzlocal())
    start_date = request.json.get('%s-start' % QUERY_FIELD)
    end_date = request.json.get('%s-end' % QUERY_FIELD)
    if start_date and end_date:
        delta = parser.parse(end_date) - parser.parse(start_date)
    elif start_date:
        delta = todays_date - parser.parse(start_date).replace(tzinfo=tz.tzlocal())
    elif end_date:
        first_element = getattr(g.db, collection).find().sort(
            SORT_FIELD, pymongo.ASCENDING).limit(1)
        start_date = first_element[0].get(SORT_FIELD)
        delta = parser.parse(end_date).replace(tzinfo=tz.tzlocal()) - start_date
    else:
        first_element = getattr(g.db, collection).find().sort(
            SORT_FIELD, pymongo.ASCENDING).limit(1)
        start_date = first_element[0].get(SORT_FIELD)
        delta = todays_date - start_date
    return delta.days
def get_status(self):
    import datetime
    result = ""
    first_attendance = Attendance.objects.filter(profile=self.profile, date=self.date).earliest('datetime')
    is_first = first_attendance == self
    current_datetime = self.datetime.astimezone(tz.tzlocal())
    first_datetime = first_attendance.datetime.astimezone(tz.tzlocal())
    try:
        attend_time = self.profile.attendancemanager.get_attend_time()
        leave_time = self.profile.attendancemanager.get_leave_time()
    except:
        attend_time = first_datetime.time()
        leave_time = (datetime.datetime(2000, 1, 1, attend_time.hour, attend_time.minute, attend_time.second)
                      + datetime.timedelta(hours=1)).time()
    # All four branches return the same message in the source as given.
    if is_first and current_datetime.time() <= attend_time:
        result = u"Checked in."
    elif is_first and current_datetime.time() > attend_time:
        result = u"Checked in."
    elif not is_first and current_datetime.time() < leave_time:
        result = u"Checked in."
    elif not is_first and current_datetime.time() >= leave_time:
        result = u"Checked in."
    return result
def testResampleData(self):
    # test upsampling by a factor of 2
    timeStamps = numpy.array([
        numpy.datetime64(datetime.datetime(2000, 1, 1, tzinfo=tz.tzlocal())
                         + datetime.timedelta(hours=i)) for i in xrange(8)])
    values = numpy.linspace(0, 7, 8)
    newSamplingInterval = numpy.timedelta64(1800, 's')
    (newTimeStamps, newValues) = param_finder._resampleData(timeStamps, values, newSamplingInterval)
    trueNewTimeStamps = numpy.array([
        numpy.datetime64(datetime.datetime(2000, 1, 1, tzinfo=tz.tzlocal())
                         + datetime.timedelta(hours=0.5 * i)) for i in xrange(15)])
    self.assertTrue(numpy.allclose(newValues, numpy.linspace(0, 7, 15)))
    self.assertAlmostEqual(numpy.sum(newTimeStamps - trueNewTimeStamps), 0)

    # test down-sampling by a factor of 2
    newSamplingInterval = numpy.timedelta64(7200, 's')
    (newTimeStamps, newValues) = param_finder._resampleData(timeStamps, values, newSamplingInterval)
    trueNewTimeStamps = numpy.array([
        numpy.datetime64(datetime.datetime(2000, 1, 1, tzinfo=tz.tzlocal())
                         + datetime.timedelta(hours=2 * i)) for i in xrange(4)])
    self.assertTrue(numpy.allclose(newValues, numpy.linspace(0, 6, 4)))
    self.assertAlmostEqual(numpy.sum(newTimeStamps - trueNewTimeStamps), 0)
def create_admin(email, password):
    try:
        admin = Users()
        admin.email = email
        admin.password = encrypt_password(password)
        admin.active = True
        admin.confirmed_at = datetime.now(tzlocal())
        db.session.add(admin)
        db.session.commit()
    except IntegrityError:
        db.session.rollback()
        admin = db.session.query(Users).filter(Users.email == email).one()
        admin.password = encrypt_password(password)
        admin.confirmed_at = datetime.now(tzlocal())
        db.session.commit()
        print('Password reset for {0}'.format(email))
    try:
        administrator = Roles()
        administrator.name = 'administrator'
        administrator.description = 'administrator'
        db.session.add(administrator)
        db.session.commit()
    except IntegrityError:
        db.session.rollback()
        administrator = (db.session.query(Roles)
                         .filter(Roles.name == 'administrator').one())
    admin.roles.append(administrator)
    db.session.commit()
def __init__(self, first_weekday=0, timezone=None):
    """Creates a Calendar object for providing date/time paths and for
    relative date/time manipulation.

    Values for `first_weekday` are 0 for Monday, 6 for Sunday. Default is 0."""

    if isinstance(first_weekday, compat.string_type):
        try:
            self.first_weekday = _WEEKDAY_NUMBERS[first_weekday.lower()]
        except KeyError:
            raise ConfigurationError("Unknown weekday name %s" % first_weekday)
    else:
        value = int(first_weekday)
        if value < 0 or value >= 7:
            raise ConfigurationError("Invalid weekday number %s" % value)
        self.first_weekday = value

    if timezone:
        self.timezone_name = timezone
        self.timezone = gettz(timezone) or tzstr(timezone)
    else:
        self.timezone_name = datetime.now(tzlocal()).tzname()
        self.timezone = tzlocal()
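# Sketch of the timezone lookup used above: gettz() resolves Olson names from
# the system database and returns None for strings it cannot resolve, in which
# case tzstr() is given a chance to parse a POSIX-style TZ specification.
from dateutil.tz import gettz, tzstr

name = 'Europe/Prague'            # hypothetical config value
zone = gettz(name) or tzstr(name)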
    p.add_argument('--time-offset',
                   help='Time offset between GPX and photos. If your camera is ahead by one minute, time_offset is 60.',
                   default=0, type=float)
    p.add_argument('--interval',
                   help='Time between shots. Used to set image times with sub-second precision',
                   type=float, default=0.0)
    p.add_argument('--bearing-offset',
                   help='Direction of the camera in degrees, relative to the direction of travel',
                   type=float, default=0.0)
    return p.parse_args()


if __name__ == '__main__':
    args = get_args()

    now = datetime.datetime.now(tzlocal())
    print("Your local timezone is {0}, if this is not correct, your geotags will be wrong.".format(
        now.strftime('%Y-%m-%d %H:%M:%S %Z')))

    if args.path.lower().endswith(".jpg"):
        # single file
        file_list = [args.path]
    else:
        # folder(s)
        file_list = []
        for root, sub_folders, files in os.walk(args.path):
            files.sort()
            file_list += [os.path.join(root, filename) for filename in files
                          if filename.lower().endswith(".jpg")]

    # start time
    start_time = time.time()
def get_history(self, timeline):
    history_path = os.path.join(self.wal_dir, '{0:08X}.history'.format(timeline))
    history_mtime = mtime(history_path)
    if history_mtime:
        try:
            with open(history_path, 'r') as f:
                history = f.read()
            history = list(parse_history(history))
            if history[-1][0] == timeline - 1:
                history_mtime = datetime.fromtimestamp(history_mtime).replace(tzinfo=tz.tzlocal())
                history[-1].append(history_mtime.isoformat())
            return history
        except Exception:
            logger.exception('Failed to read and parse %s', (history_path,))
def test_parse_local(self):
    assertEqual(self.parser.parse('local'), tz.tzlocal())
import os
import requests
import simplejson
from time import sleep
from datetime import datetime, timedelta
from dateutil.tz import tzlocal

API_URL = "http://127.0.0.1:8000/api/v1/job/"

data = {
    "name": "test_job",
    "command": "bash " + os.path.dirname(os.path.realpath(__file__)) + "/example-kala-commands/example-command.sh",
    "epsilon": "PT5S",
}

dt = datetime.isoformat(datetime.now(tzlocal()) + timedelta(0, 10))
data["schedule"] = "%s/%s/%s" % ("R2", dt, "PT10S")

if __name__ == "__main__":
    print "Sending request to %s" % API_URL
    print "Payload is: %s" % data

    r = requests.post(API_URL, data=simplejson.dumps(data))
    print "\n\n CREATING \n"
    job_id = simplejson.loads(r.content)['id']
    print "Job was created with an id of %s" % job_id

    print "\n\n GETTING \n"
    m = requests.get(API_URL + job_id)
    print m.content
def parse_dt(dt) -> str:
    dt = parse(dt)
    dt = dt.astimezone(tzlocal()).replace(tzinfo=None)
    return humanize.naturaltime(dt)
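# Hedged usage sketch for parse_dt: the result is relative to run time (assumes
# dateutil.parser.parse bound as `parse` and the humanize package, as above).
print(parse_dt('2020-01-01T00:00:00Z'))   # e.g. "4 years ago", depending on today's date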
def get_timestamp(self):
    return datetime.now(tzlocal()).strftime("%Y-%m-%dT%H:%M:%SZ")
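# Note: the format string above appends a literal 'Z' (the UTC designator) to a
# local-zone time. A sketch that keeps the offset honest instead, if consumers
# of the timestamp care (an assumption, not a required change):
from datetime import datetime
from dateutil.tz import tzlocal

datetime.now(tzlocal()).strftime("%Y-%m-%dT%H:%M:%S%z")   # e.g. ...+0200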
def _date_parser(date_string):
    return parse(date_string).astimezone(tzlocal())
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.

import re
import datetime

from dateutil import tz

from sickbeard import db, helpers, logger
from sickrage.helper.common import try_int

# regex to parse time (12/24 hour format)
time_regex = re.compile(r'(?P<hour>\d{1,2})(?:[:.](?P<minute>\d{2})?)? ?(?P<meridiem>[PA]\.? ?M?)?\b', re.I)

network_dict = {}

try:
    sb_timezone = tz.tzwinlocal() if tz.tzwinlocal else tz.tzlocal()
except Exception:
    sb_timezone = tz.tzlocal()

missing_network_timezones = set()


def update_network_dict():
    """Update timezone information from SR repositories"""
    url = 'http://sickrage.github.io/sb_network_timezones/network_timezones.txt'
    data = helpers.getURL(url, session=helpers.make_session(), returns='text')
    if not data:
        logger.log(u'Updating network timezones failed, this can happen from time to time. URL: {0}'.format(url), logger.WARNING)
        load_network_dict()
        return
def localTimeLiteral(t: datetime.datetime) -> Literal:
    return Literal(t.astimezone(tzlocal()).isoformat(), datatype=ROOM.todo)
def parse_time(cls, _time):
    return parse_datetime(_time).astimezone(tz.tzlocal())
def aware(d):
    "Convert the given datetime into a timezone-aware datetime."
    return datetime.datetime(d.year, d.month, d.day, tzinfo=tzlocal())
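# Usage sketch: aware() stamps only the date portion with the local zone; any
# time-of-day on the input is discarded by construction.
import datetime
from dateutil.tz import tzlocal

d = datetime.datetime(2021, 6, 1, 15, 30)
print(aware(d))   # 2021-06-01 00:00:00 with the local offset attached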
print(f"Loading data from file: {input_path}") # Load input data from Metr with open(input_path, 'r') as input_f: data = json.load(input_f) print(f"Metr app version: {data['appver']} ") print(f"Firmware version: {data['fw']}") record_count = len(data['elapsed']) new_data = [] start_time_utc = datetime.fromtimestamp(data['start'] // 1000, tz=timezone.utc) start_time_local = datetime.fromtimestamp(data['start'] // 1000, tz=tzlocal()) for i in range(0, record_count): elapsed_delta = timedelta(days=0, hours=0, minutes=0, microseconds=data['elapsed'][i]) current_time_utc = start_time_utc + elapsed_delta record = { 'consumption': data['consumption'][i], 'current': data['current'][i], 'duty': data['duty'][i], 'elapsed': data['elapsed'][i], 'elapsedAh': data['elapsedAh'][i], 'elapsedAhRegen': data['elapsedAhRegen'][i], 'elapsedDistance': data['elapsedDistance'][i], 'elapsedWh': data['elapsedWh'][i], "elapsedWhRegen": data['elapsedWhRegen'][i],
def parse_pubdate(pubdate, human_time=False, timezone=None, **kwargs):
    """
    Parse publishing date into a datetime object.

    :param pubdate: date and time string
    :param human_time: string uses human slang ("4 hours ago")
    :param timezone: use a different timezone ("US/Eastern")

    :keyword dayfirst: Interpret the first value as the day
    :keyword yearfirst: Interpret the first value as the year

    :returns: a datetime object or None
    """
    now_alias = ('right now', 'just now', 'now')

    df = kwargs.pop('dayfirst', False)
    yf = kwargs.pop('yearfirst', False)
    fromtimestamp = kwargs.pop('fromtimestamp', False)

    # This can happen from time to time
    if pubdate is None:
        log.debug('Skipping invalid publishing date.')
        return

    try:
        if human_time:
            if pubdate.lower() in now_alias:
                seconds = 0
            else:
                match = re.search(r'(?P<time>[\d.]+\W*)(?P<granularity>\w+)', pubdate)
                matched_time = match.group('time')
                matched_granularity = match.group('granularity')

                # The parse method does not support decimals used with the month,
                # months, year or years granularities.
                if matched_granularity and matched_granularity in (
                        'month', 'months', 'year', 'years'):
                    matched_time = int(round(float(matched_time.strip())))

                seconds = parse('{0} {1}'.format(matched_time, matched_granularity))
                if seconds is None:
                    log.warning('Failed parsing human time: {0} {1}',
                                matched_time, matched_granularity)
                    raise ValueError('Failed parsing human time: {0} {1}'.format(
                        matched_time, matched_granularity))

            return datetime.now(tz.tzlocal()) - timedelta(seconds=seconds)

        if fromtimestamp:
            dt = datetime.fromtimestamp(int(pubdate), tz=tz.gettz('UTC'))
        else:
            day_offset = 0
            if 'yesterday at' in pubdate.lower() or 'today at' in pubdate.lower():
                # Extract a time
                time = re.search(r'(?P<time>[0-9:]+)', pubdate)
                if time:
                    if 'yesterday' in pubdate:
                        day_offset = 1
                    pubdate = time.group('time').strip()

            dt = parser.parse(pubdate, dayfirst=df, yearfirst=yf, fuzzy=True) - timedelta(days=day_offset)

        # Always make UTC aware if naive
        if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
            dt = dt.replace(tzinfo=tz.gettz('UTC'))
        if timezone:
            dt = dt.astimezone(tz.gettz(timezone))

        return dt
    except (AttributeError, TypeError, ValueError):
        log.exception('Failed parsing publishing date: {0}', pubdate)
def get_local_time(time_str):
    """Convert time string into a local timestamp"""
    t = parser.parse(time_str)
    t_local = t.astimezone(tz.tzlocal())
    return time.mktime(t_local.timetuple())
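# Hedged usage sketch for get_local_time: an offset-aware ISO string becomes
# float epoch seconds; mktime() expects a local struct_time, which is why the
# function converts to tzlocal() first.
print(get_local_time('2015-06-01T12:00:00+00:00'))   # e.g. 1433160000.0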
def nice_date_from_utc(timestamp, timezone=tz.tzlocal()):
    return timestamp.replace(tzinfo=tz.tzutc()).astimezone(timezone).strftime('%x %X')
import configparser

import robin_stocks as r
from dateutil import tz
from datetime import datetime

from_zone = tz.tzutc()
to_zone = tz.tzlocal()


def utcToLocal(strDate):
    utc = datetime.strptime(strDate, '%Y-%m-%dT%H:%M:%SZ')
    utc = utc.replace(tzinfo=from_zone)
    return utc.astimezone(to_zone)


config = configparser.RawConfigParser()
configFilePath = '/Users/philipmassey/.tokens/robinhood.cfg'
config.read(configFilePath)
rhuser = config.get('login', 'user')
rhpwd = config.get('login', 'pwd')
login = r.login(rhuser, rhpwd)
import boto3
import datetime
from datetime import tzinfo
from dateutil import tz

client = boto3.client('ec2', region_name='ap-south-1')
snapshots = client.describe_snapshots(OwnerIds=['117377266569'])
delete_days = 90

from_zone = tz.tzlocal()
to_zone = tz.tzutc()

ninty_days_back_date = datetime.datetime.now() - datetime.timedelta(delete_days)
current = ninty_days_back_date.replace(tzinfo=to_zone)

for snapshot in snapshots['Snapshots']:
    try:
        if snapshot['StartTime'] < current:
            id = snapshot['SnapshotId']
            client.delete_snapshot(SnapshotId=id)
            print "deleted- {}".format(id)
    except Exception as err:
        print err, snapshot['SnapshotId']
def nice_utc_date(timestamp, timezone=tz.tzlocal()):
    return timestamp.strftime('%F %T')
def _get_hexo_date(self, *args):
    date = parser.parse(self._get_date(*args))
    if date.tzname():
        date = date.astimezone(tz.tzlocal())
    return date.strftime('%Y-%m-%d %H:%M:%S')
def dt_tolocal(dt):
    dt = dt.replace(tzinfo=tz.tzutc())
    return dt.astimezone(tz.tzlocal())
def retrieveExpiryTime(timing):
    return timing.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
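# The replace-then-astimezone pattern shared by dt_tolocal and
# retrieveExpiryTime: the naive input is asserted to be UTC, then converted to
# local wall time (illustrative value).
import datetime
from dateutil import tz

naive_utc = datetime.datetime(2020, 1, 1, 12, 0)
local = naive_utc.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())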
def export_to_nwb(session_key, nwb_output_dir=default_nwb_output_dir,
                  save=False, overwrite=True):
    this_session = (acquisition.Session & session_key).fetch1()

    # =============== General ====================
    # -- NWB file - a NWB2.0 file for each session
    nwbfile = NWBFile(
        session_description=this_session['session_note'],
        identifier='_'.join([
            this_session['subject_id'],
            this_session['session_time'].strftime('%Y-%m-%d_%H-%M-%S')
        ]),
        session_start_time=this_session['session_time'],
        file_create_date=datetime.now(tzlocal()),
        experimenter='; '.join((acquisition.Session.Experimenter & session_key).fetch('experimenter')),
        institution=institution,
        experiment_description=experiment_description,
        related_publications=related_publications,
        keywords=keywords)

    # -- subject
    subj = (subject.Subject & session_key).fetch1()
    nwbfile.subject = pynwb.file.Subject(
        subject_id=this_session['subject_id'],
        description=subj['subject_description'],
        genotype=' x '.join((subject.Subject.Allele & session_key).fetch('allele')),
        sex=subj['sex'],
        species=subj['species'])

    # =============== Intracellular ====================
    cell = ((intracellular.Cell & session_key).fetch1()
            if intracellular.Cell & session_key else None)
    if cell:
        # metadata
        cell = (intracellular.Cell & session_key).fetch1()
        whole_cell_device = nwbfile.create_device(name=cell['device_name'])
        ic_electrode = nwbfile.create_ic_electrode(
            name=cell['cell_id'],
            device=whole_cell_device,
            description='N/A',
            filtering='low-pass: 10kHz',
            location='; '.join([f'{k}: {str(v)}'
                                for k, v in (reference.ActionLocation & cell).fetch1().items()]))
        # acquisition - membrane potential
        mp, mp_wo_spike, mp_start_time, mp_fs = (intracellular.MembranePotential & cell).fetch1(
            'membrane_potential', 'membrane_potential_wo_spike',
            'membrane_potential_start_time', 'membrane_potential_sampling_rate')
        nwbfile.add_acquisition(
            pynwb.icephys.PatchClampSeries(name='PatchClampSeries',
                                           electrode=ic_electrode,
                                           unit='mV',
                                           conversion=1e-3,
                                           gain=1.0,
                                           data=mp,
                                           starting_time=mp_start_time,
                                           rate=mp_fs))
        # acquisition - current injection
        current_injection, ci_start_time, ci_fs = (intracellular.CurrentInjection & cell).fetch1(
            'current_injection', 'current_injection_start_time',
            'current_injection_sampling_rate')
        nwbfile.add_stimulus(
            pynwb.icephys.CurrentClampStimulusSeries(name='CurrentClampStimulus',
                                                     electrode=ic_electrode,
                                                     conversion=1e-9,
                                                     gain=1.0,
                                                     data=current_injection,
                                                     starting_time=ci_start_time,
                                                     rate=ci_fs))
        # analysis - membrane potential without spike
        mp_rmv_spike = nwbfile.create_processing_module(name='icephys',
                                                        description='Spike removal')
        mp_rmv_spike.add_data_interface(
            pynwb.icephys.PatchClampSeries(name='icephys',
                                           electrode=ic_electrode,
                                           unit='mV',
                                           conversion=1e-3,
                                           gain=1.0,
                                           data=mp_wo_spike,
                                           starting_time=mp_start_time,
                                           rate=mp_fs))

    # =============== Extracellular ====================
    probe_insertion = ((extracellular.ProbeInsertion & session_key).fetch1()
                       if extracellular.ProbeInsertion & session_key else None)
    if probe_insertion:
        probe = nwbfile.create_device(name=probe_insertion['probe_name'])
        electrode_group = nwbfile.create_electrode_group(
            name='; '.join([f'{probe_insertion["probe_name"]}: {str(probe_insertion["channel_counts"])}']),
            description='N/A',
            device=probe,
            location='; '.join([f'{k}: {str(v)}'
                                for k, v in (reference.ActionLocation & probe_insertion).fetch1().items()]))
        for chn in (reference.Probe.Channel & probe_insertion).fetch(as_dict=True):
            nwbfile.add_electrode(id=chn['channel_id'],
                                  group=electrode_group,
                                  filtering=hardware_filter,
                                  imp=-1.,
                                  x=chn['channel_x_pos'],
                                  y=chn['channel_y_pos'],
                                  z=chn['channel_z_pos'],
                                  location=electrode_group.location)

        # --- unit spike times ---
        nwbfile.add_unit_column(name='sampling_rate',
                                description='Sampling rate of the raw voltage traces (Hz)')
        nwbfile.add_unit_column(name='unit_x', description='x-coordinate of this unit (mm)')
        nwbfile.add_unit_column(name='unit_y', description='y-coordinate of this unit (mm)')
        nwbfile.add_unit_column(name='unit_z', description='z-coordinate of this unit (mm)')
        nwbfile.add_unit_column(name='cell_type',
                                description='cell type (e.g. wide width, narrow width spiking)')
        for unit in (extracellular.UnitSpikeTimes & probe_insertion).fetch(as_dict=True):
            # make an electrode table region (which electrode(s) is this unit coming from)
            nwbfile.add_unit(id=unit['unit_id'],
                             electrodes=(unit['channel_id']
                                         if isinstance(unit['channel_id'], np.ndarray)
                                         else [unit['channel_id']]),
                             sampling_rate=ecephys_fs,
                             unit_x=unit['unit_x'],
                             unit_y=unit['unit_y'],
                             unit_z=unit['unit_z'],
                             cell_type=unit['unit_cell_type'],
                             spike_times=unit['spike_times'],
                             waveform_mean=np.mean(unit['spike_waveform'], axis=0),
                             waveform_sd=np.std(unit['spike_waveform'], axis=0))

    # =============== Behavior ====================
    behavior_data = ((behavior.LickTrace & session_key).fetch1()
                     if behavior.LickTrace & session_key else None)
    if behavior_data:
        behav_acq = pynwb.behavior.BehavioralTimeSeries(name='lick_trace')
        nwbfile.add_acquisition(behav_acq)
        [behavior_data.pop(k) for k in behavior.LickTrace.primary_key]
        lt_start_time = behavior_data.pop('lick_trace_start_time')
        lt_fs = behavior_data.pop('lick_trace_sampling_rate')
        for b_k, b_v in behavior_data.items():
            behav_acq.create_timeseries(name=b_k,
                                        unit='a.u.',
                                        conversion=1.0,
                                        data=b_v,
                                        starting_time=lt_start_time,
                                        rate=lt_fs)

    # =============== Photostimulation ====================
    photostim = ((stimulation.PhotoStimulation & session_key).fetch1()
                 if stimulation.PhotoStimulation & session_key else None)
    if photostim:
        photostim_device = (stimulation.PhotoStimDevice & photostim).fetch1()
        stim_device = nwbfile.create_device(name=photostim_device['device_name'])
        stim_site = pynwb.ogen.OptogeneticStimulusSite(
            name='-'.join([photostim['hemisphere'], photostim['brain_region']]),
            device=stim_device,
            excitation_lambda=float(photostim['photo_stim_excitation_lambda']),
            location='; '.join([f'{k}: {str(v)}'
                                for k, v in (reference.ActionLocation & photostim).fetch1().items()]),
            description=(stimulation.PhotoStimulationInfo & photostim).fetch1('photo_stim_notes'))
        nwbfile.add_ogen_site(stim_site)

        if photostim['photostim_timeseries'] is not None:
            nwbfile.add_stimulus(
                pynwb.ogen.OptogeneticSeries(
                    name='_'.join(['photostim_on',
                                   photostim['photostim_datetime'].strftime('%Y-%m-%d_%H-%M-%S')]),
                    site=stim_site,
                    resolution=0.0,
                    conversion=1e-3,
                    data=photostim['photostim_timeseries'],
                    starting_time=photostim['photostim_start_time'],
                    rate=photostim['photostim_sampling_rate']))

    # =============== TrialSet ====================
    # NWB 'trial' (of type dynamic table) by default comes with three mandatory attributes:
    # 'id', 'start_time' and 'stop_time'.
    # Other trial-related information needs to be added in to the trial-table as additional columns
    # (with column name and column description)
    if acquisition.TrialSet & session_key:
        # Get trial descriptors from TrialSet.Trial and TrialStimInfo
        trial_columns = [
            {'name': tag.replace('trial_', ''),
             'description': re.search(
                 f'(?<={tag})(.*)#(.*)',
                 str((acquisition.TrialSet.Trial * stimulation.TrialPhotoStimInfo).heading)).groups()[-1].strip()}
            for tag in (acquisition.TrialSet.Trial * stimulation.TrialPhotoStimInfo).heading.names
            if tag not in (acquisition.TrialSet.Trial & stimulation.TrialPhotoStimInfo).primary_key
            + ['start_time', 'stop_time']
        ]

        # Trial Events - discard 'trial_start' and 'trial_stop' as we already have start_time and stop_time
        # also add `_time` suffix to all events
        trial_events = set(((acquisition.TrialSet.EventTime & session_key)
                            - [{'trial_event': 'trial_start'},
                               {'trial_event': 'trial_stop'}]).fetch('trial_event'))
        event_names = [{'name': e + '_time', 'description': d}
                       for e, d in zip(*(reference.ExperimentalEvent
                                         & [{'event': k} for k in trial_events]).fetch('event', 'description'))]
        # Add new table columns to nwb trial-table for trial-label
        for c in trial_columns + event_names:
            nwbfile.add_trial_column(**c)

        photostim_tag_default = {tag: ''
                                 for tag in stimulation.TrialPhotoStimInfo.heading.names
                                 if tag not in stimulation.TrialPhotoStimInfo.primary_key}

        # Add entry to the trial-table
        for trial in (acquisition.TrialSet.Trial & session_key).fetch(as_dict=True):
            events = dict(zip(*(acquisition.TrialSet.EventTime & trial
                                & [{'trial_event': e} for e in trial_events]).fetch('trial_event', 'event_time')))
            trial_tag_value = ({**trial, **events,
                                **(stimulation.TrialPhotoStimInfo & trial).fetch1()}
                               if (stimulation.TrialPhotoStimInfo & trial)
                               else {**trial, **events, **photostim_tag_default})
            # rename 'trial_id' to 'id'
            trial_tag_value['id'] = trial_tag_value['trial_id']
            [trial_tag_value.pop(k) for k in acquisition.TrialSet.Trial.primary_key]

            # Final tweaks: i) add '_time' suffix and ii) remove 'trial_' prefix
            events = {k + '_time': trial_tag_value.pop(k) for k in events}
            trial_attrs = {k.replace('trial_', ''): trial_tag_value.pop(k)
                           for k in [n for n in trial_tag_value if n.startswith('trial_')]}
            nwbfile.add_trial(**trial_tag_value, **events, **trial_attrs)

    # =============== Write NWB 2.0 file ===============
    if save:
        save_file_name = ''.join([nwbfile.identifier, '.nwb'])
        if not os.path.exists(nwb_output_dir):
            os.makedirs(nwb_output_dir)
        if not overwrite and os.path.exists(os.path.join(nwb_output_dir, save_file_name)):
            return nwbfile
        with NWBHDF5IO(os.path.join(nwb_output_dir, save_file_name), mode='w') as io:
            io.write(nwbfile)
            print(f'Write NWB 2.0 file: {save_file_name}')

    return nwbfile
        raise ArithmeticError('bad computation')
    logger.info('Determined (for %s): m_theta = %.2f, b_theta = %.2f',
                name, m_theta, b_theta)

    # (same for m_phi, b_phi)
    m_phi = (phi0 - phi1) / (t0 - t1)
    b_phi = phi0 - m_phi * t0
    if not isclose(b_phi, phi1 - m_phi * t1):
        raise ArithmeticError('bad computation')
    logger.info('Determined (for %s): m_phi = %.2f, b_phi = %.2f',
                name, m_phi, b_phi)

    # assume firing in 10 seconds
    now = datetime.now(tzlocal())
    rock_fired_time = fired
    slug_fired_time = now + timedelta(seconds=5)

    # compute times as seconds
    base_time = now.replace(hour=0, minute=0, second=0, microsecond=0)
    t_rock = (rock_fired_time - base_time).total_seconds()
    t_slug = (slug_fired_time - base_time).total_seconds()

    v_rock, v_slug = v, V_SLUG
    r0_rock, r0_slug = r0, 0
def do(self, action, options=None):
    """
    This method executes the defined action in the given event.

    :param action:
    :param options: Contains the flask parameters g, request, response
        and the handler_def configuration
    :type options: dict
    :return:
    """
    ret = True
    g = options.get("g")
    request = options.get("request")
    response = options.get("response")
    content = self._get_response_content(response)
    handler_def = options.get("handler_def")
    handler_options = handler_def.get("options", {})

    serial = request.all_data.get("serial") or \
             content.get("detail", {}).get("serial") or \
             g.audit_object.audit_data.get("serial")

    if action.lower() in [ACTION_TYPE.SET_TOKENREALM,
                          ACTION_TYPE.SET_DESCRIPTION,
                          ACTION_TYPE.DELETE,
                          ACTION_TYPE.DISABLE,
                          ACTION_TYPE.ENABLE,
                          ACTION_TYPE.UNASSIGN,
                          ACTION_TYPE.SET_VALIDITY,
                          ACTION_TYPE.SET_COUNTWINDOW,
                          ACTION_TYPE.SET_TOKENINFO,
                          ACTION_TYPE.SET_FAILCOUNTER,
                          ACTION_TYPE.DELETE_TOKENINFO]:
        if serial:
            log.info("{0!s} for token {1!s}".format(action, serial))
            if action.lower() == ACTION_TYPE.SET_TOKENREALM:
                realm = handler_options.get("realm")
                only_realm = is_true(handler_options.get("only_realm"))
                # Set the realm..
                log.info("Setting realm of token {0!s} to {1!s}".format(serial, realm))
                # Add the token realm
                set_realms(serial, [realm], add=not only_realm)
            elif action.lower() == ACTION_TYPE.DELETE:
                remove_token(serial=serial)
            elif action.lower() == ACTION_TYPE.DISABLE:
                enable_token(serial, enable=False)
            elif action.lower() == ACTION_TYPE.ENABLE:
                enable_token(serial, enable=True)
            elif action.lower() == ACTION_TYPE.UNASSIGN:
                unassign_token(serial)
            elif action.lower() == ACTION_TYPE.SET_DESCRIPTION:
                description = handler_options.get("description") or ""
                description, td = parse_time_offset_from_now(description)
                s_now = (datetime.datetime.now(tzlocal()) + td).strftime(AUTH_DATE_FORMAT)
                set_description(serial,
                                description.format(current_time=s_now,
                                                   now=s_now,
                                                   client_ip=g.client_ip,
                                                   ua_browser=request.user_agent.browser,
                                                   ua_string=request.user_agent.string))
            elif action.lower() == ACTION_TYPE.SET_COUNTWINDOW:
                set_count_window(serial, int(handler_options.get("count window", 50)))
            elif action.lower() == ACTION_TYPE.SET_TOKENINFO:
                tokeninfo = handler_options.get("value") or ""
                tokeninfo, td = parse_time_offset_from_now(tokeninfo)
                s_now = (datetime.datetime.now(tzlocal()) + td).strftime(AUTH_DATE_FORMAT)
                try:
                    username = request.User.loginname
                    realm = request.User.realm
                except Exception:
                    username = "******"
                    realm = "N/A"
                add_tokeninfo(serial, handler_options.get("key"),
                              tokeninfo.format(current_time=s_now,
                                               now=s_now,
                                               client_ip=g.client_ip,
                                               username=username,
                                               realm=realm,
                                               ua_browser=request.user_agent.browser,
                                               ua_string=request.user_agent.string))
            elif action.lower() == ACTION_TYPE.DELETE_TOKENINFO:
                delete_tokeninfo(serial, handler_options.get("key"))
            elif action.lower() == ACTION_TYPE.SET_VALIDITY:
                start_date = handler_options.get(VALIDITY.START)
                end_date = handler_options.get(VALIDITY.END)
                if start_date:
                    d = parse_date(start_date)
                    set_validity_period_start(serial, None, d.strftime(DATE_FORMAT))
                if end_date:
                    d = parse_date(end_date)
                    set_validity_period_end(serial, None, d.strftime(DATE_FORMAT))
            elif action.lower() == ACTION_TYPE.SET_FAILCOUNTER:
                try:
                    set_failcounter(serial, int(handler_options.get("fail counter")))
                except Exception as exx:
                    log.warning("Misconfiguration: Failed to set fail counter!")
        else:
            log.info("Action {0!s} requires serial number. But no serial "
                     "number could be found in request.".format(action))

    if action.lower() == ACTION_TYPE.INIT:
        log.info("Initializing new token")
        init_param = {"type": handler_options.get("tokentype"),
                      "genkey": 1,
                      "realm": handler_options.get("realm", "")}
        user = None
        if is_true(handler_options.get("user")):
            user = self._get_tokenowner(request)

        tokentype = handler_options.get("tokentype")
        # Some tokentypes need additional parameters
        if handler_options.get("additional_params"):
            add_params = yaml.safe_load(handler_options.get("additional_params"))
            if type(add_params) == dict:
                init_param.update(add_params)

        if tokentype == "sms":
            if handler_options.get("dynamic_phone"):
                init_param["dynamic_phone"] = 1
            else:
                init_param['phone'] = user.get_user_phone(phone_type='mobile', index=0)
                if not init_param['phone']:
                    log.warning("Enrolling SMS token. But the user "
                                "{0!r} has no mobile number!".format(user))
        elif tokentype == "email":
            if handler_options.get("dynamic_email"):
                init_param["dynamic_email"] = 1
            else:
                init_param['email'] = user.info.get("email", "")
                if not init_param['email']:
                    log.warning("Enrolling EMail token. But the user {0!s}"
                                "has no email address!".format(user))
        elif tokentype == "motp":
            init_param['motppin'] = handler_options.get("motppin")

        t = init_token(param=init_param, user=user)
        log.info("New token {0!s} enrolled.".format(t.token.serial))

    return ret
def get_historic_rates(self, product_id, start=None, end=None):
    '''
    Args:
        product_id (str): Product
        start (Optional[str]): Start time in ISO 8601
        end (Optional[str]): End time in ISO 8601
        interval (Optional[str]): Desired time slice in seconds
    '''
    # Max Candles in one call
    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=tzutc())
    max_candles = 200
    candles_list = []

    # get config
    enabled = self.binance_conf.get('backfill_enabled')
    period = int(self.binance_conf.get('backfill_period'))
    interval_str = self.binance_conf.get('backfill_interval')

    interval = binance.helpers.interval_to_milliseconds(interval_str)
    if (interval == None):
        log.error("Invalid Interval - %s" % interval_str)

    product = None
    for p in self.get_products():
        if p['id'] == product_id:
            product = p

    if product is None:
        log.error("Invalid Product Id: %s" % product_id)
        return None

    if not enabled:
        log.debug("Historical data retrieval not enabled")
        return None

    if not end:
        # if no end, use current time
        end = datetime.now()
        end = end.replace(tzinfo=tzlocal())

    if not start:
        # if no start given, use the config
        real_start = start = end - timedelta(days=period) - timedelta(seconds=interval // 1000)
    else:
        real_start = start

    real_start = start = start.replace(tzinfo=tzlocal())

    log.debug("Retrieving Historic candles for period: %s to %s" % (
        real_start.isoformat(), end.isoformat()))

    td = max_candles * interval // 1000
    tmp_end = start + timedelta(seconds=td)
    tmp_end = min(tmp_end, end)

    # adjust time with server time
    start = start + timedelta(seconds=self.timeOffset)
    tmp_end = tmp_end + timedelta(seconds=self.timeOffset)

    count = 0
    while (start < end):
        # looks like there is a rate-limiting in force, we will have to slow down
        count += 1
        if (count > 3):
            # rate-limiting
            count = 0
            sleep(2)

        start_ts = int((start - epoch).total_seconds() * 1000.0)
        end_ts = int((tmp_end - epoch).total_seconds() * 1000.0)

        log.debug("Start: %s end: %s" % (start_ts, end_ts))
        candles = self.public_client.get_klines(
            symbol=product['symbol'],
            interval=Client.KLINE_INTERVAL_5MINUTE,
            limit=max_candles,
            startTime=start_ts,
            endTime=end_ts)
        if candles:
            if isinstance(candles, dict):
                # Error Case
                err_msg = candles.get('message')
                if (err_msg):
                    log.error("Error while retrieving Historic rates: msg: %s\n will retry.." % (err_msg))
            else:
                # candles are of struct [[time, o, h, l, c, V]]
                candles_list += [OHLC(time=int(candle[6] + 1) // 1000,
                                      low=candle[3], high=candle[2],
                                      open=candle[1], close=candle[4],
                                      volume=candle[5]) for candle in candles]
                # log.debug ("%s"%(candles))
                log.debug("Historic candles for period: %s to %s num_candles: %d " % (
                    start.isoformat(), tmp_end.isoformat(), (0 if not candles else len(candles))))

            # new period, start from the (last + 1)th position
            start = tmp_end  # + timedelta(seconds = (interval//1000))
            tmp_end = start + timedelta(seconds=td)
            tmp_end = min(tmp_end, end)
            # log.debug ("c: %s"%(candles))
        else:
            log.error("Error While Retrieving Historic candles for period: %s to %s num: %d" % (
                start.isoformat(), tmp_end.isoformat(), (0 if not candles else len(candles))))
            return None

    log.debug("Retrieved Historic candles for period: %s to %s num: %d" % (
        real_start.isoformat(), end.isoformat(), (0 if not candles_list else len(candles_list))))
    # log.debug ("%s"%(candles_list))
    return candles_list
def expiryTime(timing):
    return timing.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc()) + datetime.timedelta(hours=8)
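# expiryTime runs the opposite direction of retrieveExpiryTime above: a naive
# local time is pinned to the local zone, converted to UTC, and then pushed out
# by the fixed 8-hour offset the original code applies (illustrative value).
import datetime
from dateutil import tz

naive_local = datetime.datetime(2020, 1, 1, 12, 0)
print(expiryTime(naive_local))   # aware UTC datetime, 8 hours later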
def process_request(self, request):
    """Process the signed request."""
    # User has already been authed by alternate middleware
    if hasattr(request, "facebook") and request.facebook:
        return

    request.facebook = False

    if not self.is_valid_path(request):
        return

    if self.is_access_denied(request):
        return authorization_denied_view(request)

    # No signed request found in either GET, POST nor COOKIES...
    if 'signed_request' not in request.REQUEST and 'signed_request' not in request.COOKIES:
        return

    # If the request method is POST and its body only contains the signed request,
    # chances are it's a request from the Facebook platform and we'll override
    # the request method to HTTP GET to rectify their misinterpretation
    # of the HTTP standard.
    #
    # References:
    # "POST for Canvas" migration at http://developers.facebook.com/docs/canvas/post/
    # "Incorrect use of the HTTP protocol" discussion at http://forum.developers.facebook.net/viewtopic.php?id=93554
    if request.method == 'POST' and 'signed_request' in request.POST:
        request.POST = QueryDict('')
        request.method = 'GET'

    request.facebook = Facebook()

    try:
        request.facebook.signed_request = SignedRequest(
            signed_request=request.REQUEST.get('signed_request') or request.COOKIES.get('signed_request'),
            application_secret_key=FACEBOOK_APPLICATION_SECRET_KEY)
    except SignedRequest.Error:
        request.facebook = False

    # Valid signed request and user has authorized the application
    if request.facebook \
            and request.facebook.signed_request.user.has_authorized_application \
            and not request.facebook.signed_request.user.oauth_token.has_expired:

        # Initialize a User object and its corresponding OAuth token
        try:
            user = User.objects.get(facebook_id=request.facebook.signed_request.user.id)
        except User.DoesNotExist:
            oauth_token = OAuthToken.objects.create(
                token=request.facebook.signed_request.user.oauth_token.token,
                issued_at=request.facebook.signed_request.user.oauth_token.issued_at.replace(tzinfo=tzlocal()),
                expires_at=request.facebook.signed_request.user.oauth_token.expires_at.replace(tzinfo=tzlocal()))
            user = User.objects.create(
                facebook_id=request.facebook.signed_request.user.id,
                oauth_token=oauth_token)
            user.synchronize()
        # Update the user's details and OAuth token
        else:
            user.last_seen_at = now()
            if 'signed_request' in request.REQUEST:
                user.authorized = True
                if request.facebook.signed_request.user.oauth_token:
                    user.oauth_token.token = request.facebook.signed_request.user.oauth_token.token
                    user.oauth_token.issued_at = request.facebook.signed_request.user.oauth_token.issued_at.replace(tzinfo=tzlocal())
                    user.oauth_token.expires_at = request.facebook.signed_request.user.oauth_token.expires_at.replace(tzinfo=tzlocal())
                    user.oauth_token.save()
            user.save()

        if not user.oauth_token.extended:
            # Attempt to extend the OAuth token, but ignore exceptions raised by
            # bug #102727766518358 in the Facebook Platform.
            #
            # http://developers.facebook.com/bugs/102727766518358/
            try:
                user.oauth_token.extend()
            except:
                pass

        request.facebook.user = user
def _normalized_order(self, order):
    # '''
    # Desc:
    #   Error Handle and Normalize the order json returned by gdax
    #   to return the normalized order detail back to callers
    #   Handles -
    #   1. Initial Order Creation/Order Query
    #   2. Order Update Feed Messages
    #   Ref: https://docs.gdax.com/#the-code-classprettyprintfullcode-channel
    # Sample order:
    # {
    #     "symbol": "BTCUSDT",
    #     "orderId": 28,
    #     "orderListId": -1, //Unless OCO, value will be -1
    #     "clientOrderId": "6gCrw2kRUAF9CvJDGP16IP",
    #     "transactTime": 1507725176595,
    #     "price": "1.00000000",
    #     "origQty": "10.00000000",
    #     "executedQty": "10.00000000",
    #     "cummulativeQuoteQty": "10.00000000",
    #     "status": "FILLED",
    #     "timeInForce": "GTC",
    #     "type": "MARKET",
    #     "side": "SELL",
    #     "fills": [
    #         {
    #             "price": "4000.00000000",
    #             "qty": "1.00000000",
    #             "commission": "4.00000000",
    #             "commissionAsset": "USDT"
    #         },
    #         {
    #             "price": "3999.00000000",
    #             "qty": "5.00000000",
    #             "commission": "19.99500000",
    #             "commissionAsset": "USDT"
    #         },
    #         {
    #             "price": "3998.00000000",
    #             "qty": "2.00000000",
    #             "commission": "7.99600000",
    #             "commissionAsset": "USDT"
    #         },
    # Known Errors:
    # 1. {u'message': u'request timestamp expired'}
    # 2. {u'message': u'Insufficient funds'}
    # 3. {'status': 'rejected', 'reject_reason': 'post-only'}
    # '''
    # error_status_codes = ['rejected']
    log.debug("Order msg: \n%s" % (pprint.pformat(order, 4)))

    msg = order.get('msg')
    status = order.get('status') or order.get('X')
    if (msg):
        log.error("FAILED Order: error msg: %s status: %s" % (msg, status))
        return None

    # Valid Order
    product_id = order.get('symbol') or order.get("s")
    order_id = order.get('clientOrderId') or order.get('c')
    order_type = order.get('type') or order.get("o")

    if order_type == "MARKET":
        order_type = "market"
    elif order_type == "LIMIT":
        order_type = 'limit'

    if (status == None and (product_id != None and order_id != None)):
        log.debug("must be an ACK for order_id (%s)" % (order_id))
        # For ACK all values might be 0, careful with calculations
        status = "NEW"

    if status in ['NEW', 'PARTIALLY_FILLED', 'FILLED', 'CANCELED',
                  'PENDING_CANCEL', 'REJECTED', 'EXPIRED']:
        if status == "NEW":
            status_type = "open"
        elif status == 'FILLED':
            status_type = "filled"
        elif status in ['CANCELED', 'EXPIRED']:
            # order status update message
            status_type = "canceled"
        elif status == 'REJECTED':
            log.error("order rejected msg:%s" % (order))
            return None
        else:  # 'PARTIALLY_FILLED'
            log.critical('unhandled order status(%s)' % (status))
            return None
        # order_type = order.get('order_type') #could be None
    else:
        s = "****** unknown order status: %s" % (status)
        log.critical(s)
        raise Exception(s)

    create_time = order.get('O') or order.get('time')
    if create_time:
        create_time = datetime.utcfromtimestamp(
            int(create_time) / 1000).replace(tzinfo=tzutc()).astimezone(tzlocal()).isoformat()
    # else:
    #     create_time = datetime.now().isoformat()

    update_time = (order.get('updateTime') or order.get('transactTime')
                   or order.get('T') or order.get('O') or None)
    if update_time:
        update_time = datetime.utcfromtimestamp(
            int(update_time) / 1000).replace(tzinfo=tzutc()).astimezone(tzlocal()).isoformat()
    else:
        update_time = datetime.now().isoformat()

    side = order.get('side') or order.get('S') or None
    if side == None:
        log.critical("unable to get order side %s(%s)" % (product_id, order_id))
        raise Exception("unable to get order side")
    elif side == 'BUY':
        side = 'buy'
    elif side == 'SELL':
        side = 'sell'

    # Money matters
    price = float(order.get('price') or 0)
    request_size = float(order.get('q') or order.get('origQty') or 0)
    filled_size = float(order.get('z') or order.get('executedQty') or 0)
    remaining_size = float(0)  # FIXME: jork:
    funds = float(order.get('Z') or order.get('cummulativeQuoteQty') or 0)
    fees = float(order.get('n') or 0)

    if price == 0 and funds != 0 and filled_size != 0:
        price = funds / filled_size

    # avg size calculation: 'fills' is a list of partial fills, not a number
    fills = order.get('fills')
    if fills:
        qty = float(0.0)
        comm = float(0.0)
        for fill in fills:
            qty += float(fill.get('qty') or 0)
            comm += float(fill.get('commission') or 0)
        if fees == 0:
            fees = float(comm)

    # if status == "FILLED":
    #     total_val = float(order.get('executed_value') or 0)
    #     if total_val and filled_size and not price:
    #         price = total_val/filled_size
    #     if (funds == 0):
    #         funds = total_val + fees
    #     log.debug ("calculated fill price: %g size: %g"%(price, filled_size))
    # if filled_size and remaining_size:
    #     request_size = filled_size + remaining_size

    if (request_size == 0):
        request_size = remaining_size + filled_size

    log.debug("price: %g fund: %g req_size: %g filled_size: %g remaining_size: %g fees: %g" % (
        price, funds, request_size, filled_size, remaining_size, fees))
    norm_order = Order(order_id, product_id, status_type, order_type=order_type,
                       side=side, request_size=request_size, filled_size=filled_size,
                       remaining_size=remaining_size, price=price, funds=funds,
                       fees=fees, create_time=create_time, update_time=update_time)
    return norm_order
def setUp(self):
    self.start_time = datetime(1970, 1, 1, 12, tzinfo=tzutc())
    self.ref_time = datetime(1979, 1, 1, 0, tzinfo=tzutc())
    self.create_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())
    super(TestNWBFileIO, self).setUp()
    self.path = "test_pynwb_io_hdf5.h5"
import tweepy

from smarttypes.config import *
from smarttypes.utils import email_utils
from smarttypes.model.twitter_session import TwitterSession
from smarttypes.model.twitter_credentials import TwitterCredentials
from smarttypes.model.twitter_user import TwitterUser

from dateutil import tz
from datetime import datetime

HERE = tz.tzlocal()
UTC = tz.tzutc()


def get_rate_limit_status(api_handle):
    rate_limit_status_dict = api_handle.rate_limit_status()
    remaining_hits = rate_limit_status_dict['remaining_hits']
    reset_time_in_seconds = rate_limit_status_dict['reset_time_in_seconds']
    reset_time_utc = datetime.utcfromtimestamp(reset_time_in_seconds)
    reset_time_utc = reset_time_utc.replace(tzinfo=UTC)
    reset_time = reset_time_utc.astimezone(HERE)
    return remaining_hits, reset_time


def get_signin_w_twitter_url(postgres_handle):
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET, callback=CALLBACK)
    request_token = auth._get_request_token()
    TwitterSession.create(request_token.key, request_token.secret, postgres_handle)
    url = "https://api.twitter.com/oauth/authenticate"
    # might want this: '&force_login=true'