import logging

import dateutil.parser


def process_item(self, item, spider):
    # Map lastModified to a numeric Unix timestamp.
    if 'lastModified' in item:
        try:
            item['lastModified'] = float(item['lastModified'])
        except (TypeError, ValueError):
            try:
                # Fall back to parsing a date string and converting it to epoch seconds.
                date = dateutil.parser.parse(item['lastModified'])
                item['lastModified'] = int(date.timestamp())
            except (TypeError, ValueError, OverflowError):
                logging.warning('Unable to parse given lastModified date ' + item['lastModified'])
                del item['lastModified']
    # Map typicalLearningTime ("HH:MM:SS") to a number of seconds.
    if 'typicalLearningTime' in item['lom']['educational']:
        time = item['lom']['educational']['typicalLearningTime']
        mapped = None
        splitted = time.split(':')
        if len(splitted) == 3:
            mapped = int(splitted[0]) * 60 * 60 + int(splitted[1]) * 60 + int(splitted[2])
        if mapped is None:
            logging.warning('Unable to map given typicalLearningTime ' + time + ' to numeric value')
        item['lom']['educational']['typicalLearningTime'] = mapped
    return item
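# A minimal standalone sketch of the typicalLearningTime mapping used above
# ("HH:MM:SS" -> total seconds, anything else -> None). The helper name
# duration_to_seconds is made up here for illustration and is not part of the pipeline.
def duration_to_seconds(value):
    parts = value.split(':')
    if len(parts) == 3 and all(p.isdigit() for p in parts):
        hours, minutes, seconds = (int(p) for p in parts)
        return hours * 3600 + minutes * 60 + seconds
    return None

# Example: duration_to_seconds('01:30:00') == 5400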
def _parse_programme(self, element):
    data = self._parse_element(element)
    date = data['start']
    return dict(name=data['title']['text'][:Programme.name.max_length],
                date=date.date(),
                time=date.time(),
                timestart=int(date.timestamp()),
                timestop=int(data['stop'].timestamp()),
                channel=data['channel'][:Programme.channel.max_length])
import glob
import os
from datetime import datetime, timedelta
from os.path import getmtime


def msglogclearer(num_days=187):
    '''
    Used to manage the amount of log files.
    Keeps only those within the input number of days. Default: 187 days.
    '''
    date = datetime.today() - timedelta(days=num_days)
    limittime = date.timestamp()
    list_of_logs = glob.glob('./Mobius_logs/Logs_Messages/*.pkl')
    logsMTimes = [(f, getmtime(f)) for f in list_of_logs]
    for f, t in logsMTimes:
        comparetime = float(t)
        if comparetime < limittime:
            # Delete log files whose modification time is older than the cutoff.
            os.remove(f)
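# The comparison above works because os.path.getmtime() and datetime.timestamp()
# both return POSIX seconds. Illustration of the same cutoff check for a single
# file; the path 'some_log.pkl' is a placeholder and must exist for getmtime().
from datetime import datetime, timedelta
from os.path import getmtime

cutoff = (datetime.today() - timedelta(days=187)).timestamp()
is_stale = getmtime('some_log.pkl') < cutoff  # True means msglogclearer would delete it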
def to_number(self, value):
    date = self.to_datetime(value)
    if date is not None:
        return date.timestamp()
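# to_number() above returns a POSIX timestamp (float seconds since the epoch),
# so a naive datetime round-trips through datetime.fromtimestamp(). Sketch only;
# to_datetime() is assumed to yield a datetime or None.
from datetime import datetime

dt = datetime(2021, 5, 1, 12, 0, 0)
ts = dt.timestamp()                      # e.g. 1619870400.0 in UTC+0 (value depends on local timezone)
assert datetime.fromtimestamp(ts) == dt  # round-trips back to the same naive datetime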
def daily():
    nav_list = NAV_LIST
    nav = "daily"
    # Check if valid date was provided as GET parameter, default to today (at midnight) if not
    try:
        date = datetime.strptime(request.args.get('date', default=""), '%Y-%m-%d')
    except ValueError:
        date = datetime.combine(datetime.today(), datetime.min.time())
    # Formatted date strings used when rendering the page and the buttons to other dates
    date_str = date.strftime('%Y-%m-%d')
    prev_date_str = (date - timedelta(days=1)).strftime('%Y-%m-%d')
    next_date_str = (date + timedelta(days=1)).strftime('%Y-%m-%d')
    tab_list = client_format.keys()
    # Check if valid tab was provided as GET parameter, default to first tab if not
    try:
        tab = request.args.get('tab')
        if tab not in client_format:
            raise ValueError()
    except ValueError:
        tab = next(iter(client_format))
    graph_data = OrderedDict()  # The data used in the render template (see format below)
    if tab == "Location":  # Location tab uses a separate template to display a map
        # URL to initialize Google Maps API, to be injected into HTML. The key value comes
        # from the local google_maps_key.py file.
        maps_url = key
        # Check if valid times were provided as GET parameters, default to all day if not
        try:
            # Times are represented as seconds from midnight
            starttime = int(request.args.get('starttime', default=''))
            endtime = int(request.args.get('endtime', default=''))
        except ValueError:
            starttime = 0
            endtime = 86400
        # Get the list of latitudes. lat_gen is a generator of document snapshots, but will only
        # yield one snapshot for us given the firebase setup.
        lat_gen = db.collection(DATABASE_COLLECTION).document(date_str).collection('gps_lat').stream()
        # Avoid a server error if there's no data for the day (lat_gen yields no values)
        try:
            lat_reading_dict = next(lat_gen).to_dict()["seconds"]  # dict format: {'second': reading}, ex. {'10': 334}
        except StopIteration:
            location_pairs = None
            return render_template('daily_location.html', **locals())
        lat_reading_list = sorted(
            {int(k): v for k, v in lat_reading_dict.items() if starttime <= int(k) <= endtime}.items())
        sec_list, lat_list = zip(*lat_reading_list)
        # Get the list of longitudes in the same manner
        lon_gen = db.collection(DATABASE_COLLECTION).document(date_str).collection('gps_lon').stream()
        try:
            lon_reading_dict = next(lon_gen).to_dict()["seconds"]
        except StopIteration:
            location_pairs = None
            return render_template('daily_location.html', **locals())
        lon_reading_list = sorted(
            {int(k): v for k, v in lon_reading_dict.items() if starttime <= int(k) <= endtime}.items())
        sec_list, lon_list = zip(*lon_reading_list)
        location_pairs = zip(lat_list, lon_list)  # [(lat0, lon0), (lat1, lon1), ...]
        return render_template('daily_location.html', **locals())
    else:
        # Loop through every sensor the current tab should show a reading for
        for sensor_id in client_format[tab]["lines"]:
            # Find the info about the sensor
            sensor = db_format[sensor_id]
            # Ensure the sensor is in the database
            if sensor is not None and "name" in sensor:
                graph_data[sensor["name"]] = OrderedDict()
                # Loop through all the sensor readings for the day being viewed
                db_data = db.collection(DATABASE_COLLECTION).document(date_str).collection(sensor_id).stream()
                try:
                    readings = next(db_data).to_dict()["seconds"]  # The map within the sensor's document
                except StopIteration:
                    continue  # Skip sensors not in the database
                except KeyError:
                    continue
                # Convert keys from strings to ints and sort (conversion required for the sort to be correct)
                sorted_readings = sorted({int(k): v for k, v in readings.items()}.items())
                # Convert the sorted list of tuples into two separate lists using zip
                times, readings = zip(*sorted_readings)
                # Downsample data if needed
                if len(readings) > MAX_POINTS:
                    times, readings = avg_downsample(np.array(times), np.array(readings), MAX_POINTS)
                for time, reading in zip(times, readings):
                    unix = int(date.timestamp() + time) * 1000
                    graph_data[sensor["name"]][unix] = reading
        return render_template('daily.html', **locals())
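# The graph keys built above are millisecond Unix timestamps: the viewed day's
# midnight timestamp plus the reading's seconds-from-midnight offset, scaled by
# 1000 (the usual convention for JavaScript charting libraries). Small sketch
# with made-up values:
from datetime import datetime

day = datetime(2021, 5, 1)   # midnight of the viewed day (local time)
second_of_day = 3661         # reading taken at 01:01:01
unix_ms = int(day.timestamp() + second_of_day) * 1000
# datetime.fromtimestamp(unix_ms / 1000) -> 2021-05-01 01:01:01 (local time)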
# print(url)
logging.debug("test access url - %s", url)
test_credentials(url)
worksheet = ssht.worksheet_by_title(sheet_name)
# Column number 2 is hard-coded
today_row = get_day_row_num(worksheet, 2, datetime.today().date())
for user in users:
    logging.info("user: %s", user)
    i = 0
    # Date for the amoCRM request
    date = datetime.strptime(date_start_str, "%Y-%m-%d %H:%M:%S")
    while i < number_of_days:
        count = 0
        logging.info("date: %s", date)
        timestamp_start = int(date.timestamp())
        timestamp_stop = int((date + timedelta(hours=15, minutes=59)).timestamp())
        url = init_url + url_api + \
            create_href(user, param["cls1"], timestamp_start, timestamp_stop)
        events = get_events(url)
        if events == -1:
            logging.critical("Program stopped")
            exit()
        # print(events)
        if events is not None:
            num = set()
            for item in events:
                if not check_for_exlude(item):
                    num.add(item["entity"]["id"])
            count = len(num)