def __init__(self):
    base.do_app_init()
    self.logger = base.get_logger(logger_name='meterman', log_file=base.log_file)
    # The "******" span below is redacted in the source; it presumably logs the
    # current user and assigns rest_api_config from the 'RestApi' config section.
    self.logger.info('Running as user: '******'RestApi']
    if rest_api_config is not None and rest_api_config.getboolean('run_rest_api'):
        self.api_ctrl = meter_man_api.ApiCtrl(
            self,
            rest_api_config.getint('flask_port'),
            rest_api_config['user'],
            rest_api_config['password'],
            rest_api_config.getboolean('access_lan_only'),
            log_file=base.log_file)
        self.api_ctrl.run()
def uptime_data():
    """Returns uptime data."""
    return {
        'uptime': mark(uptime.uptime(), 'duration'),
        'boottime': uptime.boottime()
    }
def start():
    time.sleep(20)
    shadow, client = common.setup_aws_shadow_client(settings.aws_endpoint,
                                                    settings.aws_root_certificate,
                                                    settings.aws_private_key,
                                                    settings.aws_certificate,
                                                    settings.device_name)
    JSONPayload = '{"state":{"reported":{"connected":"true"}}}'
    client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)

    handler.setup(dht_sensor_pin, loudness_sensor_pin)

    d1 = datetime.min
    boot_time = boottime()
    should_read = False
    try:
        while True:
            d2 = datetime.now()
            d = d2 - d1
            if d.total_seconds() > 10.0:
                if not should_read:
                    should_read = should_read_co2(boot_time)
                [co2, temperature, humidity, loudness] = handler.read_data(should_read)
                send_data(client, co2, temperature, humidity, loudness)
                d1 = d2
            else:
                time.sleep(1)
    except KeyboardInterrupt:
        JSONPayload = '{"state":{"reported":{"connected":"false"}}}'
        client.shadowUpdate(JSONPayload, customShadowCallback_Update, 5)
        shadow.disconnect()
        handler.cleanup()
        print('stopped')
def debug_info(num, frame):
    proc_info = """
    ===== DEBUG INFO =====
    DATE   : {}
    UPTIME : {}
    OS     : {}
    CPU    : {}
    BIT    : {}
    PID    : {}
    PPID   : {}
    USER   : {}
    Python : {}
    ======================
    """.format(
        datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
        uptime.boottime(),
        pf.system(),
        pf.processor(),
        str(64 if sys.maxsize > 2 ** 32 else 32) + " bit",
        os.getpid(),
        os.getppid(),
        os.environ.get("USER"),
        str(sys.version_info.major) + "." + str(sys.version_info.minor),
    )
    print(Color.BLUE, "\n", textwrap.dedent(proc_info).strip())
    print(Color.END)
    return
def __init__(self, con):
    log.info("Logging into DCC")
    self._version = 20171023
    self.comms = con
    if not self.comms.identity.username:
        log.error("Username not found")
        raise ValueError("Username not found")
    elif not self.comms.identity.password:
        log.error("Password not found")
        raise ValueError("Password not found")
    # This thread runs continuously in the background to receive responses or
    # actions from the DCC.
    thread = threading.Thread(target=self.comms.receive)
    thread.daemon = True
    thread.start()
    # Wait for the subscription to complete before publishing messages.
    time.sleep(0.5)
    self._iotcc_json = self._create_iotcc_json()
    self._iotcc_json_load_retry = int(read_liota_config('IOTCC_PATH', 'iotcc_load_retry'))
    self.enable_reboot_getprop = read_liota_config('IOTCC_PATH', 'enable_reboot_getprop')
    self.counter = 0
    self.recv_msg_queue = self.comms.userdata
    self.boottime = boottime()
    self.dev_file_path = self._get_file_storage_path("dev_file_path")
    # Liota-internal entity file system path specific to iotcc.
    self.entity_file_path = self._get_file_storage_path("entity_file_path")
    self.file_ops_lock = Lock()
def get_metrics(self):
    """Update system metric GUI elements once per second."""
    # Gather metrics.
    cpu = psu.cpu_percent()
    memory = psu.virtual_memory()[2]
    uptime_var = str(datetime.now() - boottime())[:-7]
    time = datetime.now().strftime("{}".format(self.time_format))

    # CPU (red text when >90%)
    self.gui_widget_values[0].configure(text=cpu)
    if cpu >= 90.0:
        self.gui_widget_values[0].configure(fg="red")
    else:
        self.gui_widget_values[0].configure(fg="black")

    # MEMORY (red text when >90%)
    self.gui_widget_values[1].configure(text=memory)
    if memory >= 90.0:
        self.gui_widget_values[1].configure(fg="red")
    else:
        self.gui_widget_values[1].configure(fg="black")

    # UPTIME
    self.gui_widget_values[2].configure(text=uptime_var)

    # SYSTEM CLOCK
    self.gui_widget_values[3].configure(text=time)

    # Continue calling this function every second.
    self.master.after(1000, self.get_metrics)
def summary_info():
    disk_parts = []
    nic_parts = []
    virt = psutil.virtual_memory()
    swap = psutil.swap_memory()
    disks = psutil.disk_partitions()
    nics = psutil.net_io_counters(pernic=True)
    freq = psutil.cpu_freq()
    for disk in disks:
        disk_parts.append(disk.mountpoint)
    for nic in nics:
        nic_parts.append(nic)
    print('Uptime: %s (Booted: %s)' % (seconds_to_days(uptime()), boottime().strftime('%c')))
    if freq is not None:
        print('CPUs: %d (Frequency - Current (%s), Min (%s), Max (%s))'
              % (psutil.cpu_count(), freq.current, freq.min, freq.max))
    else:
        print('CPUs: %s' % psutil.cpu_count())
    print('Memory: Total %s, Available %s, Free %.2f %%'
          % (human_size(virt.total), human_size(virt.available), (100 - virt.percent)))
    print('Swap: Total %s, Available %s, Free %.2f %%'
          % (human_size(swap.total), human_size(swap.free), (100 - swap.percent)))
    print('Disks: %s (%s)' % (len(disks), ', '.join(disk_parts)))
    print('NICs: %s (%s)' % (len(nics), ', '.join(nic_parts)))
    print('Processes: %s' % len(psutil.pids()))
def returnHealth():
    health_status = {
        "Status": "OK",
        "uptime": jsonable_encoder(uptime()),
        "boottime": jsonable_encoder(boottime())
    }
    return JSONResponse(status_code=200, content=health_status)
def status() -> str:
    """App status page"""
    data = {
        'kio-node': kio_version,
        'operational': True,
        'uptime': str(uptime.boottime())
    }
    return jsonify(data)
def get_reboot_time():
    '''
    :return: datetime object of the moment of boot, expressed in UTC
    '''
    boottime = uptime.boottime()
    tz_offset = datetime.datetime.now() - datetime.datetime.utcnow()
    utcboottime = boottime - tz_offset
    return utcboottime
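# Hedged alternative sketch (an assumption, not part of the snippet above): on
# Python 3.6+ the same UTC boot time can be obtained with timezone-aware
# datetimes, assuming uptime.boottime() returns a naive local-time datetime.
def get_reboot_time_aware():
    import datetime
    import uptime
    # astimezone() on a naive datetime interprets it as local time (3.6+).
    return uptime.boottime().astimezone(datetime.timezone.utc)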
def _windows_platform_data():
    '''
    Use the platform module for as much as we can.
    '''
    # Provides:
    #   productcode
    #   deviceclass
    #   boottime
    #   nisysapi_enabled
    grains = {}
    productcode = ''
    deviceclass = 'Desktop'
    boottime = ''
    nisysapi_enabled = False

    if HAS_WMI:
        try:
            with salt.utils.winapi.Com():
                wmi_c = wmi.WMI()
                # 'productcode' and 'deviceclass'
                # http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
                systeminfo = wmi_c.Win32_ComputerSystem()[0]
                if hasattr(systeminfo, 'OEMStringArray'):
                    oem_strings = systeminfo.OEMStringArray
                    if oem_strings is not None:
                        for item in oem_strings:
                            colon = item.find(':')
                            if colon != -1:
                                key = item[:colon]
                                value = item[colon + 1:]
                                if key == 'TargetID':
                                    productcode = value
                                elif key == 'DeviceClass':
                                    deviceclass = value
                # 'nisysapi_enabled'
                matched_services = wmi_c.Win32_Service(name='niminionagent')
                if matched_services and matched_services[0].State == 'Running':
                    nisysapi_enabled = True
        except Exception as exc:  # pylint: disable=broad-except
            global WMI_FAILURE_LOGGED  # pylint: disable=global-statement
            if not WMI_FAILURE_LOGGED:
                WMI_FAILURE_LOGGED = True
                log.error('Exception occurred when using WMI: %s', exc, exc_info=True)

    localboottime = uptime.boottime()
    boottime = datetime.utcfromtimestamp(localboottime.timestamp()).isoformat() + 'Z'

    grains.update({
        'productcode': productcode,
        'deviceclass': deviceclass,
        'boottime': boottime,
        'nisysapi_enabled': nisysapi_enabled
    })
    return grains
def get_self_cluster_id():
    """Return the id for the cluster we are running on."""
    # time.mktime() expects a struct_time, while uptime.boottime() returns a
    # datetime, so convert via timetuple().
    return str(
        uuid.uuid5(
            uuid.NAMESPACE_URL,
            "https://github.com/josh00/experimental-results-framework?cluster-id="
            + str(uuid.getnode())
            + "&boottime="
            + str(time.mktime(uptime.boottime().timetuple()))))
def getPGUID(self, process):
    b = boottime()
    assert b is not None
    pguid = str(process.pid) + '.' + str(b)
    # Save the guid. If it does not exist in the db, it will be created.
    self.savePGUID(process, pguid)
    self.config['logger'].debug('Returning guid %s for process %s with pid %d'
                                % (pguid, process.name, process.pid))
    return pguid
def get_time():
    # print("get_time")
    time = uptime.uptime()
    days = time // (24 * 3600)
    hours = (time - days * 3600 * 24) // 3600
    minutes = (time - days * 3600 * 24 - hours * 3600) // 60
    seconds = time - days * 3600 * 24 - hours * 3600 - minutes * 60
    return {
        "Uptime": f"{int(days)} days, {int(hours)} hours, {int(minutes)} minutes, {int(seconds)} seconds",
        "BootTime": str(uptime.boottime().replace(microsecond=0))
    }
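# Illustrative alternative only (not part of the snippet above): the same
# days/hours/minutes/seconds split can be written with divmod, which should
# give identical results for non-negative uptimes.
def split_uptime(total_seconds):
    days, rem = divmod(int(total_seconds), 24 * 3600)
    hours, rem = divmod(rem, 3600)
    minutes, seconds = divmod(rem, 60)
    return days, hours, minutes, seconds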
def get(self):
    return {
        "system_id": uuid.getnode(),
        "uptime": uptime(),
        "boottime": boottime(),
        "system": platform.system(),
        "node": platform.node(),
        "release": platform.release(),
        "version": platform.version(),
        "machine": platform.machine(),
        "processor": platform.processor(),
    }
def main(event_loop=None):
    """Scriptworker entry point: get everything set up, then enter the main loop.

    Args:
        event_loop (asyncio.BaseEventLoop, optional): the event loop to use.
            If None, use ``asyncio.get_event_loop()``. Defaults to None.

    """
    context, credentials = get_context_from_cmdln(sys.argv[1:])
    log.info("Scriptworker starting up at {} UTC".format(arrow.utcnow().format()))
    log.info("Worker FQDN: {}".format(socket.getfqdn()))
    log_worker_metric(context, "instanceBoot", timestamp=uptime.boottime())
    cleanup(context)
    context.event_loop = event_loop or asyncio.get_event_loop()

    done = False

    async def _handle_sigterm():
        log.info("SIGTERM received; shutting down")
        nonlocal done
        done = True
        if context.running_tasks is not None:
            await context.running_tasks.cancel()

    async def _handle_sigusr1():
        """Stop accepting new tasks."""
        log.info("SIGUSR1 received; no more tasks will be taken")
        nonlocal done
        done = True

    context.event_loop.add_signal_handler(
        signal.SIGTERM, lambda: asyncio.ensure_future(_handle_sigterm()))
    context.event_loop.add_signal_handler(
        signal.SIGUSR1, lambda: asyncio.ensure_future(_handle_sigusr1()))

    log_worker_metric(context, "workerReady")
    while not done:
        try:
            context.event_loop.run_until_complete(async_main(context, credentials))
        except Exception:
            log.critical("Fatal exception", exc_info=1)
            raise
    else:
        log.info("Scriptworker stopped at {} UTC".format(arrow.utcnow().format()))
        log.info("Worker FQDN: {}".format(socket.getfqdn()))
def render(self, data, accepted_media_type=None, renderer_context=None):
    blocklist_cpf_quantity = len(data[0].get('cpf', None))
    system_uptime = boottime()
    system_uptime = str(datetime.now() - system_uptime)
    cpf_query_quantity_since_last_uptime = len(data[1].get('apirequestlog', None))
    # Keys are intentionally Portuguese API labels: system uptime, number of
    # CPFs on the blocklist, and number of queries since the last server restart.
    data = {
        'Uptime do sistema': system_uptime,
        'Quantidade de CPFs na blocklist': blocklist_cpf_quantity,
        'Quantidade de consultas desde o ultimo restart do servidor':
            cpf_query_quantity_since_last_uptime
    }
    return super(ServerStatusJSONRenderer, self).render(data, accepted_media_type,
                                                        renderer_context)
def handle_client_command(t_msg):
    global config
    command, options, words = extract_texts(t_msg.text)
    if command != "pi":
        return {"text": u"并不懂你在说什么"}  # "I don't understand what you're saying"
    if len(options) > 0:
        subcommand = options[0][1:]  # drop the leading slash
    else:
        if len(words) > 0:
            subcommand = words[0]
        else:
            subcommand = "ping"
    if subcommand in ("ping", "pong"):
        return {"text": ("pong" if subcommand == "ping" else "ping")}
    elif subcommand == "uptime":
        try:
            import uptime
        except ImportError:
            return {"text": u"没有安装 uptime 模块哦"}  # "the uptime module is not installed"
        else:
            # "Booted at <Beijing time> ..."
            return {"text": u"启动于 北京时间 {}".format(uptime.boottime().strftime("%Y-%m-%d %H:%M:%S"))}
    elif subcommand == "free":
        try:
            import psutil
        except ImportError:
            return {"text": u"没有安装 psutil 模块哦"}  # "the psutil module is not installed"
        else:
            memory_usage = psutil.virtual_memory()
            swap_usage = psutil.swap_memory()
            # "Memory usage ..%, .. MB total / SWAP usage ..%, .. MB total"
            return {
                "text": (u"内存使用率 {:.2f}%,共有 {:d} MB\n"
                         u"SWAP 使用率 {:.2f}%,共有 {:d} MB").format(
                    memory_usage.percent,
                    memory_usage.total / 1024 / 1024,
                    swap_usage.percent,
                    swap_usage.total / 1024 / 1024,
                )
            }
    elif subcommand == "photo":
        if t_msg.from_user.name != config["owner"] and t_msg.from_user.name[1:] != config["owner"]:
            # "A mere mortal dares give me such an order?"
            return {"text": u"区区凡人,竟敢对我下这种命令"}
        return {"photo": upload_photo()}
    elif subcommand in ("temp", "temperature", "pressure", "env"):
        return {"text": read_env(subcommand)}
    else:
        # "When I can't understand you, I pretend to admire the scenery"
        return {"text": u"当听不懂你在说什么时,我会假装看风景"}
def host_metrics():
    """Frequently updated host data"""
    result = {}
    # volatile...
    result['physical_memory'] = psutil.virtual_memory().total
    result['virtual_memory'] = psutil.swap_memory().total
    result['local_drives'] = local_drives()
    result['logged_in_users'] = list(get_loggedinusers())
    result['last_logged_on_user'] = get_last_logged_on_user()
    # memory usage of this process, as a dict of the memory_info() fields
    current_process = psutil.Process()
    result['wapt-memory-usage'] = current_process.memory_info()._asdict()
    result['last_bootup_time'] = uptime.boottime()
    return result
async def show_stats(self, ctx):
    appinfo = await self.bot.application_info()
    em = {
        "title": "About Senko-san",
        "description": "Hey, I'm the divine messenger fox, Senko-san!~\n"
                       "Your fluffy helpful bot written in [python](https://www.python.org) "
                       "using [discord.py](https://github.com/Rapptz/discord.py)!",
        "color": 0xf0c561,
        "thumbnail": {
            "url": str(self.bot.user.avatar_url)
        },
        "fields": [{
            "name": "Owner",
            "value": "User: {}#{}\nId: {}".format(appinfo.owner.name,
                                                  appinfo.owner.discriminator,
                                                  appinfo.owner.id),
            "inline": True
        }, {
            "name": "Versions",
            "value": "Senko-san v{}\ndiscord.py v{}\npython {}".format(
                app_version, discord.__version__, platform.python_version()),
            "inline": True
        }, {
            "name": "System Info",
            "value": "OS: {}\nUptime: {}\nBoottime: {}".format(
                platform.platform(),
                self.seconds_to_time_str(uptime()),
                boottime().strftime('%B %d %Y %H:%M:%S'))
        }]
    }
    await ctx.send(embed=discord.Embed.from_dict(em))
def get_system_data(self):
    topic = "autopial/system/hostname"
    value = socket.getfqdn()
    self.publish(topic, value)

    topic = "autopial/system/boottime"
    value = uptime.boottime().isoformat()
    self.publish(topic, value)

    topic = "autopial/system/cpu"
    value = {
        "usage": psutil.cpu_percent(interval=1),
        "frequency": psutil.cpu_freq()[0],
        "vcpu": psutil.cpu_count(),
    }
    self.publish(topic, value)

    topic = "autopial/system/ram"
    data = psutil.virtual_memory()
    value = {
        "free": data.available,
        "total": data.total,
        "used": data.used,
        "usage": float(data.used) / float(data.total) * 100.0
    }
    self.publish(topic, value)

    topic = "autopial/system/swap"
    data = psutil.swap_memory()
    value = {
        "free": data.free,
        "total": data.total,
        "used": data.used,
        "usage": float(data.used) / float(data.total) * 100.0
    }
    self.publish(topic, value)
    return
Enable basic can over a PCAN USB device.
"""
import logging
logger = logging.getLogger(__name__)

from can.interfaces.PCANBasic import *
from can.bus import BusABC
from can.message import Message

boottimeEpoch = 0
try:
    import uptime
    import datetime
    boottimeEpoch = (uptime.boottime() - datetime.datetime.utcfromtimestamp(0)).total_seconds()
except:
    boottimeEpoch = 0

# Set up logging
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger('can.pcan')


class Bus(BusABC):

    def __init__(self, channel, *args, **kwargs):
        """A PCAN USB interface to CAN.

        :param str channel:
            The can interface name. An example would be PCAN_USBBUS1
def collect(self, obj_id, part_id, part_name, slot_value):
    try:
        col_time = time.strftime('%Y-%m-%d %X')
        collect_data_list = []

        def publish(*args, **kwargs):
            item_num = collect_metrics.metrics.get('host_common').get(args[2])
            if item_num is not None:
                collect_data_list.append((args[0], args[1], item_num[0], args[3], item_num[1], col_time))

        # System startup time
        boot_datetime = uptime.boottime()
        boot_time = boot_datetime.strftime('%Y-%m-%d %X')
        # Seconds since the system started
        boot_uptime = uptime.uptime()
        system_uptime = int(uptime.uptime())
        # Local time
        local_time = time.strftime('%Y-%m-%d %X')
        # Maximum number of open files
        if os.access('/proc/sys/fs/file-max', os.R_OK):
            fr = open('/proc/sys/fs/file-max', 'r')
            file_max_str = fr.readline()
            fr.close()
            file_max_re = re.search('\d+', file_max_str)
            if file_max_re is not None:
                max_opened_files = file_max_re.group()
        # Maximum number of processes
        if os.access('/proc/sys/kernel/pid_max', os.R_OK):
            fr = open('/proc/sys/kernel/pid_max', 'r')
            pid_max_str = fr.readline()
            fr.close()
            pid_max_re = re.search('\d+', pid_max_str)
            if pid_max_re is not None:
                max_processes = pid_max_re.group()
        # Number of running processes
        Max_run_proc_res = commands.getstatusoutput('ps -eo stat|grep R|wc -l')
        if Max_run_proc_res[0] == 0:
            max_running_processes = int(Max_run_proc_res[1])
        # Number of logged-in users
        who_res = commands.getstatusoutput('who|wc -l')
        if who_res[0] == 0:
            logged_users_num = int(who_res[1])
        # System kernel info
        system_info = 'RHEL' + platform.linux_distribution()[1] + ' ' + platform.uname()[2]
        # Host name
        host_name = socket.gethostname()
        # CPU name, physical count and logical count
        cpu_name_res = commands.getstatusoutput(
            "cat /proc/cpuinfo |grep 'model name'|sort|uniq|awk -F':' '{print $2}'")
        if cpu_name_res[0] == 0:
            cpu_name = ','.join([cpu_name_one.strip() for cpu_name_one in cpu_name_res[1].split('\n')])
        cpu_phy_logical_res = commands.getstatusoutput('lscpu')
        if cpu_phy_logical_res[0] == 0:
            cpu_phy_count = re.search(r'\nSocket\(s\):\s+(\d+)', cpu_phy_logical_res[1]).group(1)
            cpu_logical_count = re.search(r'\nCPU\(s\):\s+(\d+)', cpu_phy_logical_res[1]).group(1)
        # RAID card info (via MegaCli)
        raid_card_name = ''
        raid_card_brand = ''
        raid_card_cache_size = ''
        raid_card_res = commands.getstatusoutput("%s/MegaCli -AdpAllInfo -aALL" % self.curr_file_path)
        if raid_card_res[0] == 0:
            raid_info_re = re.search(r'Product Name\s+:\s*(.*?)\n.*Memory Size\s+:\s*(\d+)',
                                     raid_card_res[1], re.S)
            if raid_info_re is not None:
                raid_card_name = raid_info_re.group(1)
                raid_card_cache_size = raid_info_re.group(2)
                if 'PERC' in raid_card_name:
                    raid_card_brand = 'DELL'
                else:
                    raid_card_brand = 'LSI'

        publish(obj_id, part_id, 'boot_time', boot_time)
        publish(obj_id, part_id, 'system_uptime', system_uptime)
        publish(obj_id, part_id, 'local_time', local_time)
        publish(obj_id, part_id, 'max_opened_files', max_opened_files)
        publish(obj_id, part_id, 'max_processes', max_processes)
        publish(obj_id, part_id, 'max_running_processes', max_running_processes)
        publish(obj_id, part_id, 'logged_users_num', logged_users_num)
        publish(obj_id, part_id, 'system_info', system_info)
        publish(obj_id, part_id, 'host_name', host_name)
        publish(obj_id, part_id, 'cpu_name', cpu_name)
        publish(obj_id, part_id, 'cpu_phy_count', cpu_phy_count)
        publish(obj_id, part_id, 'cpu_logical_count', cpu_logical_count)
        publish(obj_id, part_id, 'raid_card_name', raid_card_name)
        publish(obj_id, part_id, 'raid_card_brand', raid_card_brand)
        publish(obj_id, part_id, 'raid_card_cache_size', raid_card_cache_size)
        insertdb.insertdb(collect_data_list)
    except Exception, ex:  # Python 2 syntax; this module also relies on the Python 2-only `commands` module
        log_format.logger.error('error: get host info failed' + str(ex.args))
        # Ignore
        pass
    return data


def savedata(jsonfile, d):
    with open(jsonfile, 'w') as f:
        json.dump(d, f)


if __name__ == '__main__':
    wait_for_internet(timeout=120)
    datafile = "data.json"
    dateformat = "%Y %m %d %H:%M:%S %Z"
    olddata = {}
    newdata = {}
    olddata = parsedata(datafile)
    api = getTwitterAPIHandle()
    bootup_time = get_localzone().localize(uptime.boottime())
    str_bootup_time = bootup_time.strftime(dateformat)
    newdata = {"boot_time": str_bootup_time}
    # Send a message if the data has changed
    if olddata != newdata:
        savedata(datafile, newdata)
        print("Sending a message")
        # The recipient username is redacted ("******") in the source.
        api.send_direct_message(user="******", text="Booted up at %s" % (str_bootup_time))
    else:
        print("No update. Ignoring")
def main(request):
    dashboard = page("dashboard", request)
    if not request.user.is_authenticated():
        return redirect('/login/')
    else:
        # Create top user list
        TopUsers = {0: {'username': '', 'coffees': 0}}
        week_start = datetime.now() - timedelta(5)
        Users = User.objects.values('id', 'username', 'first_name', 'last_name')
        i = 0
        for user in Users:
            TopUsers.update({i: {
                'username': user['username'],
                'coffees': len(Coffee.objects.filter(
                    datetime__range=[week_start, datetime.now()],
                    user__username=user['username']))
            }})
            i += 1
        # Sort users by coffee count (descending) with a simple swap sort.
        Temp = {}
        for i in xrange(0, len(TopUsers)):
            for j in xrange(i + 1, len(TopUsers)):
                if TopUsers[i]['coffees'] < TopUsers[j]['coffees']:
                    Temp = TopUsers[i]
                    TopUsers[i] = TopUsers[j]
                    TopUsers[j] = Temp
        TopUsersList = ""
        for i in TopUsers:
            if i < 5:
                TopUsersList += "<tr>"
                TopUsersList += "<td>" + TopUsers[i]['username'] + "</td>"
                TopUsersList += "<td>" + str(TopUsers[i]['coffees']) + "</td>"
                TopUsersList += "</tr>"

        # Create coffee consumption chart
        ConsumChart = {
            1: {'month': _("January"), 'small': 0, 'middle': 0, 'long': 0},
            2: {'month': _("February"), 'small': 0, 'middle': 0, 'long': 0},
            3: {'month': _("March"), 'small': 0, 'middle': 0, 'long': 0},
            4: {'month': _("April"), 'small': 0, 'middle': 0, 'long': 0},
            5: {'month': _("May"), 'small': 0, 'middle': 0, 'long': 0},
            6: {'month': _("June"), 'small': 0, 'middle': 0, 'long': 0},
            7: {'month': _("July"), 'small': 0, 'middle': 0, 'long': 0},
            8: {'month': _("August"), 'small': 0, 'middle': 0, 'long': 0},
            9: {'month': _("September"), 'small': 0, 'middle': 0, 'long': 0},
            10: {'month': _("October"), 'small': 0, 'middle': 0, 'long': 0},
            11: {'month': _("November"), 'small': 0, 'middle': 0, 'long': 0},
            12: {'month': _("December"), 'small': 0, 'middle': 0, 'long': 0}}
        for i in ConsumChart:
            start, end = "", ""
            start = datetime(datetime.now().year, i, 1, 0, 0, 0)
            if i == 2:
                try:
                    end = datetime(datetime.now().year, i, 29, 23, 59, 59)
                except ValueError:
                    end = datetime(datetime.now().year, i, 28, 23, 59, 59)
            else:
                try:
                    end = datetime(datetime.now().year, i, 31, 23, 59, 59)
                except ValueError:
                    end = datetime(datetime.now().year, i, 30, 23, 59, 59)
            for value in ConsumChart[i]:
                if not value == 'month':
                    ConsumChart[i][value] = len(Coffee.objects.filter(
                        datetime__range=[start, end], typ__exact=value))
        ConsumChartList = ("['" + _("Month") + "','" + _("Small coffees") + "','"
                           + _("Middle coffees") + "','" + _("Long coffees") + "'],")
        for i in ConsumChart:
            ConsumChartList += "['" + ConsumChart[i]['month'] + "', "
            ConsumChartList += str(ConsumChart[i]['small']) + ","
            ConsumChartList += str(ConsumChart[i]['middle']) + ","
            ConsumChartList += str(ConsumChart[i]['long']) + "],"

        return render(request, 'dashboard.html', {
            'page': dashboard,
            'TopUsersList': TopUsersList,
            'uptime': uptime.boottime(),
            'ConsumChartList': ConsumChartList
        })
def get_boot_time_as_string():
    return str(boottime())
class StatusViewSet(MultipleModelAPIViewSet):
    queryList = [
        (Cpf.objects.filter(blocked=1), CpfStatusSerializer),
        (APIRequestLog.objects.filter(requested_at__gte=boottime()), QueryStatusSerializer),
    ]
    renderer_classes = [ServerStatusJSONRenderer]
def get_boot_unix_time() -> float:
    return uptime.boottime().timestamp()
def check_uptime(self, message):
    """
    uptime: report system uptime
    """
    uptime = dt.now() - boottime()
    hours = uptime.seconds // 3600
    minutes = (uptime.seconds // 60) % 60
    self.reply('Host has been up for {} days, {} hours and {} minutes.'.format(
        uptime.days, hours, minutes))
    PCAN_DICT_STATUS,
)

# Set up logging
log = logging.getLogger("can.pcan")

MIN_PCAN_API_VERSION = version.parse("4.2.0")

try:
    # use the "uptime" library if available
    import uptime

    # boottime() and fromtimestamp() both carry the local timezone offset,
    # so the offset cancels out in the difference.
    if uptime.boottime() is None:
        boottimeEpoch = 0
    else:
        boottimeEpoch = (uptime.boottime() - datetime.fromtimestamp(0)).total_seconds()
except ImportError as error:
    log.warning(
        "uptime library not available, timestamps are relative to boot time and not to Epoch UTC",
        exc_info=True,
    )
    boottimeEpoch = 0

try:
    # Try builtin Python 3 Windows API
    from _overlapped import CreateEvent
    from _winapi import WaitForSingleObject, WAIT_OBJECT_0, INFINITE
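# Hypothetical usage sketch (an assumption, not part of the module above): with
# boottimeEpoch computed as shown, a hardware timestamp expressed in
# microseconds since boot can be shifted to seconds since the Unix epoch.
def boot_relative_to_epoch(micros_since_boot, boot_epoch=0.0):
    """Return an absolute Unix timestamp for a boot-relative timestamp."""
    return boot_epoch + micros_since_boot / 1_000_000.0

# e.g. boot_relative_to_epoch(raw_timestamp_us, boottimeEpoch)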
from .sig import DISCOVERABLE

log = logging.getLogger('synkk:groklog')
LOG_PREFIX = 'SYNKK'

# dmesg time is super annoying.
# My dmesg times (reported as seconds since boot via /proc/uptime, or via --ctime)
# are off by 50 or 60 seconds (depending on the boottime computation used).
# I tried quite a few things, including misc/*.c ...
# ... it took me a while to notice, but all my dmesg times are off, not my
# boottime computations.
# I'm teaching the dmesg log tailer to find the end of the file and just
# disabling all this stuff for now. Leaving it in, since I may circle back to it.
# (I'd rather not introduce a boottime fudge factor...)
BOOTTIME = time.mktime(uptime.boottime().timetuple())

plan_item = namedtuple("plan_item", ['src', 'regex', 'reject', 'cast'])


class GrokLog(object):
    re_plan = (
        # TODO: these plan rules should really come from config
        plan_item(
            src=None,
            reject=True,
            cast=None,
            regex=re.compile(r'^\[(?P<_time>[^\]]+)\]\s+(?P<_log_ent>.+)')),
        plan_item(
            src='_log_ent',
            reject=True,
            cast=None,
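# Hedged sketch (an assumption, not part of the module above): how BOOTTIME
# could be used to turn a dmesg "[seconds since boot]" prefix into wall-clock
# time, subject to the 50-60 second skew described in the comment above.
def dmesg_time_to_local(seconds_since_boot, boot_epoch=BOOTTIME):
    """Return a naive local datetime for a boot-relative dmesg timestamp."""
    from datetime import datetime
    return datetime.fromtimestamp(boot_epoch + float(seconds_since_boot))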
    args = parser.parse_args()
    less_value_timedelta = datetime.timedelta(days=args.ld, hours=args.lh, minutes=args.lm)
    greater_value_timedelta = datetime.timedelta(days=args.gd, hours=args.gh, minutes=args.gm)
    return less_value_timedelta, greater_value_timedelta


less_value_timedelta, greater_value_timedelta = get_values()
less_value = int(less_value_timedelta.total_seconds() / 60)
greater_value = int(greater_value_timedelta.total_seconds() / 60)

now = datetime.datetime.now()
boottime = boottime()
uptime = now - boottime

less_diff = uptime - less_value_timedelta
less_diff_minutes = int(less_diff.total_seconds() / 60)
greater_diff = uptime - greater_value_timedelta
greater_diff_minutes = int(greater_diff.total_seconds() / 60)

if less_diff_minutes < 0:
    print('Uptime of {0} is less than {1} minutes!'.format(uptime, less_value))
    exit(2)
elif greater_diff_minutes > 0:
    print('Uptime of {0} exceeds {1} minutes!'.format(uptime, greater_value))
    exit(2)
else:
def getBootTimeAsQTime():
    return QtCore.QDateTime(boottime()).time()
def uptime_info():
    print('Uptime: %s.' % seconds_to_days(uptime()))
    print('Booted: %s' % boottime().strftime('%c'))
def Update(self):
    self.SetTopicValue(TOPIC, str(uptime.boottime()))
This interface is for windows only, otherwise use socketCAN.
"""

from __future__ import absolute_import, division

import logging

from can import BusABC, Message
from .usb2canabstractionlayer import *

bootTimeEpoch = 0
try:
    import uptime
    import datetime
    bootTimeEpoch = (uptime.boottime() - datetime.datetime.utcfromtimestamp(0)).total_seconds()
except:
    bootTimeEpoch = 0

# Set up logging
log = logging.getLogger('can.usb2can')


def format_connection_string(deviceID, baudrate='500'):
    """setup the string for the device

    config = deviceID + '; ' + baudrate
    """
    return "%s; %s" % (deviceID, baudrate)
Enable basic can over a PCAN USB device.
"""
import logging
logger = logging.getLogger(__name__)

from can.interfaces.PCANBasic import *
from can.bus import BusABC
from can.message import Message

boottimeEpoch = 0
try:
    import uptime
    import datetime
    boottimeEpoch = (uptime.boottime() - datetime.datetime.utcfromtimestamp(0)).total_seconds()
except:
    boottimeEpoch = 0

# Set up logging
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger('can.pcan')


class PcanBus(BusABC):

    def __init__(self, channel, *args, **kwargs):
        """A PCAN USB interface to CAN.

        :param str channel:
            The can interface name. An example would be PCAN_USBBUS1
def savedata(jsonfile, d):
    with open(jsonfile, 'w') as f:
        json.dump(d, f)


if __name__ == '__main__':
    wait_for_internet(timeout=120)
    datafile = "data.json"
    olddata = {}
    newdata = {}
    # print(datetime.fromtimestamp(os.path.getmtime(datafile)))
    # print(uptime.boottime())
    # Consider the file only if it was modified after the last boot
    if os.path.isfile(datafile) and (datetime.fromtimestamp(os.path.getmtime(datafile)) > uptime.boottime()):
        olddata = parsedata(datafile)
    api = getTwitterAPIHandle()
    my_ip = json.load(urllib.urlopen('http://jsonip.com'))['ip']
    newdata = {"external_ip": my_ip}
    # Send a message if the data has changed
    if olddata != newdata:
        savedata(datafile, newdata)
        print("Sending a message")
        # The recipient username is redacted ("******") in the source.
        api.send_direct_message(user="******", text="External IP is %s" % (my_ip))
    else:
        print("No update. Ignoring")
        # print(api.me().name)
def __init__(self, master):
    # self.master is a frame that's a child of master for window padding purposes.
    # Widget grid layouts are children of self.master.
    self.master = Frame(master, padx=10, pady=10)
    self.master.grid()

    # Font and other layout variables.
    # font_label is applied to each label in col 0 (metrics).
    # font_value is applied to each label in col 1 (values).
    self.font_family = "Helvetica"
    self.font_size = "9"
    self.font_label = (self.font_family, self.font_size, "bold")
    self.font_value = (self.font_family, self.font_size)
    self.whitespace_height = 12

    # Change time format for different operating systems.
    if os.name == "nt":
        self.time_format = "%#I:%M:%S %p"
    else:
        self.time_format = "%-I:%M:%S %p"

    # Create widget labels and EXIT button.
    self.gui_widget_metrics = [
        Label(self.master, text="CPU (%):", font=self.font_label),  # CPU
        Label(self.master, text="Memory (%):", font=self.font_label),  # MEMORY
        Label(self.master, text="Uptime:", font=self.font_label),  # UPTIME
        Label(self.master, text="System Clock:", font=self.font_label),  # SYSTEM CLOCK
        Label(self.master, text="Boot Time:", font=self.font_label),  # BOOT TIME
        Frame(self.master, height=self.whitespace_height),  # WHITESPACE
        Button(self.master, text="EXIT", font=self.font_value, bg="white smoke",
               command=master.quit)  # EXIT BUTTON
    ]

    # Create initial widget values and ALWAYS ON TOP button.
    # Initial state for ALWAYS ON TOP button. Defaults to checked.
    self.on_top_check = IntVar(value=1)
    self.gui_widget_values = [
        Label(self.master, text="{}".format(psu.cpu_percent()),
              font=self.font_value),  # CPU
        Label(self.master, text="{}".format(psu.virtual_memory()[2]),
              font=self.font_value),  # MEMORY
        Label(self.master, text=str(datetime.now() - boottime())[:-7],
              font=self.font_value),  # UPTIME
        Label(self.master, text=datetime.now().strftime("{}".format(self.time_format)),
              font=self.font_value),  # SYSTEM CLOCK
        Label(self.master, text="{}".format(
            boottime().strftime("%b %d %Y, {}".format(self.time_format))),
              font=self.font_value),  # BOOT TIME
        Frame(self.master, height=self.whitespace_height),  # WHITESPACE
        Checkbutton(master=self.master, text="Always On Top", font=self.font_value,
                    variable=self.on_top_check,
                    command=partial(self.update_on_top, master))  # ALWAYS ON TOP BUTTON
    ]

    # Loop through widgets and display on grid (col 0 - metrics).
    for index, label in list(enumerate(self.gui_widget_metrics)):
        # if index == EXIT button, center button in cell
        if index == 6:
            label.grid(sticky="nsew", row=index, column=0, padx=(8, 0))
        else:
            label.grid(sticky="w", row=index, column=0, padx=(8, 0), pady=(1, 0))

    # Loop through widgets and display on grid (col 1 - values).
    for index, label in list(enumerate(self.gui_widget_values)):
        # if index == ALWAYS ON TOP checkbox, center checkbox in cell
        if index == 6:
            label.grid(sticky="nsew", row=index, column=1, padx=(16, 8))
        else:
            label.grid(sticky="w", row=index, column=1, padx=(16, 8), pady=(1, 0))

    # Start system metric update loop - calls self.get_metrics().
    master.after(1000, self.get_metrics)