def updateEntry(self, thumbID, thumbFile):
    """Drop a stale thumbnail file and refresh the list entry that used it.

    thumbID   -- identifier used to look up the movie-category entry
    thumbFile -- on-disk thumbnail path; deleted if it exists

    Fixes: redundant `== True` comparison; Python-2-only print statement
    replaced by a form that is valid in both Python 2 and 3 with identical
    output.
    """
    if os_path.exists(thumbFile):
        os_remove(thumbFile)
    idx = self.getMovieCategoryIndexByThumbID(thumbID)
    if idx is not None:
        print("[ZDF Mediathek] updateEntry %s %s %s" % (thumbID, thumbFile, idx))
        self.entry_changed(idx)
def keySave(self):
    """Write this universe's settings file and manage the parallel-mount hook.

    Saves lock/pin/force-reboot values to /universe/.<name>.cfg.  When the
    edited universe is the currently running one, /bin/bh_parallel_mount is
    deleted (force-reboot mode) or created (parallel-mount mode).

    Fix: the original `else:` followed by a lone `if` is flattened to `elif`,
    matching the sibling keySave implementation — behavior is unchanged.
    """
    fname = "/universe/.%s.cfg" % (self.universe)
    out = open(fname, "w")
    line = "universe_lock:" + str(self.universe_lock.value) + "\nuniverse_pin:" + str(self.universe_pin.value) + "\nuniverse_force_reboot:" + str(self.universe_force_reboot.value) + "\n"
    out.write(line)
    out.close()
    current_universe = self.whereIAm()
    if current_universe == self.universe:
        if self.universe_force_reboot.value == True:
            # force-reboot mode: the boot-time mount hook must not exist
            if fileExists("/bin/bh_parallel_mount"):
                os_remove("/bin/bh_parallel_mount")
        elif not fileExists("/bin/bh_parallel_mount"):
            # parallel-mount mode: (re)create the hook that bind-mounts
            # this universe's /etc, /var/lib/opkg and union-mounts /usr
            path = "/universe/" + self.universe
            path1 = path + "/etc"
            path2 = path + "/usr"
            path3 = path + "/var/lib/opkg"
            out = open("/bin/bh_parallel_mount", 'w')
            out.write("mount -o bind %s /etc > /tmp/jump.tmp\n" % (path1))
            out.write("mount -o bind %s /var/lib/opkg > /tmp/jump.tmp\n" % (path3))
            out.write("mount -t unionfs -o dirs=%s:/usr=ro none /usr > /tmp/jump.tmp\n" % (path2))
            out.write("exit 0\n\n")
            out.close()
            system("chmod 0755 /bin/bh_parallel_mount")
    self.close()
def keySave(self):
    """Persist this universe's configuration and maintain the mount hook."""
    settings = open('/universe/.%s.cfg' % self.universe, 'w')
    settings.write('universe_lock:' + str(self.universe_lock.value) + '\nuniverse_pin:' + str(self.universe_pin.value) + '\nuniverse_force_reboot:' + str(self.universe_force_reboot.value) + '\n')
    settings.close()
    if self.whereIAm() == self.universe:
        hook = '/bin/bh_parallel_mount'
        if self.universe_force_reboot.value == True:
            # reboot mode: drop the boot-time mount hook if it exists
            if fileExists(hook):
                os_remove(hook)
        elif not fileExists(hook):
            # parallel mode: write the hook that mounts this universe at boot
            base = '/universe/' + self.universe
            script = open(hook, 'w')
            script.write('mount -o bind %s /etc > /tmp/jump.tmp\n' % (base + '/etc'))
            script.write('mount -o bind %s /var/lib/opkg > /tmp/jump.tmp\n' % (base + '/var/lib/opkg'))
            script.write('mount -t unionfs -o dirs=%s:/usr=ro none /usr > /tmp/jump.tmp\n' % (base + '/usr'))
            script.write('exit 0\n\n')
            script.close()
            system('chmod 0755 /bin/bh_parallel_mount')
    self.close()
def __init__(self, session, title):
    """Screen listing (name, value) pairs parsed from /tmp/cpanel.tmp.

    The temp file alternates value lines and name lines; consumed pairs
    become list entries and the file is deleted afterwards.
    """
    Screen.__init__(self, session)
    self.mytitle = title
    self.flist = []
    pending_value = ''
    expecting_name = False
    if fileExists('/tmp/cpanel.tmp'):
        f = open('/tmp/cpanel.tmp', 'r')
        for raw in f.readlines():
            text = raw.replace('\n', '').strip()
            if not expecting_name:
                # first line of a pair: remember the value
                pending_value = text
                expecting_name = True
            else:
                # second line of a pair: entry is (name, value)
                self.flist.append((text, pending_value))
                expecting_name = False
        f.close()
        os_remove('/tmp/cpanel.tmp')
    self['list'] = List(self.flist)
    self.onShown.append(self.setWindowTitle)
    self['actions'] = ActionMap(['WizardActions', 'ColorActions'], {'ok': self.KeyOk, 'back': self.close})
def callbackLog(self):
    """Delete the install log and close the screen, reporting overall success.

    close(True) when no install/remove failures were counted, else close(False).
    """
    log = '/tmp/ipkinstall.log'
    if os_path.exists(log):
        os_remove(log)
    failures = self.count_failed_install + self.count_failed_remove
    self.close(failures == 0)
def updateList(self):
    """Populate the storage-device list from `blkid` output.

    Only /dev/sd* entries are considered; each yields a display row
    (name, description, icon) and a (description, uuid) config choice.
    """
    self.activityTimer.stop()
    self.list = []
    self.conflist = []
    rc = system('blkid > /tmp/blkid.log')
    f = open('/tmp/blkid.log', 'r')
    for line in f.readlines():
        if '/dev/sd' not in line:
            continue
        fields = line.strip().split()
        node = fields[0]
        # node looks like "/dev/sdXN:" -> strip "/dev/" plus trailing chars
        device = node[5:-2]
        partition = node[5:-1]
        uuid_start = line.find('UUID') + 6
        uuid = line[uuid_start:line.find('"', uuid_start)]
        dtype = self.get_Dtype(device)
        category = dtype[0]
        png = LoadPixmap(dtype[1])
        size = self.get_Dsize(device, partition)
        model = self.get_Dmodel(device)
        mountpoint = self.get_Dpoint(uuid)
        name = '%s: %s' % (category, model)
        self.list.append((name, _(' device: %s size: %s\n mountpoint: %s' % (node, size, mountpoint)), png))
        self.conflist.append(('%s %s %s' % (name, size, partition), uuid))
    # NOTE(review): f is never closed here, matching the original code
    self['list'].list = self.list
    self['lab1'].hide()
    os_remove('/tmp/blkid.log')
def getMemo(self):
    """Sample RAM/swap/total usage percentages via `free` and update gauges."""
    usage = {'Mem:': 0, 'Swap:': 0, 'Total:': 0}
    rc = system("free > /tmp/ninfo.tmp")
    if fileExists("/tmp/ninfo.tmp"):
        f = open("/tmp/ninfo.tmp", 'r')
        for line in f.readlines():
            fields = line.strip().split()
            label = fields[0]
            if label in usage:
                # skip a swap line whose total is 0/1 kB (no swap configured)
                if label == 'Swap:' and int(fields[1]) <= 1:
                    continue
                usage[label] = int((int(fields[2]) * 100) / int(fields[1]))
        f.close()
        os_remove("/tmp/ninfo.tmp")
    ramused = usage['Mem:']
    swapused = usage['Swap:']
    totused = usage['Total:']
    self.smallmontxt += _("Ram in use: ") + str(ramused) + " %\n"
    self.smallmontxt += _("Swap in use: ") + str(swapused) + " %\n"
    # gauges expect a value scaled into the 50..~133 range
    self["ramg"].setValue(int(((ramused * 100) / 120) + 50))
    self["swapg"].setValue(int(((swapused * 100) / 120) + 50))
    self["memtg"].setValue(int(((totused * 100) / 120) + 50))
def KeyOk(self):
    """Run the action bound to the currently selected panel entry."""
    self.sel = self["list"].getCurrent()
    if not self.sel:
        return
    self.sel = self.sel[2]
    choice = self.sel
    if choice == 0:
        self.session.open(Nab_downArea)
    elif choice == 1:
        msg = _("Do you want to update Black Hole image?") + "\n" + _("\nAfter pressing OK, please wait!")
        self.session.openWithCallback(self.runUpgrade, MessageBox, msg)
    elif choice == 2:
        self.checkPanel()
    elif choice == 3:
        self.checkPanel2()
    elif choice == 4:
        self.session.open(Nab_uninstPanel)
    elif choice == 5:
        # fetch the usage statistics into a temp file, then display them
        staturl = "http://www.vuplus-community.net/bhaddons/index.php?op=outmestats2"
        downfile = "/tmp/cpanel.tmp"
        if fileExists(downfile):
            os_remove(downfile)
        self.session.openWithCallback(self.StatsDone, Nab_ConnectPop, staturl, downfile)
    elif choice == 6:
        self.session.open(addonsParentalConfig)
    else:
        nobox = self.session.open(MessageBox, _("Function Not Yet Available"), MessageBox.TYPE_INFO)
        nobox.setTitle(_("Info"))
def updateList(self):
    """Rebuild the plugin Enabled/Disabled config list after refreshing opkg status."""
    self.list = []
    if fileExists("/tmp/bhspeed.tmp"):
        os_remove("/tmp/bhspeed.tmp")
    # collect opkg status for every managed plugin into one scratch file
    for plug in self.pluglist:
        system("opkg status %s >> /tmp/bhspeed.tmp" % (plug[1]))
    for plug in self.pluglist:
        selector = NoSave(ConfigSelection(default="Enabled", choices=[("Enabled", _("Enabled")), ("Disabled", _("Disabled"))]))
        selector.value = "Enabled" if self.checkInst(plug[1]) == True else "Disabled"
        self.list.append(getConfigListEntry(plug[0], selector))
    self["config"].list = self.list
    self["config"].l.setList(self.list)
    self["lab1"].setText(_("Please disable ALL the plugins you don't need to use.\nThis will Speed Up Image Performance."))
def statshow(self):
    """Format the downloaded statistics file and show it in the info view.

    The temp file has a fixed line order; a table maps line index to label,
    and a few indices are skipped entirely (unused payload lines).
    """
    if fileExists("/tmp/cpanel.tmp"):
        strview = _("Black Hole Image Statistics:\n\n_____________________________________\n")
        labels = {
            0: _("Total Connections: \t"),
            1: _("Today Connections: \t"),
            2: _("Available Forums: \t"),
            4: _("Shouts sent by users:\t"),
            7: _("Top downloaded File:\t"),
            8: _("Total Downloads: \t"),
        }
        skipped = (3, 5, 6)
        step = 0
        f = open("/tmp/cpanel.tmp", 'r')
        for line in f.readlines():
            if step in skipped:
                step = step + 1
                continue
            strview += labels.get(step, '')
            strview += line
            step = step + 1
        f.close()
        os_remove("/tmp/cpanel.tmp")
        self["infotext"].setText(strview)
def __init__(self, session, title):
    """Screen listing (name, value) pairs read from /tmp/cpanel.tmp.

    Lines alternate value, name; the pairs populate the list and the temp
    file is removed once parsed.  A trailing unpaired line is ignored.
    """
    Screen.__init__(self, session)
    self.mytitle = title
    self.flist = []
    if fileExists("/tmp/cpanel.tmp"):
        f = open("/tmp/cpanel.tmp", 'r')
        cleaned = [ln.replace('\n', '').strip() for ln in f.readlines()]
        f.close()
        os_remove("/tmp/cpanel.tmp")
        # odd index = name, preceding even index = value
        for i in range(1, len(cleaned), 2):
            self.flist.append((cleaned[i], cleaned[i - 1]))
    self["list"] = List(self.flist)
    self.onShown.append(self.setWindowTitle)
    self["actions"] = ActionMap(["WizardActions", "ColorActions"], {
        "ok": self.KeyOk,
        "back": self.close
    })
def updateVpn(self):
    """Refresh the OpenVPN panel: autostart state and running-daemon state."""
    rc = system('ps > /tmp/nvpn.tmp')
    self['labrun'].hide()
    self['labstop'].hide()
    # autostart: enabled when the default file exists
    if fileExists('/etc/default/openvpn'):
        self['labactive'].setText(_('Active/Autostart enabled'))
        self['key_yellow'].setText(_('Deactivate'))
        self.my_vpn_active = True
    else:
        self['labactive'].setText(_('Inactive'))
        self['key_yellow'].setText(_('Set Active'))
        self.my_vpn_active = False
    # running: any 'openvpn' entry in the ps snapshot
    self.my_vpn_run = False
    if fileExists('/tmp/nvpn.tmp'):
        f = open('/tmp/nvpn.tmp', 'r')
        for line in f.readlines():
            if 'openvpn' in line:
                self.my_vpn_run = True
        f.close()
        os_remove('/tmp/nvpn.tmp')
    if self.my_vpn_run:
        self['labstop'].hide()
        self['labrun'].show()
        self['key_red'].setText(_('Restart'))
    else:
        self['labstop'].show()
        self['labrun'].hide()
        self['key_red'].setText(_('Start'))
def deleteConfirmed(self, confirmed):
    """Delete self.delname once the user confirmed; report failure in a MessageBox.

    Fixes: Python-2-only `except OSError,e` syntax replaced with
    `except OSError as e` (valid in both Python 2.6+ and 3); the diagnostic
    print is rewritten in a form with identical output in both versions.
    """
    if not confirmed:
        return
    try:
        os_remove(self.delname)
    except OSError as e:
        print("delete failed: " + str(e))
        self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
def release(self):
    """Drop this file lock: verify ownership, unlink the lock file, clear the owner.

    Raises a generic Exception when the lock is not held, or when the caller
    is not the thread that acquired it.
    """
    lock_path = self._path
    if not os_path_isfile(lock_path):
        raise Exception("Can't release unacquired Lock!")
    if self._owner != current_thread():
        raise Exception("Can't release Lock, not the right Thread!")
    os_remove(lock_path)
    self._owner = None
def removeLangClosed(self, status):
    """Console callback: purge files of the language just removed, then chain to the next.

    Deletes the .mo file, the LC_MESSAGES dir and the language folder, then
    either launches `opkg remove` for the next queued language (re-registering
    this method as the close callback) or finishes the removal run.
    """
    print('[Language manager] status:%s' % status)
    base = '/usr/share/enigma2/po/' + self.removelist[self.currentIndex]
    mo_file = base + '/LC_MESSAGES/enigma2.mo'
    messages_dir = base + '/LC_MESSAGES/'
    if os_path.exists(mo_file):
        os_remove(mo_file)
    if os_path.exists(messages_dir):
        removeDir(messages_dir)
    if os_path.exists(base):
        removeDir(base)
    self.createMenu()
    self.currentIndex = self.currentIndex + 1
    if self.currentIndex >= len(self.removelist):
        # queue exhausted: reset state and redraw
        self.removelist = []
        self.removing = False
        self.createMenu()
        return
    lang = self.removelist[self.currentIndex]
    if lang == 'pt_BR':
        # opkg package name deviates from the locale folder name
        lang = 'pt-br'
    self.container = eConsoleAppContainer()
    self.container.appClosed.append(self.removeLangClosed)
    self.container.execute('opkg remove enigma2-language-' + lang)
def setPixmapCB(self, picInfo=None):
    """Picload callback: optionally drop the cached tile file, then show the pixmap."""
    keep_cache = config.plugins.GoogleMaps.cache_enabled.value is True
    if os_isfile(self.tmpfile) and not keep_cache:
        os_remove(self.tmpfile)
    ptr = self.picload.getData()
    if ptr and self.instance:
        self.instance.setPixmap(ptr)
def setPixmapCB(self, picInfo=None):
    """Picload callback: display the decoded pixmap on this widget's instance.

    Fix: the original contained `if os_isfile(self.tmpfile): if False: os_remove(...)`
    — a permanently disabled cache-cleanup branch.  The dead branch and its
    now-pointless os_isfile probe are removed; behavior is unchanged.
    """
    ptr = self.picload.getData()
    if ptr and self.instance:
        self.instance.setPixmap(ptr.__deref__())
def onLoadFailed(self, error):
    """Download-failure callback: decode the fallback image and drop the tmp file."""
    print("WebPixmap:onLoadFAILED %s" % error)
    if self.default and self.instance:
        # fall back to the configured default ("404") picture
        print("showing 404 %s" % self.default)
        self.picload.startDecode(self.default)
    if os_isfile(self.tmpfile):
        os_remove(self.tmpfile)
def getHddtemp(self):
    """Query hddtemp for the detected disk; update the gauge and monitor text."""
    temperature = "N/A"
    temperc = 0
    hdd_dev = BhU_find_hdd()
    if hdd_dev:
        rc = system("hddtemp -w " + "/dev/" + hdd_dev + " > /tmp/ninfo.tmp")
        if fileExists("/tmp/ninfo.tmp"):
            f = open("/tmp/ninfo.tmp", 'r')
            for line in f.readlines():
                if 'WARNING' in line:
                    continue
                # output shape: "/dev/sdX: MODEL: 38 C" -> third colon field
                temperature = line.strip().split(':')[2].strip()
                temperature = temperature[0:temperature.find(' ')]
                if temperature.isdigit():
                    temperc = int(temperature)
                else:
                    temperature = "N/A"
            f.close()
            os_remove("/tmp/ninfo.tmp")
    # gauge is offset by 64 so 0 degrees maps mid-scale
    self["hddtempg"].setValue(temperc + 64)
    self.smallmontxt += "HDD temp: " + temperature + " C"
def updateList(self):
    """Rebuild the device list from `blkid` output.

    Fixes: blkid also reports non-disk nodes (mtdblock, loop, ...) whose
    names break the fixed `/dev/sdXN` slicing below — such lines are now
    skipped, exactly as the sibling updateList implementation already does.
    The log file handle is also closed before the file is removed.
    """
    self.activityTimer.stop()
    self.list = []
    self.conflist = []
    rc = system("blkid > /tmp/blkid.log")
    f = open("/tmp/blkid.log", 'r')
    for line in f.readlines():
        if line.find('/dev/sd') == -1:
            continue
        parts = line.strip().split()
        # "/dev/sdXN:" -> device "sdX", partition "sdXN"
        device = parts[0][5:-2]
        partition = parts[0][5:-1]
        pos = line.find("UUID") + 6
        end = line.find('"', pos)
        uuid = line[pos:end]
        dtype = self.get_Dtype(device)
        category = dtype[0]
        png = LoadPixmap(dtype[1])
        size = self.get_Dsize(device, partition)
        model = self.get_Dmodel(device)
        mountpoint = self.get_Dpoint(uuid)
        name = "%s: %s" % (category, model)
        description = " Dispositivo: %s Capacidad: %s\n Punto Montaje: %s" % (parts[0], size, mountpoint)
        self.list.append((name, description, png))
        description = "%s %s %s" % (name, size, partition)
        self.conflist.append((description, uuid))
    f.close()
    self["list"].list = self.list
    self["lab1"].hide()
    os_remove("/tmp/blkid.log")
def updateVpn(self):
    """Refresh the OpenVPN panel labels: autostart flag and daemon state."""
    rc = system("ps > /tmp/nvpn.tmp")
    self["labrun"].hide()
    self["labstop"].hide()
    self["labactive"].setText(_("Inactive"))
    self["key_yellow"].setText(_("Set Active"))
    self.my_vpn_active = False
    self.my_vpn_run = False
    if fileExists("/etc/rc3.d/S40openvpn"):
        # init-script symlink present -> autostart enabled
        self["labactive"].setText(_("Active/Autostart enabled"))
        self["key_yellow"].setText(_("Deactivate"))
        self.my_vpn_active = True
    if fileExists("/tmp/nvpn.tmp"):
        f = open("/tmp/nvpn.tmp", 'r')
        self.my_vpn_run = any('openvpn' in ps_line for ps_line in f.readlines())
        f.close()
        os_remove("/tmp/nvpn.tmp")
    if self.my_vpn_run:
        self["labstop"].hide()
        self["labrun"].show()
        self["key_red"].setText(_("Restart"))
    else:
        self["labstop"].show()
        self["labrun"].hide()
        self["key_red"].setText(_("Start"))
def remove(self, path):
    """Delete *path* (after resolution): whole tree for a directory, unlink for a file."""
    target = self._resolve_path(path)
    if not isdir(target):
        os_remove(target)
    else:
        # directory: remove it together with all of its contents
        rmtree(target)
def test_all_imports_pyx():
    """ Tests: test_all_imports_pyx: for rebuild, syntax correctness and internal imports """
    print('::: TEST: test_all_imports_pyx()')
    files_to_remove = []
    dirs_to_remove = []
    pyx_paths = []
    for root, dirnames, filenames in walk(ROOT_PACKAGE_PATH):
        pyx_paths.extend(glob(root + '/*.pyx'))
    for pyx_path in pyx_paths:
        module_name = path_splitext(path_basename(pyx_path))[0]
        # force a rebuild so syntax errors surface every run
        ext_path, c_file_path, build_dir = build_cython_extension(pyx_path, cython_force_rebuild=True)
        # importing the built extension exercises its internal imports
        ExtensionFileLoader(module_name, ext_path).load_module(module_name)
        files_to_remove.append(c_file_path)
        dirs_to_remove.append(build_dir)
    # Cleanup
    try:
        for file_ in files_to_remove:
            if path_exists(file_):
                os_remove(file_)
        for dir_ in dirs_to_remove:
            if path_exists(dir_):
                rmtree(dir_)
    except Exception as err:
        raise Exception('test_all_imports_pyx', 'Could not cython_clean_up: Exception: <{}>'.format(err))
def getHddtemp(self):
    """Read the disk temperature via hddtemp and push it to gauge and monitor text."""
    temperature = 'N/A'
    temperc = 0
    hdd_dev = BhU_find_hdd()
    hddloc = '/dev/' + hdd_dev
    if hdd_dev:
        rc = system('hddtemp -w ' + hddloc + ' > /tmp/ninfo.tmp')
        if fileExists('/tmp/ninfo.tmp'):
            f = open('/tmp/ninfo.tmp', 'r')
            for line in f.readlines():
                if line.find('WARNING') != -1:
                    continue
                # "/dev/sdX: MODEL: 38 C" -> take the number before the space
                raw = line.strip().split(':')[2].strip()
                temperature = raw[:raw.find(' ')]
                if temperature.isdigit():
                    temperc = int(temperature)
                else:
                    temperature = 'N/A'
            f.close()
            os_remove('/tmp/ninfo.tmp')
    self['hddtempg'].setValue(temperc + 64)
    self.smallmontxt += 'HDD temp: ' + temperature + ' C'
def tryNextPackage(self):
    """Advance the install/remove pipeline by one step.

    While packages remain in installList, downloads the next one
    asynchronously (responseCompleted / responseFailed re-enter this
    method).  When downloads are done, switches to removals; when both
    queues are drained, shows the summary or auto-closes.
    """
    if self.currentIndex < len(self.installList):
        # --- next download: set up UI, target path and async transfer ---
        self.fileextension = self.getFileType(self.installList[self.currentIndex])
        pkgname = str(os_path.basename(self.installList[self.currentIndex]))
        self.currentpkgname = pkgname.split('_')[0]
        self['package'].setText(self.currentpkgname)
        self.setTitle(_('Connecting') + '...')
        self['status'].setText(_('Connecting to server....'))
        self.target = '/tmp/' + self.installNameList[self.currentIndex] + self.fileextension
        print '[TS multInstaller] Downloading %s to %s' % (self.installList[self.currentIndex], self.target)
        self.webfile = self.installList[self.currentIndex].strip()
        self.downloader = downloadWithProgress(self.webfile, self.target)
        self.downloader.addProgress(self.progress)
        self.downloader.start().addCallback(self.responseCompleted).addErrback(self.responseFailed)
    elif len(self.removeList) > 0:
        # downloads done: start the removal queue from the beginning
        self.currentIndex = 0
        self.removeIpk()
    else:
        # both queues drained: summary title depends on what was done
        if not len(self.removeList) == 0:
            self.setTitle(_('Install') + ' & ' + _('Remove'))
        else:
            self.setTitle(_('Install'))
        if not self.autoclose:
            self['package'].setText(_('Terminate'))
            self['status'].setText(_('%d package(s) installed, %d package(s) removed, %d package(s) failed,\n press ok to see log or cancel to exit.') % (self.count_success, self.count_removed, self.count_failed_install + self.count_failed_remove))
        else:
            # silent mode: drop the log and close immediately
            if os_path.exists('/tmp/ipkinstall.log'):
                os_remove('/tmp/ipkinstall.log')
            self.close(False)
def _cleanedUp(self):
    """Best-effort removal of the buffering file and the gstreamer demux flag file."""
    if fileExists(self.filePath):
        try:
            os_remove(self.filePath)
        except:
            printDBG('Problem with removing old buffering file')
    flag_file = self.GST_FLV_DEMUX_IS_DEMUXING_INFINITE_FILE
    if fileExists(flag_file):
        try:
            os_remove(flag_file)
        except:
            printDBG('Problem with removing gstreamer flag file [%s]' % flag_file)
def updateInfo(self):
    """Assemble box/memory/storage/software info into one text blob for 'lab1'.

    Sources: /proc/stb/info/boxtype, /proc/meminfo, `df -h` output parsed
    positionally, and the `about` helpers for version strings.
    """
    rc = system("df -h > /tmp/syinfo.tmp")
    text = _("BOX\n") + _("Brand:") + "\tGOLDEN MEDIA\n"
    f = open("/proc/stb/info/boxtype", 'r')
    text += _("Model:\t") + f.readline()
    f.close()
    # f = open("/proc/stb/info/chipset",'r')
    # text += _("Chipset:\t") + f.readline() +"\n"
    # f.close()
    text += _("MEMORY\n")
    memTotal = memFree = swapTotal = swapFree = 0
    for line in open("/proc/meminfo", 'r'):
        parts = line.split(':')
        key = parts[0].strip()
        if key == "MemTotal":
            memTotal = parts[1].strip()
        elif key in ("MemFree", "Buffers", "Cached"):
            # "free" memory = MemFree + Buffers + Cached (kB values summed)
            memFree += int(parts[1].strip().split(' ', 1)[0])
        elif key == "SwapTotal":
            swapTotal = parts[1].strip()
        elif key == "SwapFree":
            swapFree = parts[1].strip()
    text += _("Total memory:") + "\t%s\n" % memTotal
    text += _("Free memory:") + "\t%s kB\n" % memFree
    text += _("Swap total:") + "\t%s \n" % swapTotal
    text += _("Swap free:") + "\t%s \n" % swapFree
    text += "\n" + _("STORAGE") + "\n"
    f = open("/tmp/syinfo.tmp", 'r')
    # first df line: header; second: root/flash; later lines: /media mounts
    line = f.readline()
    parts = line.split()
    text += parts[0] + "\t" + parts[1].strip() + " " + parts[2].strip() + " " + parts[3].strip() + " " + parts[4] + "\n"
    line = f.readline()
    parts = line.split()
    text += _("Flash") + "\t" + parts[1].strip() + " " + parts[2].strip() + " " + parts[3].strip() + " " + parts[4] + "\n"
    for line in f.readlines():
        if line.find('/media/') != -1:
            line = line.replace('/media/', ' ')
            parts = line.split()
            if len(parts) == 6:
                text += parts[5] + "\t" + parts[1].strip() + " " + parts[2].strip() + " " + parts[3].strip() + " " + parts[4] + "\n"
    f.close()
    os_remove("/tmp/syinfo.tmp")
    text += "\n" + _("SOFTWARE") + "\n"
    # f = open("/etc/bhversion",'r')
    # text += "Firmware v.:\t" + f.readline()
    # f.close()
    text += "Enigma2 v.: \t" + about.getEnigmaVersionString() + "\n"
    text += "Kernel v.: \t" + about.getKernelVersionString() + "\n"
    text += "\n" + _("FIRMWARE") + "\n"
    text += "Image v.: \t" + about.getImageTypeString() + "\n"
    # text += "OpenGl v.: \t" + _("GLS 2.0 ") + "\n"
    text += "OE CORE v.: \t" + _("OPENPLI 5.0 ") + "\n"
    text += "CODER N.: \t" + _("SODO ") + "\n"
    text += "BETA TESTER N.1: \t" + _("YOUSSEF EL-ARABI ") + "\n"
    text += "BETA TESTER N.2: \t" + _("POP-AZERTY ") + "\n"
    self["lab1"].setText(text)
def run(self):
    """Clean build artifacts from the package tree.

    Always removes bytecode/backup files (`.pyc/.pyo/.pyd/.o/.orig`),
    `__pycache__` dirs, `.coverage` and `MANIFEST`.  With `self.all` set it
    additionally removes `build/dist/cover`, `*_pyxbld`/`*.egg-info` dirs,
    compiled `.so`/`.c` files and cython annotate HTML next to each `.pyx`.
    All deletions are collected first, then applied best-effort.
    """
    need_normal_clean = True
    exclude_files = []
    remove_files = []
    remove_dirs = []
    # remove also: DIRS: `build, dist, cover, *._pyxbld, *.egg-info`
    # and FILES in MAIN_PACKAGE_PATH: `*.so, *.c` and cython annotate html
    if self.all:
        need_normal_clean = True
        for dir_ in {'build', 'dist', 'cover'}:
            dir_path = path_join(ROOT_PACKAGE_PATH, dir_)
            if path_exists(dir_path):
                remove_dirs.append(dir_path)
        for root, dirs, files in os_walk(ROOT_PACKAGE_PATH):
            for dir_ in dirs:
                if '_pyxbld' in dir_ or 'egg-info' in dir_:
                    remove_dirs.append(path_join(root, dir_))
        # remove FILES in MAIN_PACKAGE_PATH: `*.so, *.c` and cython annotate html
        for root, dirs, files in os_walk(MAIN_PACKAGE_PATH):
            for file_ in files:
                if file_ not in exclude_files:
                    if path_splitext(file_)[-1] in {'.so', '.c'}:
                        remove_files.append(path_join(root, file_))
                    tmp_name, tmp_ext = path_splitext(file_)
                    if tmp_ext == '.pyx':
                        # Check if we have a html with the same name
                        check_html_path = path_join(root, tmp_name + '.html')
                        if isfile(check_html_path):
                            remove_files.append(check_html_path)
    # do the general clean
    if need_normal_clean:
        for file_ in {'.coverage', 'MANIFEST'}:
            if path_exists(file_):
                remove_files.append(file_)
        for root, dirs, files in os_walk(ROOT_PACKAGE_PATH):
            for file_ in files:
                if file_ not in exclude_files:
                    if path_splitext(file_)[-1] in {'.pyc', '.pyo', '.pyd', '.o', '.orig'}:
                        remove_files.append(path_join(root, file_))
            for dir_ in dirs:
                if '__pycache__' in dir_:
                    remove_dirs.append(path_join(root, dir_))
    # REMOVE ALL SELECTED
    # noinspection PyBroadException
    try:
        for file_ in remove_files:
            if path_exists(file_):
                os_remove(file_)
        for dir_ in remove_dirs:
            if path_exists(dir_):
                rmtree(dir_)
    except Exception:
        pass
def __init__(self, session, myidf):
    """Addon-details screen: builds its text view from /tmp/cpanel.tmp.

    The temp file has a fixed line order consumed by a step machine:
    0 = file name, 1 = title, 2/3 = author, 4 = size, 5 = download count,
    6 = category (kept in self.tcat), 7 = description header, 8+ = free text.
    The file is deleted after parsing.
    """
    Screen.__init__(self, session)
    self["key_green"] = Label(_('Download'))
    self["key_yellow"] = Label(_('Preview'))
    self["infotext"] = ScrollLabel()
    self.tcat = ""
    step = 0
    strview = "TITLE: "
    if fileExists("/tmp/cpanel.tmp"):
        f = open("/tmp/cpanel.tmp", 'r')
        for line in f.readlines():
            line = self.cleanhtml(line)
            line = line.replace('\n', '')
            line = line.strip()
            if step == 0:
                # line 0: remote file name (not displayed)
                self.fileN = line
                step = 1
            elif step == 1:
                strview += line
                strview += "\n\n"
                step = 2
            elif step == 2:
                strview += "By: "
                strview += line
                step = 3
            elif step == 3:
                strview += " " + line + "\n\n"
                step = 4
            elif step == 4:
                strview += "Size: " + line
                step = 5
            elif step == 5:
                strview += " Downloads: " + line + "\n"
                step = 6
            elif step == 6:
                # line 6: category, stored for later use
                self.tcat = line
                step = 7
            elif step == 7:
                strview += "---------------------------------------------------------------------\n" + line + "\n"
                step = 8
            else:
                strview += line + "\n"
        f.close()
        os_remove("/tmp/cpanel.tmp")
    self["infotext"].setText(strview)
    self["actions"] = ActionMap(["WizardActions", "ColorActions", "DirectionActions"], {
        "ok": self.KeyGreend,
        "back": self.close,
        "green": self.KeyGreend,
        "yellow": self.KeyYellowd,
        "up": self["infotext"].pageUp,
        "down": self["infotext"].pageDown
    })
def __deleteService(self, protocol):
    """Remove the avahi .service file for *protocol*; return True if one was deleted."""
    service_file = "%s%s.service" % (self.AVAHI_SERVICES_DIR, protocol)
    if not path.exists(service_file):
        return False
    os_remove(service_file)
    return True
def KeyGreen(self):
    """Show disk-usage details for flash, CF, USB and HDD plus a grand total.

    Parses a `df` snapshot; each mountpoint section records the usage
    percentage string, renders two text lines, then re-reads total/used as
    kB integers for the summary.  Values above 1e6 kB are shown in Gb
    (and above 1e9 in 'Tera' for the totals).
    NOTE(review): indentation reconstructed from a collapsed source; the
    statement order is preserved exactly.
    """
    if self.moni_state == 0:
        self.moniShow()
    rc = system('df > /tmp/ninfo.tmp')
    mytext = ''
    flused = 0
    fltot = 0
    flperc = 0
    cfused = 0
    cftot = 0
    cfperc = 0
    usused = 0
    ustot = 0
    usperc = 0
    hdused = 0
    hdtot = 0
    hdperc = 0
    # with extended flash the root image lives on /usr instead of /
    mountflash = '/'
    if self.extendedFlash == True:
        mountflash = '/usr'
    if fileExists('/tmp/ninfo.tmp'):
        f = open('/tmp/ninfo.tmp', 'r')
        for line in f.readlines():
            meas = 'M'
            line = line.replace('part1', ' ')
            parts = line.strip().split()
            totsp = len(parts) - 1
            if parts[totsp] == mountflash:
                if flused:
                    # already handled a flash line; skip duplicates
                    continue
                flused = parts[totsp - 1]
                flperc = int(flused.replace('%', ''))
                fltot = int(parts[totsp - 4])
                if fltot > 1000000:
                    fltot = fltot / 1000
                    meas = 'Gb'
                capacity = '%d.%03d ' % (fltot / 1000, fltot % 1000)
                mytext += _('FLASH: ') + capacity + meas + _(' in use: ') + flused + '\n'
                mytext += _('Total: ') + parts[totsp - 4] + _(' Used: ') + parts[totsp - 3] + _(' Free: ') + parts[totsp - 2] + '\n\n'
                # re-read as integers for the summary arithmetic below
                fltot = int(parts[totsp - 4])
                flused = int(parts[totsp - 3])
            if parts[totsp] == '/media/cf':
                if cfused:
                    continue
                cfused = parts[totsp - 1]
                cfperc = int(cfused.replace('%', ''))
                cftot = int(parts[totsp - 4])
                if cftot > 1000000:
                    cftot = cftot / 1000
                    meas = 'Gb'
                capacity = '%d.%03d ' % (cftot / 1000, cftot % 1000)
                mytext += 'CF: ' + capacity + meas + _(' in use: ') + cfused + '\n'
                mytext += _('Total: ') + parts[totsp - 4] + _(' Used: ') + parts[totsp - 3] + _(' Free: ') + parts[totsp - 2] + '\n\n'
                cftot = int(parts[totsp - 4])
                cfused = int(parts[totsp - 3])
            if parts[totsp] == '/media/usb':
                if usused:
                    continue
                usused = parts[totsp - 1]
                usperc = int(usused.replace('%', ''))
                ustot = int(parts[totsp - 4])
                if ustot > 1000000:
                    ustot = ustot / 1000
                    meas = 'Gb'
                capacity = '%d.%03d ' % (ustot / 1000, ustot % 1000)
                mytext += _('USB: ') + capacity + meas + _(' in use: ') + usused + '\n'
                mytext += _('Total: ') + parts[totsp - 4] + _(' Used: ') + parts[totsp - 3] + _(' Free: ') + parts[totsp - 2] + '\n\n'
                ustot = int(parts[totsp - 4])
                usused = int(parts[totsp - 3])
            if parts[totsp] == '/media/hdd':
                if hdused:
                    continue
                # HDD percentage may be non-numeric for an unformatted disk
                strview = parts[totsp - 1].replace('%', '')
                if strview.isdigit():
                    hdused = parts[totsp - 1]
                    hdperc = int(hdused.replace('%', ''))
                    hdtot = int(parts[totsp - 4])
                    if hdtot > 1000000:
                        hdtot = hdtot / 1000
                        meas = 'Gb'
                    capacity = '%d.%03d ' % (hdtot / 1000, hdtot % 1000)
                    mytext += _('HDD: ') + capacity + meas + _(' in use: ') + hdused + '\n'
                    mytext += _('Total: ') + parts[totsp - 4] + _(' Used: ') + parts[totsp - 3] + _(' Free: ') + parts[totsp - 2] + '\n\n'
                    hdtot = int(parts[totsp - 4])
                    hdused = int(parts[totsp - 3])
        f.close()
        os_remove('/tmp/ninfo.tmp')
        # ---- grand total across all detected storage ----
        meas = 'M'
        ftot = fltot + cftot + ustot + hdtot
        fused = int(flused) + int(cfused) + int(usused) + int(hdused)
        ffree = ftot - fused
        fperc = 0
        if ftot > 100:
            fperc = fused * 100 / ftot
        if ftot > 1000000:
            ftot = ftot / 1000
            meas = 'Gb'
        if ftot > 1000000000:
            ftot = ftot / 1000000
            meas = 'Tera'
        ftot = '%d.%03d ' % (ftot / 1000, ftot % 1000)
        ftot += meas
        meas = 'M'
        if fused > 1000000:
            fused = fused / 1000
            meas = 'Gb'
        if fused > 1000000000:
            fused = fused / 1000000
            meas = 'Tera'
        fused = '%d.%03d ' % (fused / 1000, fused % 1000)
        fused += meas
        meas = 'M'
        if ffree > 1000000:
            ffree = ffree / 1000
            meas = 'Gb'
        if ffree > 1000000000:
            ffree = ffree / 1000000
            meas = 'Tera'
        ffree = '%d.%03d ' % (ffree / 1000, ffree % 1000)
        ffree += meas
        mytext += _('Total Space: ') + ftot + _(' in use: ') + str(fperc) + '% \n'
        mytext += _('Total: ') + ftot + _(' Used: ') + fused + _(' Free: ') + ffree
    self['moni2'].setText(mytext)
def make_raw_temp_file(*args, **kwargs):
    """Generator yielding (fd, path) for a fresh temp file; unlinks it on finalize.

    Arguments are forwarded to tempfile.mkstemp.  The single yield hands out
    the raw OS file descriptor and path; when the generator is closed or
    exhausted, the finally clause removes the file (the caller is responsible
    for closing the descriptor).
    """
    fd, tmp_path = mkstemp(*args, **kwargs)
    try:
        yield fd, tmp_path
    finally:
        os_remove(tmp_path)
def run(self):
    """
    Called when process initializes.

    Main roomba loop: after taking over log management from the init
    script, repeatedly (1) verifies Redis connectivity, (2) spawns vacuum
    worker processes for the mini and full namespaces, (3) gives them up to
    ROOMBA_TIMEOUT seconds before terminating stragglers, and (4) sleeps so
    a full cycle runs roughly once per minute.
    """
    # Log management to prevent overwriting
    # Allow the bin/<skyline_app>.d to manage the log
    if os.path.isfile(skyline_app_logwait):
        try:
            os_remove(skyline_app_logwait)
        except OSError:
            logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
            pass
    now = time()
    log_wait_for = now + 5
    while now < log_wait_for:
        if os.path.isfile(skyline_app_loglock):
            sleep(.1)
            now = time()
        else:
            now = log_wait_for + 1
    logger.info('starting %s run' % skyline_app)
    if os.path.isfile(skyline_app_loglock):
        logger.error(
            'error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
        try:
            os_remove(skyline_app_loglock)
            logger.info('log lock file removed')
        except OSError:
            logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
            pass
    else:
        logger.info('bin/%s.d log management done' % skyline_app)
    logger.info('%s :: started roomba' % skyline_app)
    while 1:
        now = time()
        # Make sure Redis is up
        try:
            self.redis_conn.ping()
        except:
            logger.error(
                '%s :: roomba can\'t connect to redis at socket path %s' % (skyline_app, settings.REDIS_SOCKET_PATH))
            sleep(10)
            # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
            # @modified 20191115 - Bug #3266: py3 Redis binary objects not strings
            # Branch #3262: py3
            if settings.REDIS_PASSWORD:
                self.redis_conn = StrictRedis(
                    password=settings.REDIS_PASSWORD,
                    unix_socket_path=settings.REDIS_SOCKET_PATH)
            else:
                self.redis_conn = StrictRedis(
                    unix_socket_path=settings.REDIS_SOCKET_PATH)
            # @added 20191115 - Bug #3266: py3 Redis binary objects not strings
            # Branch #3262: py3
            self.redis_conn = get_redis_conn(skyline_app)
            self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
            continue
        # Spawn processes
        pids = []
        for i in range(1, settings.ROOMBA_PROCESSES + 1):
            if not self.skip_mini:
                logger.info(
                    '%s :: starting vacuum process on mini namespace' % skyline_app)
                p = Process(target=self.vacuum, args=(i, settings.MINI_NAMESPACE, settings.MINI_DURATION + settings.ROOMBA_GRACE_TIME))
                pids.append(p)
                p.start()
            logger.info('%s :: starting vacuum process' % skyline_app)
            p = Process(
                target=self.vacuum, args=(i, settings.FULL_NAMESPACE, settings.FULL_DURATION + settings.ROOMBA_GRACE_TIME))
            pids.append(p)
            p.start()
        # Send wait signal to zombie processes
        # for p in pids:
        #     p.join()
        # deroomba - kill any lingering vacuum processes
        # Changed to manage Roomba processes as edge cases related to I/O
        # wait have been experienced that resulted in Roomba stalling so a
        # ROOMBA_TIMEOUT setting was added and here we use the pattern
        # described by http://stackoverflow.com/users/2073595/dano at
        # http://stackoverflow.com/a/26064238 to monitor and kill any
        # stalled processes rather than using p.join(TIMEOUT) - 20160505
        # @earthgecko ref 1342
        logger.info('%s :: allowing vacuum process/es %s seconds to run' % (skyline_app, str(settings.ROOMBA_TIMEOUT)))
        start = time()
        while time() - start <= settings.ROOMBA_TIMEOUT:
            if any(p.is_alive() for p in pids):
                # Just to avoid hogging the CPU
                sleep(.1)
            else:
                # All the processes are done, break now.
                time_to_run = time() - start
                logger.info('%s :: vacuum processes completed in %.2f' % (skyline_app, time_to_run))
                break
        else:
            # We only enter this if we didn't 'break' above.
            logger.info('%s :: timed out, killing all Roomba processes' % (skyline_app))
            for p in pids:
                p.terminate()
                p.join()
        # sleeping in the main process is more CPU efficient than sleeping
        # in the vacuum def also roomba is quite CPU intensive so we only
        # what to run roomba once every minute
        process_runtime = time() - now
        roomba_optimum_run_duration = 60
        if process_runtime < roomba_optimum_run_duration:
            sleep_for = (roomba_optimum_run_duration - process_runtime)
            logger.info('%s :: sleeping %.2f for due to low run time' % (skyline_app, sleep_for))
            sleep(sleep_for)
def removeFiles(self, targetdir):
    """Recursively delete every regular file below targetdir (directories are kept)."""
    for dirpath, dirnames, filenames in os_walk(targetdir):
        for filename in filenames:
            os_remove(os_path.join(dirpath, filename))
def jumP(self):
    """Prepare switching ('jumping') to another universe.

    For a non-default destination, clones /etc, /var/lib/opkg and a minimal
    /usr skeleton into /universe/<dest> on first use.  Then rewrites
    /bin/bh_parallel_mount (boot-time mount hook, only for parallel-mount
    universes) and generates /bin/bh_jump, a shell script that remounts the
    target universe live via telinit 4/3.
    NOTE(review): indentation reconstructed from a collapsed source; the
    statement order is preserved exactly.
    """
    path = '/universe/' + self.destination
    path1 = path + '/etc'
    path2 = path + '/usr'
    path3 = path + '/var/lib/opkg'
    pathspinorig = '/usr/share/spinners/' + self.destination + '/*'
    pathspindest = path2 + '/share/enigma2/skin_default/spinner/'
    if self.destination != 'Black Hole':
        # first jump into this universe: create and seed its directory tree
        if not pathExists(path):
            createDir(path)
        if not pathExists(path1):
            createDir(path1)
            cmd = 'cp -r /etc %s' % path
            system(cmd)
        if not pathExists(path3):
            pathtmp = path + '/var'
            createDir(pathtmp)
            pathtmp = pathtmp + '/lib'
            createDir(pathtmp)
            cmd = 'cp -r /var/lib/opkg %s/var/lib' % path
            system(cmd)
        if not pathExists(path2):
            # build the /usr skeleton and copy the universe's spinner
            createDir(path2)
            pathtmp = path2 + '/share'
            createDir(pathtmp)
            pathtmp = pathtmp + '/enigma2'
            createDir(pathtmp)
            pathtmp = pathtmp + '/skin_default'
            createDir(pathtmp)
            pathtmp = pathtmp + '/spinner'
            createDir(pathtmp)
            cmd = 'cp -f %s %s' % (pathspinorig, pathspindest)
            system(cmd)
    # always start from a clean boot hook
    if fileExists('/bin/bh_parallel_mount'):
        os_remove('/bin/bh_parallel_mount')
    if self.destination != 'Black Hole':
        if self.destination_force_reboot == 'False':
            # parallel-mount universe: recreate the boot-time mount hook
            out = open('/bin/bh_parallel_mount', 'w')
            line = 'mount -o bind %s /etc > /tmp/jump.tmp\n' % path1
            out.write(line)
            line = 'mount -o bind %s /var/lib/opkg > /tmp/jump.tmp\n' % path3
            out.write(line)
            line = 'mount -t unionfs -o dirs=%s:/usr=ro none /usr > /tmp/jump.tmp\n' % path2
            out.write(line)
            out.write('exit 0\n\n')
            out.close()
            system('chmod 0755 /bin/bh_parallel_mount')
    # generate the live-switch script: stop E2, swap mounts, restart E2
    out = open('/bin/bh_jump', 'w')
    out.write('#!/bin/sh\n\n')
    out.write('telinit 4\n')
    if self.current_universe != 'Black Hole':
        # currently inside a universe: unmount its overlays first
        out.write('fuser -km /etc > /tmp/jump.tmp\n')
        out.write('umount -l /etc > /tmp/jump.tmp\n')
        out.write('umount -l /usr > /tmp/jump.tmp\n')
        out.write('umount -l /var/lib/opkg > /tmp/jump.tmp\n')
    if self.destination != 'Black Hole':
        out.write('sleep 1\n')
        line = 'mount -o bind %s /etc > /tmp/jump.tmp\n' % path1
        out.write(line)
        line = 'mount -o bind %s /var/lib/opkg > /tmp/jump.tmp\n' % path3
        out.write(line)
        line = 'mount -t unionfs -o dirs=%s:/usr=ro none /usr > /tmp/jump.tmp\n' % path2
        out.write(line)
    out.write('sleep 1\n')
    out.write('telinit 3\n\n')
    out.write('exit 0\n\n')
    out.close()
    rc = system('chmod 0755 /bin/bh_jump')
    self.jump_on_close = True
    configfile.save()
    self.close()
def deleteConfirmed(self, confirmed):
    """Delete self.delname after confirmation; show the OS error in a MessageBox.

    Fix: Python-2-only `except OSError,e` replaced by `except OSError as e`,
    valid in both Python 2.6+ and Python 3.  Behavior is otherwise unchanged.
    """
    if confirmed:
        try:
            os_remove(self.delname)
        except OSError as e:
            self.session.open(MessageBox, _("Delete failed!, %s") % e, MessageBox.TYPE_ERROR)
def deleteFile(filepath):
    """Best-effort delete of *filepath*; print a warning instead of raising.

    The original used a bare 'except:', which also swallowed
    KeyboardInterrupt/SystemExit; only OSError (missing file, permission
    denied, ...) is an expected failure here, so catch exactly that.
    """
    try:
        os_remove(filepath)
    except OSError:
        # Same warning text as before (parenthesized single-argument print
        # behaves identically on Python 2 and 3).
        print('\nWARNING: Not found the file: ' + filepath + '\n')
def update_now_tar(self, cItem):
    """Self-update via GitLab tarball: download the master branch of
    iptv-host-e2iplayer, unpack it in the tmp dir, copy the IPTVPlayer
    tree over the installed plugin and optionally restart enigma2.

    cItem -- menu item dict; cItem['retstart'] (default True) requests an
    enigma2 restart after the update.
    Returns '' in every path, like the other navigation handlers.

    NOTE(review): the key is spelled 'retstart' here and in update_now_zip
    -- looks like a typo of 'restart', but renaming it needs the callers
    that build cItem to be checked first.
    """
    restart = cItem.get('retstart', True)
    printDBG('TSIplayer: Start Update')
    # Previous implementation that scraped the commit id from the project
    # HTML page, kept for reference:
    #crc=''
    #_url = 'https://gitlab.com/Rgysoft/iptv-host-e2iplayer'
    #try:
    #    crc_data = re.findall('/Rgysoft/iptv-host-e2iplayer/commit/([^"^\']+?)[\'"]',self.cm.getPage(_url)[1], re.S)
    #    if crc_data:
    #        crc=crc_data[0]
    #        printDBG('TSIplayer: crc = '+crc)
    #    else: printDBG('TSIplayer: crc not found')
    #except:
    #    printDBG('TSIplayer: Get Main URL Error')
    #    return ''
    # Fetch the current commit id (used in the unpacked directory name)
    # from the GitLab JSON log endpoint.
    crc = ''
    _url = 'https://gitlab.com/Rgysoft/iptv-host-e2iplayer/-/refs/master/logs_tree/?format=json&o'
    try:
        crc_data = re.findall('commit.*?id":"(.*?)"', self.cm.getPage(_url)[1], re.S)
        if crc_data:
            crc = crc_data[0]
            printDBG('TSIplayer: crc = ' + crc)
        else:
            printDBG('TSIplayer: crc not found')
    except:
        printDBG('TSIplayer: Get Main URL Error')
        return ''
    tmpDir = GetTmpDir()
    source = os_path.join(tmpDir, 'iptv-host-e2iplayer.tar.gz')
    dest = os_path.join(tmpDir, '')
    _url = 'https://gitlab.com/Rgysoft/iptv-host-e2iplayer/repository/archive.tar.gz?ref=master'
    # Download the tarball; clean up a partial file on failure.
    # NOTE(review): this calls self.getPage while update_now_zip uses
    # self.cm.getPage -- confirm whether both helpers exist.
    try:
        output = open(source, 'wb')
        output.write(self.getPage(_url)[1])
        output.close()
        os_system('sync')
        printDBG('TSIplayer: Download iptv-host-e2iplayer.tar.gz OK')
    except:
        if os_path.exists(source):
            os_remove(source)
        printDBG('TSIplayer: Download Error iptv-host-e2iplayer.tar.gz')
        return ''
    cmd = 'tar -xzf "%s" -C "%s" 2>&1' % (source, dest)
    try:
        os_system(cmd)
        os_system('sync')
        printDBG('TSIplayer: Unpacking OK')
    except:
        printDBG('TSIplayer: Unpacking Error')
        os_system('rm -f %s' % source)
        # NOTE(review): this cleanup path omits the '-master-' infix used
        # everywhere else; it may not match the unpacked directory name.
        os_system('rm -rf %siptv-host-e2iplayer-%s' % (dest, crc))
        return ''
    # Copy the unpacked IPTVPlayer tree over the installed plugin.
    try:
        od = '%siptv-host-e2iplayer-master-%s/' % (dest, crc)
        do = resolveFilename(SCOPE_PLUGINS, 'Extensions/')
        cmd = 'cp -rf "%s"/* "%s"/ 2>&1' % (os_path.join(
            od, 'IPTVPlayer'), os_path.join(do, 'IPTVPlayer'))
        printDBG('<<<<<<<<<<<<<<<<<<<<<<<<<<cmd=' + cmd)
        os_system(cmd)
        os_system('sync')
        printDBG('TSIplayer: Copy OK')
    except:
        printDBG('TSIplayer: Copy Error')
        os_system('rm -f %s' % source)
        os_system('rm -rf %siptv-host-e2iplayer-master-%s' % (dest, crc))
        return ''
    printDBG('TSIplayer: Deleting temporary files')
    os_system('rm -f %s' % source)
    os_system('rm -rf %siptv-host-e2iplayer-master-%s' % (dest, crc))
    if restart:
        # quitMainloop(3) asks enigma2 to restart itself.
        try:
            from enigma import quitMainloop
            quitMainloop(3)
        except Exception as e:
            printDBG('TSIplayer: Erreur=' + str(e))
            pass
    params = {
        'category': 'none',
        'title': 'Update End. Please manual restart enigma2',
        'name': 'update'
    }
    self.addDir(params)
    return ''
def create_current_obs_graphs(self): location_objects = [] for i in range(0, len(self.loc_names)): root_folder = "C:\Users\Nathan\Documents\Storm Chasing\Chases\\" date_path = arrow_now().format('YYYY-MM-DD') for j in range(0, len(self.obs_list)): if self.loc_names[i] == self.obs_list[j].code: code = self.obs_list[j].code loc_name = self.obs_list[j].loc_name lat = self.obs_list[j].lat lon = self.obs_list[j].lon height = self.obs_list[j].height break with open(root_folder + date_path + "\Observations\\" + self.loc_names[i] + ".csv") as f: for line in f: line = line.split(',') location_objects.append( ObservationLocation(code, loc_name, lat, lon, height)) location_objects[len(location_objects) - 1].time = line[0] location_objects[len(location_objects) - 1].temp = line[1] location_objects[len(location_objects) - 1].dew = line[2] location_objects[len(location_objects) - 1].rain = line[3] location_objects[len(location_objects) - 1].pressure = line[4] location_objects[len(location_objects) - 1].lcl = line[5] location_objects[len(location_objects) - 1].rel_hum = line[6] location_objects[len(location_objects) - 1].wind_vel = line[7] location_objects[len(location_objects) - 1].wind_dir = str( line[8]).replace('\n', '') print t = 0 dpi_int = 40 fig = plt.figure(figsize=(16, 9)) for k in self.loc_names: loc_obs_temp = [] loc_obs_dew = [] loc_obs_rain = [] loc_obs_pressure = [] loc_obs_lcl = [] loc_obs_rel_hum = [] loc_obs_wind_vel = [] loc_obs_wind_dir = [] for j in location_objects: if k == j.code: loc_obs_temp.append([j.time, j.temp]) loc_obs_dew.append([j.time, j.dew]) loc_obs_rain.append([j.time, j.rain]) loc_obs_pressure.append([j.time, j.pressure]) loc_obs_lcl.append([j.time, j.lcl]) loc_obs_rel_hum.append([j.time, j.rel_hum]) loc_obs_wind_vel.append([j.time, j.wind_vel]) loc_obs_wind_dir.append([j.time, j.wind_dir]) if True: print print str( int(round(100 * float(t) / float(len(self.loc_names)), 0))) + "% Done" t += 1 print "Beginning to create graphs for", k if 
len(loc_obs_temp) > 1: # Code to create graphs go here. # Create temperature graph first graph_type = "temperature" file_short_list = [] for f in listdir(root_folder + date_path + "\Observations\\"): if re_match(k + "-" + graph_type, f): file_short_list.append(f) if len(file_short_list) > 1: file_short_list.sort() file_short_list.remove(file_short_list[-1]) for g in file_short_list: os_remove(root_folder + date_path + "\Observations\\" + g) print "Creating", graph_type, "graph" x = [] y = [] for m in loc_obs_temp: try: y.append(float(m[1])) x.append(datetime.fromtimestamp(float(m[0]))) except ValueError: continue try: time_now = arrow_get(x[len(x) - 1]).format('HH-mm') with open(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png"): print "Graph Already Exists" except IOError: ax = fig.add_subplot(111) ax.plot(x, y) for xy in zip(x, y): ax.annotate(xy[1], xy=xy, textcoords='data') buf = (max(y) - min(y)) * .1 ax.get_yaxis().get_major_formatter().set_useOffset( False) ax.axis( [x[0], x[len(x) - 1], min(y) - buf, max(y) + buf]) fig.tight_layout(pad=3) print "Saving" fig.savefig( root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png", dpi=dpi_int, ) fig.clf() except IndexError: pass # Create dew points graph graph_type = "dew-point" file_short_list = [] for f in listdir(root_folder + date_path + "\Observations\\"): if re_match(k + "-" + graph_type, f): file_short_list.append(f) if len(file_short_list) > 1: file_short_list.sort() file_short_list.remove(file_short_list[-1]) for g in file_short_list: os_remove(root_folder + date_path + "\Observations\\" + g) print "Creating", graph_type, "graph" x = [] y = [] for m in loc_obs_dew: try: y.append(float(m[1])) x.append(datetime.fromtimestamp(float(m[0]))) except ValueError: continue try: time_now = arrow_get(x[len(x) - 1]).format('HH-mm') with open(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png"): print 
"Graph Already Exists" except IOError: ax = fig.add_subplot(111) ax.plot(x, y) for xy in zip(x, y): ax.annotate(xy[1], xy=xy, textcoords='data') buf = (max(y) - min(y)) * .1 ax.get_yaxis().get_major_formatter().set_useOffset( False) ax.axis( [x[0], x[len(x) - 1], min(y) - buf, max(y) + buf]) fig.tight_layout(pad=3) print "Saving" fig.savefig(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png", dpi=dpi_int) fig.clf() except IndexError: pass # Create rain points graph graph_type = "rain" file_short_list = [] for f in listdir(root_folder + date_path + "\Observations\\"): if re_match(k + "-" + graph_type, f): file_short_list.append(f) if len(file_short_list) > 1: file_short_list.sort() file_short_list.remove(file_short_list[-1]) for g in file_short_list: os_remove(root_folder + date_path + "\Observations\\" + g) print "Creating", graph_type, "graph" x = [] y = [] for m in loc_obs_rain: try: y.append(float(m[1])) x.append(datetime.fromtimestamp(float(m[0]))) except ValueError: continue try: time_now = arrow_get(x[len(x) - 1]).format('HH-mm') with open(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png"): print "Graph Already Exists" except IOError: ax = fig.add_subplot(111) ax.plot(x, y) for xy in zip(x, y): ax.annotate(xy[1], xy=xy, textcoords='data') buf = (max(y) - min(y)) * .1 ax.get_yaxis().get_major_formatter().set_useOffset( False) ax.axis( [x[0], x[len(x) - 1], min(y) - buf, max(y) + buf]) fig.tight_layout(pad=3) print "Saving" fig.savefig(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png", dpi=dpi_int) fig.clf() except IndexError: pass # Create Pressure Graph graph_type = "pressure" file_short_list = [] for f in listdir(root_folder + date_path + "\Observations\\"): if re_match(k + "-" + graph_type, f): file_short_list.append(f) if len(file_short_list) > 1: file_short_list.sort() file_short_list.remove(file_short_list[-1]) for g in 
file_short_list: os_remove(root_folder + date_path + "\Observations\\" + g) print "Creating", graph_type, "graph" x = [] y = [] for m in loc_obs_pressure: try: y.append(float(m[1])) x.append(datetime.fromtimestamp(float(m[0]))) except ValueError: continue try: time_now = arrow_get(x[len(x) - 1]).format('HH-mm') with open(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png"): print "Graph Already Exists" except IOError: ax = fig.add_subplot(111) ax.plot(x, y) for xy in zip(x, y): ax.annotate(xy[1], xy=xy, textcoords='data') buf = (max(y) - min(y)) * .1 ax.get_yaxis().get_major_formatter().set_useOffset( False) ax.axis( [x[0], x[len(x) - 1], min(y) - buf, max(y) + buf]) fig.tight_layout(pad=3) print "Saving" fig.savefig(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png", dpi=dpi_int) fig.clf() except IndexError: pass # Create LCL Graph graph_type = "lcl" file_short_list = [] for f in listdir(root_folder + date_path + "\Observations\\"): if re_match(k + "-" + graph_type, f): file_short_list.append(f) if len(file_short_list) > 1: file_short_list.sort() file_short_list.remove(file_short_list[-1]) for g in file_short_list: os_remove(root_folder + date_path + "\Observations\\" + g) print "Creating", graph_type, "graph" x = [] y = [] for m in loc_obs_lcl: try: y.append(float(m[1])) x.append(datetime.fromtimestamp(float(m[0]))) except ValueError: continue try: time_now = arrow_get(x[len(x) - 1]).format('HH-mm') with open(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png"): print "Graph Already Exists" except IOError: ax = fig.add_subplot(111) ax.plot(x, y) for xy in zip(x, y): ax.annotate(xy[1], xy=xy, textcoords='data') buf = (max(y) - min(y)) * .1 ax.get_yaxis().get_major_formatter().set_useOffset( False) ax.axis( [x[0], x[len(x) - 1], min(y) - buf, max(y) + buf]) fig.tight_layout(pad=3) print "Saving" fig.savefig(root_folder + date_path + 
"\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png", dpi=dpi_int) fig.clf() except IndexError: pass # Create relative humidity Graph graph_type = "rel_hum" file_short_list = [] for f in listdir(root_folder + date_path + "\Observations\\"): if re_match(k + "-" + graph_type, f): file_short_list.append(f) if len(file_short_list) > 1: file_short_list.sort() file_short_list.remove(file_short_list[-1]) for g in file_short_list: os_remove(root_folder + date_path + "\Observations\\" + g) print "Creating", graph_type, "graph" x = [] y = [] for m in loc_obs_rel_hum: try: y.append(float(m[1])) x.append(datetime.fromtimestamp(float(m[0]))) except ValueError: continue try: time_now = arrow_get(x[len(x) - 1]).format('HH-mm') with open(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png"): print "Graph Already Exists" except IOError: ax = fig.add_subplot(111) ax.plot(x, y) for xy in zip(x, y): ax.annotate(xy[1], xy=xy, textcoords='data') buf = (max(y) - min(y)) * .1 ax.get_yaxis().get_major_formatter().set_useOffset( False) ax.axis( [x[0], x[len(x) - 1], min(y) - buf, max(y) + buf]) fig.tight_layout(pad=3) print "Saving" fig.savefig(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png", dpi=dpi_int) fig.clf() except IndexError: pass # Create wind speed graph graph_type = "wind" file_short_list = [] for f in listdir(root_folder + date_path + "\Observations\\"): if re_match(k + "-" + graph_type, f): file_short_list.append(f) if len(file_short_list) > 1: file_short_list.sort() file_short_list.remove(file_short_list[-1]) for g in file_short_list: os_remove(root_folder + date_path + "\Observations\\" + g) print "Creating", graph_type, "graph" x = [] y = [] for m in loc_obs_wind_vel: try: y.append(float(m[1])) x.append(datetime.fromtimestamp(float(m[0]))) except ValueError: continue try: time_now = arrow_get(x[len(x) - 1]).format('HH-mm') with open(root_folder + date_path + "\Observations\\" + 
k + "-" + graph_type + "-" + time_now + ".png"): print "Graph Already Exists" except IOError: ax = fig.add_subplot(111) ax.plot(x, y) for xy in zip(x, y): ax.annotate(xy[1], xy=xy, textcoords='data') buf = (max(y) - min(y)) * .1 ax.get_yaxis().get_major_formatter().set_useOffset( False) ax.axis( [x[0], x[len(x) - 1], min(y) - buf, max(y) + buf]) fig.tight_layout(pad=3) print "Saving" fig.savefig(root_folder + date_path + "\Observations\\" + k + "-" + graph_type + "-" + time_now + ".png", dpi=dpi_int) fig.clf() except IndexError: pass
def updateHdd(self):
    """Refresh the hard-disk info screen.

    If no disk device was detected (self.hdd_dev == ''), schedule
    self.myclose via a one-shot timer.  Otherwise query the disk's power
    state with hdparm, gather model/size/free-space/standby details into a
    text report, and toggle the running/stopped labels.

    Side effects: writes and removes /tmp/hdpar.tmp, sets self.cur_state
    and updates several screen widgets.
    """
    if self.hdd_dev == '':
        # No disk: close the screen shortly after it opens.
        self.activityTimer = eTimer()
        self.activityTimer.timeout.get().append(self.myclose)
        self.activityTimer.start(100, True)
    else:
        self['labstop'].hide()
        self['labrun'].hide()
        # hdparm -C reports the drive power state; parsed from the temp file
        # at the end of this method.
        cmd = 'hdparm -C ' + self.hddloc + '> /tmp/hdpar.tmp'
        rc = system(cmd)
        strview = ''
        procf = '/proc/ide/hda/'
        if self.hddloc.find('host1') != -1:
            procf = '/proc/ide/hdc/'
        model = 'Generic'
        filename = '/sys/block/%s/device/model' % self.hdd_dev
        if fileExists(filename):
            # NOTE(review): file() is the Python-2-only builtin (same as
            # open()); this method is Python 2 code.
            model = file(filename).read().strip()
        strview += _('HARD DISK MODEL:') + ' \t' + model + '\n'
        size = '0'
        filename = '/sys/block/%s/size' % self.hdd_dev
        if fileExists(filename):
            # /sys size is in 512-byte sectors; converted to "GB" with the
            # mixed 1000/1024 arithmetic used throughout this plugin
            # (Python 2 integer division).
            cap = int(file(filename).read().strip())
            cap = cap / 1000 * 512 / 1000
            cap = '%d.%03d GB' % (cap / 1024, cap % 1024)
            strview += _('Disk Size:') + ' \t' + cap + '\n'
        free = _('Not mounted')
        f = open('/proc/mounts', 'r')
        for line in f.readlines():
            if line.find('/media/hdd') != -1:
                stat = statvfs('/media/hdd')
                free = stat.f_bfree / 1000 * stat.f_bsize / 1000
                free = '%d.%03d GB' % (free / 1024, free % 1024)
                break
        f.close()
        strview += _('Available Space:') + '\t' + free + '\n'
        mysett = self.getHconf()
        cvalue1 = config.usage.hdd_standby.value
        # Values under 12 are treated as unset and default to 600 s.
        if cvalue1 < 12:
            cvalue1 = 600
        cvalue = int(cvalue1) / 60
        mystand = str(cvalue)
        strview += _('Standby:') + '\t\t' + mystand + _(' min.\n')
        myfile = procf + 'settings'
        if fileExists(myfile):
            # Append the IDE settings table, re-framed with separator rules.
            strview += '_______________________________________________\n'
            f = open(myfile, 'r')
            for line in f.readlines():
                if line.find('--') != -1:
                    strview += '_______________________________________________\n'
                    continue
                parts = line.strip().split()
                if len(parts) > 3:
                    line = parts[0] + '\t' + parts[1] + '\t' + parts[
                        2] + '\t' + parts[3]
                    strview += line + '\n'
            strview += '_______________________________________________\n\n'
            f.close()
        self.cur_state = False
        check = False
        # Parse the hdparm output: 'active' anywhere means spinning.
        if fileExists('/tmp/hdpar.tmp'):
            f = open('/tmp/hdpar.tmp', 'r')
            for line in f.readlines():
                if line.find('active') != -1:
                    check = True
            f.close()
            os_remove('/tmp/hdpar.tmp')
        if check == False:
            self['labstop'].show()
        else:
            self['labrun'].show()
            self.cur_state = True
        self['infotext'].setText(strview)
def update_now_zip(self, cItem):
    """Self-update via GitLab zip archive.

    Downloads the master branch of iptv-host-e2iplayer as archive.zip,
    unpacks it in the tmp directory, copies the bundled IPTVPlayer tree
    over the installed plugin and, when requested through
    cItem['retstart'] (default True), restarts enigma2.

    Returns '' in every path, like the other navigation handlers.
    """
    restart = cItem.get('retstart', True)
    printDBG('TSIplayer: Start Update')
    tmp_dir = GetTmpDir()
    archive_path = os_path.join(tmp_dir, 'archive.zip')
    unpack_dir = os_path.join(tmp_dir, '')
    download_url = 'https://gitlab.com/Rgysoft/iptv-host-e2iplayer/repository/archive.zip'
    # Step 1: download; a failure removes any partially written archive.
    try:
        archive_file = open(archive_path, 'wb')
        archive_file.write(self.cm.getPage(download_url)[1])
        archive_file.close()
        os_system('sync')
        printDBG('TSIplayer: Download archive.zip OK')
    except:
        if os_path.exists(archive_path):
            os_remove(archive_path)
        printDBG('TSIplayer: Download Error archive.zip')
        return ''
    # Step 2: unpack next to the archive.
    try:
        os_system('unzip -o "%s" -d "%s"' % (archive_path, unpack_dir))
        os_system('sync')
        printDBG('TSIplayer(zip): Unpacking OK')
    except:
        printDBG('TSIplayer(zip): Unpacking Error')
        os_system('rm -f %s' % archive_path)
        os_system('rm -rf /tmp/iptv-host-e2iplayer*/IPTVPlayer')
        return ''
    # Step 3: overwrite the installed plugin tree.
    try:
        os_system(
            'cp -rf /tmp/iptv-host-e2iplayer*/IPTVPlayer /usr/lib/enigma2/python/Plugins/Extensions'
        )
        os_system('sync')
        printDBG('TSIplayer(Zip): Copy OK')
    except:
        printDBG('TSIplayer(Zip): Copy Error')
        os_system('rm -f %s' % archive_path)
        os_system('rm -rf /tmp/iptv-host-e2iplayer*/IPTVPlayer')
        return ''
    # Step 4: drop the temporary files and (optionally) restart the UI.
    printDBG('TSIplayer: Deleting temporary files')
    os_system('rm -f %s' % archive_path)
    os_system('rm -rf /tmp/iptv-host-e2iplayer*/IPTVPlayer')
    if restart:
        try:
            from enigma import quitMainloop
            quitMainloop(3)
        except Exception as e:
            printDBG('TSIplayer: Erreur=' + str(e))
    params = {
        'category': 'none',
        'title': 'Update End. Please manual restart enigma2',
        'name': 'update'
    }
    self.addDir(params)
    return ''
def installMVI(self, target, sourcefile):
    """Install a boot-logo .mvi by overwriting the target with the source.

    target     -- (name, path) pair describing the logo slot to replace.
    sourcefile -- path of the replacement .mvi file.
    """
    dest_path = target[1]
    print("installing %s as %s on %s" % (sourcefile, target[0], dest_path))
    # Clear out any existing file before copying the replacement in.
    if os_path.isfile(dest_path):
        os_remove(dest_path)
    Console().ePopen("cp %s %s" % (sourcefile, dest_path))
def setPixmapCB(self, picInfo=None):
    """Callback fired when the picture loader has finished decoding.

    Removes the temporary download (it only served as picload input) and,
    when both a decoded pixmap and a widget instance are present, pushes
    the pixmap onto the widget.
    """
    if os_isfile(self.tmpfile):
        os_remove(self.tmpfile)
    pixmap = self.picload.getData()
    if not pixmap:
        return
    if self.instance:
        self.instance.setPixmap(pixmap)
def run(self):
    """
    Called when the process initializes.

    Main loop of the vista worker: wait for the fetcher's log management
    to finish, then repeatedly drain the 'vista.fetcher.metrics.json'
    Redis set, validate each fetched metric payload, drop datapoints that
    flux has already seen (or that fall in the current minute), optionally
    resample, submit the remaining datapoints to flux over HTTP, and
    report a throughput metric to Graphite once a minute.
    """
    # Log management to prevent overwriting
    # Allow the bin/<skyline_app>.d to manage the log
    # In Vista the log management is handled be fetcher, the worker just
    # waits for the fetcher to do the log managment
    now = int(time())
    log_wait_for = now + 5
    # Poll for up to ~5 seconds for the log lock file to disappear.
    while now < log_wait_for:
        if os.path.isfile(skyline_app_loglock):
            sleep(.1)
            now = int(time())
        else:
            now = log_wait_for + 1
    logger.info('worker :: starting log management')
    if os.path.isfile(skyline_app_loglock):
        logger.error('error :: worker :: bin/%s.d log management seems to have failed, continuing' % skyline_app)
        try:
            os_remove(skyline_app_loglock)
            logger.info('worker :: log lock file removed')
        except OSError:
            logger.error('error :: worker :: failed to remove %s, continuing' % skyline_app_loglock)
            pass
    else:
        logger.info('worker :: bin/%s.d log management done' % skyline_app)
    logger.info('worker :: starting worker')
    try:
        VISTA_ENABLED = settings.VISTA_ENABLED
        logger.info('worker :: VISTA_ENABLED is set to %s' % str(VISTA_ENABLED))
    except:
        VISTA_ENABLED = False
        logger.info('worker :: warning :: VISTA_ENABLED is not declared in settings.py, defaults to False')
    last_sent_to_graphite = int(time())
    metrics_sent_to_flux = 0
    # python-2.x and python3.x handle while 1 and while True differently
    # while 1:
    running = True
    while running:
        # Make sure Redis is up
        redis_up = False
        while not redis_up:
            try:
                redis_up = self.redis_conn.ping()
                if LOCAL_DEBUG:
                    logger.info('worker :: redis is up')
            except:
                logger.error('worker :: cannot connect to redis at socket path %s' % (settings.REDIS_SOCKET_PATH))
                sleep(2)
                # @modified 20191111 - Bug #3266: py3 Redis binary objects not strings
                #                      Branch #3262: py3
                # if settings.REDIS_PASSWORD:
                #     self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
                # else:
                #     self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
                self.redis_conn = get_redis_conn(skyline_app)
                self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
        metrics_data = []
        redis_set = 'vista.fetcher.metrics.json'
        try:
            # Get a metric to validate from the Redis set
            # @modified 20191111 - Bug #3266: py3 Redis binary objects not strings
            #                      Branch #3262: py3
            # metrics_data = self.redis_conn.smembers(redis_set)
            metrics_data = self.redis_conn_decoded.smembers(redis_set)
            if LOCAL_DEBUG:
                logger.info('worker :: got redis set data - %s' % redis_set)
        except:
            logger.error(traceback.format_exc())
            logger.error('error :: worker :: retrieving Redis set %s data' % str(redis_set))
        if not metrics_data:
            if LOCAL_DEBUG:
                logger.info('worker :: no data from Redis set %s' % str(redis_set))
            sleep(5)
        for str_metric_data in metrics_data:
            # Each set member is the literal repr of a one-element list of
            # a dict describing one fetched metric payload.
            delete_set_record = False
            remote_host_type = None
            try:
                # @modified 20191111 - Bug #3266: py3 Redis binary objects not strings
                #                      Branch #3262: py3
                # Rather using get_redis_conn_decoded
                # if python_version == 3:
                #     str_metric_data = str_metric_data.decode('UTF-8')
                metric_data = literal_eval(str_metric_data)
                remote_host_type = str(metric_data[0]['remote_host_type'])
                if LOCAL_DEBUG:
                    logger.info('worker :: got data from Redis set for remote_host_type %s' % str(remote_host_type))
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: worker :: failed to determine remote_host_type from %s' % str(str_metric_data))
                delete_set_record = True
            if not delete_set_record:
                try:
                    remote_target = str(metric_data[0]['remote_target'])
                    if LOCAL_DEBUG:
                        logger.info('worker :: got data from Redis set for target %s' % str(remote_target))
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: worker :: failed to determine target from %s' % str(str_metric_data))
                    delete_set_record = True
            metric = None
            if not delete_set_record:
                try:
                    metric = str(metric_data[0]['metric'])
                    if LOCAL_DEBUG:
                        logger.info('worker :: got data from Redis set for metric %s' % str(metric))
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: worker :: failed to determine metric from %s' % str(str_metric_data))
                    delete_set_record = True
            namespace_prefix = ''
            if not delete_set_record:
                try:
                    namespace_prefix = str(metric_data[0]['namespace_prefix'])
                    namespace_prefix = '%s.' % namespace_prefix
                    if not namespace_prefix:
                        namespace_prefix = ''
                    if LOCAL_DEBUG:
                        logger.info('worker :: got data from Redis set for namespace_prefix %s' % str(namespace_prefix))
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: worker :: failed to determine namespace_prefix from %s' % str(str_metric_data))
                    delete_set_record = True
            have_data = False
            if not delete_set_record:
                # Look up the timestamp of the last datapoint flux accepted
                # for this metric so already-submitted points are skipped.
                last_flux_metric_data = None
                cache_key = 'flux.last.%s' % (metric)
                try:
                    if python_version == 3:
                        redis_last_flux_metric_data = self.redis_conn.get(cache_key).decode('UTF-8')
                    else:
                        redis_last_flux_metric_data = self.redis_conn.get(cache_key)
                    redis_last_flux_metric_data = redis_last_flux_metric_data
                    last_flux_metric_data = literal_eval(redis_last_flux_metric_data)
                    if LOCAL_DEBUG:
                        logger.info('worker :: got last_flux_metric_data from Redis')
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: worker :: retrieving Redis key %s data' % str(cache_key))
                    last_flux_metric_data = False
                last_flux_timestamp = None
                if last_flux_metric_data:
                    try:
                        last_flux_timestamp = int(last_flux_metric_data[0])
                        if LOCAL_DEBUG:
                            logger.info('worker :: got last_flux_timestamp - %s' % str(last_flux_timestamp))
                    except:
                        logger.error(traceback.format_exc())
                        logger.error('error :: worker :: failed determining last_flux_timestamp')
                        last_flux_timestamp = False
                # Determine the timestamp of the current minute to apply
                # VISTA_DO_NOT_SUBMIT_CURRENT_MINUTE
                time_now = int(time())
                # current_minute = datetime.datetime.utcfromtimestamp(time_now).strftime('%Y-%m-%d %H:%M')
                current_minute_hour = int(datetime.datetime.utcfromtimestamp(time_now).strftime('%H'))
                current_minute_minute = int(datetime.datetime.utcfromtimestamp(time_now).strftime('%M'))
                current_datetime = datetime.datetime.utcfromtimestamp(time_now).replace(hour=current_minute_hour, minute=current_minute_minute, second=0, microsecond=0)
                current_minute_timestamp_start = int(current_datetime.strftime('%s'))
                datapoint = None
                last_timestamp_with_data = None
                timeseries = []
                # @added 20200107 - Task #3376: Enable vista and flux to deal with lower frequency data
                metric_resolution = 60
                metric_resolution_determined = False
                try:
                    if python_version == 3:
                        datapoints_str = literal_eval(metric_data[0]['datapoints'])
                        metric_datapoints = literal_eval(datapoints_str)
                    else:
                        # metric_datapoints = metric_data[0]['datapoints']
                        datapoints_str = literal_eval(metric_data[0]['datapoints'])
                        metric_datapoints = literal_eval(datapoints_str)
                    # for value, timestamp in metric_data[0]['datapoints']:
                    if LOCAL_DEBUG:
                        len_metric_datapoints = len(metric_datapoints)
                        logger.info('worker :: got %s metric_datapoints - %s' % (
                            str(len_metric_datapoints), str(metric_datapoints)))
                    # @added 20200107 - Task #3376: Enable vista and flux to deal with lower frequency data
                    # Determine resolution
                    resolution_timestamps = []
                    for metric_datapoint in metric_datapoints:
                        timestamp = int(metric_datapoint[0])
                        resolution_timestamps.append(timestamp)
                    timestamp_resolutions = []
                    if resolution_timestamps:
                        last_timestamp = None
                        for timestamp in resolution_timestamps:
                            if last_timestamp:
                                resolution = timestamp - last_timestamp
                                timestamp_resolutions.append(resolution)
                                last_timestamp = timestamp
                            else:
                                last_timestamp = timestamp
                    if timestamp_resolutions:
                        try:
                            # The most common gap between consecutive
                            # timestamps is taken as the metric resolution.
                            timestamp_resolutions_count = Counter(timestamp_resolutions)
                            ordered_timestamp_resolutions_count = timestamp_resolutions_count.most_common()
                            metric_resolution = int(ordered_timestamp_resolutions_count[0][0])
                            if metric_resolution > 0:
                                metric_resolution_determined = True
                        except:
                            logger.error(traceback.format_exc())
                            logger.error('error :: worker :: failed to determine metric_resolution from %s' % (
                                str(metric_data)))
                    if metric_resolution_determined:
                        cache_key = 'vista.last.resolution.%s' % metric
                        try:
                            # Update Redis key
                            self.redis_conn.setex(cache_key, 3600, metric_resolution)
                        except:
                            logger.error(traceback.format_exc())
                            logger.error('error :: fetcher :: failed to set Redis key - %s' % (
                                cache_key))
                    for metric_datapoint in metric_datapoints:
                        # @20191010 - Branch #3140: vista
                        # fetcher passes through preformatted data points that
                        # are in the same format/order for both graphite and
                        # prometheus
                        # if remote_host_type == 'graphite':
                        #     value = float(metric_datapoint[0])
                        #     timestamp = int(metric_datapoint[1])
                        # if remote_host_type == 'prometheus':
                        #     value = float(metric_datapoint[1])
                        #     timestamp = int(metric_datapoint[0])
                        timestamp = int(metric_datapoint[0])
                        value = float(metric_datapoint[1])
                        append_to_timeseries = False
                        if last_flux_timestamp:
                            if int(timestamp) > last_flux_timestamp:
                                # timeseries.append([timestamp, value])
                                append_to_timeseries = True
                        else:
                            # timeseries.append([timestamp, value])
                            append_to_timeseries = True
                        # Here if the timestamp of the data point falls
                        # within the current minute, it is discarded and not
                        # sent to flux, to ensure that high frequency metrics
                        # can have their minutely bins fully populated before
                        # they are submitted to Graphite
                        if settings.VISTA_DO_NOT_SUBMIT_CURRENT_MINUTE:
                            if int(timestamp) >= current_minute_timestamp_start:
                                append_to_timeseries = False
                        if append_to_timeseries:
                            timeseries.append([timestamp, value])
                    # Walk backwards to find the newest datapoint with a
                    # value (0.0 counts as a value here).
                    last_timestamp_with_data = 0
                    for timestamp, value in timeseries[::-1]:
                        has_value = False
                        if value == 0.0:
                            has_value = True
                        if value:
                            has_value = True
                        if has_value:
                            last_timestamp_with_data = int(timestamp)
                            datapoint = value
                            break
                    if last_timestamp_with_data:
                        have_data = True
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: worker :: failed to determine datapoints from %s' % (
                        str(metric_data)))
                    delete_set_record = True
                if not timeseries:
                    logger.info('worker :: after processing, there were no valid data points in %s' % (
                        str(metric_data)))
                    delete_set_record = True
                if not have_data and timeseries:
                    logger.error('error :: worker :: failed to determine last_timestamp_with_data from %s' % (
                        str(metric_data)))
                    delete_set_record = True
            if delete_set_record:
                # Unusable payload: drop it from the set so it is not
                # reprocessed forever.
                try:
                    redis_set = 'vista.fetcher.metrics.json'
                    self.redis_conn.srem(redis_set, str_metric_data)
                except:
                    logger.error(traceback.format_exc())
                    # NOTE(review): this format string has one %s but two
                    # arguments, so it would itself raise TypeError if this
                    # error path ever fires -- needs a second %s.
                    logger.error('error :: worker :: failed to delete data from Redis set %s, data - ' % (
                        str(redis_set), str(str_metric_data)))
                continue
            if not metric:
                continue
            valid_data = True
            if last_flux_timestamp and last_timestamp_with_data:
                if int(last_timestamp_with_data) <= last_flux_timestamp:
                    valid_data = False
            if not valid_data:
                redis_set = 'vista.fetcher.metrics.json'
                logger.info('worker :: no valid data in fetched data removing from Redis set %s - data - %s' % (
                    redis_set, str(str_metric_data)))
                try:
                    self.redis_conn.srem(redis_set, str_metric_data)
                except:
                    logger.error(traceback.format_exc())
                    logger.error('error :: worker :: failed to delete data from Redis set %s, data - %s' % (
                        redis_set, str(str_metric_data)))
                continue
            if valid_data:
                flux_host = 'http://%s:%s' % (settings.FLUX_IP, settings.FLUX_PORT)
                # Resample
                # NOTE(review): resample_at is hard-coded to None just below,
                # so the whole resample branch is currently dead code.
                resample_at = None
                if resample_at == 'none' or resample_at == '0Min':
                    resample_at = False
                if resample_at == 'None' or resample_at == '0min':
                    resample_at = False
                if resample_at is None or resample_at == '0' or resample_at == 0:
                    resample_at = False
                if resample_at:
                    try:
                        df = pd.DataFrame(timeseries)
                        df.columns = ['timestamp', 'value']
                        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='s', origin='unix')
                        df = df.set_index('timestamp')
                        resampled_df = df.resample(resample_at).sum()
                        resampled_timeseries = []
                        for index, row in resampled_df.iterrows():
                            timestamp = int(index.strftime('%s'))
                            resampled_timeseries.append([timestamp, row[0]])
                        timeseries = resampled_timeseries
                        timeseries_length = len(timeseries)
                        logger.info('worker :: time series resampled at %s resulting in %s data points to send to Graphite' % (
                            str(resample_at), str(timeseries_length)))
                    except:
                        logger.error(traceback.format_exc())
                        logger.error('error :: worker :: failed to resample time series at %s for %s with time series %s' % (
                            str(resample_at), str(metric), str(timeseries)))
                # Submit each datapoint to flux over its HTTP API.
                # NOTE(review): 'success' and 'response' are only bound
                # inside this loop; if timeseries ended up empty the
                # 'if success:' below would raise NameError, and 'response'
                # is unbound in the 'if not success:' log when requests.get
                # itself raised -- confirm against upstream.
                for timestamp, value in timeseries:
                    # @added 20200225 - Bug #3476: vista - handle very large floats
                    # Handle very large floats
                    # So that flux is never passed a value=1.00243039089e+11
                    if 'e' in str(value):
                        datapoint = format_float_positional(value)
                    else:
                        datapoint = float(value)
                    flux_url = '%s/metric_data?metric=%s&value=%s&timestamp=%s&key=%s' % (
                        flux_host, metric, str(datapoint), str(timestamp),
                        settings.FLUX_SELF_API_KEY)
                    success = False
                    try:
                        response = requests.get(flux_url)
                        if response.status_code == 200:
                            success = True
                        elif response.status_code == 204:
                            success = True
                    except:
                        logger.error(traceback.format_exc())
                        logger.error('error :: worker :: failed to request %s' % str(flux_url))
                    if not success:
                        logger.error('error :: worker :: http status code - %s, reason - %s from %s' % (
                            str(response.status_code), str(response.reason), str(flux_url)))
                        logger.debug('debug :: timeseries - %s' % str(timeseries))
                if success:
                    metrics_sent_to_flux += 1
                    redis_set = 'vista.fetcher.metrics.json'
                    # @added 20191011 - Task #3258: Reduce vista logging
                    timeseries_length = len(timeseries)
                    # @modified 20191011 - Task #3258: Reduce vista logging
                    # logger.info('worker :: data submitted to flux OK, removing data from Redis set %s' % (
                    #     redis_set))
                    logger.info('worker :: %s data points submitted to flux OK for %s' % (
                        str(timeseries_length), metric))
                    try:
                        self.redis_conn.srem(redis_set, str_metric_data)
                    except:
                        logger.error(traceback.format_exc())
                        logger.error('error :: worker :: failed to delete data from Redis set %s, data - %s' % (
                            redis_set, str(str_metric_data)))
                    redis_set = 'vista.fetcher.unique_metrics'
                    try:
                        self.redis_conn.sadd(redis_set, remote_target)
                    except:
                        logger.error(traceback.format_exc())
                        logger.error('error :: worker :: failed to add %s to Redis set %s' % (
                            remote_target, redis_set))
        # Once a minute, report how many metrics were relayed to flux.
        time_now = int(time())
        if (time_now - last_sent_to_graphite) >= 60:
            logger.info('worker :: metrics sent_to_flux in last 60 seconds - %s' % str(metrics_sent_to_flux))
            send_metric_name = '%s.metrics_sent_to_flux' % skyline_app_graphite_namespace
            try:
                send_graphite_metric(parent_skyline_app, send_metric_name, str(metrics_sent_to_flux))
                last_sent_to_graphite = int(time())
                metrics_sent_to_flux = 0
            except:
                logger.error(traceback.format_exc())
                logger.error('error :: worker :: failed to send_graphite_metric %s with %s' % (
                    send_metric_name, str(metrics_sent_to_flux)))
def getSpace(self):
    """Parse `df` output and update the flash/cf/usb/hdd usage gauges.

    Writes a summary into self.smallmontxt and sets the five progress
    widgets.  Percentages are read from the second-to-last df column,
    sizes from columns 4 and 3 before the mount point.
    """
    rc = system('df > /tmp/ninfo.tmp')
    flused = 0
    fltot = 0
    flperc = 0
    cfused = 0
    cftot = 0
    cfperc = 0
    usused = 0
    ustot = 0
    usperc = 0
    hdused = 0
    hdtot = 0
    hdperc = 0
    fperc = 0
    if fileExists('/tmp/ninfo.tmp'):
        f = open('/tmp/ninfo.tmp', 'r')
        for line in f.readlines():
            # 'part1' is blanked so device names never match a mount test below
            line = line.replace('part1', ' ')
            parts = line.strip().split()
            totsp = len(parts) - 1  # index of the mount point (last field)
            if parts[totsp] == '/':
                # guard: header line has a non-numeric Use% column
                strview = parts[totsp - 1].replace('%', '')
                if strview.isdigit():
                    flperc = int(parts[totsp - 1].replace('%', ''))
                    fltot = int(parts[totsp - 4])
                    flused = int(parts[totsp - 3])
            if parts[totsp] == '/usr':
                # a separate /usr mount means the flash is "extended"
                self.extendedFlash = True
                strview = parts[totsp - 1].replace('%', '')
                if strview.isdigit():
                    flperc = int(parts[totsp - 1].replace('%', ''))
                    fltot = int(parts[totsp - 4])
                    flused = int(parts[totsp - 3])
            if parts[totsp] == '/media/cf':
                # NOTE(review): unlike the other mounts this branch has no
                # isdigit() guard -- confirm a malformed line cannot reach it
                cfperc = int(parts[totsp - 1].replace('%', ''))
                cftot = int(parts[totsp - 4])
                cfused = int(parts[totsp - 3])
            if parts[totsp] == '/media/usb':
                strview = parts[totsp - 1].replace('%', '')
                if strview.isdigit():
                    usperc = int(parts[totsp - 1].replace('%', ''))
                    ustot = int(parts[totsp - 4])
                    usused = int(parts[totsp - 3])
            if parts[totsp] == '/media/hdd':
                strview = parts[totsp - 1].replace('%', '')
                if strview.isdigit():
                    hdperc = int(parts[totsp - 1].replace('%', ''))
                    hdtot = int(parts[totsp - 4])
                    hdused = int(parts[totsp - 3])
        f.close()
        os_remove('/tmp/ninfo.tmp')
    # overall figure deliberately excludes the flash totals
    ftot = cftot + ustot + hdtot
    fused = int(cfused) + int(usused) + int(hdused)
    if ftot > 100:
        fperc = fused * 100 / ftot
    self.smallmontxt += _('Flash in use: ') + str(flperc) + ' %\n'
    self.smallmontxt += _('Cf in use: ') + str(cfperc) + ' %\n'
    self.smallmontxt += _('Usb in use: ') + str(usperc) + ' %\n'
    self.smallmontxt += _('Hdd in use: ') + str(hdperc) + ' %\n'
    # gauges are scaled/offset to the widget's expected value range
    self['spacetg'].setValue(int(fperc * 100 / 120 + 50))
    self['cffg'].setValue(int(cfperc * 100 / 120 + 50))
    self['usbg'].setValue(int(usperc * 100 / 120 + 50))
    self['hddg'].setValue(int(hdperc * 100 / 120 + 50))
    self['flashg'].setValue(int(flperc * 100 / 120 + 50))
def onImageLoaded(self, dummy):
    """Decode the freshly downloaded temp image, delete the temp file,
    and pass the resulting pixmap to the registered callback."""
    tmp_path = self.tmpfile
    self.currPic = loadPic(tmp_path, self.width, self.height, 0, 1, 0, 1)
    os_remove(tmp_path)
    self.callBack(pixmap=self.currPic)
def save(self, file_name: str, is_branch: bool = False):
    """Save the mechanism database to *file_name*, appending a commit.

    Prompts for author / branch / commit message when missing; writes the
    commit (and any new author / branch rows) inside one transaction, and
    removes the file again if the commit fails.  With *is_branch* the
    branch name is asked for instead of reusing the current one.
    """
    author_name = self.FileAuthor.text(
    ) or self.FileAuthor.placeholderText()
    branch_name = '' if is_branch else self.branch_current.text()
    commit_text = self.FileDescription.text()
    # Loop until the user supplies each value or cancels the dialog.
    while not author_name:
        author_name, ok = QInputDialog.getText(
            self, "Author", "Please enter author's name:",
            QLineEdit.Normal, "Anonymous")
        if not ok:
            return
    while not branch_name.isidentifier():
        branch_name, ok = QInputDialog.getText(
            self, "Branch", "Please enter a branch name:",
            QLineEdit.Normal, "master")
        if not ok:
            return
    while not commit_text:
        commit_text, ok = QInputDialog.getText(self, "Commit",
                                               "Please add a comment:",
                                               QLineEdit.Normal,
                                               "Update mechanism.")
        if not ok:
            return
    # Saving to a different existing file: overwrite it from scratch.
    if (file_name != self.file_name.absoluteFilePath()) and isfile(file_name):
        os_remove(file_name)
        print("The original file has been overwritten.")
    self.__connect_database(file_name)
    is_error = False
    with _db.atomic():
        # Reuse an existing author/branch row when the name matches.
        if author_name in (user.name for user in UserModel.select()):
            author_model = (UserModel.select().where(
                UserModel.name == author_name).get())
        else:
            author_model = UserModel(name=author_name)
        if branch_name in (branch.name for branch in BranchModel.select()):
            branch_model = (BranchModel.select().where(
                BranchModel.name == branch_name).get())
        else:
            branch_model = BranchModel(name=branch_name)
        # All mechanism state is serialized and compressed per column.
        args = {
            'author': author_model,
            'description': commit_text,
            'mechanism': _compress(self.__point_expr_func()),
            'linkcolor': _compress(self.__link_expr_func()),
            'storage': _compress(list(self.__storage_data_func())),
            'pathdata': _compress(self.__path_data_func()),
            'collectiondata': _compress(self.__collect_data_func()),
            'triangledata': _compress(self.__triangle_data_func()),
            'inputsdata': _compress(
                tuple((b, d) for b, d, a in self.__inputs_data_func())),
            'algorithmdata': _compress(self.__algorithm_data_func()),
            'branch': branch_model,
        }
        # Link to the current commit as parent, or None for a root commit.
        try:
            args['previous'] = (CommitModel.select().where(
                CommitModel.id == self.commit_current_id.value()).get())
        except CommitModel.DoesNotExist:
            args['previous'] = None
        new_commit = CommitModel(**args)
        try:
            author_model.save()
            branch_model.save()
            new_commit.save()
        except Exception as e:
            print(str(e))
            _db.rollback()
            is_error = True
        else:
            self.history_commit = CommitModel.select().order_by(
                CommitModel.id)
    if is_error:
        # Commit failed: do not leave a half-written database on disk.
        os_remove(file_name)
        print("The file was removed.")
        return
    self.read(file_name)
    print(f"Saving \"{file_name}\" successful.")
    size = QFileInfo(file_name).size()
    print("Size: " + (f"{size / 1024 / 1024:.02f} MB"
                      if size / 1024 // 1024 else f"{size / 1024:.02f} KB"))
def errorIconDownload(self, error=None, item=None):
    """Flag *item* as failed and remove the zero-byte partial file a
    failed icon download may have left behind."""
    item.error = True
    partial = item.filename
    if os_path.exists(partial):
        # delete the empty leftover so the next attempt starts clean
        os_remove(partial)
def test_internal_correct_and_save():
    """Test CalibratePSEye.correct_and_save.

    Covers three behaviors:
    1. a non-string ``imgpath`` raises TypeError;
    2. calling with no calibration path set raises RuntimeError;
    3. a real run writes the undistorted images and camera-params CSV, and
       the returned array matches both ``correct()`` and the files on disk.
    """
    calib = CalibratePSEye()
    fn_c = calibsdir + '/camera_params.csv'
    # 1. Type checking of imgpath
    for t in (int, float, complex, list, tuple, range, dict, set,
              frozenset, bool, bytes, bytearray, memoryview):
        try:
            calib.correct_and_save(t)
        except TypeError:
            pass
        else:
            raise RuntimeError('Failed to catch %s imgpath' % t.__name__)
    calib.load_calibrations(fn_c)
    cp = calib.calibpath
    calib.calibpath = None
    # 2. Missing calibration path must be rejected
    try:
        calib.correct_and_save('file-that-does-not-exist')
    except RuntimeError:
        pass
    else:
        raise RuntimeError('Failed to catch _calib_path is None')
    # 3. Saving
    calib.calibpath = cp
    imgpath = testdir + '/raw'
    storeddir = testdir + '/00000000-000000_undistorted'
    storedcp = testdir + '/00000000-000000_camera_params.csv'
    # Start from a clean slate in case a previous run left artifacts.
    if os_exists(storeddir):
        rmtree(storeddir)
    if os_exists(storedcp):
        os_remove(storedcp)
    ud1 = calib.correct_and_save(imgpath)
    try:
        # Proper saving
        if not os_exists(storeddir) or not os_exists(storedcp):
            raise RuntimeError('Error creating corrected directories')
        # List the source images once; reuse for counting and reloading
        # (the original listed the same directories repeatedly).
        imglist = [f for f in listdir(imgpath) if f[-4:].lower() == '.jpg']
        imgcount2 = len([f for f in listdir(storeddir)
                         if f[-4:].lower() == '.jpg'])
        if len(imglist) != imgcount2:
            raise RuntimeError('Not all images were saved')
        # Pre-save equality: correct() must agree with correct_and_save()
        rawimg = [cv_imread(imgpath + '/' + f) for f in imglist]
        ud2 = calib.correct(rawimg)  # will know if `correct` works
        if not array_equal(ud1, ud2):
            raise RuntimeError('Failed pre-save equality check')
        # Post-save equality: re-encode ud2 next to the saved files and
        # compare both sets after the JPEG round-trip.
        # NOTE(review): assumes sources are named f00001.jpg, f00002.jpg, ...
        # so '_' + f matches the '_fNNNNN.jpg' files written here -- confirm.
        for i in range(len(imglist)):
            fnud = storeddir + ('/_f%s' % str(i + 1).zfill(5)) + '.jpg'
            cv_imwrite(fnud, ud2[i, ...], (IMWRITE_JPEG_QUALITY, 100))
        ud1list = [cv_imread(storeddir + '/' + f) for f in imglist]
        ud2list = [cv_imread(storeddir + '/_' + f) for f in imglist]
        ud1reload = asarray(ud1list, dtype='uint8')
        ud2reload = asarray(ud2list, dtype='uint8')
        if not array_equal(ud1reload, ud2reload):
            raise RuntimeError('Failed reload equality check')
    finally:
        os_remove(storedcp)
        rmtree(storeddir)
    # Cleanup verification.
    # BUGFIX: this used to be wrapped in `try/except AssertionError`, but the
    # body raises RuntimeError, so the handler was dead code; raise directly.
    if os_exists(storedcp):
        raise RuntimeError('failed to deleted cameraParams csv')
    if os_exists(storeddir):
        raise RuntimeError('failed to remove undistored img dir')
def updateList(self):
    """Build the universe chooser list and the flash/universe space report.

    Populates self.list with the four universe entries, parses `df -h`
    for the '/' and '/universe' mounts, fills the two labels, and moves
    the selection to the universe we are currently in.
    """
    self.list = []
    mypath = DeliteGetSkinPath()
    rc = system('df -h > /tmp/syinfo.tmp')
    mypixmap = mypath + 'icons/icon_home_BH.png'
    png = LoadPixmap(mypixmap)
    name = 'Black Hole'
    title = MultiContentEntryText(pos=(120, 30), size=(480, 50), font=0, text=name)
    png = MultiContentEntryPixmapAlphaTest(pos=(0, 3), size=(100, 100), png=png)
    self.list.append([name, title, png])
    mypixmap = mypath + 'icons/icon_avalon.png'
    png = LoadPixmap(mypixmap)
    name = 'Avalon'
    title = MultiContentEntryText(pos=(120, 30), size=(480, 50), font=0, text=name)
    png = MultiContentEntryPixmapAlphaTest(pos=(0, 3), size=(100, 100), png=png)
    self.list.append([name, title, png])
    mypixmap = mypath + 'icons/icon_chaos.png'
    png = LoadPixmap(mypixmap)
    name = 'Chaos'
    title = MultiContentEntryText(pos=(120, 30), size=(480, 50), font=0, text=name)
    png = MultiContentEntryPixmapAlphaTest(pos=(0, 3), size=(100, 100), png=png)
    self.list.append([name, title, png])
    mypixmap = mypath + 'icons/icon_ghost.png'
    png = LoadPixmap(mypixmap)
    name = 'Ghost'
    title = MultiContentEntryText(pos=(120, 30), size=(480, 50), font=0, text=name)
    png = MultiContentEntryPixmapAlphaTest(pos=(0, 3), size=(100, 100), png=png)
    self.list.append([name, title, png])
    self['list'].setList(self.list)
    self.current_universe = self.whereIAm()
    txt = _('You are in %s universe.') % self.current_universe
    self['lab1'].setText(txt)
    btot = buse = bempty = utot = uuse = uempty = ''
    f = open('/tmp/syinfo.tmp', 'r')
    for line in f.readlines():
        parts = line.split()
        tot = len(parts) - 1  # last field = mount point
        if parts[tot].strip() == '/':
            # df -h fields relative to mount point: -4 Size, -2 Avail, -1 Use%
            btot = parts[tot - 4].strip()
            buse = parts[tot - 1].strip()
            bempty = parts[tot - 2].strip()
        elif parts[tot].strip() == '/universe':
            utot = parts[tot - 4].strip()
            uuse = parts[tot - 1].strip()
            uempty = parts[tot - 2].strip()
            break
    f.close()
    os_remove('/tmp/syinfo.tmp')
    # NOTE(review): 'phisycal' typo is kept as-is -- it is a gettext msgid,
    # correcting it would break existing translation catalogs.
    text = _(
        'Black Hole details:\nBlack Hole is the original matrix of all Parallel Universes and resides in its own phisycal space.\n'
    )
    text += _('Estimated size: %s \n') % btot
    text += _('Occupied space: %s \n') % buse
    text += _('Empty space: %s \n\n') % bempty
    text += _(
        'Parallel Universes details:\nParallel Universes share the same space because they are all together in the same place, but in different dimensions.\n'
    )
    text += _('Estimated size: %s \n') % utot
    text += _('Occupied space: %s \n') % uuse
    text += _('Empty space: %s \n\n') % uempty
    self['lab2'].setText(text)
    # Preselect the entry matching the universe we are currently in.
    pos = 0
    sel = self['list'].getCurrent()
    for x in self.list:
        if x[0] == self.current_universe:
            self['list'].moveToIndex(pos)
            break
        pos += 1
def KeyGreen(self):
    """Show the storage monitor and fill it with per-mount `df` figures.

    Builds a human-readable report (flash, cf, usb, hdd plus a combined
    total) into the 'moni2' widget.  Each *used/tot/perc* variable first
    holds df strings, then is re-assigned the integer KB figures once the
    text lines have been emitted.
    """
    if self.moni_state == 0:
        self.moniShow()
    rc = system("df > /tmp/ninfo.tmp")
    mytext = ""
    flused = 0
    fltot = 0
    flperc = 0
    cfused = 0
    cftot = 0
    cfperc = 0
    usused = 0
    ustot = 0
    usperc = 0
    hdused = 0
    hdtot = 0
    hdperc = 0
    # With an extended flash the root image lives on /usr, not /
    mountflash = "/"
    if self.extendedFlash == True:
        mountflash = "/usr"
    if fileExists("/tmp/ninfo.tmp"):
        f = open("/tmp/ninfo.tmp", 'r')
        for line in f.readlines():
            meas = "M"
            line = line.replace('part1', ' ')
            parts = line.strip().split()
            totsp = (len(parts) - 1)  # index of the mount point
            if parts[totsp] == mountflash:
                # `if flused: continue` -> only the first matching line counts
                if flused:
                    continue
                flused = parts[totsp - 1]
                flperc = int(flused.replace('%', ''))
                fltot = int(parts[totsp - 4])
                if fltot > 1000000:
                    fltot = fltot / 1000
                    meas = "Gb"
                capacity = "%d.%03d " % (fltot / 1000, fltot % 1000)
                mytext += _("FLASH: ") + capacity + meas + _(
                    " in use: ") + flused + "\n"
                mytext += _("Total: ") + parts[totsp - 4] + _(
                    " Used: ") + parts[totsp - 3] + _(
                    " Free: ") + parts[totsp - 2] + "\n\n"
                # switch the variables to integer KB for the grand total below
                fltot = int(parts[totsp - 4])
                flused = int(parts[totsp - 3])
            if parts[totsp] == "/media/cf":
                if cfused:
                    continue
                cfused = parts[totsp - 1]
                cfperc = int(cfused.replace('%', ''))
                cftot = int(parts[totsp - 4])
                if cftot > 1000000:
                    cftot = cftot / 1000
                    meas = "Gb"
                capacity = "%d.%03d " % (cftot / 1000, cftot % 1000)
                # NOTE(review): "CF: " is the only label not wrapped in _()
                mytext += ("CF: ") + capacity + meas + _(
                    " in use: ") + cfused + "\n"
                mytext += _("Total: ") + parts[totsp - 4] + _(
                    " Used: ") + parts[totsp - 3] + _(
                    " Free: ") + parts[totsp - 2] + "\n\n"
                cftot = int(parts[totsp - 4])
                cfused = int(parts[totsp - 3])
            if parts[totsp] == "/media/usb":
                if usused:
                    continue
                usused = parts[totsp - 1]
                usperc = int(usused.replace('%', ''))
                ustot = int(parts[totsp - 4])
                if ustot > 1000000:
                    ustot = ustot / 1000
                    meas = "Gb"
                capacity = "%d.%03d " % (ustot / 1000, ustot % 1000)
                mytext += _("USB: ") + capacity + meas + _(
                    " in use: ") + usused + "\n"
                mytext += _("Total: ") + parts[totsp - 4] + _(
                    " Used: ") + parts[totsp - 3] + _(
                    " Free: ") + parts[totsp - 2] + "\n\n"
                ustot = int(parts[totsp - 4])
                usused = int(parts[totsp - 3])
            if parts[totsp] == "/media/hdd":
                if hdused:
                    continue
                # hdd lines are guarded against a non-numeric Use% field
                strview = parts[totsp - 1].replace('%', '')
                if strview.isdigit():
                    hdused = parts[totsp - 1]
                    hdperc = int(hdused.replace('%', ''))
                    hdtot = int(parts[totsp - 4])
                    if hdtot > 1000000:
                        hdtot = hdtot / 1000
                        meas = "Gb"
                    capacity = "%d.%03d " % (hdtot / 1000, hdtot % 1000)
                    mytext += _("HDD: ") + capacity + meas + _(
                        " in use: ") + hdused + "\n"
                    mytext += _("Total: ") + parts[totsp - 4] + _(
                        " Used: ") + parts[totsp - 3] + _(
                        " Free: ") + parts[totsp - 2] + "\n\n"
                    hdtot = int(parts[totsp - 4])
                    hdused = int(parts[totsp - 3])
        f.close()
        os_remove("/tmp/ninfo.tmp")
    # Grand total across all mounts, rescaled to M / Gb / Tera as needed.
    meas = "M"
    ftot = fltot + cftot + ustot + hdtot
    fused = int(flused) + int(cfused) + int(usused) + int(hdused)
    ffree = (ftot - fused)
    fperc = 0
    if ftot > 100:
        fperc = (fused * 100) / ftot
    if ftot > 1000000:
        ftot = ftot / 1000
        meas = "Gb"
    if ftot > 1000000000:
        ftot = ftot / 1000000
        meas = "Tera"
    ftot = "%d.%03d " % (ftot / 1000, ftot % 1000)
    ftot += meas
    meas = "M"
    if fused > 1000000:
        fused = fused / 1000
        meas = "Gb"
    if fused > 1000000000:
        fused = fused / 1000000
        meas = "Tera"
    fused = "%d.%03d " % (fused / 1000, fused % 1000)
    fused += meas
    meas = "M"
    if ffree > 1000000:
        ffree = ffree / 1000
        meas = "Gb"
    if ffree > 1000000000:
        ffree = ffree / 1000000
        meas = "Tera"
    ffree = "%d.%03d " % (ffree / 1000, ffree % 1000)
    ffree += meas
    mytext += _("Total Space: ") + ftot + _(" in use: ") + str(
        fperc) + "% \n"
    mytext += _("Total: ") + ftot + _(" Used: ") + fused + _(
        " Free: ") + ffree
    self["moni2"].setText(mytext)
def monitor_processes_in_background(self):
    """Endless monitoring loop: verify configured processes and mail alerts.

    Every iteration lists running processes, compares instance counts (and
    optionally logfile freshness) against each configured process, and on
    any mismatch sends a report via smtplib and/or mailx as configured.
    Never returns.
    """
    from time import sleep as time_sleep
    # Mail backends are imported lazily, only if enabled in config.
    if config.use_python_smtplib:
        import smtplib
        from email.mime.text import MIMEText
        from email.mime.multipart import MIMEMultipart
    if config.use_mailx:
        from os import remove as os_remove
    while True:
        self.list_currently_working_processes(
            config.ps_command, config.column_with_pid_for_ps_command)
        list_of_broken_processes = list()
        # CHECK PROCESSES #
        for process_group in self.list_of_processes_to_check:
            for process in process_group.processes_list:
                number_of_found_instances = 0
                # COUNT NUMBER OF INSTANCES #
                for running_process in self.list_of_processes_on_current_hostname:
                    if process.pattern in running_process:
                        number_of_found_instances += 1
                if number_of_found_instances == process.number_of_instances:
                    # CHECK UPDATE ON LOGFILE #
                    if process.log_update > 0 and process.log_path:
                        # Pick the newest file matching the configured log name.
                        final_path_to_log = ''
                        path, file = os_path.split(process.log_path)
                        path_listed_elements = os_listdir(path)
                        path_listed_elements.sort(
                            key=lambda x: os_path.getmtime(
                                os_path.join(path, x)),
                            reverse=True)
                        for file_name in path_listed_elements:
                            if file in file_name:
                                final_path_to_log = os_path.join(
                                    path, file_name)
                                break
                        last_logfile_date = datetime.fromtimestamp(
                            os_stat(final_path_to_log).st_mtime)
                        now_minus_time = datetime.now() - timedelta(
                            seconds=process.log_update)
                        # Stale log => report as broken (5-tuple entry).
                        if last_logfile_date < now_minus_time:
                            last_update_time = datetime.now(
                            ) - last_logfile_date
                            treshold = timedelta(
                                seconds=process.log_update)
                            list_of_broken_processes.append(
                                (process.name, number_of_found_instances,
                                 process.number_of_instances,
                                 last_update_time, treshold))
                    else:
                        # counts match and no log check configured -> healthy
                        continue
                elif number_of_found_instances != process.number_of_instances:
                    # Wrong instance count => report as broken (3-tuple entry).
                    list_of_broken_processes.append(
                        (process.name, number_of_found_instances,
                         process.number_of_instances))
        # IF FOUND BROKEN PROCESS SEND MAIL #
        if list_of_broken_processes:
            # PREPARE MAIL BODY #
            mail_body = style.mail_header
            for broken_process in list_of_broken_processes:
                # Tuple length distinguishes count-mismatch (3) from stale-log (5).
                if len(broken_process) == 3:
                    name, number_of_working_processes, number_of_expected_working_processes = broken_process
                    mail_body += ''.join([
                        (style.mail_one_line_format %
                         (number_of_working_processes,
                          number_of_expected_working_processes, '', '',
                          name))
                    ])
                elif len(broken_process) == 5:
                    name, number_of_working_processes, number_of_expected_working_processes, last_update, update_treshold = broken_process
                    mail_body += ''.join([(
                        style.mail_one_line_format %
                        (number_of_working_processes,
                         number_of_expected_working_processes,
                         str(last_update)[:7], str(update_treshold),
                         name))
                    ])
            # SENT MAIL #
            # USING PYTHON LIBRARIES #
            if config.use_python_smtplib:
                try:
                    receivers_data = ';'.join(config.receivers)
                    msg = MIMEMultipart()
                    msg['From'] = config.sender
                    msg['To'] = receivers_data
                    msg['Subject'] = (config.subject % socket_gethostname())
                    msg.attach(MIMEText(mail_body, 'plain'))
                    server = smtplib.SMTP(config.smtp_server,
                                          config.smtp_port)
                    server.ehlo()
                    server.starttls()
                    server.ehlo()
                    server.login(config.sender, config.sender_password)
                    # NOTE(review): this local `text` shadows what looks like a
                    # messages module used as `text.text_11` below -- if the
                    # failure happens before this assignment the handler itself
                    # raises NameError; confirm against the module-level import.
                    text = msg.as_string()
                    server.sendmail(config.sender, config.receivers, text)
                except:
                    sys_exit(text.text_11)
            # USING MAILX #
            if config.use_mailx:
                try:
                    # Body is written to a temp file, piped to mailx, removed.
                    file_body_path = os_path.join(
                        config.path_to_script, config.sent_body_file_name)
                    with open(file_body_path, 'w') as file:
                        file.write(mail_body)
                    command = ''.join([
                        '( cat ', file_body_path, ' ) | mailx -s "',
                        (config.subject % socket_gethostname()), '" "',
                        (','.join(config.receivers)), '"'
                    ])
                    os_system(command)
                    os_remove(file_body_path)
                except KeyError:
                    sys_exit(text.text_16)
            # Back off longer after a failure was reported.
            time_sleep(
                config.when_found_broken_processes_next_check_in_seconds)
        time_sleep(config.check_processes_each_how_many_seconds)
def getSpace(self):
    """Read `df` figures for the flash/cf/usb/hdd mounts, append a usage
    summary to self.smallmontxt and refresh the five gauge widgets."""
    rc = system("df > /tmp/ninfo.tmp")
    flused = fltot = flperc = 0
    cfused = cftot = cfperc = 0
    usused = ustot = usperc = 0
    hdused = hdtot = hdperc = 0
    fperc = 0
    if fileExists("/tmp/ninfo.tmp"):
        f = open("/tmp/ninfo.tmp", 'r')
        for row in f.readlines():
            # blank out 'part1' so device names cannot match a mount below
            fields = row.replace('part1', ' ').strip().split()
            last = (len(fields) - 1)
            mount = fields[last]
            if mount == "/":
                if fields[last - 1].replace('%', '').isdigit():
                    flperc = int(fields[last - 1].replace('%', ''))
                    fltot = int(fields[last - 4])
                    flused = int(fields[last - 3])
            if mount == "/usr":
                # a dedicated /usr mount marks the extended-flash layout
                self.extendedFlash = True
                if fields[last - 1].replace('%', '').isdigit():
                    flperc = int(fields[last - 1].replace('%', ''))
                    fltot = int(fields[last - 4])
                    flused = int(fields[last - 3])
            if mount == "/media/cf":
                cfperc = int(fields[last - 1].replace('%', ''))
                cftot = int(fields[last - 4])
                cfused = int(fields[last - 3])
            if mount == "/media/usb":
                if fields[last - 1].replace('%', '').isdigit():
                    usperc = int(fields[last - 1].replace('%', ''))
                    ustot = int(fields[last - 4])
                    usused = int(fields[last - 3])
            if mount == "/media/hdd":
                if fields[last - 1].replace('%', '').isdigit():
                    hdperc = int(fields[last - 1].replace('%', ''))
                    hdtot = int(fields[last - 4])
                    hdused = int(fields[last - 3])
        f.close()
        os_remove("/tmp/ninfo.tmp")
    # combined figure intentionally leaves the flash partition out
    ftot = cftot + ustot + hdtot
    fused = int(cfused) + int(usused) + int(hdused)
    if ftot > 100:
        fperc = (fused * 100) / ftot
    for label, perc in ((_("Flash in use: "), flperc),
                        (_("Cf in use: "), cfperc),
                        (_("Usb in use: "), usperc),
                        (_("Hdd in use: "), hdperc)):
        self.smallmontxt += label + str(perc) + " %\n"
    for widget, perc in (("spacetg", fperc), ("cffg", cfperc),
                         ("usbg", usperc), ("hddg", hdperc),
                         ("flashg", flperc)):
        self[widget].setValue(int(((perc * 100) / 120) + 50))
def makeActionOnDownloadItem(self, ret):
    """Dispatch the action chosen for the selected download item.

    *ret* is (label, action, [player]); actions: play (open a movie
    player), remove (local file or download entry), continue / retry /
    stop / delet / move (forwarded to the download manager self.DM).
    """
    item = self.getSelItem()
    if None != ret and None != item:
        printDBG("makeActionOnDownloadItem " + ret[1] +
                 (" for downloadIdx[%d]" % item.downloadIdx))
        if ret[1] == "play":
            # Derive a display title from the file name, best effort.
            title = item.fileName
            try:
                title = os_path.basename(title)
                title = os_path.splitext(title)[0]
            except Exception:
                printExc()
            # when we watch we no need update sts
            self.DM.setUpdateProgress(False)
            player = ret[2]
            if "mini" == player:
                self.session.openWithCallback(self.leaveMoviePlayer,
                                              IPTVMiniMoviePlayer,
                                              item.fileName, title)
            elif player in ["exteplayer", "extgstplayer"]:
                additionalParams = {}
                # Audio-only files get the configured iframe background.
                if item.fileName.split('.')[-1] in [
                        'mp3', 'm4a', 'ogg', 'wma', 'fla', 'wav', 'flac'
                ]:
                    additionalParams[
                        'show_iframe'] = config.plugins.iptvplayer.show_iframe.value
                    additionalParams[
                        'iframe_file_start'] = config.plugins.iptvplayer.iframe_file.value
                    additionalParams[
                        'iframe_file_end'] = config.plugins.iptvplayer.clear_iframe_file.value
                    # NOTE(review): 'plarform' is the attribute name actually
                    # declared in config; do not "fix" the spelling here.
                    if 'sh4' == config.plugins.iptvplayer.plarform.value:
                        additionalParams['iframe_continue'] = True
                    else:
                        additionalParams['iframe_continue'] = False
                if "exteplayer" == player:
                    self.session.openWithCallback(self.leaveMoviePlayer,
                                                  IPTVExtMoviePlayer,
                                                  item.fileName, title,
                                                  None, 'eplayer',
                                                  additionalParams)
                else:
                    self.session.openWithCallback(self.leaveMoviePlayer,
                                                  IPTVExtMoviePlayer,
                                                  item.fileName, title,
                                                  None, 'gstplayer',
                                                  additionalParams)
            else:
                self.session.openWithCallback(self.leaveMoviePlayer,
                                              IPTVStandardMoviePlayer,
                                              item.fileName, title)
        elif self.localMode:
            # Local-file mode: 'remove' deletes from disk and from the view.
            if ret[1] == "remove":
                try:
                    os_remove(item.fileName)
                    for idx in range(len(self.localFiles)):
                        if item.fileName == self.localFiles[idx].fileName:
                            del self.localFiles[idx]
                            self.reloadList(True)
                            break
                except Exception:
                    printExc()
        # Download-manager actions (non-local mode only).
        elif ret[1] == "continue":
            self.DM.continueDownloadItem(item.downloadIdx)
        elif ret[1] == "retry":
            self.DM.retryDownloadItem(item.downloadIdx)
        elif ret[1] == "stop":
            self.DM.stopDownloadItem(item.downloadIdx)
        elif ret[1] == "remove":
            self.DM.removeDownloadItem(item.downloadIdx)
        elif ret[1] == "delet":
            self.DM.deleteDownloadItem(item.downloadIdx)
        elif ret[1] == "move":
            self.DM.moveToTopDownloadItem(item.downloadIdx)
def tearDown(self):
    """Best-effort removal of the file written by test_default, then the
    usual base-class teardown."""
    leftover = 'BlobStoreTest.test_default.txt'
    try:
        os_remove(leftover)
    except OSError:
        pass  # nothing was written, or it is already gone
    super().tearDown()
def addonsconn(self):
    """Show the downloaded addon screenshot in 'lab1' and delete the
    temporary file afterwards."""
    icon_path = "/tmp/" + self.fileP
    self["lab1"].instance.setPixmap(loadPic(icon_path, 1280, 720, 0, 0, 0, 1))
    os_remove(icon_path)
def performCustomAction(self, privateData):
    """Handle a file-manager action described by *privateData*.

    Supported actions: remove_file (with confirmation dialog),
    rename_file (virtual keyboard), cut_file / copy_file (remember the
    source), paste_file (rename or mv/cp fallback), umount_iso_file.
    Returns RetHost(OK/ERROR, value=[...]) where value is ['refresh'] or
    error message strings.
    """
    retCode = RetHost.ERROR
    retlist = []
    if privateData['action'] == 'remove_file':
        try:
            ret = self.host.sessionEx.waitForFinishOpen(
                MessageBox,
                text=_('Are you sure you want to remove file "%s"?') %
                privateData['file_path'],
                type=MessageBox.TYPE_YESNO,
                default=False)
            if ret[0]:
                os_remove(privateData['file_path'])
                retlist = ['refresh']
                retCode = RetHost.OK
        except Exception:
            printExc()
    # NOTE(review): this is `if`, not `elif`, unlike the branches below --
    # harmless since actions are mutually exclusive, but confirm intent.
    if privateData['action'] == 'rename_file':
        try:
            path, fileName = os_path.split(privateData['file_path'])
            name, ext = os_path.splitext(fileName)
            ret = self.host.sessionEx.waitForFinishOpen(
                GetVirtualKeyboard(), title=_('Set file name'), text=name)
            printDBG('rename_file new name[%s]' % ret)
            if isinstance(ret[0], basestring):
                # keep the original extension, only the stem is edited
                newPath = os_path.join(path, ret[0] + ext)
                printDBG('rename_file new path[%s]' % newPath)
                if not os_path.isfile(newPath) and not os_path.islink(
                        newPath):
                    os_rename(privateData['file_path'], newPath)
                    retlist = ['refresh']
                    retCode = RetHost.OK
                else:
                    retlist = [_('File "%s" already exists!') % newPath]
        except Exception:
            printExc()
    elif privateData['action'] == 'cut_file':
        # Remember the source; the actual move happens on paste_file.
        self.cFilePath = privateData['file_path']
        self.cType = 'cut'
        retCode = RetHost.OK
    elif privateData['action'] == 'copy_file':
        self.cFilePath = privateData['file_path']
        self.cType = 'copy'
        retCode = RetHost.OK
    elif privateData['action'] == 'paste_file':
        try:
            ok = True
            cutPath, cutFileName = os_path.split(self.cFilePath)
            newPath = os_path.join(privateData['path'], cutFileName)
            if os_path.isfile(newPath):
                retlist = [_('File "%s" already exists') % newPath]
                ok = False
            else:
                ret = {'sts': True, 'code': 0, 'data': ''}
                if self.cType == 'cut':
                    # Fast path: rename; falls back to `mv -f` for
                    # cross-filesystem moves where rename raises.
                    try:
                        os_rename(self.cFilePath, newPath)
                        self.needRefresh = cutPath
                    except Exception:
                        printExc()
                        cmd = 'mv -f "%s" "%s"' % (self.cFilePath, newPath)
                        ret = iptv_execute_wrapper(cmd)
                elif self.cType == 'copy':
                    cmd = 'cp "%s" "%s"' % (self.cFilePath, newPath)
                    ret = iptv_execute_wrapper(cmd)
                if ret['sts'] and 0 != ret['code']:
                    retlist = [(_('Moving file from "%s" to "%s" failed.\n')
                                % (self.cFilePath, newPath)) +
                               (_('Error code: %s\n') % ret['code']) +
                               (_('Error message: %s\n') % ret['data'])]
                    ok = False
            if ok:
                # clear the clipboard state only on success
                self.cType = ''
                self.cFilePath = ''
                retlist = ['refresh']
                retCode = RetHost.OK
        except Exception:
            printExc()
    elif privateData['action'] == 'umount_iso_file':
        cmd = 'umount "{0}"'.format(
            privateData['iso_mount_path']) + ' 2>&1'
        ret = iptv_execute_wrapper(cmd)
        if ret['sts'] and 0 != ret['code']:
            # normal umount failed, so detach filesystem only
            cmd = 'umount -l "{0}"'.format(
                privateData['iso_mount_path']) + ' 2>&1'
            ret = iptv_execute_wrapper(cmd)
    return RetHost(retCode, value=retlist)
def removeCacheFile(self, filePath):
    """Delete the cache file associated with *filePath*; failures are
    logged via printExc and otherwise ignored."""
    target = self._getCacheFileName(filePath)
    try:
        os_remove(target)
    except Exception:
        printExc()
def rm_alias(filename):
    """Remove the alias file *filename*; an OSError propagates when the
    file does not exist."""
    os_remove(filename)
def run(self):
    """
    Called when the process intializes.

    Horizon worker main loop: after coordinating log handover with the
    bin/<skyline_app>.d wrapper, it drains the input queue in 15-second
    chunks, packs each metric into the Redis FULL (and optionally MINI)
    namespaces, and periodically reports its queue size to Graphite.
    """
    # Log management to prevent overwriting
    # Allow the bin/<skyline_app>.d to manage the log
    if os.path.isfile(skyline_app_logwait):
        try:
            os_remove(skyline_app_logwait)
        except OSError:
            logger.error('error - failed to remove %s, continuing' %
                         skyline_app_logwait)
            pass
    # Wait up to ~5s for the .d wrapper to release the log lock.
    now = time()
    log_wait_for = now + 5
    while now < log_wait_for:
        if os.path.isfile(skyline_app_loglock):
            sleep(.1)
            now = time()
        else:
            now = log_wait_for + 1
    logger.info('starting %s run' % skyline_app)
    if os.path.isfile(skyline_app_loglock):
        logger.error(
            'error - bin/%s.d log management seems to have failed, continuing'
            % skyline_app)
        try:
            os_remove(skyline_app_loglock)
            logger.info('log lock file removed')
        except OSError:
            logger.error('error - failed to remove %s, continuing' %
                         skyline_app_loglock)
            pass
    else:
        logger.info('bin/%s.d log management done' % skyline_app)
    logger.info('%s :: started worker' % skyline_app)
    FULL_NAMESPACE = settings.FULL_NAMESPACE
    MINI_NAMESPACE = settings.MINI_NAMESPACE
    MAX_RESOLUTION = settings.MAX_RESOLUTION
    full_uniques = '%sunique_metrics' % FULL_NAMESPACE
    mini_uniques = '%sunique_metrics' % MINI_NAMESPACE
    pipe = self.redis_conn.pipeline()
    last_send_to_graphite = time()
    queue_sizes = []
    # python-2.x and python3.x handle while 1 and while True differently
    # while 1:
    running = True
    while running:
        # Make sure Redis is up
        try:
            self.redis_conn.ping()
        except:
            logger.error(
                '%s :: can\'t connect to redis at socket path %s' %
                (skyline_app, settings.REDIS_SOCKET_PATH))
            sleep(10)
            # reconnect and rebuild the pipeline before retrying
            self.redis_conn = StrictRedis(
                unix_socket_path=settings.REDIS_SOCKET_PATH)
            pipe = self.redis_conn.pipeline()
            continue
        try:
            # Get a chunk from the queue with a 15 second timeout
            chunk = self.q.get(True, 15)
            # @modified 20170317 - Feature #1978: worker - DO_NOT_SKIP_LIST
            # now = time()
            now = int(time())
            for metric in chunk:
                # Check if we should skip it
                if self.in_skip_list(metric[0]):
                    continue
                # Bad data coming in
                if metric[1][0] < now - MAX_RESOLUTION:
                    continue
                # Append to messagepack main namespace
                key = ''.join((FULL_NAMESPACE, metric[0]))
                pipe.append(key, packb(metric[1]))
                pipe.sadd(full_uniques, key)
                if not self.skip_mini:
                    # Append to mini namespace
                    mini_key = ''.join((MINI_NAMESPACE, metric[0]))
                    pipe.append(mini_key, packb(metric[1]))
                    pipe.sadd(mini_uniques, mini_key)
            pipe.execute()
        except Empty:
            logger.info('%s :: worker queue is empty and timed out' %
                        skyline_app)
        except WatchError:
            # NOTE(review): `key` may be unbound here if the error fired
            # before the first pipe.append -- confirm this path is reachable.
            logger.error('%s :: WatchError - %s' % (skyline_app, str(key)))
        except NotImplementedError:
            pass
        except Exception as e:
            logger.error('%s :: error: %s' % (skyline_app, str(e)))
        # Log progress
        if self.canary:
            logger.info('%s :: queue size at %d' %
                        (skyline_app, self.q.qsize()))
            queue_sizes.append(self.q.qsize())
            # Only send average queue mertics to graphite once per 10 seconds
            now = time()
            last_sent_graphite = now - last_send_to_graphite
            if last_sent_graphite > 10:
                number_queue_sizes = len(queue_sizes)
                total_of_queue_sizes = sum(queue_sizes)
                if total_of_queue_sizes > 0:
                    average_queue_size = total_of_queue_sizes / number_queue_sizes
                else:
                    average_queue_size = 0
                logger.info(
                    '%s :: total queue size for the last 10 seconds - %s'
                    % (skyline_app, str(total_of_queue_sizes)))
                logger.info(
                    '%s :: total queue values known for the last 10 seconds - %s'
                    % (skyline_app, str(number_queue_sizes)))
                logger.info(
                    '%s :: average queue size for the last 10 seconds - %s'
                    % (skyline_app, str(average_queue_size)))
                # self.send_graphite_metric('skyline.horizon.' + SERVER_METRIC_PATH + 'queue_size', self.q.qsize())
                # self.send_graphite_metric('queue_size', average_queue_size)
                send_metric_name = '%s.queue_size' % skyline_app_graphite_namespace
                send_graphite_metric(skyline_app, send_metric_name,
                                     average_queue_size)
                # reset queue_sizes and last_sent_graphite
                queue_sizes = []
                last_send_to_graphite = time()