def uninstall(self, install_path, force=False):
    """Remove an installed activity bundle and its registered mime data.

    If the install path is a symlink, only the link is removed (the
    target may hold user data).  Otherwise the bundle's mime package
    XML and any mime-type icon symlinks pointing into the bundle are
    cleaned up before the bundle directory itself is removed.
    """
    if os.path.islink(install_path):
        # Don't remove the actual activity dir if it's a symbolic link
        # because we may be removing user data.
        os.unlink(install_path)
        return
    xdg_data_home = os.getenv('XDG_DATA_HOME',
                              os.path.expanduser('~/.local/share'))
    mime_dir = os.path.join(xdg_data_home, 'mime')
    installed_mime_path = os.path.join(mime_dir, 'packages',
                                       '%s.xml' % self._bundle_id)
    if os.path.exists(installed_mime_path):
        os.remove(installed_mime_path)
        # Rebuild the shared mime database now that our package XML is gone.
        os.spawnlp(os.P_WAIT, 'update-mime-database',
                   'update-mime-database', mime_dir)
    mime_types = self.get_mime_types()
    if mime_types is not None:
        installed_icons_dir = os.path.join(
            xdg_data_home, 'icons/sugar/scalable/mimetypes')
        if os.path.isdir(installed_icons_dir):
            for f in os.listdir(installed_icons_dir):
                path = os.path.join(installed_icons_dir, f)
                # Only remove icon symlinks that point into this bundle.
                if os.path.islink(path) and \
                   os.readlink(path).startswith(install_path):
                    os.remove(path)
    self._uninstall(install_path)
def do_devhelp(self, document):
    """Look up the word under the document cursor in devhelp.

    Expands the cursor position backward and forward over non-separator
    characters to find the word boundaries, then spawns `devhelp -s word`.
    """
    # Get the word at the cursor
    start = document.get_iter_at_mark(document.get_insert())
    end = start.copy()
    # If just after a word, move back into it
    c = start.get_char()
    if self._is_word_separator(c):
        start.backward_char()
    # Go backward
    while True:
        c = start.get_char()
        if not self._is_word_separator(c):
            # backward_char() returns False at the buffer start.
            if not start.backward_char():
                break
        else:
            # Stepped one char past the word; move back onto its first char.
            start.forward_char()
            break
    # Go forward
    while True:
        c = end.get_char()
        if not self._is_word_separator(c):
            # forward_char() returns False at the buffer end.
            if not end.forward_char():
                break
        else:
            break
    if end.compare(start) > 0:
        text = document.get_text(start,end,False).strip()
        if text:
            # FIXME: We need a dbus interface for devhelp soon...
            os.spawnlp(os.P_NOWAIT, 'devhelp', 'devhelp', '-s', text)
def on_open_in_web_browser_activated(self, action):
    """Open the selected video in the default web browser via xdg-open.

    Fixes: the original placed the docstring mid-body (where it was a
    no-op expression) and shadowed the builtin `iter`.
    """
    # Resolve the selection in the currently visible treeview; only the
    # first selected row is used.
    selection = self.treeview[self.current_treeview_name].get_selection()
    model, rows = selection.get_selected_rows()
    tree_iter = model.get_iter(rows[0])
    # Column 3 holds the YouTube video ID.
    youtube_id = model.get_value(tree_iter, 3)
    # Quote the ID so it is safe to embed in the URL.
    os.spawnlp(os.P_NOWAIT, "xdg-open", "xdg-open",
               "http://www.youtube.com/watch?v=" + urllib.quote(youtube_id)
               + self.get_fmt_string())
def toolbox_triggered(self):
    """Launch hp-toolbox, or raise the already-running instance."""
    # Reap any finished child processes so earlier spawns don't linger
    # as zombies.
    try:
        os.waitpid(-1, os.WNOHANG)
    except OSError:
        pass
    # See if it is already running...
    ok, lock_file = utils.lock_app('hp-toolbox', True)
    if ok:
        # able to lock, not running...
        utils.unlock(lock_file)
        path = utils.which('hp-toolbox')
        if path:
            path = os.path.join(path, 'hp-toolbox')
        else:
            log.error("Unable to find hp-toolbox on PATH.")
            self.tray_icon.showMessage("HPLIP Status Service",
                self.__tr("Unable to locate hp-toolbox on system PATH."),
                self.icon_error, 5000)
            return
        log.debug(path)
        os.spawnlp(os.P_NOWAIT, path, 'hp-toolbox')
    else:
        # ...already running, raise it
        device.Event('', '', EVENT_RAISE_DEVICE_MANAGER).send_via_dbus(
            SessionBus(), 'com.hplip.Toolbox')
def handle_hplip_updation(self):
    """Check for HPLIP upgrades; notify the user if enabled and due.

    When notification is disabled, runs `hp-upgrade --check` (which only
    refreshes the available-version info in the conf file).  Otherwise,
    runs `hp-upgrade --notify` once the scheduled time has passed.
    """
    log.debug("handle_hplip_updation upgrade_notify =%d"%(self.user_settings.upgrade_notify))
    path = utils.which('hp-upgrade')
    if self.user_settings.upgrade_notify is False:
        log.debug("upgrade notification is disabled in systray ")
        if path:
            path = os.path.join(path, 'hp-upgrade')
            log.debug("Running hp-upgrade: %s " % (path))
            # this just updates the available version in conf file. But won't notify
            os.spawnlp(os.P_NOWAIT, path, 'hp-upgrade', '--check')
            time.sleep(5)
        # Reap any finished child so the spawned checker doesn't zombify.
        try:
            os.waitpid(0, os.WNOHANG)
        except OSError:
            pass
        return
    current_time = time.time()
    if int(current_time) > self.user_settings.upgrade_pending_update_time:
        path = utils.which('hp-upgrade')
        if path:
            path = os.path.join(path, 'hp-upgrade')
            log.debug("Running hp-upgrade: %s " % (path))
            os.spawnlp(os.P_NOWAIT, path, 'hp-upgrade', '--notify')
            time.sleep(5)
        else:
            log.error("Unable to find hp-upgrade --notify on PATH.")
    else:
        log.debug("upgrade schedule time is not yet completed. schedule time =%d current time =%d " %(self.user_settings.upgrade_pending_update_time, current_time))
    # Reap any finished child processes.
    try:
        os.waitpid(0, os.WNOHANG)
    except OSError:
        pass
def decode(frommp3name, towavname):
    """Decode an .mp3 or .flac file to a WAV file via an external decoder.

    Files with any other extension are silently ignored.
    """
    lowered = frommp3name.lower()
    if lowered.endswith(".mp3"):
        cmd = ["mpg123", "--quiet", "--wav", towavname, frommp3name]
    elif lowered.endswith(".flac"):
        cmd = ["flac", "-d", "--totally-silent", "-o", towavname, frommp3name]
    else:
        # Unrecognized extension: nothing to do.
        return
    # Run synchronously; cmd[0] doubles as argv[0].
    os.spawnlp(os.P_WAIT, cmd[0], *cmd)
def download(self, video, dest, default=None):
    """Download *video* to *dest* with a tool chosen by URL scheme.

    Returns 4 if the direct URL is missing, 1 if no suitable downloader
    is installed; otherwise spawns the downloader and waits for it.
    """
    if not video.url:
        print >>sys.stderr, 'Error: the direct URL is not available.'
        return 4

    def check_exec(executable):
        # Use `which` to verify the helper program exists on PATH.
        with open('/dev/null', 'w') as devnull:
            process = subprocess.Popen(['which', executable], stdout=devnull)
            if process.wait() != 0:
                print >>sys.stderr, 'Please install "%s"' % executable
                return False
        return True

    dest = self.obj_to_filename(video, dest, default)
    if video.url.startswith('rtmp'):
        if not check_exec('rtmpdump'):
            return 1
        args = ('rtmpdump', '-e', '-r', video.url, '-o', dest)
    elif video.url.startswith('mms'):
        if not check_exec('mimms'):
            return 1
        args = ('mimms', '-r', video.url, dest)
    else:
        # Plain HTTP(S): prefer wget, fall back to curl; both resume.
        if check_exec('wget'):
            args = ('wget', '-c', video.url, '-O', dest)
        elif check_exec('curl'):
            args = ('curl', '-C', '-', video.url, '-o', dest)
        else:
            return 1
    os.spawnlp(os.P_WAIT, args[0], *args)
def process_default(self, event):
    """Handle a new .gcf/.sac file in the watched tree: rename + convert
    gcf files with gcf2sac, or move sac files to the destination folder.

    Expects names like ``YYYYMMDD_HHMM....<comp>.<ext>`` — TODO confirm
    the exact pattern against the data producer.
    """
    try:
        if event.name.split('.')[-1] == 'gcf' or event.name.split('.')[-1] == 'sac':
            date = event.name.split('_')[0]
            hour = event.name.split('_')[1][:4]
            component = event.name.split('.')[0][-1]
            # Station is the parent directory name, lowercased.
            station = event.pathname.split('/')[-2].lower()
            output = '%s_%s%s%s'%(date,hour,station,component)
            input = event.pathname
            folder = 'C%s'%date
            dest = config.get('FOLDER','sacdest')+'/'+folder
            print ' date %s\n hour %s\n comp %s\n station %s\n output %s\n input %s\n folder %s\n dest %s'%(date,hour,component,station,output,input,folder,dest)
            if event.name.split('.')[-1] == 'gcf':
                # Rename to the canonical name, then convert to SAC.
                fixedName = event.path+'/'+output+'.gcf'
                os.rename(input,fixedName)
                a = os.spawnlp(os.P_WAIT,'gcf2sac','gcf2sac',fixedName,'-o:'+dest)
            if event.name.split('.')[-1] == 'sac':
                if not os.path.exists(dest):
                    os.system('mkdir %s'%dest)
                print 'moving %s %s'%(event.pathname,dest)
                os.spawnlp(os.P_WAIT, 'mv', 'mv',event.pathname, dest)
                #cmd = 'rsync -avr -e ssh /mnt/seismData/sac [email protected]:/home/user/'
                #os.spawnlp(os.P_WAIT, 'rsync', 'rsync',event.pathname, dest)
    except:
        # NOTE(review): bare except treats ANY failure (including real
        # errors) as a name-pattern mismatch — consider narrowing.
        print "File doesn't match with name pattern: %s"%event.name
def run():
    """Launch TuTalk.py (detached) from the sibling ``tutalk`` directory,
    forwarding this script's command-line arguments."""
    here = os.getcwd()
    tutalk_home = os.path.join(here, "..", "tutalk")
    # TuTalk expects to run from its own directory.
    os.chdir(tutalk_home)
    extra_args = sys.argv[1:]
    os.spawnlp(os.P_NOWAIT, "./TuTalk.py", "./TuTalk.py", *extra_args)
def make_mo_gettext():
    """
    Calls 'msgfmt' from GNU gettext to generate object files (.mo) from
    the translation files (.po).

    Note: As this function uses the $PATH variable (with spawnlp) it
    doesn't work under Windows.
    """
    print "Generating gettext mo files:",
    po_files = 'po/*.po'
    mo_base_dir = 'locale/%s/LC_MESSAGES/'
    conv_program = 'msgfmt'
    for lang_file in glob(po_files):
        # Language code is the .po filename without its extension.
        language = basename(lang_file)[:-3]
        mo_dir = mo_base_dir % language
        print language,
        try:
            makedirs(mo_dir)
        except OSError, inst:
            # An already-existing target dir is fine; warn on anything else.
            if inst.strerror != 'File exists':
                print 'Warning: ', inst.file, inst.strerror, 'ignoring.'
        # normalize path for windows
        lang_file_norm = normpath(lang_file)
        mo_dir_norm = normpath(mo_dir)
        mo_file = mo_dir_norm + "/quizdrill.mo"
        #print conv_program, lang_file, "-o", mo_file    # debugging
        spawnlp(P_WAIT, conv_program, conv_program,
                lang_file_norm, "-o", mo_file)
def start_job_daemon(max_delay=0, jobdaemon_program=None,
                     jobdaemon_host=None, jobdaemon_port=None,
                     store_url=None, session_dir=None):
    """
    Start the GC3Pie "job daemon".

    Actual startup of the child process is delayed by a random amount
    up to *max_delay* seconds.  By default *max_delay* is 0 (i.e. the
    job daemon process is started immediately) but this can be used to
    avoid multiple concurrent starts from separate threads.
    """
    # Defaults are resolved here, not in the signature: `cfg.*` would
    # otherwise be evaluated once, when the function definition is read.
    daemon_cmd = jobdaemon_program or cfg.jobdaemon
    host = cfg.jobdaemon_host if jobdaemon_host is None else jobdaemon_host
    port = cfg.jobdaemon_port if jobdaemon_port is None else jobdaemon_port
    url = (cfg.db_master_uri + '#table=tasks') if store_url is None else store_url
    session = cfg.jobdaemon_session if session_dir is None else session_dir
    # Randomized stagger to avoid concurrent starts from separate threads.
    sleep(max_delay * random())
    logger.info("Trying to start GC3Pie job daemon.")
    os.spawnlp(os.P_NOWAIT, daemon_cmd, daemon_cmd,
               '--session', session,
               '--store-url', url,
               '--listen', (host + ':' + port))
def KillPID(self):
    """
    Kill VNC instance, called by the Stop Button or Application ends.
    @author: Derek Buranen
    @author: Aaron Gerber
    """
    if self.returnPID != 0:
        print _("Processes.KillPID(%s)") % str(self.returnPID)
        if sys.platform == "win32":
            import win32api
            PROCESS_TERMINATE = 1
            handle = win32api.OpenProcess(PROCESS_TERMINATE, False, self.returnPID)
            win32api.TerminateProcess(handle, -1)
            win32api.CloseHandle(handle)
        elif re.match("(?:open|free|net)bsd|linux", sys.platform):
            # New processes are created when you made connections. So if you kill self.returnPID,
            # you're just killing the dispatch process, not the one actually doing business...
            os.spawnlp(os.P_NOWAIT, "pkill", "pkill", "-f", "vncviewer")
            os.spawnlp(os.P_NOWAIT, "pkill", "pkill", "-f", "x11vnc")
        else:
            # Other Unix: kill the recorded pid directly and reap it.
            os.kill(self.returnPID, signal.SIGKILL)
            try:
                os.waitpid(self.returnPID, 0)
            except:
                pass
        # Mark the process as gone.
        self.returnPID = 0
    return
def usb_lsusb_string(sysfs_path):
    """Return "manufacturer product serial" for the USB device at
    *sysfs_path*, parsed from verbose `lsusb` output."""
    # Sysfs device names look like "<bus>-<dev>".
    bus, dev = os.path.basename(os.path.realpath(sysfs_path)).split('-')
    try:
        # JRH: why this cruel and unusual punishment to run lsusb?
        # It turns out that the Raspberry Pi's Python 2.7 library is
        # old (2.7.1) and has a problem when calling os.fork() and using
        # _DummyThread. It tosses an exception, which is caught but
        # annoyingly printed. This method of collecting results from
        # lsusb avoids calling os.fork() and the annoying exception.
        (fd, filename) = tempfile.mkstemp()
        os.spawnlp(os.P_WAIT, 'sh', 'sh', '-c',
                   "/usr/bin/lsusb -v -s %s:%s > %s" % (bus, dev, filename))
        f = os.fdopen(fd, "r")
        desc = f.read()
        f.close()
    finally:
        # NOTE(review): if mkstemp() itself failed, `filename` is unbound
        # here and this raises NameError — consider guarding.
        os.remove(filename)
    # descriptions from device
    iManufacturer = re_group('iManufacturer\s+\w+ (.+)', desc)
    iProduct = re_group('iProduct\s+\w+ (.+)', desc)
    iSerial = re_group('iSerial\s+\w+ (.+)', desc) or ''
    # descriptions from kernel
    idVendor = re_group('idVendor\s+0x\w+ (.+)', desc)
    idProduct = re_group('idProduct\s+0x\w+ (.+)', desc)
    # create descriptions. prefer text from device, fall back to the others
    return '%s %s %s' % (iManufacturer or idVendor,
                         iProduct or idProduct, iSerial)
def launchxTerm(self, event=None):
    """Open an xterm titled "Leo" in the current node's directory.

    Bug fix: spawnlp() takes each argv element as a separate argument;
    the old call ``os.spawnlp(os.P_NOWAIT, 'xterm', '-title Leo')``
    passed the single string '-title Leo' as argv[0], so xterm never
    received the -title option at all.
    """
    d = self._getCurrentNodePath()
    curdir = os.getcwd()
    os.chdir(d)
    try:
        os.spawnlp(os.P_NOWAIT, 'xterm', 'xterm', '-title', 'Leo')
    finally:
        # Always restore the working directory, even if the spawn fails.
        os.chdir(curdir)
def toolboxTriggered(self):
    """Launch hp-toolbox, or raise the already-running instance."""
    # Reap any finished child processes so earlier spawns don't linger
    # as zombies.
    try:
        os.waitpid(-1, os.WNOHANG)
    except OSError:
        pass
    # See if it is already running...
    ok, lock_file = utils.lock_app('hp-toolbox', True)
    if ok:
        # able to lock, not running...
        utils.unlock(lock_file)
        path = utils.which('hp-toolbox')
        if path:
            path = os.path.join(path, 'hp-toolbox')
        else:
            self.tray_icon.showMessage(self.__tr("HPLIP Status Service"),
                self.__tr("Unable to locate hp-toolbox on system PATH."),
                QSystemTrayIcon.Critical, TRAY_MESSAGE_DELAY)
            log.error("Unable to find hp-toolbox on PATH.")
            return
        #log.debug(path)
        log.debug("Running hp-toolbox: hp-toolbox")
        os.spawnlp(os.P_NOWAIT, path, 'hp-toolbox')
    else:
        # ...already running, raise it
        self.sendMessage('', '', EVENT_RAISE_DEVICE_MANAGER,
                         interface='com.hplip.Toolbox')
def do_download(self, video):
    """Download *video* with a tool chosen by its URL scheme.

    Returns 3 if *video* is falsy, 4 if it has no direct URL, 1 if the
    needed downloader is not installed; otherwise runs the download and
    records the link name.
    """
    if not video:
        print >>sys.stderr, 'Video not found: %s' % video
        return 3
    if not video.url:
        print >>sys.stderr, 'Error: the direct URL is not available.'
        return 4

    def check_exec(executable):
        # Use `which` to verify the helper program exists on PATH.
        with open('/dev/null', 'w') as devnull:
            process = subprocess.Popen(['which', executable], stdout=devnull)
            if process.wait() != 0:
                print >>sys.stderr, 'Please install "%s"' % executable
                return False
        return True

    dest = self.get_filename(video)
    if video.url.startswith('rtmp'):
        if not check_exec('rtmpdump'):
            return 1
        args = ('rtmpdump', '-e', '-r', video.url, '-o', dest)
    elif video.url.startswith('mms'):
        if not check_exec('mimms'):
            return 1
        args = ('mimms', video.url, dest)
    else:
        if not check_exec('wget'):
            return 1
        args = ('wget', video.url, '-O', dest)
    os.spawnlp(os.P_WAIT, args[0], *args)
    self.set_linkname(video)
def __on_button_press(self, event):
    """Open this document's link on a primary-button click."""
    # Only react to button 1 (left click); let other buttons propagate.
    if event.button != 1:
        return False
    logging.debug("activated doc %s", self)
    # Fire-and-forget: hand the link to the desktop handler.
    os.spawnlp(os.P_NOWAIT, 'gnome-open', 'gnome-open',
               self.__doc.get_link())
def main(self):
    """Run the app: show the GUI, or launch the unity client directly
    when exactly one profile is configured and --show was not given."""
    if len(self.setup_profiles) != 1 or self.show == True:
        self.window.show()
        gtk.main()
    else:
        # Single profile and no GUI requested: start the client for it.
        unity = os.path.join(self.unity_location, 'client', 'main.py')
        os.spawnlp(os.P_NOWAIT, unity, unity, '-p', self.setup_profiles[0])
def EnsureSnakeDir():
    """Check and create the snake output dir (a per-source-tree directory
    under LOCAL_SNAKE_DIR, symlinked from SNAKE_OUT)."""
    def _GetHashedName():
        # Hash the cwd so each source tree gets its own output directory.
        path = os.getcwd()
        m = hashlib.md5()
        m.update(path)
        return m.hexdigest()

    def _CreateDirs():
        # Validate the shared base dir, create the hashed subdir, and
        # point SNAKE_OUT at it.
        if not os.path.exists(Flags.LOCAL_SNAKE_DIR):
            Abort('No [%s], please create and chmod 777 for it! ' % Flags.LOCAL_SNAKE_DIR)
        if not ModeMatch(Flags.LOCAL_SNAKE_DIR, 511):
            # 511 (decimal) is the same as 777 (octal).
            Abort('Please chmod 777 for [%s]' % Flags.LOCAL_SNAKE_DIR)
        path = os.path.join(Flags.LOCAL_SNAKE_DIR, _GetHashedName())
        if not os.path.exists(path):
            try:
                os.mkdir(path)
            except:
                Abort('create %s failed, maybe no access?' % path)
        try:
            os.symlink(path, Flags.SNAKE_OUT)
        except:
            Abort('please try to rm -rf [%s]' % Flags.SNAKE_OUT)
        MkDir(os.path.join(path, 'opt'))
        MkDir(os.path.join(path, 'dbg'))

    if not os.path.islink(Flags.SNAKE_OUT):
        if os.path.exists(Flags.SNAKE_OUT):
            # SNAKE_OUT exists but is a real dir from an old layout: replace it.
            print BuildMessage('Deleting the old output dir %s' % Flags.SNAKE_OUT, 'WARNING')
            os.spawnlp(os.P_WAIT, 'rm', 'rm', '-rf', Flags.SNAKE_OUT)
        _CreateDirs()
    elif not os.path.exists(Flags.SNAKE_OUT):
        # SNAKE_OUT is a dangling symlink; recreate the target dirs.
        _CreateDirs()
def get_ibus_bus():
    """Get the ibus bus object, possibly starting the ibus daemon if it's
    not already running.

    :raises: **RuntimeError** in the case of ibus-daemon being unavailable.

    """
    bus = IBus.Bus()
    if bus.is_connected():
        return bus
    # Not connected yet: spawn the daemon, then block on a main loop that
    # exits either on the bus "connected" signal or after `timeout` seconds.
    timeout = 5
    loop = GLib.MainLoop()
    GLib.timeout_add_seconds(timeout, lambda *unused: loop.quit())
    bus.connect("connected", lambda *unused: loop.quit())
    os.spawnlp(os.P_NOWAIT, "ibus-daemon", "ibus-daemon", "--xim")
    loop.run()
    if bus.is_connected():
        return bus
    raise RuntimeError(
        "Could not start ibus-daemon after %d seconds." % (timeout))
def _run_indexer(self):
    """Run every library-common/make_index.py found under $XDG_DATA_DIRS."""
    search_path = os.getenv('XDG_DATA_DIRS',
                            '/usr/local/share/:/usr/share/')
    candidates = (os.path.join(d, 'library-common', 'make_index.py')
                  for d in search_path.split(':'))
    for indexer in candidates:
        if os.path.exists(indexer):
            # Run synchronously so indexing completes before we continue.
            os.spawnlp(os.P_WAIT, 'python', 'python', indexer)
def posttrans_hook(conduit):
    """Yum post-transaction hook: run lxc-patch on the container when any
    package listed in the plugin's 'packages' config was updated."""
    pkgs = []
    patch_required = False
    # If we aren't root, we can't have updated anything
    if os.geteuid():
        return
    # See what packages have files that were patched
    confpkgs = conduit.confString('main', 'packages')
    if not confpkgs:
        return
    tmp = confpkgs.split(",")
    for confpkg in tmp:
        pkgs.append(confpkg.strip())
    conduit.info(2, "lxc-patch: checking if updated pkgs need patching...")
    ts = conduit.getTsInfo()
    for tsmem in ts.getMembers():
        for pkg in pkgs:
            # NOTE(review): fnmatch(name, pattern) — here the configured
            # entry is passed as the *name* and the package name as the
            # *pattern*; this only wildcard-matches if the package name
            # contains the glob characters. Confirm against upstream.
            if fnmatch(pkg, tsmem.po.name):
                patch_required = True
    if patch_required:
        conduit.info(2, "lxc-patch: patching container...")
        os.spawnlp(os.P_WAIT, "lxc-patch", "lxc-patch", "--patch", "/")
def download_obj(self, obj, dest):
    """Download *obj* to *dest*, choosing the tool by URL scheme / ext.

    Returns 1 when the required downloader is not installed; otherwise
    spawns it and waits for completion.
    """
    def check_exec(executable):
        # Use `which` to verify the helper program exists on PATH.
        with open('/dev/null', 'w') as devnull:
            process = subprocess.Popen(['which', executable], stdout=devnull)
            if process.wait() != 0:
                print 'Please install "%s"' % executable
                return False
        return True

    dest = self.obj_to_filename(obj, dest)
    if obj.url.startswith('rtmp'):
        if not check_exec('rtmpdump'):
            return 1
        args = ('rtmpdump', '-e', '-r', obj.url, '-o', dest)
    elif obj.url.startswith('mms'):
        if not check_exec('mimms'):
            return 1
        args = ('mimms', '-r', obj.url, dest)
    elif u'm3u8' == obj.ext:
        # HLS playlist: pass every media-segment URL (non-comment lines)
        # to a single wget call, and save under an .mp4 name.
        _dest, _ = os.path.splitext(dest)
        dest = u'%s.%s' % (_dest, 'mp4')
        args = ('wget',) + tuple(line for line in self.read_url(obj.url)
                                 if not line.startswith('#')) + ('-O', dest)
    else:
        if check_exec('wget'):
            args = ('wget', '-c', obj.url, '-O', dest)
        elif check_exec('curl'):
            args = ('curl', '-C', '-', obj.url, '-o', dest)
        else:
            return 1
    os.spawnlp(os.P_WAIT, args[0], *args)
def launch_uri(self, uri, mimetype = None):
    """Open *uri* with gnome-open from a detached forked child.

    Returns the resolved mime-type string when one can be looked up via
    XDG, otherwise the forked child's pid.
    """
    assert uri, "Must specify URI to launch"
    child = os.fork()
    if not child:
        # Inside forked child: detach from our session so the launched
        # app outlives us, set launcher env hints, spawn, and exit
        # immediately without running parent cleanup handlers.
        os.setsid()
        os.environ['zeitgeist_LAUNCHER'] = uri
        os.environ['DESKTOP_STARTUP_ID'] = self.make_startup_id(uri)
        os.spawnlp(os.P_NOWAIT, "gnome-open", "gnome-open", uri)
        os._exit(0)
    else:
        # Parent: reap the short-lived child.
        os.wait()
        if not mimetype:
            mimetype = "application/octet-stream"
            try:
                # Use XDG to lookup mime type based on file name.
                # gtk_recent_manager_add_full requires it.
                import xdg.Mime
                mimetype = xdg.Mime.get_type_by_name(uri)
                if mimetype:
                    mimetype = str(mimetype)
                return mimetype
            except (ImportError, NameError):
                # No mimetype found for URI: %s
                pass
    return child
def tryStartDBUSServer(self): try: self.setupDeviceManager() except dbus.DBusException, ex: self.retry.setEnabled(False) os.spawnlp( os.P_NOWAIT, "ffado-dbus-server" ) QTimer.singleShot(2000, self.connectToDBUS)
def install_mime_type(self, install_path):
    """ Update the mime type database and install the mime type icon """
    xdg_data_home = os.getenv("XDG_DATA_HOME",
                              os.path.expanduser("~/.local/share"))
    mime_path = os.path.join(install_path, "activity", "mimetypes.xml")
    if os.path.isfile(mime_path):
        mime_dir = os.path.join(xdg_data_home, "mime")
        mime_pkg_dir = os.path.join(mime_dir, "packages")
        if not os.path.isdir(mime_pkg_dir):
            os.makedirs(mime_pkg_dir)
        installed_mime_path = os.path.join(mime_pkg_dir,
                                           "%s.xml" % self._bundle_id)
        # Symlink the bundle's mimetypes.xml into the shared package dir,
        # then rebuild the mime database.
        self._symlink(mime_path, installed_mime_path)
        os.spawnlp(os.P_WAIT, "update-mime-database",
                   "update-mime-database", mime_dir)
    mime_types = self.get_mime_types()
    if mime_types is not None:
        installed_icons_dir = os.path.join(
            xdg_data_home, "icons/sugar/scalable/mimetypes")
        if not os.path.isdir(installed_icons_dir):
            os.makedirs(installed_icons_dir)
        for mime_type in mime_types:
            # Icon files are named after the mime type, '/' -> '-'.
            mime_icon_base = os.path.join(install_path, "activity",
                                          mime_type.replace("/", "-"))
            svg_file = mime_icon_base + ".svg"
            info_file = mime_icon_base + ".icon"
            self._symlink(svg_file,
                          os.path.join(installed_icons_dir,
                                       os.path.basename(svg_file)))
            self._symlink(info_file,
                          os.path.join(installed_icons_dir,
                                       os.path.basename(info_file)))
def uninstall(self, force=False, delete_profile=False):
    """Remove this installed bundle, its mime registration and icon links;
    optionally delete the bundle's profile directory as well."""
    install_path = self.get_path()
    if os.path.islink(install_path):
        # Don't remove the actual activity dir if it's a symbolic link
        # because we may be removing user data.
        os.unlink(install_path)
        return
    xdg_data_home = os.getenv("XDG_DATA_HOME",
                              os.path.expanduser("~/.local/share"))
    mime_dir = os.path.join(xdg_data_home, "mime")
    installed_mime_path = os.path.join(mime_dir, "packages",
                                       "%s.xml" % self._bundle_id)
    if os.path.exists(installed_mime_path):
        os.remove(installed_mime_path)
        # Rebuild the shared mime database now that our package XML is gone.
        os.spawnlp(os.P_WAIT, "update-mime-database",
                   "update-mime-database", mime_dir)
    mime_types = self.get_mime_types()
    if mime_types is not None:
        installed_icons_dir = os.path.join(
            xdg_data_home, "icons/sugar/scalable/mimetypes")
        if os.path.isdir(installed_icons_dir):
            for f in os.listdir(installed_icons_dir):
                path = os.path.join(installed_icons_dir, f)
                # Only remove icon symlinks that point into this bundle.
                if os.path.islink(path) and os.readlink(path).startswith(install_path):
                    os.remove(path)
    if delete_profile:
        bundle_profile_path = env.get_profile_path(self._bundle_id)
        if os.path.exists(bundle_profile_path):
            # Make sure we can delete it even if permissions were tightened.
            os.chmod(bundle_profile_path, 0775)
            shutil.rmtree(bundle_profile_path, ignore_errors=True)
    self._uninstall(install_path)
def upload_machine(path):
    """Spawn uploader.py (detached) for every album subdirectory of *path*.

    Bug fix: the directory test must use the full joined path — the old
    code called ``os.path.isdir(album)`` on the bare entry name, which
    only worked when the current working directory happened to be *path*.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    script = os.path.join(script_dir, "uploader.py")
    for album in os.listdir(path):
        album_path = os.path.join(path, album)
        if os.path.isdir(album_path):
            os.spawnlp(os.P_NOWAIT, script, script, album_path, "/dev/null")
def spawn_caches(self, *ports, **kwargs):
    """Start one qcache docker container per port; returns the container
    names and records them in self.caches.

    Keyword args: ``certfile`` (mounted under /certs) and ``auth``
    (basic-auth credentials) are forwarded to qcache.
    """
    names = []
    for port in ports:
        name = "qcache{port}".format(port=port)
        names.append(name)
        # Build the spawnlp argument list for `docker run`.
        args = [os.P_NOWAIT, "docker", "docker", "run", "--net=host",
                "-p", "{port}:{port}".format(port=port),
                "-v", "{dir}:/certs".format(
                    dir=os.path.dirname(os.path.abspath(__file__))),
                "--rm", "--name", name,
                "tobgu/qcache:{version}".format(version=QCACHE_VERSION),
                "qcache", "--port={port}".format(port=port)]
        if 'certfile' in kwargs:
            args.append("--cert-file=/certs/%s" % kwargs['certfile'])
        if 'auth' in kwargs:
            args.append('--basic-auth=%s' % kwargs['auth'])
        # Remove any stale container with the same name first.
        os.spawnlp(os.P_NOWAIT, "docker", "docker", "rm", "-f", name)
        time.sleep(1.0)
        os.spawnlp(*args)
    # Let the processes start
    time.sleep(2.0)
    self.caches.update(names)
    return names
def worklog(title, diff, log_string='updated by express-gen'):
    """Save a diff to the ACL worklog"""
    from time import strftime,localtime
    from trigger.utils.rcs import RCS
    # Worklog files are per-day: workdoc.YYYYMMDD.
    date = strftime('%Y%m%d', localtime())
    file = os.path.join(settings.FIREWALL_DIR, 'workdocs', 'workdoc.' + date)
    rcs = RCS(file)
    if not os.path.isfile(file):
        # First entry of the day: create the file and check it in.
        print 'Creating new worklog %s' % file
        f = open(file,"w")
        f.write("# vi:noai:\n\n")
        f.close()
        rcs.checkin('.')
    print 'inserting the diff into the worklog %s' % file
    # Acquire the RCS lock before appending.
    rcs.lock_loop()
    fd = open(file,"a")
    fd.write('"%s"\n' % title)
    fd.write(diff)
    fd.close()
    print 'inserting %s into the load queue' % title
    rcs.checkin(log_string)
    # Use acl to insert into queue, should be replaced with API call
    os.spawnlp(os.P_WAIT, 'acl', 'acl', '-i', title)
def run_coverage(*args):
    """Run the coverage module's script synchronously with *args*."""
    argv = ['python', coverage.__file__] + list(args)
    os.spawnlp(os.P_WAIT, 'python', *argv)
file.write("[Paths]\nPlugins = '.'\n".encode('utf-8')) #Copy the Info.plist file shutil.copy(baseDir+'Info.plist',contentsDir+'Info.plist') #Copy the qt_menu.nib directory (TODO: is this the place to look for it?) potential_dirs = ['/opt/local/libexec/qt4/Library/Frameworks/QtGui.framework/Versions/4/Resources/', '/opt/local/Library/Frameworks/QtGui.framework/Versions/4/Resources/'] for d in potential_dirs: if os.path.isdir(d + 'qt_menu.nib'): shutil.copytree(d + 'qt_menu.nib', resourcesDir+'qt_menu.nib') break else: raise RuntimeError('Could not find qt_menu.nib') # Add notes with open(os.path.join(appDir, 'install.txt'), 'wb') as f: f.write(OSX_INSTALL_NOTES.encode()) #Package in a dmg dmgFile=appDir+'pyzo.dmg' # Create the dmg if createDmg: if os.spawnlp(os.P_WAIT,'hdiutil','hdiutil','create','-fs','HFSX', '-format','UDZO',dmgFile, '-imagekey', 'zlib-level=9', '-srcfolder',appDir,'-volname', 'pyzo')!=0: raise OSError('creation of the dmg failed')
def convertToGrayScale(self, image, output):
    """Save *image* as a temporary TIFF and convert it to an 8-bit
    grayscale file at *output* with ImageMagick's `convert`."""
    source = TemporaryFile.create('.tif')
    image.save(source, 'TIFF')
    convert_args = ('convert', '-type', 'grayscale', '-depth', '8',
                    source, output)
    # Run synchronously; convert_args[0] doubles as argv[0].
    os.spawnlp(os.P_WAIT, convert_args[0], *convert_args)
args = parser.parse_args() lgdir = args.directory + '/' recordfile = lgdir + 'records.txt' files = glob.glob(lgdir + '*.igc') + glob.glob(lgdir + '*.IGC') # Get logger header records if os.path.isfile(recordfile): records = glidertrace.read_header_records(recordfile) files_new = list( set(files) - set([record.get('logger_file') for record in records])) if len(files_new) > 0: glidertrace.write_header_records(files_new, recordfile, append=True) editor = os.getenv('EDITOR', 'gedit') x = os.spawnlp(os.P_WAIT, editor, editor, recordfile) records = glidertrace.read_header_records(recordfile) else: glidertrace.write_header_records(files, recordfile) editor = os.getenv('EDITOR', 'gedit') x = os.spawnlp(os.P_WAIT, editor, editor, recordfile) records = glidertrace.read_header_records(recordfile) files = [record.get('logger_file') for record in records] labels = [ '{} {}'.format(record.get('pilot'), record.get('compno')) for record in records ] labels, files = zip(*sorted(zip(labels, files))) # alphabetize # Choose colours
def extractMetaInfo(self, data):
    """Extract plain-text content from a base64-encoded document.

    Tries strigi's xmlindexer first; for known mime types falls back to
    pdftotext / odt2txt / antiword; as a last resort converts the file
    to TIFF and OCRs it.  Returns '' for empty input.
    """
    # First of all we'll see what strigi can do for us. If there is a text tag
    # it means it's some kind of text file (plain text, pdf, ps, doc, odf, etc..)
    # Otherwise, we'll try to treat it as an image, and OCR it.
    if not data:
        return ''
    dir=tempfile.mkdtemp()
    buf = base64.decodestring(data)
    f = open('%s/object.tmp' % dir,'wb')
    try:
        f.write( buf )
    finally:
        f.close()
    # Analyze strigi's xmlindexer output
    f = os.popen('xmlindexer %s' % dir, 'r')
    try:
        output = f.read()
    finally:
        f.close()
    # Define namespaces
    metaInfo = None
    mimeTypes = []
    strigiText = None
    try:
        doc = etree.fromstring( output )
        tags = doc.xpath( '//file/text/text()' )
        if tags:
            strigiText = tags[0].strip()
        tags = doc.xpath( "//file/value[@name='http://freedesktop.org/standards/xesam/1.0/core#mimeType']/text()" )
        mimeTypes += tags
        # Newer versions use semanticdestkop.org ontologies.
        tags = doc.xpath( "//file/value[@name='http://www.semanticdesktop.org/ontologies/2007/01/19/nie#mimeType']/text()" )
        mimeTypes += tags
    except:
        pass
    # Extract text with a mime-type-specific external tool.
    if 'application/pdf' in mimeTypes:
        f = os.popen( 'pdftotext -enc UTF-8 -nopgbrk %s/object.tmp -' % dir, 'r')
        try:
            metaInfo = f.read()
        finally:
            f.close()
    elif 'application/vnd.oasis.opendocument.text' in mimeTypes:
        f = os.popen( 'odt2txt --encoding=UTF-8 %s/object.tmp' % dir, 'r' )
        try:
            metaInfo = f.read()
        finally:
            f.close()
    elif 'application/x-ole-storage' in mimeTypes:
        f = os.popen( 'antiword %s/object.tmp' % dir, 'r' )
        try:
            metaInfo = f.read()
        finally:
            f.close()
    # Test it at the very end in case some of the applications (pdftotext, odt2txt or antiword)
    # are not installed.
    if not metaInfo:
        metaInfo = strigiText
    if not metaInfo:
        # We couldn't get text information with other methods, let's see if it's an image
        os.spawnlp(os.P_WAIT, 'convert', 'convert', '-type', 'grayscale',
                   '-depth', '8', dir + '/object.tmp', dir + '/object.tif' )
        if os.path.exists( dir + '/object.tif' ):
            c = ocr.Classifier()
            c.prepareImage( dir + '/object.tif' )
            r = c.ocr()
            metaInfo = r['text'].strip()
            # TODO: Use language detection to choose different dictionary in TSearch2?
            # If so, that should apply to text/pdf/etc.. files too
            #r['language']
    # NOTE(review): str(metaInfo, 'utf-8', ...) on something already a str
    # would raise TypeError — this branch looks like a py2/py3 conversion
    # leftover; confirm intended decoding semantics.
    if isinstance( metaInfo, str ):
        metaInfo = str( metaInfo, 'utf-8', errors='ignore' )
    shutil.rmtree( dir, True )
    return metaInfo
re.search("[.0-9]+", os.popen("uname -r").read()).group().split(".")[:3])) except: pass else: if linux_version >= [2, 6, 9]: define_macros['HAVE_SYS_PRCTL_H'] = 1 elif sys.platform == 'darwin': # __darwin__ symbol is not defined; __APPLE__ is instead. define_macros['__darwin__'] = 1 elif 'bsd' in sys.platform: # OMG, how many of them are? # Old BSD versions don't have setproctitle # TODO: not tested on an "old BSD" if 0 == os.spawnlp(os.P_WAIT, 'grep', 'grep', '-q', 'setproctitle', '/usr/include/unistd.h', '/usr/include/stdlib.h'): define_macros['HAVE_SETPROCTITLE'] = 1 else: define_macros['HAVE_PS_STRING'] = 1 # NOTE: the library may work on HP-UX using pstat # thus setting define_macros['HAVE_SYS_PSTAT_H'] # see http://www.noc.utoronto.ca/~mikep/unix/HPTRICKS # But I have none handy to test with. mod_spt = Extension( 'setproctitle', define_macros=list(define_macros.items()), sources=[ 'src/setproctitle.c', 'src/spt_debug.c',
def startDownload(self): pid = os.spawnlp(os.P_NOWAIT, "transmission-cli", "transmission-cli", "-f", os.path.join(KILL_SCRIPT, "killscript.sh"), self.magnet) return {"magnet": self.magnet, "pid": pid}
form = pl.zeros(N) lmags = pl.log(amps[:N-1]+sm) mags = lmags check = True while(check): ceps = pl.rfft(lmags) ceps[coefs:] = 0 form[:N-1] = pl.irfft(ceps) for i in range(0,N-1): if lmags[i] < form[i]: lmags[i] = form[i] diff = mags[i] - form[i] if diff > thresh: check = True else: check = False return pl.exp(form)+sm for n in range(0,L,H): if(L-n < N): break amps,freqs = pva.analysis(signal[n:n+N]) amps,freqs = scale(amps,freqs,trans) output[n:n+N] += pvs.synthesis(amps,freqs) scal = max(output)/max(signal) output = pl.array(output*zdbfs/scal,dtype='int16') wf.write(sys.argv[3],sr,output) import os try: os.spawnlp(os.P_WAIT, 'sndfile-play', 'sndfile-play', sys.argv[3]) except: pass
def __init__(self, test, \ DATA_REPOS=[AIPS_DIR+'/data'], \ WORKING_DIR='/tmp/casa_regression_work/', \ RESULT_DIR='/tmp/casa_regression_result/', \ retemplate=False, cleanup=True, CPP_PROFILE=False, RESULT_SUBDIR='', REDIRECT=True, PY_PROFILE=True): """cleanup: set to False to keep data around. WORKING_DIR: directory for intermediate files RESULT_DIR: directory where final products go CPP_PROFILE: set to True to enable C++ profiling. This requires that the command 'sudo opcontrol' must work. You also need the 'dot' tool distributed as part of graphviz. Run 'dot -Txxx' to verify that your dot installation supports PNG images. Note, a profile is created only for the casapy process. If you want to include profiles for async / child processes, refer to the documentation for opreport.""" casalog.showconsole(onconsole=True) TEMPLATE_RESULT_DIR = AIPS_DIR + '/data/regression/' tests = [test] if type(tests) != type([]): raise TypeError self.resultdir = RESULT_DIR self.imdir = WORKING_DIR + '/IMAGES/' self.tester = testbase(WORKING_DIR) self.imagertests = [] self.result = [] self.numTests = 0 ####Get the directories right self.tester.setDataBaseDir(DATA_REPOS) self.tester.setScriptsDir(SCRIPT_REPOS) self.tester.setResultDir(RESULT_DIR) self.tester.setWorkingDir(WORKING_DIR) self.resultsubdir = '' print SCRIPT_REPOS if ((len(tests) == 1) and (tests[0] == 'all')): self.numTests = self.tester.locateTests() else: self.numTests = self.tester.locateTests(tests) testName = '' #pdb.set_trace() for k in range(self.numTests): ### cleanup before each test if not dry and cleanup: self.tester.cleanup() self.tester.createDirs() uname1 = os.uname()[1] if self.tester.testname(k)[0:6] == 'tests/': testName = string.split(self.tester.testname(k)[6:], ".py")[0] else: testName = string.split(self.tester.testname(k), ".py")[0] if not RESULT_SUBDIR: self.resultsubdir = self.resultdir + "/result-" + \ testName + "-" + \ uname1 + "-" + \ time.strftime('%Y_%m_%d_%H_%M') else: self.resultsubdir = 
self.resultdir + "/" + RESULT_SUBDIR if not os.path.isdir(self.resultsubdir): os.mkdir(self.resultsubdir) logfilename = testName + '.log' if (os.path.isfile(self.resultsubdir + '/' + logfilename)): os.remove(self.resultsubdir + '/' + logfilename) # redirect stdout and stderr and casalog print 'Run test ' + testName if REDIRECT: print "Redirect stdout/stderr to", self.resultsubdir + '/' + logfilename save_stdout = sys.stdout save_stderr = sys.stderr fsock = open(self.resultsubdir + '/' + logfilename, 'w') sys.stdout = logger("STDOUT", [save_stdout, fsock]) sys.stderr = logger("STDERR", [save_stderr, fsock]) testlog = self.tester.workingDirectory + "/test.log" open(testlog, "w").close() # create empty file casalog.setlogfile( testlog) # seems to append to an existing file try: self.tester.getTest(self.tester.testname(k), testName) if PY_PROFILE: if RESULT_SUBDIR != testName: profilepage = RESULT_DIR + '/' + time.strftime( '%Y_%m_%d/') + testName + '_profile.html' else: profilepage = RESULT_DIR + '/' + RESULT_SUBDIR + '/' + 'profile.html' process_data = "%s/profile.txt" % self.tester.workingDirectory os.system("echo -n > " + process_data) pp = SCRIPT_REPOS + '/profileplot.py' # for release pyt = sys.executable if not os.path.isfile(pp): pp = SCRIPT_REPOS + '/../profileplot.py' # for devel profileplot_pid = os.spawnlp( os.P_NOWAIT, pyt, pyt, pp, testName, RESULT_DIR + ("/" + RESULT_SUBDIR if RESULT_SUBDIR == testName else ''), profilepage, process_data, str(os.getpid())) prof = cProfile.Profile() else: prof = False presentDir = os.getcwd() os.chdir(self.tester.workingDirectory) short_description = self.tester.getDescription(testName, k) if short_description != None and short_description.find( "'") >= 0: print >> sys.stderr, \ "Warning: Short description contains ': '%s'" % \ short_description short_description = short_description.replace("'", "") try: self.op_init(CPP_PROFILE) time1 = time.time() mem1 = commands.getoutput('env -i ps -p ' + str(os.getpid()) + ' -o rss | 
tail -1') if prof: #prof.runctx("(leResult, leImages)=self.tester.runtests(testName, k, dry)", globals(), locals()) #prof.runctx("(leResult, leImages)=self.tester.runtests(testName, k, dry)", gl, lo) #prof.run("(leResult, leImages) = self.tester.runtests(testName, k, dry)") (leResult, leImages) = prof.runcall(self.tester.runtests, testName, k, dry) else: (leResult, leImages) = self.tester.runtests(testName, k, dry) # returns absolute_paths, relative_paths exec_success = True except: leResult = [] exec_success = False print >> sys.stderr, "%s failed, dumping traceback:" % testName traceback.print_exc() # print and swallow exception mem2 = commands.getoutput('env -i ps -p ' + str(os.getpid()) + ' -o rss | tail -1') time2 = time.time() time2 = (time2 - time1) / 60.0 print "Net memory allocated:", (int(mem2) - int(mem1)) / 1024, "MB" if prof: try: prof.dump_stats(self.resultsubdir + '/cProfile.profile') except: print >> sys.stderr, "Failed to write profiling data!" self.op_done(CPP_PROFILE) # Dump contents of any *.log file produced # by the regression script # # !! 
Does not handle out of diskspace # files = os.listdir('.') for f in files: if f != 'casa.log' and \ re.compile('.log$').search(f) != None: for line in open(f, 'r'): #print f + ' ddd'+line if REDIRECT: fsock.write(f + ': ' + line.rstrip()) else: print f + ': ' + line.rstrip() # # Report and deal with out of diskspace # space_left = commands.getoutput( \ "df -kP " + self.tester.workingDirectory + \ " | awk '{print $4}' | tail -1") space_left_h = commands.getoutput( \ "df -hP " + self.tester.workingDirectory + \ " | awk '{print $4}' | tail -1") space_used = commands.getoutput( \ "du -kc " + self.tester.workingDirectory + \ " | tail -1 | awk '{print $1}'") space_used_h = commands.getoutput( \ "du -hc " + self.tester.workingDirectory + \ " | tail -1 | awk '{print $1}'") if int(space_left) < 1000 * 1000: print >> sys.stderr, "Warning: Only " + \ space_left_h + ' disk space left, ' + \ space_used_h + ' used' # Clean up early, so that this infrastructure can continue if not exec_success and cleanup: self.tester.cleanup() # Copy C++ profiling info if CPP_PROFILE: os.system('cp cpp_profile.* ' + self.resultsubdir) os.chdir(presentDir) if PY_PROFILE: # Terminate profiling process os.kill(profileplot_pid, signal.SIGHUP) status = os.waitpid(profileplot_pid, 0)[1] #print str(profileplot_pid) + ' exit: ' + str(status) pagename = time.strftime( '%Y_%m_%d/') + testName + '_profile.html' # entries common for all tests based on this run self.result_common = {} self.result_common['CASA'] = "'" + self.get_casa_version( ) + "'", "CASA version" self.result_common['host'] = uname1, "os.uname[1]" self.result_common['platform'] = "'" + self.get_platform( )[0] + " " + self.get_platform()[1] + "'", "OS" self.result_common['date'] = time.strftime( '%Y_%m_%d_%H_%M'), "" self.result_common['testid'] = testName, "test name" if short_description != None: self.result_common[ 'description'] = "'" + short_description + "'", "test short description" # Figure out data repository version if 
os.system("which svnversion >/dev/null") == 0: (errorcode, datasvnr) = commands.getstatusoutput( 'cd ' + DATA_REPOS[0] + ' && svnversion 2>&1 | grep -vi warning') else: errorcode = 1 if errorcode != 0 or datasvnr == "exported": # If that didn't work, look at ./version in the toplevel dir (errorcode, datasvnr) = commands.getstatusoutput( \ 'cd '+DATA_REPOS[0]+" && grep -E 'Rev:' version" \ ) if errorcode != 0: datasvnr = "Unknown version" self.result_common[ 'data_version'] = "'" + datasvnr + "'", "Data repository version" # execution test exec_result = self.result_common.copy() exec_result['version'] = 2, "version of this file" exec_result['type'] = "exec", "test type" exec_result['time'] = time2 * 60, "execution time in seconds" exec_result[ 'disk'] = space_used, "disk space (KB) in use after test" exec_result['runlog'] = logfilename, "execution logfile" if PY_PROFILE: # read time/memory data mem = "" try: process_file = open(process_data, "r") except: print "Warning: Failed to open file:", process_data process_file = None else: process_file = None if process_file != None: lineno = 0 for line in process_file: lineno += 1 if len(line) > 0 and line[0] != '#': try: (t, m_virtual, m_resident, nfiledesc, cpu_us, cpu_sy, cpu_id, cpu_wa) = line.split() mem = mem + \ str(t) + ',' + \ str(m_virtual) + ',' + \ str(m_resident) + ',' + \ str(nfiledesc) + ',' + \ str(cpu_us) + ',' + \ str(cpu_sy) + ',' + \ str(cpu_id) + ',' + \ str(cpu_wa) + ';' except: print >> sys.stderr, "Error parsing %s:%d: '%s'" % \ (process_data, lineno, line) process_file.close() exec_result[ 'resource'] = mem, "time(s),virtual(Mbytes),resident(Mbytes),nfiledesc,cpu_us,cpu_sy,cpu_id,cpu_wa" whatToTest = self.tester.whatQualityTest() keys = [] #if len(whatToTest) != 0: # keys=whatToTest.keys() # print 'THE KEYS ARE ', keys for j in range(len(leResult)): templateImage = TEMPLATE_RESULT_DIR + "/" + testName + "/reference/" + leImages[ j] if retemplate: if os.access(templateImage, os.F_OK): 
shutil.rmtree(templateImage) print 'TemplateImage ' + templateImage print 'theImage ' + leResult[j] print 'theImage ' + leImages[j] product_exists = os.access(leResult[j], os.F_OK) template_exists = os.access(templateImage, os.F_OK) if product_exists and retemplate: print 'Create template from', leResult[j] if not os.path.isdir(TEMPLATE_RESULT_DIR + "/" + testName): os.mkdir(TEMPLATE_RESULT_DIR + "/" + testName) shutil.copytree(leResult[j], templateImage) if not product_exists: print >> sys.stderr, leResult[j], 'missing!' exec_success = False whatToTest[leResult[j]] = [] if not template_exists: print >> sys.stderr, templateImage, 'missing!' for leQualityTest in whatToTest[leResult[j]]: print leResult[j] + ' WHAT : ', whatToTest[leResult[j]] self.result = self.result_common.copy() self.result['version'] = 1, "version of this file" self.result['type'] = leQualityTest, "test type" self.result['image'] = leImages[j], "test image" if not product_exists: self.result[ 'status'] = 'fail', "result of regression test" self.result[ 'reason'] = "'Product image missing'", "reason of failure" elif not template_exists: self.result[ 'status'] = 'fail', "result of regression test" self.result[ 'reason'] = "'Reference image missing'", "reason of failure" else: if os.access(self.imdir, os.F_OK): shutil.rmtree(self.imdir) if (leQualityTest == 'simple'): self.simpleStats(leResult[j], templateImage, testName, WORKING_DIR, RESULT_DIR) elif (leQualityTest == 'pol2'): self.polImageTest(leResult[j], templateImage, testName, WORKING_DIR, RESULT_DIR, 2) elif (leQualityTest == 'pol4'): self.polImageTest(leResult[j], templateImage, testName, WORKING_DIR, RESULT_DIR, 4) elif (leQualityTest == 'cube'): self.cubeImageTest(leResult[j], templateImage, testName, WORKING_DIR, RESULT_DIR) elif (leQualityTest == 'ms'): self.visStats(leResult[j], templateImage, testName, WORKING_DIR, RESULT_DIR) # RI add visStats method here, image ones use ImageTest so going to have to build a MSTest class else: 
self.polImageTest(leResult[j], templateImage, testName, WORKING_DIR, RESULT_DIR, 1) # Pick up any images produced in test if os.path.isdir(self.imdir): i = 0 for image in os.listdir(self.imdir): i = i + 1 shutil.copy(self.imdir + '/' + image, \ self.resultsubdir+'/'+os.path.basename(image)) self.result[ 'imagefile_' + str(i)] = "'" + os.path.basename( image ) + "'", 'regression image ' + str(i) self.create_log(leImages[j].replace('/', '-')) # Create exec log now that we now if # required images were produced exec_result['status'] = ( "fail", "pass")[exec_success], "execution status" self.result = exec_result self.create_log("") # Restore stdout/stderr if REDIRECT: sys.stderr = save_stderr sys.stdout = save_stdout fsock.close() casalog.setlogfile("casa.log") os.system("sed 's/^/casa.log: /' " + testlog + " >> " + self.resultsubdir + '/' + logfilename) if not dry and cleanup: self.tester.cleanup() except: if REDIRECT: sys.stderr = save_stderr sys.stdout = save_stdout fsock.close() casalog.setlogfile("casa.log") os.system("sed 's/^/casa.log: /' " + testlog + " >> " + self.resultsubdir + '/' + logfilename) print "Unexpected error:", sys.exc_info()[0] raise # end for k... print "Created ", self.resultsubdir
def _spawn_daemon(self):
    """Start the grapejuiced daemon (`python -m grapejuiced daemonize`) as a detached child."""
    LOG.debug("Spawning Grapejuice daemon")
    interpreter = sys.executable
    os.spawnlp(os.P_NOWAIT, interpreter, interpreter,
               "-m", "grapejuiced", "daemonize")
def run(self):
    """Run the standard build step, then build and stage libgoldilocks.

    Under --dry-run only the base build step executes; the native build
    and the copy are skipped.
    """
    build.run(self)
    if self.dry_run:
        return
    # Drive the native library build through the parent directory's Makefile.
    os.spawnlp(os.P_WAIT, 'sh', 'sh', '-c', 'cd .. && gmake lib')
    built_lib = os.path.join('..', 'build', 'lib', 'libgoldilocks.so')
    self.copy_file(built_lib, os.path.join(self.build_lib, 'edgold'))
# Find the SVN-ACTION string from the CustomLog format # davautocheck.sh uses. If that changes, this will need # to as well. Currently it's # %t %u %{SVN-REPOS-NAME}e %{SVN-ACTION}e leading = ' '.join(words[:4]) action = ' '.join(words[4:]) # Parse the action and write the reconstructed action to # the temporary file. Ignore the returned trailing text, # as we have none in the davautocheck ops log. parser.linenum += 1 try: parser.parse(action) except svn_server_log_parse.Error: sys.stderr.write('error at line %d: %s\n' % (parser.linenum, action)) raise fp.write(leading + ' ' + parser.action + '\n') fp.close() # Check differences between original and reconstructed files # (should be identical). result = os.spawnlp(os.P_WAIT, 'diff', 'diff', '-u', log_file, tmp) if result == 0: sys.stderr.write('OK\n') sys.exit(result) finally: try: os.unlink(tmp) except Exception as e: sys.stderr.write('os.unlink(tmp): %s\n' % (e, ))
def main(num_runs, repeat, pre_create_dag_runs, executor_class, dag_ids): # pylint: disable=too-many-locals """ This script can be used to measure the total "scheduler overhead" of Airflow. By overhead we mean if the tasks executed instantly as soon as they are executed (i.e. they do nothing) how quickly could we schedule them. It will monitor the task completion of the Mock/stub executor (no actual tasks are run) and after the required number of dag runs for all the specified dags have completed all their tasks, it will cleanly shut down the scheduler. The dags you run with need to have an early enough start_date to create the desired number of runs. Care should be taken that other limits (DAG concurrency, pool size etc) are not the bottleneck. This script doesn't help you in that regard. It is recommended to repeat the test at least 3 times (`--repeat=3`, the default) so that you can get somewhat-accurate variance on the reported timing numbers, but this can be disabled for longer runs if needed. """ # Turn on unit test mode so that we don't do any sleep() in the scheduler # loop - not needed on master, but this script can run against older # releases too! 
os.environ['AIRFLOW__CORE__UNIT_TEST_MODE'] = 'True' os.environ['AIRFLOW__CORE__DAG_CONCURRENCY'] = '500' # Set this so that dags can dynamically configure their end_date os.environ['AIRFLOW_BENCHMARK_MAX_DAG_RUNS'] = str(num_runs) os.environ['PERF_MAX_RUNS'] = str(num_runs) if pre_create_dag_runs: os.environ['AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE'] = 'False' from airflow.jobs.scheduler_job import SchedulerJob from airflow.models.dagbag import DagBag from airflow.utils import db dagbag = DagBag() dags = [] with db.create_session() as session: pause_all_dags(session) for dag_id in dag_ids: dag = dagbag.get_dag(dag_id) dag.sync_to_db(session=session) dags.append(dag) reset_dag(dag, session) next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks)) for _ in range(num_runs - 1): next_run_date = dag.following_schedule(next_run_date) end_date = dag.end_date or dag.default_args.get('end_date') if end_date != next_run_date: message = ( f"DAG {dag_id} has incorrect end_date ({end_date}) for number of runs! 
" f"It should be " f" {next_run_date}") sys.exit(message) if pre_create_dag_runs: create_dag_runs(dag, num_runs, session) ShortCircutExecutor = get_executor_under_test(executor_class) executor = ShortCircutExecutor(dag_ids_to_watch=dag_ids, num_runs=num_runs) scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor) executor.scheduler_job = scheduler_job total_tasks = sum(len(dag.tasks) for dag in dags) if 'PYSPY' in os.environ: pid = str(os.getpid()) filename = os.environ.get('PYSPY_O', 'flame-' + pid + '.html') os.spawnlp(os.P_NOWAIT, 'sudo', 'sudo', 'py-spy', 'record', '-o', filename, '-p', pid, '--idle') times = [] # Need a lambda to refer to the _latest_ value fo scheduler_job, not just # the initial one code_to_test = lambda: scheduler_job.run() # pylint: disable=unnecessary-lambda for count in range(repeat): gc.disable() start = time.perf_counter() code_to_test() times.append(time.perf_counter() - start) gc.enable() print("Run %d time: %.5f" % (count + 1, times[-1])) if count + 1 != repeat: with db.create_session() as session: for dag in dags: reset_dag(dag, session) executor.reset(dag_ids) scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor) executor.scheduler_job = scheduler_job print() print() msg = "Time for %d dag runs of %d dags with %d total tasks: %.4fs" if len(times) > 1: print((msg + " (±%.3fs)") % (num_runs, len(dags), total_tasks, statistics.mean(times), statistics.stdev(times))) else: print(msg % (num_runs, len(dags), total_tasks, times[0])) print() print()
def open_with_external_app(self, w, app):
    """Hand the currently loaded file to *app* without blocking.

    Does nothing when no file is currently loaded.
    """
    filename = self.currentFilename
    if not filename:
        return
    os.spawnlp(os.P_NOWAIT, app, app, filename)
def bibtex(f):
    """Run BibTeX on *f*, block until it finishes, and return its exit status."""
    tool = 'bibtex'
    return os.spawnlp(os.P_WAIT, tool, tool, f)
def addDirectory(self, dir):
    """Invoke ``<self.cmd> add <dir>`` and report success as a boolean."""
    status = os.spawnlp(os.P_WAIT, self.cmd, self.cmd, "add", dir)
    return status == 0
#print(buf) logf.flush( ) # after event, actually write the buffered output to disk os.fsync(logf.fileno()) if (imageSaves > 1) and (tBest2 != tBest): cv.imwrite(fname32, bestImg2) # save 2nd best image if ((imageSaves > 2) and (tBest3 != tBest) and (tBest3 != tBest2)): cv.imwrite(fname33, bestImg3) # save 3rd best image # print("saved: " + fname3 + " , " + fname32 + " , " + fname33) if (inDriveway): os.spawnlp(os.P_NOWAIT, '/usr/bin/curl', 'curl', '-T', fname3, ftpDir + fname3, '--user', FPASS) inDriveway = False motionNow = False mRun = 0 # count of consecutive motion frames mCountMax = 0 # maximum mCount throughout event s = [] # empty out list xpos = [] xdelt = [] rdist = [] minR = 1000 # force it high minR2 = 1000 # force it high minR3 = 1000 # force it high maxY = 0 # force it low
continue try: psutil.Process(pid).kill() print "pid %d killed" % pid except psutil.NoSuchProcess: pass else: print "No pids file, continue..." if args.stop: sys.exit(0) if args.build: # Create images print "Creating images..." os.spawnlp(os.P_WAIT, 'create-images.sh', 'create-images.sh') for device in devices: print "Entering device %s bootloader..." % device.name # Enter fastboot mode on board serial = pexpect.spawn("picocom -b 115200 %s" % device.console, timeout=60) os.spawnvp(os.P_WAIT, 'command_relay.py', (power_cmd + "%s off" % device.relay).split()) time.sleep(5) os.spawnvp(os.P_WAIT, 'command_relay.py', (power_cmd + "%s on" % device.relay).split()) serial.expect(["Hit any key to stop autoboot"]) serial.sendline('b') serial.expect(["=>"]) serial.sendline("nand scrub.part -y UBI") serial.expect(["=>"])
def resolv_func(notification=None, action=None, user_data=None):
    """Kick off a unison run for the '<hostname>-sync' profile, then dismiss the notification."""
    profile = str(hostname) + '-sync'
    os.spawnlp(os.P_NOWAIT, unison_exec, unison_exec, profile)
    close_func()
def gnuplotTable(table, outputFile, gnuplotOptions={}): """table - 2D list of data. The first row is taken as headers. outputFile - The output of GnuPlot goes here (in .eps format). gnuplotOptions - A dict used to customize the behaviour of gnuplot.""" # Make table into tables, unless it already is tables = table if not isinstance(tables[0][0], list) and not isinstance( tables[0][0], tuple): tables = (tables, ) # For the tables to work, the first column must have the same header, not necissarily the same values for table in tables: assert (table[0][0] == tables[0][0][0]) # Copy the user supplied options into our options dictionary options = dict(DEFAULT_OPTIONS) options["xlabel"] = tables[0][0][0] for key, value in gnuplotOptions.items(): options[key] = value # Add "set key " to the front of the key option, if it was specified if options["key"] != "": if options["key"] is False: options["key"] = "set nokey" else: options["key"] = "set key " + options["key"] # Add "set size " to the front of the size option, if it was specified if options["size"] != "": options["size"] = "set size " + options["size"] # Add "set pointsize " to the front of the size option, if it was specified if options["pointsize"] != "": options["pointsize"] = "set pointsize " + str(options["pointsize"]) color = "color" solid = "set terminal postscript solid" if not options["color"] and options['plottype'] != 'barchart': color = "monochrome" solid = "" if options["color"] and options["dashed"]: solid = "" boxWidth = None if options['plottype'] == 'barchart': options['barchart'] = True options['xformat'] = "%s" xtics = [None] # Number of X values = number of rows in the tables numXValues = len(tables[0]) - 1 # Number of boxes = number of columns in the tables numBoxes = 0 for table in tables: numBoxes += (len(table[0]) - 1) for i, row in enumerate(table): #~ print i, row if i == 0: continue if i < len(xtics): assert (row[0] == xtics[i]) else: xtics.append(row[0]) row[0] = i # This box width is 
sufficient to leave one empty box between clusters boxWidth = 1.0 / (1 + numBoxes) options['xrange'] = "[%f:%f]" % (1 - (boxWidth * numBoxes) / 2, numXValues + (boxWidth * numBoxes) / 2) options['grid'] = 'noxtics ytics linewidth 2.0' options['plottype'] = 'boxes' xticString = "( " for i, x in enumerate(xtics): if i == 0: continue xticString += '"%s" %d, ' % (x, i) xticString = xticString[:-2] + ")" #set xtics rotate %s\n options[ 'boxstuff'] = 'set ticscale 0 0\nset xtics %s\nset boxwidth %f\nset style fill solid border -1' % ( xticString, boxWidth) # Gnuplot output scriptfile = """ set title "%s" set xlabel "%s" set ylabel "%s" set grid %s # Set the axes to engineering notation set format x '%s' set format y '%s' set xrange %s set yrange %s set terminal postscript "Helvetica" %d set terminal postscript %s # color or monochrome %s # Use solid or dotted lines set terminal postscript eps enhanced set output "%s" %s %s %s %s """ % (options["title"], options["xlabel"], options["ylabel"], options["grid"], options["xformat"], options["yformat"], options["xrange"], options["yrange"], options["fontsize"], color, solid, outputFile, options["key"], options["size"], options["boxstuff"], options["pointsize"]) if 'xtics' in options: scriptfile += 'set xtics %s\n' % options['xtics'] if 'calculated' in options: scriptfile += 'f(x) = %s\n' % options['calculated'] tempDataFiles = [] plotLines = [] linecount = 1 for table in tables: data = tempfile.NamedTemporaryFile() # Skip the headers in the data files. On occasion they confuse GnuPlot for line in table[1:]: data.write("\t".join([str(i) for i in line])) data.write("\n") data.flush() tempDataFiles.append(data) headings = table[0] for i, heading in enumerate(headings[1:]): # Skip this column if it is an error bar column. 
Error bar columns # are expressed in the ORIGINAL table columns: That is, column 1 # there = column 0 here (we skip the x-axis column if "errorbars" in options and (i in options["errorbars"] or i - 1 in options["errorbars"]): continue plottype = options["plottype"] if i in options["plottypes"]: plottype = options["plottypes"][i] color = "" if linecount in options["colors"]: color = "linecolor %s" % (options["colors"][linecount]) if boxWidth: offset = (-(numBoxes - 1) / 2.0 + i) * boxWidth #~ print "numBoxes =", numBoxes, "boxWidth =", boxWidth, "i =",i, "offset =", offset plotLines.append(' "%s" using ($1+%f):%d title "%s" with %s' % (data.name, offset, i + 2, heading, plottype)) else: plotLines.append( ' "%s" using 1:%d title "%s" with %s linetype %d linewidth %d %s' % (data.name, i + 2, heading, plottype, linecount, options["linewidth"], color)) if "errorbars" in options and i + 1 in options["errorbars"]: # Set the linetype so it looks the same as the line we drew # Set pointsize to 0 because otherwise gnuplot puts a cross or other point at the midpoint errorbar_linetype = 1 if options["color"] and color == "": color = "linecolor %d" % linecount plotLines.append( '"%s" using 1:%d:%d:%d notitle with yerrorbars linetype 1 linewidth %f %s pointsize 0' % (data.name, i + 2, i + 3, i + 4, options["linewidth"] * 0.25, color)) linecount += 1 if 'calculated' in options: plotLines.insert(0, 'f(x) with lines') scriptfile += "plot " + ", ".join(plotLines) script = tempfile.NamedTemporaryFile() script.write(scriptfile) script.flush() #~ import shutil #~ shutil.copyfile( data.name, "data.txt" ) #~ shutil.copyfile( script.name, "script.txt" ) code = os.spawnlp(os.P_WAIT, options["gnuplot"], options["gnuplot"], script.name) assert (code == 0) script.close() for data in tempDataFiles: data.close() if not options['color'] or options['dashed']: if 'barchart' in options and options['barchart']: hackBarChartColor(outputFile, numBoxes) else: hackDottedStyle(outputFile)
def play(sound):
    """Play a bundled music file via macOS ``afplay`` without blocking."""
    track = '.resources/music/%s' % sound
    os.spawnlp(os.P_NOWAIT, 'afplay', 'afplay', track)
def mailto_func(notification, action, mail_subject, mail_body, mail_receiver, cmd_output):
    """Compose a mail via xdg-email, appending the command output to the body,
    then dismiss the notification."""
    body = str(mail_body) + str(cmd_output)
    os.spawnlp(os.P_NOWAIT, xdgemail_exec, xdgemail_exec,
               '--subject', str(mail_subject),
               '--body', body,
               str(mail_receiver))
    close_func()
def _call(args): # use partial substitute return os.spawnlp(os.P_WAIT, args[0], args[0], *args[1:])
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # import sys import os from gtk import gtk_version, pygtk_version from optparse import OptionParser if __name__ == "__main__": if (gtk_version >= (2, 10, 0) and pygtk_version >= (2, 10, 0)): parser = OptionParser() parser.add_option("-s", "--profile-selection", action="store_true", dest="profile_selection", default=False, help="Startup Profile Setup") (options, args) = parser.parse_args() home = os.environ['HOME'] global_install = os.path.join(sys.path[0], 'Profile.py') user_install = os.path.join(home, 'Profile.py') if os.path.isfile(user_install): unity = user_install else: unity = global_install if options.profile_selection == True: os.spawnlp(os.P_NOWAIT, unity, unity, '-s') else: os.spawnlp(os.P_NOWAIT, unity, unity) else: print "Error: UnityLobby requires at least GTK v2.10.0 and PyGTK v2.10.0, your versions of GTK and PyGTK are", gtk_version, " and ", pygtk_version
def dofile(src, dst):
    """Install *src* to *dst* with mode 0644 via the ``install`` utility (blocking)."""
    os.spawnlp(os.P_WAIT, "install", "install", "-m0644", src, dst)
def show_in_google_earth(cls, track):
    """Write *track* to a KML file in /tmp and launch google-earth on it.

    Trackpoints are drawn in 5-point segments, each colored by the
    segment's average heart rate mapped onto num_hr_colors discrete
    styles between min_hr and max_hr (bpm).
    """
    (center_lat, lat_range) = track.get_mid_point_range("lats")
    (center_lng, lng_range) = track.get_mid_point_range("lngs")
    # Camera altitude: 1000 units above the highest GPS elevation.
    max_alt = max(track.trackpoints.gps_elevs) + 1000
    # Heart-rate color-scale bounds (bpm) and number of discrete buckets.
    min_hr = 120.0
    max_hr = 180.0
    num_hr_colors = 20
    fname = "/tmp/" + track.get_start_time_as_str() + ".kml"
    f = open(fname, "w+")
    print>>f, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
    print>>f, "<kml xmlns=\"http://www.opengis.net/kml/2.2\">"
    print>>f, " <Document>"
    print>>f, " <name>Track " + track.get_start_time_as_str() + "</name>"
    # Fly the camera to the track's midpoint.
    print>>f, " <gx:FlyTo>"
    print>>f, " <gx:duration>0.1</gx:duration>"
    print>>f, " <gx:flyToMode>smooth</gx:flyToMode>"
    print>>f, " <Camera>"
    print>>f, " <longitude>" + str(center_lng) + "</longitude>"
    print>>f, " <latitude>" + str(center_lat) + "</latitude>"
    print>>f, " <altitude>" + str(max_alt) + "</altitude>"
    print>>f, " <tilt>33</tilt>"
    print>>f, " </Camera>"
    print>>f, " </gx:FlyTo>"
    # Screen overlay showing the heart-rate legend image.
    print>>f, " <ScreenOverlay id=\"khScreenOverlay756\">"
    print>>f, " <Icon><href>hr-legend.png</href></Icon>"
    print>>f, " <overlayXY x=\"0\" y=\"0\" xunits=\"fraction\" yunits=\"fraction\"/>"
    print>>f, " <screenXY x=\"0\" y=\"30\" xunits=\"pixels\" yunits=\"pixels\"/>"
    print>>f, " <size x=\"60\" y=\"220\" xunits=\"pixels\" yunits=\"pixels\"/>"
    print>>f, " </ScreenOverlay>"
    # for i in range(120, 180, 5): print i, GoogleEarth.data_point_to_color(i, min_hr, max_hr)
    # Emit one LineStyle per discrete heart-rate color bucket.
    for i in range(0, num_hr_colors):
        color_hr = float(i) * (max_hr - min_hr) / float(num_hr_colors) + min_hr
        # print i, color_hr, GoogleEarth.data_point_to_color(color_hr, min_hr, max_hr)
        print>>f, " <Style id=\"color" + str(i) + "\">"
        print>>f, " <LineStyle>"
        print>>f, " <color>ff" + GoogleEarth.data_point_to_color(color_hr, min_hr, max_hr) + "</color>"
        print>>f, " <width>6</width>"
        print>>f, " </LineStyle>"
        print>>f, " </Style>"
    # One Placemark per 5-trackpoint segment, styled by its average HR.
    for i in range(0, len(track.trackpoints), 5):
        av_hr = numpy.average(track.trackpoints.hrs[i:i+5])
        color_num = int(float(num_hr_colors) * (av_hr - min_hr) / (max_hr - min_hr))
        # Clamp to the available color buckets.
        if color_num < 0: color_num = 0
        if color_num >= num_hr_colors: color_num = num_hr_colors - 1
        print>>f, " <Placemark>"
        print>>f, " <name>Absolute Extruded</name>"
        print>>f, " <styleUrl>#color" + str(color_num) + "</styleUrl>"
        print>>f, " <LineString>"
        print>>f, " <altitudeMode>clampToGround</altitudeMode>"
        print>>f, " <coordinates>"
        # Overlap segments by one point so consecutive lines join up.
        for t in range(i, i + 6):
            if t >= len(track.trackpoints.lngs): break
            print>>f, " " + str(track.trackpoints.lngs[t]) + "," + str(track.trackpoints.lats[t])
        print>>f, " </coordinates>"
        print>>f, " </LineString>"
        print>>f, " </Placemark>"
    print>>f, " </Document>"
    print>>f, "</kml>"
    f.close()
    # The overlay references hr-legend.png relative to the KML's location.
    shutil.copy("hr-legend.png", "/tmp/hr-legend.png")
    os.spawnlp(os.P_NOWAIT, "google-earth", "google-earth", fname)
#!/usr/bin/env python import os import subprocess import time # Which server to use: serverPath = "http://localhost:8888" serverPath = "http://gaetxtest.appspot.com" # Which of the servelets to test: apiPath = "/counter" # Amount of time (seconds) to sleep between requests. Use negative value to completely disable sleep sleepDuration = -10.1 # Clear all Data first os.spawnlp(os.P_WAIT, 'curl', 'curl', '--data', "dummy=dummy", serverPath+apiPath+"/clearAll") mobSize = 400 print "Settings:" print "Server path:", serverPath for i in range(mobSize): data = 'dummy=dummy' fd = open("out/testCounter"+str(i)+".txt", "w+") subprocess.Popen(["curl", "-s", "--data", data, serverPath+apiPath+"/increment"], stdout=fd) if (sleepDuration >= 0): time.sleep(sleepDuration)
def eqawarn(lines):
    """Forward QA warning *lines* to Portage's shell-side ``eqawarn`` helper.

    Builds one ``bash -c`` invocation that sources isolated-functions.sh
    from $PORTAGE_BIN_PATH and calls eqawarn once per line.
    """
    # NOTE(review): each line is interpolated into the shell string
    # unescaped — lines containing quotes or $ get shell-expanded.
    script = "source '%s/isolated-functions.sh' ; " % \
        os.environ["PORTAGE_BIN_PATH"]
    for entry in lines:
        script += "eqawarn \"%s\" ; " % entry
    os.spawnlp(os.P_WAIT, "bash", "bash", "-c", script)
def open_file_linux(filename):
    """Open *filename* with the desktop's default handler (KDE or GNOME), non-blocking."""
    if check_kde():
        os.spawnlp(os.P_NOWAIT, "kfmclient", "kfmclient", "exec",
                   "file://" + filename)
        return
    os.spawnlp(os.P_NOWAIT, "gnome-open", "gnome-open", filename)
def dodir(path):
    """Create directory *path* (including parents) using ``install -d``, blocking."""
    os.spawnlp(os.P_WAIT, "install", "install", "-d", path)