def install_pkgs(self, pkg_lst, mnt_pt):
    """Install every package in pkg_lst into the chroot at mnt_pt, then clean apt's cache."""
    for package in pkg_lst:
        self.install_package(mnt_pt, package)
    # Clean the apt package cache so the image doesn't carry the .debs around.
    util.run_command("chroot %s /usr/bin/apt-get clean" % mnt_pt)
def get_dev(user_mod=''):
    """Use v4l-info to determine the /dev/video* device for a given driver module.

    Returns the first device whose driver name contains user_mod, or the
    em28xx device when user_mod is empty; returns None when nothing matches.
    """
    if user_mod == 'bttv0':
        user_mod = 'bttv'
    # v4l-info is needed for every probe below; check once up front instead
    # of re-testing the same path on every loop iteration (fix: the original
    # repeated this check per device).
    if not os.path.exists('/usr/bin/v4l-info'):
        print('YOU NEED TO INSTALL v4l-conf')
        exit(0)
    with run_command('ls /dev/video*', do_popen=True) as pop_:
        for line in pop_:
            devname = line.strip()
            if hasattr(devname, 'decode'):  # bytes under Python 3
                devname = devname.decode()
            driver = ''
            with run_command('v4l-info %s 2> /dev/null | grep driver' % devname,
                             do_popen=True) as v4linfo:
                for line in v4linfo:
                    if hasattr(line, 'decode'):
                        line = line.decode()
                    if line != '':
                        # line looks like: driver : "name" -- take the quoted token
                        driver = line.split()[2].strip('"')
            if user_mod in driver or (not user_mod and driver == 'em28xx'):
                return devname
def build_ns3(config, build_examples, build_tests, args, build_options):
    # Configure and build ns-3 with waf, enabling examples/tests on request
    # and wiring in regression traces / pybindgen when declared in config.
    cmd = [sys.executable, "waf", "configure"] + args
    if build_examples:
        cmd.append("--enable-examples")
    if build_tests:
        cmd.append("--enable-tests")
    try:
        # Single-element unpacking raises ValueError unless exactly one
        # <ns-3-traces> element is present.
        ns3_traces, = config.getElementsByTagName("ns-3-traces")
    except ValueError:
        # Don't print a warning message here since regression traces
        # are no longer used.
        pass
    else:
        cmd.extend([
            "--with-regression-traces",
            os.path.join("..", ns3_traces.getAttribute("dir")),
        ])
    try:
        pybindgen, = config.getElementsByTagName("pybindgen")
    except ValueError:
        print "Note: configuring ns-3 without pybindgen"
    else:
        cmd.extend([
            "--with-pybindgen",
            os.path.join("..", pybindgen.getAttribute("dir")),
        ])
    run_command(cmd)  # waf configure ...
    run_command([sys.executable, "waf", "build"] + build_options)
def run(self, input):
    """Run self.command (optionally reading from input's file) and capture stdout.

    Returns {'output': File(...)} wrapping the capture file.
    """
    # NOTE: tempfile.mktemp() is race-prone; kept because File() expects a
    # plain pathname it can manage itself.
    filename = tempfile.mktemp()
    cmd = self.command
    if input:
        cmd += '< %s' % input.pathname
    # Fix: use a context manager so the capture file is closed even when
    # run_command raises (the original leaked the handle on error).
    with open(filename, 'wb') as f:
        util.run_command(cmd, f.write, self.callback.log)
    return {'output': File(filename)}
def _disable_ttys(self, mnt_pt):
    """Comment out the standard gettys in the guest inittab and add a serial console getty."""
    # Fix: the sed expression must be a raw string.  In the original plain
    # string literal, "\1" was the control character \x01 rather than the
    # sed backreference \1, so the substitution wrote garbage into inittab.
    util.run_command(
        r"sed -i -e 's/^\([1-6].*:respawn*\)/#\1/' -e 's/^T/#\t/' %s/etc/inittab" % mnt_pt)
    # Append a serial-console getty entry so the domain is reachable via console.
    text = "S0:12345:respawn:/sbin/getty -L console 9600 vt100\n"
    util.write_to_file("%s/etc/inittab" % mnt_pt, "a", text)
def netanim_download():
    # Download the pinned NetAnim release tarball, unpack it, and move the
    # extracted tree to the canonical local path.
    local_file = required_netanim_version + ".tar.bz2"
    remote_file = constants.NETANIM_RELEASE_URL + "/" + local_file
    print "Retrieving NetAnim from " + remote_file
    urllib.urlretrieve(remote_file, local_file)
    print "Uncompressing " + local_file
    run_command(["tar", "-xjf", local_file])
    print "Rename %s as %s" % (required_netanim_version, constants.LOCAL_NETANIM_PATH)
    os.rename(required_netanim_version, constants.LOCAL_NETANIM_PATH)
def copy_kernel_modules(self, mnt_pt):
    """Copy the host kernel's modules for self._kernel into the guest image at mnt_pt."""
    # Strip the leading "vmlinuz-" (8 characters) to recover the version
    # string -- assumes the kernel file is named vmlinuz-<version>; TODO confirm.
    kernel_version = os.path.basename(self._kernel)[8:]
    # Fix: single-line command string.  The original embedded a backslash
    # line-continuation inside the literal, injecting a run of stray
    # whitespace into the shell command.
    util.run_command("cp -r /lib/modules/%s %s/lib/modules/%s"
                     % (kernel_version, mnt_pt, kernel_version))
def run(self, **k):
    """Run svm-predict on the test data with the given model; return the predictions file."""
    data = k["test data"]
    model = k["model"]
    out_path = tempfile.mktemp()
    cmd = "%s %s %s %s" % (svm_predict_pathname, data.pathname,
                           model.pathname, out_path)
    log = self.callback.log
    log("# Running %s\n" % cmd)
    util.run_command(cmd, log, log)
    return {"output": TextFile(out_path, autodel=True)}
def pipeline_monogenic_validation(work_dir=os.environ['OBER_OUT'] + '/requests/monogenic/work',
                                  index_segments_dir=os.environ['OBER_OUT'] + '/requests/monogenic/work/index_segments',
                                  region_size=100,
                                  theta_affinity=0.95,
                                  theta_weight=0.5,
                                  regenerate_segments=True,
                                  snps=None,  # np.array([6, 8]),
                                  debug=1,
                                  debug_sample=512):
    # End-to-end validation pipeline: load the monogenic PLINK problem,
    # (re)build IBD segment indices around each requested SNP, impute, and
    # write PLINK/CGI-format outputs.  Returns the imputation result object.
    # Load SNPs
    problem = im.io.read_plink(prefix=work_dir + '/monogenic.12', pedigree=im.itu.HUTT_PED,
                               haplotype=None, frames=None)
    # Testing: simulate aligned samples output (hap types should be 2 in the imputed genotype output line)
    problem.haplotype.poo_phase = np.zeros((problem.num_samples,), dtype=np.byte)
    problem.haplotype.poo_phase[np.array([0, 1])] = 1
    problem.haplotype.poo_phase[np.array([2, 3])] = -1
    # Create segments only for the regions around each snp
    if regenerate_segments:
        for row in (problem.info.snp[snps] if snps is not None else problem.info.snp):
            # Find SNP's region (the one containing its base-pair position)
            chrom, bp = row['chrom'], row['base_pair']
            phasing_dir = '%s/phasing/chr%d' % (os.environ['OBER_OUT'], chrom)
            index_segments_chrom_dir = '%s/chr%d' % (index_segments_dir, chrom)
            info_file = '%s/hutt.phased.info.npz' % (phasing_dir,)
            info = im.io.read_info_npz(info_file)
            snp_bp = info.snp['base_pair']
            # Nearest phased SNP at or below bp.
            snp_index = util.nearest_neighbor_in_list_tree(bp, snp_bp, util.list_index_tree(snp_bp))
            snp_index = snp_index if snp_bp[snp_index] <= bp else snp_index - 1
            # NOTE(review): this relies on integer division to snap to a
            # region boundary; under Python 3 "/" is true division -- verify
            # this module runs under Python 2 or switch to "//".
            start = region_size * (snp_index / region_size)
            stop = start + region_size
            segment_file = '%s/segments-%d-%d.out' % (index_segments_chrom_dir, start, stop)
            if not os.path.exists(segment_file):
                util.mkdir_if_not_exists(index_segments_chrom_dir)
                util.run_command('find-segments-of-snp-range %d %d < %s/segments.out > %s' %
                                 (start, stop, phasing_dir, segment_file))
            # Index segments
            if regenerate_segments or \
               not os.path.exists('%s/metadata.npz' % (index_segments_chrom_dir,)) or \
               not os.path.exists('%s/region-%d.npz' % (index_segments_chrom_dir, start)):
                index_segments_beagle.main(segment_file, info_file, segment_file,
                                           index_segments_chrom_dir,
                                           snp_index=snp_index, debug=2,
                                           theta_affinity=theta_affinity,
                                           theta_weight=theta_weight)
    # Impute using the newly generated segment index
    _, t = im.v.iv.impute_problem(problem, debug=debug, remove_partial_calls=True,
                                  segment_location=index_segments_dir,  # if regenerate_segments else None,
                                  snps=snps, debug_sample=debug_sample)
    im.io.write_plink(im.Problem(genotype=t.imputed, pedigree=im.examples.hutt_pedigree(),
                                 haplotype=None, frames=None),
                      work_dir + '/imputed.12', save_frames=False, save_haplotype=False)
    im.cgi.io_cgi.write_imputed(t, sys.stdout, poo_phase=problem.haplotype.poo_phase)
    with open(work_dir + '/imputed.12.lgen', 'wb') as f:
        im.cgi.io_cgi.write_imputed_lgen(t, f)
    return t
def install(self): """mount disk image and download base system""" #mount disk image mnt_pt = "/mnt/%s" % self._name os.mkdir(mnt_pt) util.run_command("mount -o loop %s %s" % (self._path, mnt_pt)) #download base system print "Downloading %s base system\n" % self._release return util.run_command("debootstrap --arch %s %s %s %s" % (self._arch, self._release, mnt_pt, self._location))
def post_install(self):
    # create /etc/fstab, /etc/hostname, /etc/network/interfaces, /etc/hosts
    # create xen config file, unmount disk image
    mnt_pt = "/mnt/%s" % self._name
    print "Setting up guest OS %s\n" % self._name
    print "Copying kernel modules\n"
    self.copy_kernel_modules(mnt_pt)
    print "Disabling extra ttys\n"
    self._disable_ttys(mnt_pt)
    print "Setting up apt\n"
    # Guest sources.list: main archive plus security updates for the release.
    text = """# %s/stable
deb http://archive.ubuntu.com/ubuntu/ %s main restricted universe multiverse
deb http://security.ubuntu.com/ubuntu %s-security main restricted universe multiverse
""" % (
        self._release,
        self._release,
        self._release,
    )
    self._apt_setup(text, mnt_pt)
    print "installing libc6-xen, udev, ssh\n"
    self.install_pkgs(["libc6-xen", "openssh-server", "udev"], mnt_pt)
    # create /etc/fstab
    print "Setting up filesystem table\n"
    self.create_fstab(mnt_pt)
    print "Setting up networking\n"
    self.network_setup(mnt_pt)
    # hack to prevent nash-hotplug from hogging cpu
    self.kill_nash_hotplug(mnt_pt)
    print "Creating initrd for %s\n" % self._name
    self.create_initrd()
    print "Generating xen configuration file /etc/xen/%s\n" % self._name
    self.create_xen_config()
    # unmount filesystem
    util.run_command("umount %s" % mnt_pt)
    os.rmdir(mnt_pt)
    print "Installation of guest domain %s complete!!!" % self._name
    return 0
def list_auto_snapshot_sets(self, tag = None):
    """
    Returns a list of zfs filesystems and volumes tagged with
    the "com.sun:auto-snapshot" property set to "true", either
    set locally or inherited. Snapshots are excluded from the
    returned result.

    Keyword Arguments:
    tag:
        A string indicating one of the standard auto-snapshot schedules
        tags to check (eg. "frequent" will map to the tag:
        com.sun:auto-snapshot:frequent). If specified as a zfs property
        on a zfs dataset, the property corresponding to the tag will
        override the wildcard property: "com.sun:auto-snapshot"
        Default value = None
    """
    #Get auto-snap property in two passes. First with the global
    #value, then overriding with the label/schedule specific value
    included = []
    excluded = []

    cmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
           "-o", "name,com.sun:auto-snapshot", "-s", "name"]
    if tag:
        overrideprop = "com.sun:auto-snapshot:" + tag
        scmd = [ZFSCMD, "list", "-H", "-t", "filesystem,volume",
                "-o", "name," + overrideprop, "-s", "name"]
        outdata, errdata = util.run_command(scmd)
        for line in outdata.rstrip().split('\n'):
            fields = line.split()
            if fields[1] == "true":
                included.append(fields[0])
            elif fields[1] == "false":
                excluded.append(fields[0])
    outdata, errdata = util.run_command(cmd)
    for line in outdata.rstrip().split('\n'):
        fields = line.split()
        # Only set values that aren't already set. Don't override.
        # (Fix: plain membership tests replace the original's convoluted
        # try/list.index/except-ValueError construction.)
        if fields[0] in included or fields[0] in excluded:
            continue
        if fields[1] == "true":
            included.append(fields[0])
    return included
def get_regression_traces(ns3_dir, regression_branch):
    print """
#
# Get the regression traces
#
"""
    # ns3_dir is the directory into which we cloned the repo
    # regression_branch is the repo in which we will find the traces. Variations like this should work:
    # ns-3-dev-ref-traces
    # craigdo/ns-3-dev-ref-traces
    # craigdo/ns-3-tap-ref-traces
    regression_traces_dir = os.path.split(regression_branch)[-1]
    regression_branch_url = constants.REGRESSION_TRACES_REPO + regression_branch
    print "Synchronizing reference traces using Mercurial."
    try:
        if not os.path.exists(regression_traces_dir):
            run_command(["hg", "clone", regression_branch_url, regression_traces_dir])
        else:
            run_command(["hg", "-q", "pull", "--cwd", regression_traces_dir,
                         regression_branch_url])
            run_command(["hg", "-q", "update", "--cwd", regression_traces_dir])
    except OSError:
        # this exception normally means mercurial is not found
        # BUG(review): "regression_traces_dir_name" and "regression_tbranch"
        # are not defined anywhere in this function, so this fallback path
        # raises NameError; presumably regression_traces_dir and
        # regression_branch were intended.
        if not os.path.exists(regression_traces_dir_name):
            traceball = regression_tbranch + constants.TRACEBALL_SUFFIX
            print "Retrieving " + traceball + " from web."
            urllib.urlretrieve(constants.REGRESSION_TRACES_URL + traceball, traceball)
            run_command(["tar", "-xjf", traceball])
            print "Done."
    return regression_traces_dir
def _disable_ttys(self, mnt_pt):
    """Remove the tty2-6 upstart jobs and repoint tty1 at a serial-console getty."""
    util.run_command("rm -f %s/etc/event.d/tty[2-6]" % mnt_pt)
    lines = util.pipe_command("cat %s/etc/event.d/tty1" % mnt_pt)
    # feisty's upstart job ends with "exec"; later releases use "respawn".
    if self._release == "feisty":
        lines[-1] = "exec /sbin/getty -L 9600 console vt100\n"
    else:
        lines[-1] = "respawn /sbin/getty -L 9600 console vt100\n"
    # Fix: with-statement replaces the manual try/finally/close dance and
    # guarantees the file is closed on any exit path.
    with open("%s/etc/event.d/tty1" % mnt_pt, "w") as fd:
        fd.writelines(lines)
def build_ns3(config, build_examples, build_tests):
    # Configure ns-3 via waf with optional regression traces, pybindgen and
    # NSC support (NSC only on supported x86 Linux), then build.
    cmd = [
        "python", "waf", "configure",
    ]
    if build_examples:
        cmd.append("--enable-examples")
    if build_tests:
        cmd.append("--enable-tests")
    try:
        # Single-element unpacking raises ValueError unless exactly one
        # matching element exists.
        ns3_traces, = config.getElementsByTagName("ns-3-traces")
    except ValueError:
        # Don't print a warning message here since regression traces
        # are no longer used.
        pass
    else:
        cmd.extend([
            "--with-regression-traces",
            os.path.join("..", ns3_traces.getAttribute("dir")),
        ])
    try:
        pybindgen, = config.getElementsByTagName("pybindgen")
    except ValueError:
        print "Note: configuring ns-3 without pybindgen"
    else:
        cmd.extend([
            "--with-pybindgen",
            os.path.join("..", pybindgen.getAttribute("dir")),
        ])
    try:
        nsc, = config.getElementsByTagName("nsc")
    except ValueError:
        print "Note: configuring ns-3 without NSC"
    else:
        # Build NSC if the architecture supports it
        if sys.platform not in ['linux2']:
            arch = None
        else:
            arch = os.uname()[4]
        if arch == 'x86_64' or arch == 'i686' or arch == 'i586' or arch == 'i486' or arch == 'i386':
            cmd.extend(["--with-nsc", os.path.join("..", nsc.getAttribute("dir"))])
        else:
            print "Note: configuring ns-3 without NSC (architecture not supported)"
    run_command(cmd)
    run_command(["python", "waf"])
def list_filesystems(self, pattern = None):
    """
    List pattern matching filesystems sorted by name.

    Keyword arguments:
    pattern -- Filter according to pattern (default None)
    """
    filesystems = []
    # Need to first ensure no other thread is trying to
    # build this list at the same time.
    Datasets._filesystemslock.acquire()
    if Datasets.filesystems == None:
        Datasets.filesystems = []
        cmd = [ZFSCMD, "list", "-H", "-t", "filesystem", \
               "-o", "name,mountpoint", "-s", "name"]
        try:
            outdata,errdata = util.run_command(cmd, True)
        except OSError, message:
            raise RuntimeError, "%s subprocess error:\n %s" % \
                                (cmd, str(message))
        # BUG(review): "err" is never assigned in this scope, so reaching
        # this check raises NameError instead of testing an exit code --
        # presumably run_command(cmd, True) was meant to also return it.
        if err != 0:
            Datasets._filesystemslock.release()
            raise RuntimeError, '%s failed with exit code %d\n%s' % \
                                (str(cmd), err, errdata)
        for line in outdata.rstrip().split('\n'):
            line = line.rstrip().split()
            Datasets.filesystems.append([line[0], line[1]])
    # NOTE(review): within the visible span the lock is not released on the
    # success path and nothing is returned -- the function appears truncated
    # here; verify the remainder releases _filesystemslock.
def get_user_property(self, prop, local=False):
    """Return a ZFS user property's value; local=True restricts to locally-set values."""
    query = [ZFSCMD, "get"]
    if local == True:
        query += ["-s", "local"]
    query += ["-H", "-o", "value", prop, self.name]
    outdata, errdata = util.run_command(query)
    return outdata.rstrip()
def get_default_schedules(): """ Finds the default schedules that are enabled (online, offline or degraded) """ #This is not the fastest method but it is the safest, we need #to ensure that default schedules are processed in the pre-defined #order to ensure that the overlap between them is adhered to #correctly. monthly->weekly->daily->hourly->frequent. They have #to be processed first and they HAVE to be in the correct order. _defaultSchedules = [] for s in factoryDefaultSchedules: instanceName = "%s:%s" % (BASESVC,s) cmd = [smf.SVCSCMD, "-H", "-o", "state", instanceName] _scheddetaillock.acquire() try: outdata,errdata = util.run_command(cmd) finally: _scheddetaillock.release() result = outdata.rstrip() # Note that the schedules, being dependent on the time-slider service # itself will typically be in an offline state when enabled. They will # transition to an "online" state once time-slider itself comes # "online" to satisfy it's dependency if result == "online" or result == "offline" or result == "degraded": instance = AutoSnap(s) try: _defaultSchedules.append(instance.get_schedule_details()) except RuntimeError, message: raise RuntimeError, "Error getting schedule details for " + \ "default auto-snapshot SMF instance:" + \ "\n\t" + instanceName + "\nDetails:\n" + \ str(message)
def make_temp_copy(temp_dir_with_slash, filename, head=False):
    """ Create a temporary copy of a file, either from the index or from HEAD. """
    # TODO: Once all the hooks can take straight text rather than files, use git show instead:
    #   git show :<filename>  (see the sibling index-only variant)
    # contents of <filename> in the index
    dest = os.path.join(temp_dir_with_slash, filename)
    if os.path.isfile(dest):
        os.remove(dest)
    if head:
        # Extract the HEAD version by piping git archive through tar.
        archive_cmd = "git archive HEAD -- %s" % (filename, )
        untar_cmd = "tar -x -C %s" % (temp_dir_with_slash, )
        git_out, git_err, git_rc = run_piped_commands([archive_cmd, untar_cmd])
    else:
        # Materialize the index version via checkout-index.
        checkout_cmd = "git checkout-index --prefix=%s -- %s" % (temp_dir_with_slash, filename)
        git_out, git_err, git_rc = run_command(checkout_cmd)
    if git_out or git_err or git_rc:
        print("# Internal hook error:\n%(out)s\n%(err)s\n" % dict(out=git_out, err=git_err))
        sys.exit(1)
    return dest
def get_creation_time(self):
    # Lazily fetch and cache the dataset's creation time; -p gives the raw
    # numeric (epoch-seconds) value rather than a human-readable date.
    if self.__creationTime == None:
        cmd = [ZFSCMD, "get", "-H", "-p", "-o", "value", "creation", self.name]
        outdata,errdata = util.run_command(cmd)
        self.__creationTime = long(outdata.rstrip())
    return self.__creationTime
def get_custom_schedules():
    """
    Finds custom schedules ie. not the factory default
    'monthly', 'weekly', 'hourly', 'daily' and 'frequent' schedules
    """
    _customSchedules = []
    cmd = [smf.SVCSCMD, "-H", "-o", "state,FMRI", BASESVC]
    # Serialize svcs queries across threads.
    _scheddetaillock.acquire()
    try:
        outdata,errdata = util.run_command(cmd)
    finally:
        _scheddetaillock.release()
    for line in outdata.rstrip().split('\n'):
        line = line.rstrip().split()
        state = line[0]
        fmri = line[1]
        # The schedule label is the last component of the FMRI.
        fmri = fmri.rsplit(":", 1)
        label = fmri[1]
        if label not in factoryDefaultSchedules:
            # Note that the schedules, being dependent on the time-slider service
            # itself will typically be in an offline state when enabled. They will
            # transition to an "online" state once time-slider itself comes
            # "online" to satisfy it's dependency
            if state == "online" or state == "offline" or state == "degraded":
                instance = AutoSnap(label)
                try:
                    _customSchedules.append(instance.get_schedule_details())
                except RuntimeError, message:
                    raise RuntimeError, "Error getting schedule details " + \
                                        "for custom auto-snapshot SMF " + \
                                        "instance:\n\t" + label + "\n" + \
                                        "Details:\n" + str(message)
    # NOTE(review): no return statement is visible in this span; presumably
    # the function ends with "return _customSchedules" -- verify.
def make_temp_copy(temp_dir_with_slash, filename):
    """ Create a temporary copy of a file from the index. """
    # TODO: Once all the hooks can take straight text rather than files, just return text.
    dest = os.path.join(temp_dir_with_slash, filename)
    if os.path.isfile(dest):
        os.remove(dest)
    # Read the staged blob straight out of the index.
    git_cat_command = "git show :%(f)s" % dict(f=filename)
    git_out, git_err, git_rc = run_command(git_cat_command)
    if git_err or git_rc:
        print("# Internal hook error:\n%(out)s\n%(err)s\n" % dict(out=git_out, err=git_err))
        sys.exit(1)
    try:
        os.makedirs(os.path.dirname(dest))
    except OSError:
        pass  # dir already exists
    with open(dest, "wb") as outfile:
        outfile.write(git_out)
    return dest
def file_passes(self, temp_filename, original_filename=None):
    """Check that a Scala file's comment/code line ratio meets REQUIRED_COMMENT_PERCENTAGE.

    Returns (passed, message).
    """
    atexit.register(os.remove, temp_filename)  # clean up after ourselves
    if original_filename is None:
        original_filename = temp_filename
    out, err, returnCode = run_command('cloc --quiet --yaml ' + temp_filename)
    # Fix: yaml.load without an explicit Loader is deprecated and can run
    # arbitrary constructors; cloc emits plain YAML, so safe_load suffices.
    dataMap = yaml.safe_load(out)
    scalaMap = dataMap['Scala']
    comment_percentage = scalaMap['comment'] * 100.0 / scalaMap['code']
    result_msg = "%s :: Code lines count: %s, Comment lines count: %s, Target comment percentage: %s, comment/code percentage: %s" % (str(self), scalaMap['code'], scalaMap['comment'], REQUIRED_COMMENT_PERCENTAGE, comment_percentage)
    if comment_percentage < REQUIRED_COMMENT_PERCENTAGE:
        return False, "FAIL " + result_msg
    else:
        return True, "PASS " + result_msg
def get_ns3(ns3_branch):
    # Clone (or update) the requested ns-3 branch via Mercurial; returns the
    # local checkout directory name.
    print """
#
# Get NS-3
#
"""
    ns3_dir = os.path.split(ns3_branch)[-1]
    ns3_branch_url = constants.NSNAM_CODE_BASE_URL + ns3_branch
    if not os.path.exists(ns3_dir):
        print "Cloning ns-3 branch"
        run_command(['hg', 'clone', ns3_branch_url, ns3_dir])
    else:
        print "Updating ns-3 branch"
        run_command(['hg', '--cwd', ns3_dir, 'pull', '-u'])
    return ns3_dir
def generate(treebankdir, conlldir, sections, parallelism):
    # Convert Penn Treebank .mrg files into CoNLL dependency files, one
    # treebank section at a time.
    # NOTE(review): the Pool is created but never used below -- conversion
    # runs serially; presumably p.map (or similar) was intended.
    p = Pool(parallelism)
    sectioniter = range(25) if len(sections) == 0 else sections
    for s in sectioniter:
        sectionDir = str(s).zfill(2)
        mkdir(conlldir,sectionDir)
        tstart = time.time()
        for sourcefile in glob.glob('/'.join([os.path.join(treebankdir, sectionDir),mrgfilename('*')])):
            basefilename = os.path.basename(sourcefile)
            run_command(pennconverter.substitute(pennconverterdir='.',penntreefile=sourcefile,
                                                 conllfile=os.path.join(conlldir,sectionDir)+'/'+ dpfilename(noext(basefilename))), False)
        tend = time.time()
        print "Generated section %s in %s"%(sectionDir,secondsToStr(tend-tstart))
def get_prop(self, propgroup, propname):
    """Return the value of the SMF property propgroup/propname for this instance."""
    prop_path = propgroup + '/' + propname
    cmd = [SVCPROPCMD, "-c", "-p", prop_path, self.instanceName]
    outdata, errdata = util.run_command(cmd)
    return outdata.rstrip()
def list_zpools():
    """Returns a list of all zpools on the system"""
    cmd = [ZPOOLCMD, "list", "-H", "-o", "name"]
    outdata, errdata = util.run_command(cmd)
    # One pool name per line of zpool output.
    return [line.rstrip() for line in outdata.rstrip().split('\n')]
def __get_health(self):
    """
    Returns pool health status: 'ONLINE', 'DEGRADED' or 'FAULTED'
    """
    health, _errdata = util.run_command(
        [ZPOOLCMD, "list", "-H", "-o", "health", self.name])
    return health.rstrip()
def get_mountpoint(self):
    """Return the dataset's mountpoint, caching the value after the first query."""
    if (self.__mountpoint == None):
        outdata, errdata = util.run_command(
            [ZFSCMD, "get", "-H", "-o", "value", "mountpoint", self.name])
        self.__mountpoint = outdata.rstrip()
    return self.__mountpoint
def run(self, input, **k):
    """Run svm-scale on the input data, producing scaled data and a scale-info file."""
    cfg = self.config
    pieces = ['%s -l %g -u %g ' % (svm_scale_pathname, cfg.lower, cfg.upper)]
    if cfg.y_lower:
        pieces.append("-y %g %g " % (cfg.y_lower, cfg.y_upper))
    if k['scale info input']:
        pieces.append('-r %s ' % k['scale info input'].pathname)
    filename = tempfile.mktemp()
    sio = LibsvmScaleFile(filename, autodel=True)
    pieces.append('-s %s ' % filename)
    pieces.append(input.pathname)
    cmd = ''.join(pieces)
    log = self.callback.log
    log('# Running %s\n' % cmd)
    capture = util.tee()
    util.run_command(cmd, capture, log)
    return {'output': LibsvmInputFile(capture.get_filename(), autodel=True),
            'scale info output': sio}
def get_verbose(self):
    """Return True when the daemon's "verbose" SMF property is set to "true"."""
    cmd = [SVCPROPCMD, "-c", "-p",
           DAEMONPROPGROUP + '/' + "verbose",
           self.instanceName]
    outdata, errdata = util.run_command(cmd)
    # Fix: direct comparison replaces the if/else True/False ladder.
    return outdata.rstrip() == "true"
def run(self, input, **k):
    """Scale the input with svm-scale; emit scaled data plus the scale-info file."""
    conf = self.config
    cmd = '%s -l %g -u %g ' % (svm_scale_pathname, conf.lower, conf.upper)
    if conf.y_lower:
        cmd = cmd + "-y %g %g " % (conf.y_lower, conf.y_upper)
    restore = k['scale info input']
    if restore:
        cmd = cmd + '-r %s ' % restore.pathname
    out_name = tempfile.mktemp()
    scale_info = LibsvmScaleFile(out_name, autodel=True)
    cmd = cmd + '-s %s ' % out_name + input.pathname
    log = self.callback.log
    log('# Running %s\n' % cmd)
    capture = util.tee()
    util.run_command(cmd, capture, log)
    return {
        'output': LibsvmInputFile(capture.get_filename(), autodel=True),
        'scale info output': scale_info
    }
def _run(self):
    """Build and execute the hhblits command for this job's input sequence."""
    cfg = self.config['hhblits']
    job_id = self.config['id']
    input_file = self.config['path']['input']
    out_dir = self.config['path']['output']
    output_file = os.path.join(out_dir, job_id + '.a3m')
    log_name = os.path.join(out_dir, job_id + '.hhblog')
    args = [cfg['command'], '-i', input_file, '-d', cfg['uniprot_db'],
            '-oa3m', output_file]
    # Numeric tuning options, stringified in the same order as before.
    for flag, key in (('-n', 'n_iters'), ('-maxfilt', 'maxfilt'),
                      ('-diff', 'diff'), ('-id', 'id'), ('-cov', 'cov'),
                      ('-e', 'e_value'), ('-cpu', 'n_threads')):
        args += [flag, str(cfg[key])]
    util.run_command(args, log_name)
def has_clones(self):
    """Returns True if the snapshot has any dependent clones"""
    outdata, errdata = util.run_command([ZFSCMD, "list", "-H", "-o", "origin,name"])
    for entry in outdata.rstrip().split('\n'):
        fields = entry.rstrip().split()
        # A dataset whose origin is this snapshot (and is not '-') is a clone.
        if fields[0] == self.name and fields[1] != '-':
            return True
    return False
def get_referenced_size(self):
    """
    How much unique storage space is used by this snapshot.
    Answer in bytes
    """
    # -p yields the raw byte count rather than a human-readable value.
    cmd = [ZFSCMD, "get", "-H", "-p", \
           "-o", "value", "referenced", \
           self.name]
    outdata, errdata = util.run_command(cmd)
    return long(outdata.rstrip())
def attempt_recover(self):
    """Re-run 'qdo recover' for this stage when it is safe to do so."""
    # Check if previous stage is done and if no jobs are pending/running,
    # and we have retries left.
    pending_tasks = self.queue.status()['ntasks']['Pending']
    safe_to_recover = (self.previous_stage.is_done()
                       and pending_tasks == 0
                       and len(self.get_jobs_in_queue()) == 0
                       and self.get_current_retries() < MAX_RETRIES)
    if safe_to_recover:
        output = run_command('qdo recover {}'.format(self.name))
        print(output)
        self.increment_retries()
def find_dependency_errors(self):
    """Return "state\\tfmri" strings for each service dependency that is not online."""
    #FIXME - do this in one pass.
    errors = []
    for dep in self.svcdeps:
        outdata, errdata = util.run_command([SVCSCMD, "-H", "-o", "state", dep])
        state = outdata.rstrip()
        if state != "online":
            errors.append("%s\t%s" % (state, dep))
    return errors
def get_user_property(self, prop, local=False):
    """Query a ZFS user property value; local=True restricts to locally-set values."""
    if local == True:
        source_args = ["-s", "local"]
    else:
        source_args = []
    cmd = [ZFSCMD, "get"] + source_args + ["-H", "-o", "value", prop, self.name]
    outdata, errdata = util.run_command(cmd)
    return outdata.rstrip()
def run_list(args):
    """List the remote PATH over ssh, using find or exa depending on args.FIND."""
    host_string = build_host_string(args)
    log(f"Running list {host_string}:{args.PORT}:{args.PATH} with depth {args.DEPTH} ..", end="\n\n")
    ssh_prefix = ["ssh", "-p", args.PORT, host_string]
    if args.FIND is True:
        # Trim the leading "PATH/" from find's output.
        cut_start = len(args.PATH) + 2
        remote_cmd = f"find {args.PATH} -maxdepth {args.DEPTH} | cut -c {cut_start}-"
    else:
        color = "always" if args.COLOR is True else "never"
        remote_cmd = f"exa --recurse --level {args.DEPTH} --long --all --all --git --color {color} {args.PATH}"
    run_command(ssh_prefix + [remote_cmd])
def deploy_backend(self, backend):
    """Deploy provider."""
    self.copy_template('{}-backend.tf'.format(backend))
    # Pick the runway fixture matching the backend flavor.
    runway_flavor = 's3' if backend == 's3' else 'nos3'
    self.copy_runway(runway_flavor)
    with change_dir(self.base_dir):
        return run_command(['runway', 'deploy'])
def run_psipred(self):
    # Run the PsiPred secondary-structure predictor on the PSI-BLAST profile
    # matrix, writing its stdout to the .ss file.
    print "-" * 60
    print "Running PsiPred"
    id = self.config['id']
    # NOTE(review): fasta_file is assigned but never used below.
    fasta_file = self.config["path"]["input"]
    matrix_file = os.path.join(self.config['path']['output'], id + '.mtx')
    ss_file = os.path.join(self.config['path']['output'], id + '.ss')
    # psipred <mtx> weights.dat weights.dat2 weights.dat3
    args = [
        self.config['psipred']['command'],
        matrix_file,
        os.path.join(self.config['psipred']['data'], 'weights.dat'),
        os.path.join(self.config['psipred']['data'], 'weights.dat2'),
        os.path.join(self.config['psipred']['data'], 'weights.dat3')
    ]
    util.run_command(args, ss_file)
    print "Done\n"
def run(self):
    """
    Runs the provisioning for this module. Does the following.

    1. Run Nmap port scanning on localhost
    """
    logger = logging.getLogger()
    result = run_command(['nmap', '-p0-', 'localhost'])
    # result[1] holds the captured stdout bytes.
    logger.info('Nmap output:\n' + result[1].decode('utf-8'))
def make_pdf(self, tex, fname):
    """Write a pdf that can be printed to print address information on
    an envelope.  Don't use this for windowed envelopes.  For those,
    you want to add the address to the document that goes in the
    envelope.

    FNAME is a string to which we'll write the pdf.
    """
    base = os.path.splitext(fname)[0]
    tex_fname = base + '.tex'
    with open(tex_fname, 'w') as fh:
        fh.write(tex)
    run_command(
        "pdflatex -interaction nonstopmode -halt-on-error -file-line-error %s"
        % tex_fname)
    # Remove LaTeX by-products, keeping only the generated .pdf.
    os.unlink(base + ".aux")
    os.unlink(base + ".log")
    os.unlink(base + ".tex")
def run_psipred_pass2(self):
    # Second PsiPred pass: refine the .ss prediction, writing the .ss2 file
    # and sending stdout to the .horiz file.
    print "-" * 60
    print "Running PsiPredPass2"
    id = self.config['id']
    ss_file = os.path.join(self.config['path']['output'], id + '.ss')
    ss2_file = os.path.join(self.config['path']['output'], id + '.ss2')
    horiz_file = os.path.join(self.config['path']['output'], id + '.horiz')
    # psipass2 weights_p2.dat <n_iters> <DCA> <DCB> <ss2-out> <ss-in>
    args = [
        self.config['psipred_pass2']['command'],
        os.path.join(self.config['psipred']['data'], 'weights_p2.dat'),
        str(self.config['psipred_pass2']['n_iters']),
        str(self.config['psipred_pass2']['DCA']),
        str(self.config['psipred_pass2']['DCB']),
        ss2_file,
        ss_file
    ]
    util.run_command(args, horiz_file)
    print "Done\n"
def run_blast(self):
    # Run iterated PSI-BLAST against nr, keeping the checkpoint (.chk) for
    # later profile generation; stdout/stderr go to the .blast log.
    print "-" * 60
    print "Running Blast"
    id = self.config['id']
    fasta_file = self.config["path"]["input"]
    output_name = os.path.join(self.config['path']['output'], id + '.chk')
    log_name = os.path.join(self.config['path']['output'], id + '.blast')
    blast_config = self.config['blast']
    # -b 0: no alignments in the report; -C: write the checkpoint file.
    args = [
        blast_config['command'],
        '-a', str(blast_config['n_threads']),
        '-b', '0',
        '-j', str(blast_config['n_iters']),
        '-h', str(blast_config['e_value']),
        '-d', 'nr',
        '-i', fasta_file,
        '-C', output_name
    ]
    util.run_command(args, log_name)
    print "Done\n"
def hold(self, tag):
    """
    Place a hold on the snapshot with the specified "tag" string.
    """
    # FIXME - fails if hold is already held
    # Be sure it genuinely exists before trying to place a hold
    # (idiom fix: "not self.exists()" replaces "self.exists() == False").
    if not self.exists():
        return
    cmd = [PFCMD, ZFSCMD, "hold", tag, self.name]
    outdata, errdata = util.run_command(cmd)
def run(self, **k):
    """Train an svm model; returns the model file (None when cross-validating) plus the log."""
    train_data = k['training data']
    extra = k['optional parameter']
    if extra:
        self.config.__dict__.update(extra.get_data())
    model_path = tempfile.mktemp()
    cmd = self.make_command(train_data.pathname, model_path)
    log = self.callback.log
    log('# Running %s\n' % cmd)
    capture = util.tee(log)
    util.run_command(cmd, capture, capture)
    # fold != 0 means cross-validation: no model file is produced.
    if self.config.fold == 0:
        model = LibsvmModelFile(model_path, autodel=True)
    else:
        model = None
    return {
        'stdout/stderr': TextFile(capture.get_filename(), autodel=True),
        'model': model
    }
def run(self, **k):
    """Run grid.py parameter search; return the contour image, raw results, log and best params."""
    train = k['training data']
    out_filename = tempfile.mktemp()
    png_filename = tempfile.mktemp()
    c = self.config
    cmd = ("%s -svmtrain %s " % (grid_pathname, svm_train_pathname)
           + "-gnuplot %s -out %s " % (gnuplot_pathname, out_filename)
           + "-png %s " % png_filename
           + "-log2c %s,%s,%s " % (c.c_begin, c.c_end, c.c_step)
           + "-log2g %s,%s,%s " % (c.g_begin, c.g_end, c.g_step)
           + train.pathname)
    log = self.callback.log
    log('# Running %s\n' % cmd)
    capture = util.tee(log)
    util.run_command(cmd, capture, capture)
    # grid.py prints the best (c, g, rate) triple as the final output line.
    last_line = open(capture.get_filename()).readlines()[-1]
    best_c, best_g, best_rate = map(float, last_line.split())
    return {'contour': ImageFile(png_filename, autodel=True),
            'result': TextFile(out_filename, autodel=True),
            'stdout/stderr': TextFile(capture.get_filename(), autodel=True),
            'best parameter': ConfigData(cost=best_c, gamma=best_g)}
def list_children(self):
    """Return the names of all descendant filesystems, excluding this dataset itself."""
    cmd = [ZFSCMD, "list", "-H", "-r", "-t", "filesystem",
           "-o", "name", self.name]
    outdata, errdata = util.run_command(cmd)
    names = [entry.rstrip() for entry in outdata.rstrip().split('\n')]
    return [name for name in names if name != self.name]
def get_auto_snap(self, schedule=None):
    """Return True when com.sun:auto-snapshot (or, when schedule is given,
    its schedule-specific override property) is "true" on this dataset."""
    if schedule:
        # Fix: the original built this command and then unconditionally
        # overwrote it with the identical generic command, so the schedule
        # argument was silently ignored.  Query the per-schedule override
        # property, mirroring list_auto_snapshot_sets' tag handling.
        cmd = [ZFSCMD, "get", "-H", "-o", "value",
               "com.sun:auto-snapshot:" + schedule, self.name]
    else:
        cmd = [ZFSCMD, "get", "-H", "-o", "value",
               "com.sun:auto-snapshot", self.name]
    outdata, errdata = util.run_command(cmd)
    return outdata.rstrip() == "true"
def schedule_one_job(self, nodes, hrs, dryrun=True):
    """Submit one sbatch job built from the farm script (or just print it when dryrun)."""
    script_path = gen_farm_script(FARM_QNAME, nodes, int(hrs * 60), 'regular',
                                  IMAGE_TAG, SDO_SCRIPT_DIR, 'haswell', 64)
    command = 'sbatch {}'.format(script_path)
    if BURST_BUFFER:
        command += " --bbf={}".format(BBF)
    if dryrun:
        print(command)
    else:
        output = run_command(command)
        self.record_job(output)
def _find_msvc(version, install_vswhere=True):
    """Locate an MSVC installation of the given major version via vswhere.

    Returns (installation_path, version) on success, (None, None) otherwise.
    When vswhere is missing and install_vswhere is True, installs it via
    chocolatey and retries exactly once.
    """
    vswhere = util.where('vswhere')
    # if that fails, install vswhere and try again
    if not vswhere and install_vswhere:
        result = util.run_command('choco', 'install', '--no-progress', 'vswhere')
        if result.returncode == 0:
            return _find_msvc(version, False)
        return None, None

    compiler = None
    vc_version = None

    # Grab installed version
    result = util.run_command('vswhere', '-legacy', '-version', version,
                              '-property', 'installationVersion', quiet=True)
    text = result.output
    # Fix: raw string literal -- '\d' in a plain string is an invalid escape
    # sequence (DeprecationWarning, SyntaxWarning on newer Pythons).
    m = re.match(r'(\d+)\.?', text)
    if m:
        vc_version = m.group(1)

    if not vc_version or vc_version != version:
        return None, None

    # Grab installation path
    result = util.run_command('vswhere', '-legacy', '-version', version,
                              '-property', 'installationPath', quiet=True)
    text = result.output
    compiler = text.strip()

    return compiler, vc_version
def get_ns3(ns3_branch):
    """Clone (or update) the ns-3 git repository for the requested branch."""
    print("""
#
# Get NS-3
#
""")
    # master lives in the default checkout dir; other branches get their own.
    ns3_dir = 'ns-3-dev' if ns3_branch == "master" else ns3_branch

    if not os.path.exists(ns3_dir):
        if ns3_branch == "master":
            print("Cloning ns-3 development repository")
            run_command(['git', 'clone', constants.NSNAM_CODE_BASE_URL])
        else:
            print("Cloning ns-3 development repository and checking out branch %s" % ns3_branch)
            run_command(['git', 'clone', constants.NSNAM_CODE_BASE_URL,
                         '--branch', ns3_branch, ns3_branch])
    elif ns3_branch == "master":
        print("Updating ns-3 repository")
        run_command(['git', '-C', ns3_dir, 'pull'])
    else:
        print("Suppressing update on existing %s directory containing a non-master branch" % ns3_branch)
        print("Exiting...")
        sys.exit(0)
    return ns3_dir
def prepare_for_beats(vm_name):
    # Copy and configure metricbeat/dockbeat configs and the elasticsearch
    # certificate onto a newly provisioned docker-machine VM.
    # Read the current config file
    config = util.read_config_file(config_name)
    print('\nDeploying beats on the VM: ' + vm_name)
    # Get where the config files are located. This can be found on config.ini file
    elascale_config_dir = config.get('swarm', 'elascale_config_dir')
    # Get where the Elascale root directory are located. This can be found on config.ini file
    elascale_certs_dir = config.get('swarm', 'elascale_certs_dir')
    # Copy the metricbeat.yml on swarm-master to the new node
    result = util.run_command("sudo docker-machine scp " + elascale_config_dir + "metricbeat.yml " + vm_name + ":~")
    # Change the hostname on the new metricbeat.yml
    sed_command = "sed -i \"s/name: \".*\"/name: \"" + vm_name + "\"/g\" ~/metricbeat.yml"
    result = util.run_command("sudo docker-machine ssh " + vm_name + " " + sed_command)
    # Copy the dockbeat.yml on swarm-master to the new node
    result = util.run_command("sudo docker-machine scp " + elascale_config_dir + "dockbeat.yml " + vm_name + ":~")
    # Copy elasticsearch_certificate.yml to the new machine
    result = util.run_command("sudo docker-machine scp " + elascale_certs_dir + "elasticsearch_certificate.pem " + vm_name + ":~/certs")
    # Create /volumes/dockbeat-logs dir on the new node (required for dockbeat to work properly)
    result = util.run_command("sudo docker-machine ssh " + vm_name + " sudo mkdir -p /volumes/dockbeat-logs/")
    print(
        'dockbeat and metricbeat yml files have been copied/configured successfully.'
    )
def run_runway(self, template, command='deploy'):
    """Run ``runway <command>`` for one test template.

    Looks the template up under ``self.templates_dir`` and executes runway
    inside it.  Returns the runway exit status, or 1 when the template
    directory does not exist.
    """
    template_dir = os.path.join(self.templates_dir, template)
    # Guard clause: bail out early if there is nothing to run.
    if not os.path.isdir(template_dir):
        self.logger.error('Directory not found: %s', template_dir)
        return 1
    self.logger.info('Executing test "%s" in directory "%s"',
                     template, template_dir)
    with change_dir(template_dir):
        self.logger.info('Running "runway %s" on %s', command, template_dir)
        return run_command(['runway', command], self.environment)
def get_macroservice(microservice):
    """Return the macroservice (swarm node, 'iot-<name>') hosting *microservice*.

    Parses the output of ``docker service ps`` for the first token after the
    'iot-' prefix, which is the node name.
    """
    listing = util.run_command("sudo docker service ps " + microservice)
    # The node label appears as 'iot-<node> ...' somewhere in the listing;
    # take the first whitespace-delimited token after the prefix.
    suffix = re.search('iot-(.*)', listing.output).group(1)
    macroservice = "iot-" + suffix.split(" ")[0]
    print("Macroservice: " + macroservice + "\n")
    return macroservice
def main():
    """Fetch a transaction by hash and print it.

    The hash is taken from the command line (parse_arguments) when given,
    otherwise obtained via get_transaction_hash().  Ensures the
    ../output/transaction directory exists before querying.
    """
    compile_path = Path("../output/transaction")
    # Create the directory natively instead of shelling out to `mkdir -p`;
    # parents/exist_ok reproduce mkdir -p semantics without a subprocess.
    compile_path.mkdir(parents=True, exist_ok=True)

    tx_hash = parse_arguments()
    if tx_hash is None:
        tx_hash = get_transaction_hash()
    transaction = transaction_by_hash(remove_hex_0x(tx_hash))
    print(transaction)
def check_macroservice_status(vm_name):
    """Return the ``docker-machine ls`` lines that report an error state.

    A line counts as an error if it mentions 'Unknown' or 'Timeout'.
    An empty list means every VM in the listing is healthy.
    """
    listing = util.run_command("sudo docker-machine ls")
    lines = str(listing.output).split("\n")
    markers = ("Unknown", "Timeout")
    # Keep only the rows that contain any of the error markers.
    return [row for row in lines if any(marker in row for marker in markers)]
def list_children(self):
    """Returns a recursive list of child snapshots of this snapshot"""
    cmd = [ZFSCMD, "list", "-t", "snapshot", "-H", "-r", "-o", "name",
           self.fsname]
    outdata, errdata = util.run_command(cmd)
    # Keep every descendant snapshot carrying our label, excluding ourself.
    label_pattern = "@%s" % (self.snaplabel)
    return [line
            for line in outdata.rstrip().split('\n')
            if re.search(label_pattern, line) and line != self.name]
def build_netanim(qmakepath): qmake = 'qmake' qmakeFound = False try: run_command([qmake, '-v']) print "qmake found" qmakeFound = True except: print "Could not find qmake in the default path" try: if qmakeFound == False: run_command(['qmake-qt4', '-v']) qmake = 'qmake-qt4' print "qmake-qt4 found" except: print "Could not find qmake-qt4 in the default path" if qmakepath: print "Setting qmake to user provided path" qmake = qmakepath try: if sys.platform in ['darwin']: run_command([qmake, '-spec', 'macx-g++', 'NetAnim.pro']) else: run_command([qmake, 'NetAnim.pro']) run_command(['make']) except OSError: print "Error building NetAnim. Ensure the path to qmake is correct." print "Could not find qmake or qmake-qt4 in the default PATH." print "Use ./build.py --qmake-path <Path-to-qmake>, if qmake is installed in a non-standard location" print "Note: Some systems use qmake-qt4 instead of qmake" print "Skipping NetAnim ...." pass except: print "Error building NetAnim." print "Skipping NetAnim ...." pass