def profile(self, repo):
    # for gprof we can run each test in the application
    # however, we must move and/or rename gmon.out each time
    # and then we should go ahead and read the hatchet profile
    profiles = {}
    for test in repo.itertests():
        vprint(self.verbose, 'Profiling test \'{}\'...'.format(test['name']))
        exec_path = os.path.join(test['prefix'], test['executable'])
        cmd = Command(exec_path)
        try:
            cmd(test['args'])
        except Exception:
            vprint(self.verbose,
                   'Running test \'{}\' failed...'.format(test['name']))
            continue
        if not os.path.isfile('gmon.out'):
            vprint(self.verbose, 'Unable to read profile...')
            continue
        # read in profile with gprof and gprof2dot
        dotfile_name = 'profile-dot-graph.dot'
        gprof = Command('gprof')
        gprof2dot = Command('gprof2dot')
        with open(dotfile_name, 'w+') as outFile:
            gprof2dot(gprof(exec_path), '-n0', '-e0', _out=outFile)
        # finally read this into hatchet
        gf = ht.GraphFrame.from_gprof_dot(dotfile_name)
        profiles[test['name']] = gf
    return profiles
def task(dataset, _job):
    startTime = time.time()
    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeVT()
    vtp.makeVT('basicDetector')
    basicDetector = Command("util/basicDetector")
    vtp.makeVT('detectionAccuracy')
    detectionAccuracy = Command("util/detectionAccuracy")
    cp('-r', vtp.srcDir + '/data/labels/skycomp1', '.')
    results = dict()
    for threshold in np.arange(5, 251, 5):
        basicDetector('-r', x, y, w, h, '-s', sz, '-n', n, '-g', vtp.gpuDev,
                      '-t', threshold, "-d", "diff", dataset)
        out = vtp.detectionAccuracy(l=dataset, d='detections.pb',
                                    t=' '.join(str(t) for t in trainFrames),
                                    T=' '.join(str(t) for t in testFrames))
        results[threshold] = out
    return results
def exe(ctx, context, target_url, pending, success, failure, error, command):
    def _status_set(state, description):
        log.info('%s -> %s', context, state)
        ctx.invoke(status_set, context=context, target_url=target_url,
                   state=state, description=description)

    try:
        _status_set('pending', pending)
        cmd = Command(command[0])
        cmd = cmd.bake(command[1:]) if len(command) > 1 else cmd
        cmd(_fg=True)
        _status_set('success', success)
        exit(0)
    except ErrorReturnCode as e:
        log.error('Command failed with exit code: %d', e.exit_code)
        _status_set('failure', failure)
    except CommandNotFound as e:
        log.error('Command not found: %s', e)
        _status_set('error', error)
    except Exception as e:
        log.error(e, exc_info=True)
        _status_set('error', error)
    exit(1)
def generate_pkgspec(pkgoutdir, spectemplate, pkgname):
    obsservicedir = '/usr/lib/obs/service/'
    outdir = ('--outdir', pkgoutdir)
    olddir = os.getcwd()
    try:
        os.chdir(pkgoutdir)
        renderspec = Command(os.path.join(obsservicedir, 'renderspec'))
        renderspec('--input-template', os.path.join(olddir, spectemplate),
                   '--output-name', pkgname + '.spec', *outdir)
        format_spec_file = Command(
            os.path.join(obsservicedir, 'format_spec_file'))
        format_spec_file(*outdir)
        # configure a download cache to avoid downloading the same files
        download_env = os.environ.copy()
        download_env["CACHEDIRECTORY"] = os.path.join(
            os.path.expanduser("~"), ".cache", "download_files")
        download_files = Command(os.path.join(obsservicedir,
                                              'download_files'))
        download_files(_env=download_env, *outdir)
    finally:
        os.chdir(olddir)
class HPCToolkitProfiler(Profiler):
    def __init__(self, profiler_settings, verbose=False):
        super().__init__(profiler_settings, verbose)
        self.hpcrun_cmd = Command('hpcrun')
        self.hpcrun_cmd = self.hpcrun_cmd.bake('-e', 'WALLCLOCK@5000')
        self.hpcstruct_cmd = Command('hpcstruct')
        self.hpcprof_cmd = Command('mpirun')
        self.hpcprof_cmd = self.hpcprof_cmd.bake('-np', '1', 'hpcprof-mpi',
                                                 '--metric-db', 'yes')

    def profile(self, repo):
        profiles = {}
        for test in repo.itertests():
            vprint(self.verbose,
                   'Profiling test \'{}\'...'.format(test['name']))
            exec_path = os.path.join(test['prefix'], test['executable'])
            hpcstruct_name = '{}.hpcstruct'.format(test['name'])
            hpcmeasurements_name = 'hpctoolkit-{}-measurements'.format(
                test['name'])
            hpcdatabase_name = 'hpctoolkit-{}-database'.format(test['name'])
            # try to generate hpcstruct
            try:
                self.hpcstruct_cmd(exec_path, '--output', hpcstruct_name)
            except Exception:
                vprint(self.verbose, 'Failed to create hpcstruct file...')
                continue
            # run test
            try:
                self.hpcrun_cmd('--output', hpcmeasurements_name, exec_path,
                                test['args'])
            except Exception:
                vprint(self.verbose,
                       'Running test \'{}\' failed...'.format(test['name']))
                continue
            # generate profile
            try:
                self.hpcprof_cmd('-S', hpcstruct_name, '-I', './+',
                                 '--output', hpcdatabase_name,
                                 hpcmeasurements_name)
            except Exception:
                vprint(self.verbose,
                       'Generating profile for test \'{}\' failed...'.format(
                           test['name']))
                continue
            # finally read hatchet profile
            profiles[test['name']] = ht.GraphFrame.from_hpctoolkit(
                hpcdatabase_name)
            # and now delete the leftover files/folders
            rm('-r', hpcstruct_name, hpcmeasurements_name, hpcdatabase_name)
        return profiles
def run(cls, root_path, test, base_url):
    logging.debug("run test")
    module, class_name, test_name = (test.module, test.class_name,
                                     test.test_name)
    temp_path = mkdtemp()
    try:
        logging.debug("creating venv")
        venv_path = "%s/venv" % temp_path.rstrip("/")
        virtualenv.create_environment(venv_path, site_packages=False,
                                      unzip_setuptools=True,
                                      use_distribute=True)
        logging.debug("installing funkload")
        pip = Command("%s/bin/pip" % venv_path)
        pip.install(FUNKLOAD_GIT)
        for dep in test.deps:
            logging.debug("install deps")
            pip.install(dep)
        logging.debug("creating command")
        fl_run_test = Command("%s/bin/fl-run-test" % venv_path)
        logging.debug("command created")
        logging.debug(
            "run command: %s" % (
                "%s/bin/fl-run-test %s %s -u %s" % (
                    venv_path, module,
                    ("%s.%s" % (class_name, test_name)), base_url)
            )
        )
        result = fl_run_test(
            module, "%s.%s" % (class_name, test_name), u=base_url,
            _env={"PYTHONPATH": "$PYTHONPATH:%s" % join(
                root_path.rstrip("/"), "bench")},
            simple_fetch=True,
            _cwd=temp_path,
        )
        logging.debug("command run")
        logging.debug("get result")
        exit_code = result.exit_code
        text = result.stdout + result.stderr
        with open(join(temp_path, "funkload.log")) as fl_log:
            logging.debug("write log")
            log = fl_log.read()
    except ErrorReturnCode:
        err = sys.exc_info()[1]
        text = err.stderr
        exit_code = 1
        log = err.stderr + err.stdout
        logging.error(log)
    logging.debug("test run")
    return FunkLoadTestRunResult(exit_code, text, log)
def runFile(self, blendFile):
    imageDir = self.imageDirectory + blendFile.split("/")[-1].replace(
        ".blend", "") + "/"
    try:
        cmd = Command(self.blenderplayer)
        upbgeargs = ("-p", self.pythonMainScript, blendFile, "-", imageDir)
        cmd.run(upbgeargs, _out=debugSh)
    except ErrorReturnCode:
        return False
    return True
def runFile(self, blendFile):
    try:
        cmd = Command(self.blenderplayer)
        upbgeargs = ("-p", self.pythonMainScript, blendFile)
        if len(self.args) > 0:
            upbgeargs += ("-", self.args)
        cmd.run(upbgeargs, _out=debugSh)
    except ErrorReturnCode:
        return False
    return True
def test_command_wrapper(self):
    from sh import Command, which

    ls = Command(which("ls"))
    wc = Command(which("wc"))

    c1 = int(wc(ls("-A1"), l=True))
    c2 = len(os.listdir("."))

    self.assertEqual(c1, c2)
def get_command(self):
    """ Returns a reusable sh.Command object that can execute multiple
    different SFTP commands.
    """
    # A list of arguments that will be added to the base command
    args = []

    # Buffer size is always available
    args.append('-B')
    args.append(self.buffer_size)

    # Bandwidth limit is always available
    args.append('-l')
    args.append(self.bandwidth_limit)

    # Preserving file and directory metadata is optional
    if self.should_preserve_meta:
        args.append('-p')

    # Immediate flushing is optional
    if self.should_flush:
        args.append('-f')

    # Compression is optional
    if self.is_compression_enabled:
        args.append('-C')

    # Forcing a particular IP version is optional
    if self.force_ip_type:
        args.append(ip_type_map[self.force_ip_type])

    # Port is optional
    if self.port:
        args.append('-P')
        args.append(self.port)

    # Identity file is optional
    if self.identity_file:
        args.append('-i')
        args.append(self.identity_file)

    # SSH config file is optional
    if self.ssh_config_file:
        args.append('-F')
        args.append(self.ssh_config_file)

    # Base command to build additional arguments into
    command = Command(self.sftp_command)
    command = command.bake(*args)

    return command
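# A minimal usage sketch for the bake pattern in get_command() above,
# assuming an sftp binary on the PATH rather than the snippet's configurable
# self.sftp_command; the option values and batch-file names are hypothetical
# placeholders, not values from the original code.
from sh import Command

sftp = Command('sftp').bake('-B', '32768', '-l', '8192', '-C')
# Each call reuses the baked options; only the per-call arguments change.
# sftp('-b', 'upload.batch', 'user@example.com')
# sftp('-b', 'cleanup.batch', 'user@example.com')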
def task(dataset, modelLocation, frameDiff, detectorSize, netParams, _job):
    for k in netParams.keys():
        netParams[k] = int(netParams[k])
    startTime = time.time()
    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeParams.append('-DTRAIN_ITERATIONS=' + str(caffeIterations))
    vtp.cmakeParams.append('-DDETECTOR_WIDTH=' + str(detectorSize))
    vtp.cmakeParams.append('-DDETECTOR_HEIGHT=' + str(detectorSize))
    vtp.changeNetParams(**netParams)
    vtp.cmakeVT()
    vtp.makeVT('basicDetector')
    basicDetector = Command("util/basicDetector")
    vtp.makeVT('detectionAccuracy')
    detectionAccuracy = Command("util/detectionAccuracy")
    cp('-r', vtp.srcDir + '/data/labels/skycomp1', '.')
    cp(vtp.srcDir + '/../negatives.yml', '.')
    mkdir('-p', 'src/caffe')
    cp('{0}/vehicle_detector_train_iter_{1}.caffemodel'.format(
        modelLocation, caffeIterations), 'src/caffe/')
    cp('{0}/mean.cvs'.format(modelLocation), 'src/caffe/')
    vtp.makeVT('buildNet')
    results = dict()
    for threshold in np.arange(-2.0, 4.0, 0.2):
        print("running with threshold = {0}".format(threshold))
        bdArgs = ['-r', x, y, w, h, '-s', sz, '-n', n, '-g', vtp.gpuDev,
                  '-t', threshold, dataset]
        if frameDiff != 0:
            bdArgs.append('-f')
            bdArgs.append(frameDiff)
        vtp.basicDetector(*bdArgs)
        out = vtp.detectionAccuracy(l=dataset, d='detections.pb',
                                    t=' '.join(str(t) for t in trainFrames),
                                    T=' '.join(str(t) for t in testFrames))
        results[threshold] = out
    return results
def trace(self):
    try:
        mkdir("-p", self.traceDirectory)
        cmd = Command(self.blenderplayer)
        cmd.run("-p", traceMainScript, self.testFile, "-",
                self.imageDirectory, _out=debugSh)
    except ErrorReturnCode:
        msgerr("Failed trace branch {} blenderplayer".format(self.branch))
        return False
    else:
        msgstat("Success trace branch {} blenderplayer".format(self.branch))
def task(index):
    hmw = Command('hyperopt-mongo-worker')
    hmw('--mongo={0}:{1}/{2}'.format(hostname, 1234, dbname),
        _out=_print_line, _err=_print_line)
    return 'done'
def __init__(self, command):
    super(PostgresBackend, self).__init__(command)
    postgres_executable = getattr(settings, 'DBDEV_POSTGRES_EXECUTABLE',
                                  'postgres')
    pg_ctl_executable = getattr(settings, 'DBDEV_POSTGRES_PG_CTL_EXECUTABLE',
                                'pg_ctl')
    psql_executable = getattr(settings, 'DBDEV_POSTGRES_PSQL_EXECUTABLE',
                              'psql')
    createdb_executable = getattr(settings,
                                  'DBDEV_POSTGRES_CREATEDB_EXECUTABLE',
                                  'createdb')
    pg_dump_executable = getattr(settings,
                                 'DBDEV_POSTGRES_PG_DUMP_EXECUTABLE',
                                 'pg_dump')
    self.serverlogfile = os.path.join(self.datadir, 'serverlog.log')
    environ = {
        'PGPORT': str(DBSETTINGS['PORT'])
    }
    common_command_kwargs = dict(
        _out=self.stdout,
        _err=self.stderr,
        _env=environ,
        _out_bufsize=1)
    self.postgres = Command(postgres_executable).bake(
        p=DBSETTINGS['PORT'], **common_command_kwargs)
    self.pg_ctl = Command(pg_ctl_executable).bake(
        '-w', l=self._server_logfile, D=self.datadir,
        **common_command_kwargs)
    self.psql = Command(psql_executable).bake(
        p=DBSETTINGS['PORT'], **common_command_kwargs)
    self.createdb = Command(createdb_executable).bake(
        '-e', p=DBSETTINGS['PORT'], **common_command_kwargs)
    self.pg_dump = Command(pg_dump_executable).bake(
        p=DBSETTINGS['PORT'], dbname=DBSETTINGS['NAME'],
        **common_command_kwargs)
def get_hosts(inventory_path, group_name):
    ansible_inventory = Command('ansible-inventory')
    json_inventory = json.loads(
        ansible_inventory('-i', inventory_path, '--list').stdout)
    if group_name not in json_inventory:
        raise AssertionError('Group %r not found.' % group_name)
    hosts = []
    if 'hosts' in json_inventory[group_name]:
        return json_inventory[group_name]['hosts']
    else:
        children = json_inventory[group_name]['children']
        for child in children:
            if 'hosts' in json_inventory[child]:
                for host in json_inventory[child]['hosts']:
                    if host not in hosts:
                        hosts.append(host)
            else:
                grandchildren = json_inventory[child]['children']
                for grandchild in grandchildren:
                    if 'hosts' not in json_inventory[grandchild]:
                        raise AssertionError('Group nesting cap exceeded.')
                    for host in json_inventory[grandchild]['hosts']:
                        if host not in hosts:
                            hosts.append(host)
    return hosts
def detectionAccuracy(self, **kwargs):
    da = Command("util/detectionAccuracy")
    out = da(**kwargs)
    print("Got stdout from detectionAccuracy:")
    print(out.stdout)
    mode = ['TRAIN', 'TEST']
    value = ['TP', 'FP', 'DP', 'FN']
    pattern = ''.join(r'{0} {1}:\s+(?P<{0}_{1}>\d+).*'.format(m, v)
                      for m in mode for v in value)
    match = re.search(pattern, out.stdout, re.DOTALL)
    if not match:
        return None
    d = match.groupdict()
    d = dict([(k, int(v)) for k, v in d.items()])
    tp = float(d['TEST_TP'])
    fn = float(d['TEST_FN'])
    dp = float(d['TEST_DP'])
    fp = dp + float(d['TEST_FP'])
    if tp > 0:
        p = tp / (tp + fp)
        r = tp / (tp + fn)
        d['TEST_P'] = p
        d['TEST_R'] = r
        d['TEST_F2'] = 5.0 * p * r / (4 * p + r)
        d['TEST_MR'] = fn / (fn + tp)
    if 'T' in kwargs:
        n = len(kwargs['T'])
        d['TEST_FPPI'] = fp / float(n)
    return d
def jenkins_job_trigger(repo, github_opts, cloudsource, ptfdir):
    print("triggering jenkins job with " + htdocs_url + ptfdir)
    jenkins = Command(
        os.path.abspath(
            os.path.join(os.path.dirname(sys.argv[0]),
                         'jenkins/jenkins-job-trigger')))
    job_parameters = (
        'nodenumber=2',
        'networkingplugin=openvswitch')
    if repo in JOB_PARAMETERS:
        job_parameters = JOB_PARAMETERS[repo]
    job_parameters += ('all_noreboot',)
    print(jenkins(
        'openstack-mkcloud',
        '-p', "github_pr=crowbar/%s:%s" % (repo, github_opts),
        "cloudsource=" + cloudsource,
        # trailing slash required
        # to prevent wget from traversing all test-updates
        'UPDATEREPOS=' + htdocs_url + ptfdir + "/",
        'mkcloudtarget=all_noreboot',
        *job_parameters))
def mount_nas():
    try:
        mountnas = Command("dims.nas.mount")
        mountnas()
    except Exception as exception_:
        print(u"[!] Exception: {0}".format(exception_))
        cleanexit(1)
def doSim(err, fileName, weights, _job):
    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeVT()
    vtp.makeVT('tracker')
    tracker = Command("javaTracker/runTracker.sh")
    ret = {}
    for w in weights:
        rm('-rf', 'output.csv')
        for line in tracker(f=fileName, p=err, n=err, g=False, a=5, w=w,
                            _iter=True):
            print(line)
        with open('output.csv', 'r') as f:
            out = f.read().split(',')
            out = [o.strip() for o in out]
            print(out)
            ret[w] = out
    vtp.export('frame_output.csv')
    return ret
def call(name, *args, _show=True, _capture=False, _ignore=False):
    """Call a shell program with arguments."""
    msg = CMD_PREFIX + ' '.join([name] + list(args))
    if _show:
        common.show(msg)
    else:
        log.debug(msg)

    if name == 'cd' and len(args) == 1:
        return os.chdir(args[0])

    try:
        program = Command(name)
        if _capture:
            line = program(*args).strip()
            log.debug(OUT_PREFIX + line)
            return line
        else:
            for line in program(*args, _iter='err'):
                log.debug(OUT_PREFIX + line.strip())
    except ErrorReturnCode as exc:
        msg = "\n IN: '{}'{}".format(os.getcwd(), exc)
        if _ignore:
            log.debug("Ignored error from call to '%s'", name)
        else:
            raise ShellError(msg)
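# A short usage sketch for call() above; the program names are illustrative
# assumptions, not calls taken from the original module.
call('mkdir', '-p', 'build')                       # streamed: stderr lines go to the debug log
version = call('git', '--version', _capture=True)  # captured: returns the stripped stdout
call('false', _ignore=True)                        # non-zero exit is logged instead of raising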
class VCS(object):
    """Base object for VCS wrappers.

    .. warning::
        Instantiation will raise ``OSError`` if the necessary command line
        tool is not available on the system.

    .. attribute:: cmd_name

        If set this will override the guess-from-class-name method of
        defining the command name for the given VCS.
    """

    def __init__(self):
        cmd_name = getattr(self, 'cmd_name', self.__class__.__name__.lower())
        try:
            self.command = Command(cmd_name)
        except CommandNotFound:
            raise OSError(errno.ENOPROTOOPT, '%s not found' % cmd_name)

    def validate(self, allow_modified=False):
        """Ensure cwd is a VCS repository.

        :param bool allow_modified: Allow operation on dirty trees
        """
        raise NotImplementedError

    def add(self, files):
        """Add files to be committed.

        :param list files: Files to add
        """
        self.command.add(*files)

    def commit(self, files, message):
        """Commit files to repository.

        :param list files: Files to add
        """
        raise NotImplementedError

    def tag(self, name, message):
        """Create version tag

        :param str name: Tag to create
        :param str message: Message to associate with tag
        """
        raise NotImplementedError
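# A minimal sketch of a concrete subclass of the VCS base class above; the
# method bodies are illustrative assumptions, not the project's real wrapper.
# cmd_name is left unset, so self.command resolves to 'git' via the
# guess-from-class-name rule in __init__.
class Git(VCS):
    def validate(self, allow_modified=False):
        # 'git status --porcelain' prints nothing for a clean tree
        status = str(self.command('status', '--porcelain')).strip()
        if not allow_modified and status:
            raise ValueError('working tree has uncommitted changes')

    def commit(self, files, message):
        # sh appends keyword arguments, so this runs roughly:
        # git commit <files> -m <message>
        self.command.commit(*files, m=message)

    def tag(self, name, message):
        self.command.tag('-a', name, m=message)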
def ghs_set_status(repo, head_sha1, status):
    ghs = Command(
        os.path.abspath(
            os.path.join(os.path.dirname(sys.argv[0]),
                         'github-status/github-status.rb')))
    ghs('-r', 'crowbar/' + repo, '-c', head_sha1, '-a', 'set-status',
        '-s', status)
def _run_alembic_upgrade():
    import sh
    from sh import Command
    try:
        flask_cmd = Command('flask')
        flask_cmd('db', 'upgrade')
    except sh.ErrorReturnCode as e:
        print(e.stderr)
def ghs_set_status(org, repo, head_sha1, status):
    ghs = Command(
        os.path.abspath(
            os.path.join(os.path.dirname(sys.argv[0]),
                         'github-status/github-status.rb')))
    ghs('-r', org + '/' + repo, '--context', 'suse/mkcloud/testbuild',
        '-c', head_sha1, '-a', 'set-status', '-s', status)
def run(self, cache, args=[]):
    if self.url.fragment is None:
        raise Invalid('Arx can not execute tarball URLs that have no '
                      'fragment.')
    program = cache.join('program', self.url.fragment.split('/')[-1])
    self.place(cache, program)
    chmod('a+rx', str(program))
    cmd = Command(str(program))
    cmd(*args)
def add_pr_to_checkout(repo, pr_id, head_sha1, pr_branch, spec):
    sh.curl(
        '-s', '-k', '-L',
        "https://github.com/crowbar/%s/compare/%s...%s.patch" % (
            repo, pr_branch, head_sha1),
        '-o', 'prtest.patch')
    sh.sed('-i',
           '-e', 's,Url:.*,%define _default_patch_fuzz 2,',
           '-e', 's,%patch[0-36-9].*,,',
           spec)
    Command('/usr/lib/build/spec_add_patch')(spec, 'prtest.patch')
    iosc('vc', '-m', "added PR test patch from %s#%s (%s)" % (
        repo, pr_id, head_sha1))
def getVersion(self, branch, blenderplayer):
    try:
        cmd = Command(blenderplayer)
        cmd.run(printVersionFile, printVersionBlendFile, _out=debugSh)
    except ErrorReturnCode:
        msgerr("Failed run branch {} blenderplayer".format(branch))
        return False
    else:
        msgstat("Success run branch {} blenderplayer".format(branch))

    with open(printVersionFile, "r") as file:
        lines = file.readlines()
        blenderVersion = lines[0][0:-1]
        upbgeVersion = lines[1][0:-1]
        msgstat("blender version: {}, UPBGE version: {}".format(
            blenderVersion, upbgeVersion))
        return (blenderVersion, upbgeVersion)
def task(dataset, threshold, frameDiff, iteration, _job):
    startTime = time.time()
    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeParams.append('-DTRAIN_ITERATIONS=' + str(caffeIterations))
    vtp.cmakeVT()
    vtp.makeVT('labeledDataToDB')
    labeledDataToDB = Command("util/labeledDataToDB")
    vtp.makeVT('basicDetector')
    #basicDetector = Command("util/basicDetector")
    vtp.makeVT('detectionAccuracy')
    #detectionAccuracy = Command("util/detectionAccuracy")
    cp('-r', vtp.srcDir + '/data/labels/skycomp1', '.')
    cp(vtp.srcDir + '/../negatives.yml', '.')
    results = []
    i = -1
    #for i in range(trainIterations):
    bestF2 = 0.0
    while time.time() < (startTime + 60 * 60 * runHours):
        i += 1
        rm('-rf', 'src/caffe/train.leveldb', 'src/caffe/test.leveldb')
        labeledDataParams = dict(l=dataset, n='negatives.yml',
                                 t=' '.join(str(t) for t in trainFrames),
                                 T=' '.join(str(t) for t in testFrames))
        if i != 0:
            labeledDataParams['d'] = 'detections.pb'
            if frameDiff:
                labeledDataParams['f'] = frameDiff
        labeledDataToDB(**labeledDataParams)
        vtp.makeVT('trainNet')
        vtp.makeVT('buildNet')
        bdArgs = ['-r', x, y, w, h, '-s', sz, '-n', n, '-g', vtp.gpuDev,
                  '-t', threshold, dataset]
        if frameDiff != 0:
            bdArgs.append('-f')
            bdArgs.append(frameDiff)
        vtp.basicDetector(*bdArgs)
        out = vtp.detectionAccuracy(l=dataset, d='detections.pb',
                                    t=' '.join(str(t) for t in trainFrames),
                                    T=' '.join(str(t) for t in testFrames))
        results.append(out)
        if out['TEST_F2'] > bestF2:
            bestF2 = out['TEST_F2']
            vtp.export('src/caffe/mean.cvs')
            vtp.export('src/caffe/vehicle_detector_train_iter_' +
                       str(caffeIterations) + '.caffemodel')
            vtp.export('negatives.yml')
    return results
def run(self, cache, args=[]):
    if not True:  # this guard is effectively disabled in the source
        raise Invalid('Directories can not be run as commands.')
    if not os.access(str(self.resolved), os.X_OK):
        if self.dot:
            chmod('a+rx', str(self.resolved))
        else:
            log.error('Not able to mark `%s` as executable :/'
                      % self.resolved)
    cmd = Command(str(self.resolved))
    cmd(*args)
def check_llvm():
    try:
        llvm_path = './llvm/Release+Asserts'
        assert isfile(llvm_path + '/lib/ocaml/llvm.cma')  # is llvm.cma there?
        llvm_config = Command(llvm_path + '/bin/llvm-config')
        ver = llvm_config('--version')  # try calling llvm-config
        print(ver)
        assert '3.2' in ver
        return True
    except Exception:
        return False
def _run_query(self, input_string):
    # Trying to make the buffering process work.
    try:
        self.output = StringIO()

        def grab_output(line):
            self.output.write(line)

        self.gnash = Command("./Gnash.exe")
        self.gnash(_in=input_string, _out=grab_output).wait()
    except Exception:
        print("Error, cannot run the query on Gnash")
def ansible(self, cmd='ansible-playbook'):
    tmp_inventory_dir = os.path.join(data_dir(), 'vagrant-inventory')
    if not os.path.isdir(tmp_inventory_dir):
        os.makedirs(tmp_inventory_dir)

    # create a temporary inventory file for ansible
    tmp_inventory_file = os.path.join(tmp_inventory_dir, self.vm_name())
    with open(tmp_inventory_file, 'w') as f:
        f.write('%s ansible_ssh_host=%s ansible_ssh_port=22 '
                'ansible_ssh_private_key_file=%s' % (
                    self.vm_name(), self.ip(), self.ssh_key()
                ))

    ansible = Command(cmd)
    new_env = ansible_env(os.environ.copy())
    return ansible.bake('-i', tmp_inventory_file,
                        '--extra-vars', '@%s' % self.project.config_file(),
                        _env=new_env, _out_bufsize=0, _err_bufsize=0)
class PostgresBackend(BaseDbdevBackend):
    def __init__(self, command):
        super(PostgresBackend, self).__init__(command)
        postgres_executable = getattr(settings, 'DBDEV_POSTGRES_EXECUTABLE',
                                      'postgres')
        pg_ctl_executable = getattr(settings,
                                    'DBDEV_POSTGRES_PG_CTL_EXECUTABLE',
                                    'pg_ctl')
        psql_executable = getattr(settings, 'DBDEV_POSTGRES_PSQL_EXECUTABLE',
                                  'psql')
        createdb_executable = getattr(settings,
                                      'DBDEV_POSTGRES_CREATEDB_EXECUTABLE',
                                      'createdb')
        createdb_locale = getattr(settings, 'DBDEV_POSTGRES_CREATEDB_LOCALE',
                                  'en_US.UTF-8')
        pg_dump_executable = getattr(settings,
                                     'DBDEV_POSTGRES_PG_DUMP_EXECUTABLE',
                                     'pg_dump')
        pg_restore_executable = getattr(
            settings, 'DBDEV_POSTGRES_PG_RESTORE_EXECUTABLE', 'pg_restore')
        self.serverlogfile = os.path.join(self.datadir, 'serverlog.log')
        environ = {
            'PGPORT': str(self.dbsettings['PORT'])
        }
        common_command_kwargs = dict(
            _out=self.sh_stdout_handler,
            _err=self.sh_stderr_handler,
            _env=environ,
            _out_bufsize=1)
        self.postgres = Command(postgres_executable).bake(
            p=self.dbsettings['PORT'], **common_command_kwargs)
        self.pg_ctl = Command(pg_ctl_executable).bake(
            '-w', l=self._server_logfile, D=self.datadir,
            **common_command_kwargs)
        self.psql = Command(psql_executable).bake(
            p=self.dbsettings['PORT'], **common_command_kwargs)
        self.createdb = Command(createdb_executable).bake(
            '-e', encoding='utf-8', template='template0',
            locale=createdb_locale, p=self.dbsettings['PORT'],
            **common_command_kwargs)
        self.pg_dump = Command(pg_dump_executable).bake(
            p=self.dbsettings['PORT'], dbname=self.dbsettings['NAME'],
            no_privileges=True, **common_command_kwargs)
        self.pg_restore = Command(pg_restore_executable).bake(
            p=self.dbsettings['PORT'], dbname=self.dbsettings['NAME'],
            no_privileges=True, no_acl=True, no_owner=True,
            **common_command_kwargs)

    def _create_user(self):
        self.psql('postgres', '-e',
                  c="CREATE ROLE {USER} WITH PASSWORD '{PASSWORD}' "
                    "SUPERUSER LOGIN;".format(**DBSETTINGS))

    def _create_database(self):
        self.createdb(self.dbsettings['NAME'], owner=self.dbsettings['USER'])

    def init(self):
        if os.path.exists(self.datadir):
            self.stderr.write('The data directory ({}) already exists.'.format(
                self.datadir))
            raise SystemExit()
        else:
            self.create_datadir_if_not_exists()
            self.pg_ctl('init', '-D', self.datadir)
            self.start_database_server()
            self._create_user()
            self._create_database()
            self.stdout.write('')
            self.stdout.write('=' * 70)
            self.stdout.write('')
            self.stdout.write('Successfully:')
            self.stdout.write('- Initialized postgres in "{}".'.format(
                self.datadir))
            self.stdout.write(
                '- Created the "{USER}"-role with password'.format(**DBSETTINGS))
            self.stdout.write(
                '  "{PASSWORD}" and superuser privileges'.format(**DBSETTINGS))
            self.stdout.write(
                '- Created an empty database named "{NAME}".'.format(**DBSETTINGS))
            self.stdout.write('')
            self.stdout.write(
                'The postgres server is running on port {PORT}.'.format(**DBSETTINGS))
            self.stdout.write('You can stop it with:')
            self.stdout.write('')
            self.stdout.write('  $ python manage.py dbdev_stopserver')
            self.stdout.write('')
            self.stdout.write('And you can shutdown and destroy the entire setup using:')
            self.stdout.write('')
            self.stdout.write('  $ python manage.py dbdev_destroy')
            self.stdout.write('')
            self.stdout.write('=' * 70)

    def destroy(self):
        self.stop_database_server()
        if os.path.exists(self.datadir):
            self.remove_datadir()
            self.stdout.write(
                'Successfully stopped the Postgres server and removed "{}".'.format(
                    self.datadir))

    # def _server_is_running(self):
    #     return os.path.exists(os.path.join(self.datadir, 'postmaster.pid'))

    @property
    def _server_logfile(self):
        return os.path.join(self.datadir, 'serverlog.log')

    def run_database_server_in_foreground(self):
        p = self.postgres('-D', self.datadir, _bg=True)
        try:
            p.wait()
        except KeyboardInterrupt:
            try:
                self._stop_database_server()
            except ErrorReturnCode:
                p.kill()

    def start_database_server(self):
        self.pg_ctl('start')

    def _stop_database_server(self):
        return self.pg_ctl('stop')

    def stop_database_server(self):
        try:
            self._stop_database_server()
        except ErrorReturnCode:
            # The error message from postgres is shown to the user,
            # so no more is needed from us
            pass

    def load_dbdump(self, dumpfile):
        if dumpfile.endswith('.dump'):
            print(self.pg_restore(dumpfile))
        else:
            self.psql(self.dbsettings['NAME'], f=dumpfile)

    def create_dbdump(self, dumpfile):
        #args = ['-f', dumpfile, '-a', '--column-inserts']
        #for tablename in exclude:
            #args.append('--exclude-table-data={}'.format(tablename))
            #args.append('--exclude-table={}'.format(tablename))
            #args.append('--exclude-schema={}'.format(tablename))
        self.pg_dump(f=dumpfile)

    def backup(self, directory):
        backupfile = os.path.join(directory, 'backup.sql')
        self.pg_dump(f=backupfile)

    def restore(self, directory):
        backupfile = os.path.join(directory, 'backup.sql')
        self.load_dbdump(backupfile)

    def serverinfo(self):
        try:
            self.pg_ctl.status()
        except ErrorReturnCode:
            pass