def profile(self, repo):
        # For gprof we can run each test in the application;
        # however, we must move and/or rename gmon.out each time
        # and then read the result into a Hatchet profile.
        profiles = {}
        for test in repo.itertests():
            vprint(self.verbose,
                   'Profiling test \'{}\'...'.format(test['name']))
            exec_path = os.path.join(test['prefix'], test['executable'])
            cmd = Command(exec_path)

            try:
                cmd(test['args'])
            except Exception:
                vprint(self.verbose,
                       'Running test \'{}\' failed...'.format(test['name']))
                continue

            if not os.path.isfile('gmon.out'):
                vprint(self.verbose, 'Unable to read profile...')
                continue

            # read in profile with gprof and gprof2dot
            dotfile_name = 'profile-dot-graph.dot'
            gprof = Command('gprof')
            gprof2dot = Command('gprof2dot')
            with open(dotfile_name, 'w+') as outFile:
                gprof2dot(gprof(exec_path), '-n0', '-e0', _out=outFile)

            # finally read this into hatchet
            gf = ht.GraphFrame.from_gprof_dot(dotfile_name)

            profiles[test['name']] = gf

        return profiles
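The comment at the top of profile() mentions moving or renaming gmon.out between runs, but the loop never does so; if the raw gprof output should be kept per test, a step like the one below could be added inside the loop after the profile is read. The helper name and naming scheme are assumptions, not part of the source:

import shutil

def stash_gmon(test_name, src='gmon.out'):
    # Hypothetical helper: rename the raw gprof output so the next test's
    # run does not overwrite it.
    shutil.move(src, 'gmon-{}.out'.format(test_name))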
Example #2
def generate_pkgspec(pkgoutdir, spectemplate, pkgname):

    obsservicedir = '/usr/lib/obs/service/'
    outdir = ('--outdir', pkgoutdir)

    olddir = os.getcwd()
    try:
        os.chdir(pkgoutdir)
        renderspec = Command(os.path.join(obsservicedir, 'renderspec'))

        renderspec('--input-template', os.path.join(olddir, spectemplate),
                   '--output-name', pkgname + '.spec', *outdir)

        format_spec_file = Command(
            os.path.join(obsservicedir, 'format_spec_file'))
        format_spec_file(*outdir)

        # configure a download cache to avoid downloading the same files
        download_env = os.environ.copy()
        download_env["CACHEDIRECTORY"] = os.path.join(os.path.expanduser("~"),
                                                      ".cache",
                                                      "download_files")

        download_files = Command(os.path.join(obsservicedir, 'download_files'))
        download_files(_env=download_env, *outdir)
    finally:
        os.chdir(olddir)
Example #3
def task(dataset, _job):
    startTime = time.time()
    vtp.setJob(_job)

    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeVT()

    vtp.makeVT('basicDetector')
    basicDetector = Command("util/basicDetector")
    vtp.makeVT('detectionAccuracy')
    detectionAccuracy = Command("util/detectionAccuracy")

    cp('-r', vtp.srcDir + '/data/labels/skycomp1', '.')

    results = dict()

    for threshold in np.arange(5, 251, 5):
        basicDetector('-r', x, y, w, h, '-s', sz, '-n', n, '-g', vtp.gpuDev,
                      '-t', threshold, "-d", "diff", dataset)

        out = vtp.detectionAccuracy(l=dataset,
                                    d='detections.pb',
                                    t=' '.join(str(t) for t in trainFrames),
                                    T=' '.join(str(t) for t in testFrames))
        results[threshold] = out

    return results
Example #4
    def run(cls, root_path, test, base_url):
        logging.debug("run test")
        module, class_name, test_name = (test.module, test.class_name,
                                         test.test_name)
        temp_path = mkdtemp()

        try:
            logging.debug("creating venv")
            venv_path = "%s/venv" % temp_path.rstrip('/')
            virtualenv.create_environment(venv_path,
                                          site_packages=False,
                                          unzip_setuptools=True,
                                          use_distribute=True)

            logging.debug("installing funkload")
            pip = Command("%s/bin/pip" % venv_path)
            pip.install(FUNKLOAD_GIT)

            for dep in test.deps:
                logging.debug("install deps")
                pip.install(dep)

            logging.debug("creating command")
            fl_run_test = Command("%s/bin/fl-run-test" % venv_path)
            logging.debug("command created")

            logging.debug("run command: %s" %
                          ("%s/bin/fl-run-test %s %s -u %s" %
                           (venv_path, module,
                            ("%s.%s" % (class_name, test_name)), base_url)))
            result = fl_run_test(module,
                                 "%s.%s" % (class_name, test_name),
                                 u=base_url,
                                 _env={
                                     "PYTHONPATH":
                                     '$PYTHONPATH:%s' %
                                     join(root_path.rstrip('/'), "bench")
                                 },
                                 simple_fetch=True,
                                 _cwd=temp_path)
            logging.debug("command run")

            logging.debug("get result")
            exit_code = result.exit_code
            text = result.stdout + result.stderr

            with open(join(temp_path, 'funkload.log')) as fl_log:
                logging.debug("write log")
                log = fl_log.read()

        except ErrorReturnCode as err:
            text = err.stderr
            exit_code = 1
            log = err.stderr + err.stdout
            logging.error(log)

        logging.debug("test run")
        return FunkLoadTestRunResult(exit_code, text, log)
    def __init__(self, profiler_settings, verbose=False):
        super().__init__(profiler_settings, verbose)

        self.hpcrun_cmd = Command('hpcrun')
        self.hpcrun_cmd = self.hpcrun_cmd.bake('-e', 'WALLCLOCK@5000')
        self.hpcstruct_cmd = Command('hpcstruct')
        self.hpcprof_cmd = Command('mpirun')
        self.hpcprof_cmd = self.hpcprof_cmd.bake('-np', '1', 'hpcprof-mpi',
                                                 '--metric-db', 'yes')
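For context, the three baked commands above follow the usual HPCToolkit sequence: measure with hpcrun, recover program structure with hpcstruct, then build a metric database with hpcprof-mpi. A minimal standalone sketch of that flow with the same flags; the target binary and output names are illustrative assumptions, not from the source:

from sh import Command

hpcrun = Command('hpcrun').bake('-e', 'WALLCLOCK@5000')
hpcstruct = Command('hpcstruct')
hpcprof = Command('mpirun').bake('-np', '1', 'hpcprof-mpi', '--metric-db', 'yes')

# Hypothetical target binary; hpcrun writes a hpctoolkit-<app>-measurements
# directory and hpcstruct writes <app>.hpcstruct next to it.
hpcrun('./my_app')
hpcstruct('./my_app')
hpcprof('-S', 'my_app.hpcstruct', 'hpctoolkit-my_app-measurements')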
Example #6
    def test_command_wrapper(self):
        from sh import Command, which

        ls = Command(which("ls"))
        wc = Command(which("wc"))

        c1 = int(wc(ls("-A1"), l=True))
        c2 = len(os.listdir("."))

        self.assertEqual(c1, c2)
def task(dataset, modelLocation, frameDiff, detectorSize, netParams, _job):
    for k in netParams.keys():
        netParams[k] = int(netParams[k])
    startTime = time.time()
    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeParams.append('-DTRAIN_ITERATIONS=' + str(caffeIterations))
    vtp.cmakeParams.append('-DDETECTOR_WIDTH=' + str(detectorSize))
    vtp.cmakeParams.append('-DDETECTOR_HEIGHT=' + str(detectorSize))
    vtp.changeNetParams(**netParams)
    vtp.cmakeVT()

    vtp.makeVT('basicDetector')
    basicDetector = Command("util/basicDetector")
    vtp.makeVT('detectionAccuracy')
    detectionAccuracy = Command("util/detectionAccuracy")

    cp('-r', vtp.srcDir + '/data/labels/skycomp1', '.')
    cp(vtp.srcDir + '/../negatives.yml', '.')

    mkdir('-p', 'src/caffe')
    cp(
        '{0}/vehicle_detector_train_iter_{1}.caffemodel'.format(
            modelLocation, caffeIterations), 'src/caffe/')
    cp('{0}/mean.cvs'.format(modelLocation), 'src/caffe/')

    vtp.makeVT('buildNet')

    results = dict()
    for threshold in np.arange(-2.0, 4.0, 0.2):
        print "running with threshold = {0}".format(threshold)
        bdArgs = [
            '-r', x, y, w, h, '-s', sz, '-n', n, '-g', vtp.gpuDev, '-t',
            threshold, dataset
        ]
        if frameDiff != 0:
            bdArgs.append('-f')
            bdArgs.append(frameDiff)
        vtp.basicDetector(*bdArgs)

        out = vtp.detectionAccuracy(l=dataset,
                                    d='detections.pb',
                                    t=' '.join(str(t) for t in trainFrames),
                                    T=' '.join(str(t) for t in testFrames))
        results[threshold] = out

    return results
Example #8
def task(index):
    hmw = Command('hyperopt-mongo-worker')
    hmw('--mongo={0}:{1}/{2}'.format(hostname, 1234, dbname),
        _out=_print_line,
        _err=_print_line)

    return 'done'
Example #9
def exe(ctx, context, target_url, pending, success, failure, error, command):
    def _status_set(state, description):
        log.info('%s -> %s', context, state)
        ctx.invoke(status_set,
                   context=context,
                   target_url=target_url,
                   state=state,
                   description=description)

    try:
        _status_set('pending', pending)
        cmd = Command(command[0])
        cmd = cmd.bake(command[1:]) if len(command) > 1 else cmd
        cmd(_fg=True)
        _status_set('success', success)
        exit(0)
    except ErrorReturnCode as e:
        log.error('Command failed with exit code: %d', e.exit_code)
        _status_set('failure', failure)
    except CommandNotFound as e:
        log.error('Command not found: %s', e)
        _status_set('error', error)
    except Exception as e:
        log.error(e, exc_info=True)
        _status_set('error', error)

    exit(1)
Example #10
def jenkins_job_trigger(repo, github_opts, cloudsource, ptfdir):
    print("triggering jenkins job with " + htdocs_url + ptfdir)

    jenkins = Command(
        os.path.abspath(
            os.path.join(os.path.dirname(sys.argv[0]),
                         'jenkins/jenkins-job-trigger')))

    job_parameters = (
        'nodenumber=2', 'networkingplugin=openvswitch')

    if repo in JOB_PARAMETERS:
        job_parameters = JOB_PARAMETERS[repo]

    job_parameters += ('all_noreboot',)

    print(jenkins(
        'openstack-mkcloud',
        '-p',
        "github_pr=crowbar/%s:%s" % (repo, github_opts),
        "cloudsource=" + cloudsource,
        # trailing slash required
        # to prevent wget from traversing all test-updates
        'UPDATEREPOS=' + htdocs_url + ptfdir + "/",
        'mkcloudtarget=all_noreboot',
        *job_parameters))
Example #11
def mount_nas():
    try:
        mountnas = Command("dims.nas.mount")
        mountnas()
    except Exception as exception_:
        print u"[!] Exception: {0}".format(exception_)
        cleanexit(1)
Example #12
 def detectionAccuracy(self, **kwargs):
     da = Command("util/detectionAccuracy")
     out = da(**kwargs)
     print "Got stdout from detectionAccuracy:"
     print out.stdout
     mode = ['TRAIN', 'TEST']
     value = ['TP', 'FP', 'DP', 'FN']
     pattern = ''.join(r'{0} {1}:\s+(?P<{0}_{1}>\d+).*'.format(m, v)
                       for m in mode for v in value)
     match = re.search(pattern, out.stdout, re.DOTALL)
     if not match:
         return None
     d = match.groupdict()
     d = dict([(k, int(v)) for k,v in d.iteritems() ])
     tp = float(d['TEST_TP'])
     fn = float(d['TEST_FN'])
     dp = float(d['TEST_DP'])
     fp = dp + float(d['TEST_FP'])
     if tp > 0:
         p = tp/(tp+fp)
         r = tp/(tp+fn)
         d['TEST_P'] = p
         d['TEST_R'] = r
         d['TEST_F2'] = 5.0*p*r/(4*p+r)
         d['TEST_MR'] = fn/(fn+tp)
     if 'T' in kwargs:
         n = len(kwargs['T'])
         d['TEST_FPPI'] = fp/float(n)
     return d
Example #13
def doSim(err, fileName, weights, _job):

    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeVT()
    vtp.makeVT('tracker')
    tracker = Command("javaTracker/runTracker.sh")
    ret = {}
    for w in weights:
        rm('-rf', 'output.csv')
        for line in tracker(f=fileName,
                            p=err,
                            n=err,
                            g=False,
                            a=5,
                            w=w,
                            _iter=True):
            print(line)

        with open('output.csv', 'r') as f:
            out = f.read().split(',')
            out = [o.strip() for o in out]
            print(out)
            ret[w] = out
    vtp.export('frame_output.csv')
    return ret
Example #14
def get_hosts(inventory_path, group_name):
    ansible_inventory = Command('ansible-inventory')
    json_inventory = json.loads(
        ansible_inventory('-i', inventory_path, '--list').stdout)

    if group_name not in json_inventory:
        raise AssertionError('Group %r not found.' % group_name)

    hosts = []
    if 'hosts' in json_inventory[group_name]:
        return json_inventory[group_name]['hosts']
    else:
        children = json_inventory[group_name]['children']
        for child in children:
            if 'hosts' in json_inventory[child]:
                for host in json_inventory[child]['hosts']:
                    if host not in hosts:
                        hosts.append(host)
            else:
                grandchildren = json_inventory[child]['children']
                for grandchild in grandchildren:
                    if 'hosts' not in json_inventory[grandchild]:
                        raise AssertionError('Group nesting cap exceeded.')
                    for host in json_inventory[grandchild]['hosts']:
                        if host not in hosts:
                            hosts.append(host)
        return hosts
Example #15
def call(name, *args, _show=True, _capture=False, _ignore=False):
    """Call a shell program with arguments."""
    msg = CMD_PREFIX + ' '.join([name] + list(args))
    if _show:
        common.show(msg)
    else:
        log.debug(msg)

    if name == 'cd' and len(args) == 1:
        return os.chdir(args[0])

    try:
        program = Command(name)
        if _capture:
            line = program(*args).strip()
            log.debug(OUT_PREFIX + line)
            return line
        else:
            for line in program(*args, _iter='err'):
                log.debug(OUT_PREFIX + line.strip())
    except ErrorReturnCode as exc:
        msg = "\n  IN: '{}'{}".format(os.getcwd(), exc)
        if _ignore:
            log.debug("Ignored error from call to '%s'", name)
        else:
            raise ShellError(msg)
Example #16
def _run_alembic_upgrade():
    import sh
    from sh import Command
    try:
        flask_cmd = Command('flask')
        flask_cmd('db', 'upgrade')
    except sh.ErrorReturnCode as e:
        print(e.stderr)
Example #17
def ghs_set_status(repo, head_sha1, status):
    ghs = Command(
        os.path.abspath(
            os.path.join(os.path.dirname(sys.argv[0]),
                         'github-status/github-status.rb')))

    ghs('-r', 'crowbar/' + repo,
        '-c', head_sha1, '-a', 'set-status', '-s', status)
Example #18
def ghs_set_status(org, repo, head_sha1, status):
    ghs = Command(
        os.path.abspath(
            os.path.join(os.path.dirname(sys.argv[0]),
                         'github-status/github-status.rb')))

    ghs('-r', org + '/' + repo,
        '--context', 'suse/mkcloud/testbuild',
        '-c', head_sha1, '-a', 'set-status', '-s', status)
Example #19
 def run(self, cache, args=[]):
     if self.url.fragment is None:
         raise Invalid('Arx can not execute tarball URLs that have no '
                       'fragment.')
     program = cache.join('program', self.url.fragment.split('/')[-1])
     self.place(cache, program)
     chmod('a+rx', str(program))
     cmd = Command(str(program))
     cmd(*args)
Example #20
 def runFile(self, blendFile):
     imageDir = self.imageDirectory + blendFile.split("/")[-1].replace(
         ".blend", "") + "/"
     try:
         cmd = Command(self.blenderplayer)
         upbgeargs = ("-p", self.pythonMainScript, blendFile, "-", imageDir)
         cmd.run(upbgeargs, _out=debugSh)
     except ErrorReturnCode:
         return False
     return True
Example #21
def add_pr_to_checkout(repo, pr_id, head_sha1, pr_branch, spec):
    sh.curl(
        '-s', '-k', '-L',
        "https://github.com/crowbar/%s/compare/%s...%s.patch" %
        (repo, pr_branch, head_sha1), '-o', 'prtest.patch')
    sh.sed('-i', '-e', 's,Url:.*,%define _default_patch_fuzz 2,', '-e',
           's,%patch[0-36-9].*,,', spec)
    Command('/usr/lib/build/spec_add_patch')(spec, 'prtest.patch')
    iosc('vc', '-m',
         "added PR test patch from %s#%s (%s)" % (repo, pr_id, head_sha1))
Example #22
 def runFile(self, blendFile):
     try:
         cmd = Command(self.blenderplayer)
         upbgeargs = ("-p", self.pythonMainScript, blendFile)
         if len(self.args) > 0:
             upbgeargs += ("-", self.args)
         cmd.run(upbgeargs, _out=debugSh)
     except ErrorReturnCode:
         return False
     return True
Example #23
def check_llvm():
    try:
        llvm_path = './llvm/Release+Asserts'
        assert isfile(llvm_path+'/lib/ocaml/llvm.cma') # is llvm.cma there?
        llvm_config = Command(llvm_path+'/bin/llvm-config')
        ver = llvm_config('--version') # try calling llvm-config
        print ver
        assert '3.2' in ver 
        return True
    except:
        return False
def task(dataset, threshold, frameDiff, iteration, _job):
    startTime = time.time()
    vtp.setJob(_job)
    vtp.gotoTmp()
    rm('-rf', 'vehicleTracking')
    vtp.getVT()
    vtp.cmakeParams.append('-DTRAIN_ITERATIONS='+str(caffeIterations))
    vtp.cmakeVT()

    vtp.makeVT('labeledDataToDB')
    labeledDataToDB = Command("util/labeledDataToDB")
    vtp.makeVT('basicDetector')
    #basicDetector = Command("util/basicDetector")
    vtp.makeVT('detectionAccuracy')
    #detectionAccuracy = Command("util/detectionAccuracy")

    cp('-r', vtp.srcDir+'/data/labels/skycomp1', '.')
    cp(vtp.srcDir+'/../negatives.yml', '.')

    results = []
    i = -1
    #for i in range(trainIterations):
    bestF2 = 0.0
    while time.time() < (startTime + 60*60*runHours):
        i += 1
        rm('-rf', 'src/caffe/train.leveldb', 'src/caffe/test.leveldb')
        labeledDataParams = dict(l=dataset, n='negatives.yml',
                                 t=' '.join(str(t) for t in trainFrames),
                                 T=' '.join(str(t) for t in testFrames))
        if i != 0:
            labeledDataParams['d'] = 'detections.pb'
        if frameDiff:
            labeledDataParams['f'] = frameDiff

        labeledDataToDB(**labeledDataParams)
        vtp.makeVT('trainNet')
        vtp.makeVT('buildNet')
        bdArgs = ['-r', x, y, w, h, '-s', sz, '-n', n, '-g', vtp.gpuDev,
                  '-t', threshold, dataset]
        if frameDiff != 0:
            bdArgs.append('-f')
            bdArgs.append(frameDiff)
        vtp.basicDetector(*bdArgs)

        out = vtp.detectionAccuracy(l=dataset, d='detections.pb',
                                    t=' '.join(str(t) for t in trainFrames),
                                    T=' '.join(str(t) for t in testFrames))
        results.append(out)
        if out['TEST_F2'] > bestF2:
            bestF2 = out['TEST_F2']
            vtp.export('src/caffe/mean.cvs')
            vtp.export('src/caffe/vehicle_detector_train_iter_' +
                       str(caffeIterations) + '.caffemodel')
            vtp.export('negatives.yml')

    return results
Example #25
 def run(self, cache, args=[]):
     # The original guard here was a placeholder (`if not True:`); checking
     # for a directory is an assumption based on the error message.
     if os.path.isdir(str(self.resolved)):
         raise Invalid('Directories can not be run as commands.')
     if not os.access(str(self.resolved), os.X_OK):
         if self.dot:
             chmod('a+rx', str(self.resolved))
         else:
             log.error('Not able to mark `%s` as executable :/' %
                       self.resolved)
     cmd = Command(str(self.resolved))
     cmd(*args)
Example #26
    def _run_query(self, input_string):
        # Try to make the buffering process work.
        try:
            self.output = StringIO()

            def grab_output(line):
                self.output.write(line)

            self.gnash = Command("./Gnash.exe")
            self.gnash(_in=input_string, _out=grab_output).wait()
        except Exception:
            print("Error, cannot run the query on Gnash")
Example #27
 def system(self, progname, *args):
     dprint("EXEC", progname, *["'{}'".format(arg) for arg in args])
     env = {k: v for k, v in self.variables.items() if k in self.exports}
     command = Command(progname)
     try:
         return command(*args,
                        _in=self.stdin,
                        _out=self.stdout,
                        _err=self.stderr,
                        _cwd=self.cwd,
                        _env=env).exit_code
     except ErrorReturnCode as e:
         return e.exit_code
Example #28
    def get_command(self):
        """ Returns a reusable sh.Command object that can execute multiple different SFTP commands.
        """
        # A list of arguments that will be added to the base command
        args = []

        # Buffer size is always available
        args.append('-B')
        args.append(self.buffer_size)

        # Bandwidth limit is always available
        args.append('-l')
        args.append(self.bandwidth_limit)

        # Preserving file and directory metadata is optional
        if self.should_preserve_meta:
            args.append('-p')

        # Immediate flushing is optional
        if self.should_flush:
            args.append('-f')

        # Compression is optional
        if self.is_compression_enabled:
            args.append('-C')

        # Forcing a particular IP version is optional
        if self.force_ip_type:
            args.append(ip_type_map[self.force_ip_type])

        # Port is optional
        if self.port:
            args.append('-P')
            args.append(self.port)

        # Identity file is optional
        if self.identity_file:
            args.append('-i')
            args.append(self.identity_file)

        # SSH config file is optional
        if self.ssh_config_file:
            args.append('-F')
            args.append(self.ssh_config_file)

        # Base command to build additional arguments into
        command = Command(self.sftp_command)
        command = command.bake(*args)

        return command
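Because get_command() only bakes the always-present options, the returned object can be reused for many transfers, each call supplying just the per-transfer arguments. A standalone sketch of the same bake-and-reuse pattern; the flag values, host, and batch commands are illustrative assumptions, not from the source:

from sh import Command

# Bake the common options once, then reuse the command.
sftp = Command('sftp').bake('-B', '32768', '-l', '81920')

# Hypothetical host and stdin batch commands for a single transfer.
sftp('user@example.com', _in='put local.txt /remote/dir\nbye\n')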
Example #29
 def __init__(self,
              scylla_data_dir,
              db_path,
              storage_obj,
              nodetool_path='/usr/bin/nodetool',
              cqlsh_path='/usr/bin/cqlsh',
              cqlsh_host='127.0.0.1',
              cqlsh_port='9042',
              prefix='scyllabackup',
              max_workers=4):
     self.scylla_data_dir = scylla_data_dir
     self.db = DB(db_path)
     self.db_path = db_path
     self.nodetool = Command(nodetool_path)
     self.cqlsh = Command(cqlsh_path).bake(cqlsh_host, cqlsh_port)
     self._upload_queue = gevent.queue.JoinableQueue()
     self._download_queue = gevent.queue.JoinableQueue()
     self._delete_queue = gevent.queue.JoinableQueue()
     self._verify_queue = gevent.queue.JoinableQueue()
     self._storage = storage_obj
     self._prefix = prefix
     self.db_key = self._prefix + '/' + os.path.basename(self.db_path)
     self.max_workers = max_workers
Example #30
def fits2png(fits_dir, png_dir):
    """
    Convert fits to png files based on the D1 method
    """
    cmd_tpl = '%s -cmap Cool'\
        ' -zoom to fit -scale log -scale mode minmax -export %s -exit'
    from sh import Command
    ds9 = Command(ds9_path)
    #fits = '/Users/chen/gitrepos/ml/rgz_rcnn/data/EMU_GAMA23/split_fits/30arcmin/gama_linmos_corrected_clipped0-0.fits'
    #png = '/Users/chen/gitrepos/ml/rgz_rcnn/data/EMU_GAMA23/split_png/30arcmin/gama_linmos_corrected_clipped0-0.png'
    for fits in os.listdir(fits_dir):
        if (fits.endswith('.fits')):
            png = fits.replace('.fits', '.png')
            cmd = cmd_tpl % (osp.join(fits_dir, fits), osp.join(png_dir, png))
            ds9(*(cmd.split()))