Example #1
def action(file_path):
    if re.match('|'.join(INCLUDE_PATTERNS), file_path, re.M | re.I):
        if not re.match('|'.join(EXCLUDE_PATTERNS),
                        file_path,
                        re.M | re.I):
            print('Formatting file {}'.format(file_path))
            run_cmd(FORMAT_CMD.format(file_path))
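All of the examples on this page call some project-local run_cmd helper, and its signature varies from project to project: some versions return stdout as a string, some return (status, output) or (stdout, stderr, ret) tuples, and some accept a working directory or a log file. As rough orientation only, here is a minimal sketch of such a helper built on subprocess; the behavior shown is an assumption for illustration, not the implementation any of these projects actually uses:

import subprocess

def run_cmd(cmd, cwd=None):
    """Run a command (string or argument list) and return its stdout.

    Hypothetical helper for illustration only; the real run_cmd differs
    from example to example on this page.
    """
    result = subprocess.run(
        cmd,
        cwd=cwd,
        shell=isinstance(cmd, str),  # strings go through the shell, lists do not
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        check=True,  # raise CalledProcessError on non-zero exit
    )
    return result.stdout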
Example #2
def get_adenylation_domains(fasta, known=None, lagging_strand=False):
    adenylation_domains = []

    fasta_seqs = []
    for fs in SeqIO.parse(fasta, 'fasta'):
        revcom = False
        seq = str(fs.seq)
        pepseq, rf = get_pepseq(seq)
        if rf < 0 == lagging_strand:
            revcom = True
            seq = utils.reverse_complement(seq)
        fasta_seqs.append({'id': fs.id, 'seq': seq, 'pepseq': pepseq, 'rf': rf, 'revcom': revcom})
    for fs in fasta_seqs:
        utils.run_cmd([hmmsearch, '--domtblout', 'dump', os.path.abspath('lib/AMP-binding.hmm'), '-'],
                      '>header\n' + fs['pepseq'])
        with open('dump') as f:
            out = f.read()
        res_stream = StringIO(out)
        os.remove('dump')
        results = list(SearchIO.parse(res_stream, 'hmmsearch3-domtab'))

        for result in results:
            for i, hsp in enumerate(result.hsps, 1):
                s = hsp.hit_start
                e = hsp.hit_end

                adenylation_domains.append((AdenylationDomain(fs['seq'][s*3:e*3], known, '{}_{}'.format(fs['id'], i), fs['revcom']), s, e))

    return adenylation_domains
Example #3
def clone_vm(output_path, game_hash, base_vm, name, remote):
    assert re.match(r'[a-zA-Z0-9_-]+\Z', name)
    status(game_hash, "Creating VM: {}".format(name), remote)
    basepath = gamepath(output_path, game_hash)
    run_cmd(['VBoxManage', 'clonevm', base_vm, '--name', name, '--basefolder',
             basepath], "clonevm{}".format(name))
    return os.path.join(basepath, name)
Example #4
def guestmount(what, where):
    # os.tempnam was removed in Python 3; tempfile.mktemp is the closest equivalent
    pidfile = tempfile.mktemp(prefix='guestmountpid')
    run_cmd(['sudo', 'guestmount', '--pid-file', pidfile, '-a', what, '-i',
             where], "guestmount")
    assert os.path.isfile(pidfile)
    logging.info('guestmount pid file %s', pidfile)
    return pidfile
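A typical use of this helper pairs it with the guestunmount wrapper shown in Example #47 further down. A sketch with hypothetical paths (process_files is a placeholder):

# Hypothetical usage: mount a disk image, work inside it, always unmount.
pidfile = guestmount('/images/vm.qcow2', '/mnt/guest')
try:
    process_files('/mnt/guest')  # placeholder for the actual work
finally:
    guestunmount('/mnt/guest', pidfile)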
Example #5
def deploy_app(request):
    app_id = request.GET.get('app_id')
    app = App.objects.get(id=app_id)
    site_name = app.site.name
    wd = app.wd
    
    config = _get_config(wd)
    print(config)

    out = ''

    if config['type'] == 'static':
        # try:
        out += _update('starting in', run_cmd('pwd', wd=wd, echo=True))
        out += _update('update from repo', run_cmd('git pull origin master', wd=wd, echo=True))
        out += _update("post_update hooks", run_cmd('sh post_update.sh', wd=wd, echo=True))
        out += _update('deploy built site', run_cmd('cp -R %s/* %s' % (
            config['build_dir'], config['dest_dir']
        ), wd=wd, echo=True))
        # except Exception, e:
        #             exc_type, exc_value, exc_traceback = sys.exc_info()
        #             formatted_lines = traceback.format_exc().splitlines()
        #                 
        #             return HttpResponseServerError("<pre>%s\nERROR: %s\n\n%s</pre>" % (out, exc_value, formatted_lines))
    
    do = Deploy.objects.create(app=app, deploy_id='placeholder', output=out, complete=True)
    do.save()
    
    return HttpResponse("<pre>%s\nDEPLOYED: <a href='%s'>%s</a></pre>" % (out, config['url'], config['url']))
Example #6
def gen_flash_algo():
    run_cmd([FROMELF, '--bin', ALGO_ELF_PATH, '-o', TMP_DIR_W_TERM])
    try:
        flash_info = FlashInfo(DEV_INFO_PATH)
        ALGO_START = flash_info.get_algo_start()
    except IOError as e:
        print(repr(e), e)
        ALGO_START = 0x20000000
Example #7
def off():
    """
    Turn wifi off.
    """
    with click_spinner.spinner():
        wifi_port = get_wifi_port()
        run_cmd(TURN_WIFI_OFF.format(port=wifi_port))
        typer.echo("Wifi is off.")
Example #8
def run_cmds(cmds, n):
    global COUNTER
    for _ in range(n):
        cmd = cmds.pop()
        os.environ["CUDA_VISIBLE_DEVICES"] = str(COUNTER % NUM_GPUS)
        COUNTER += 1
        run_cmd(cmd)
    return cmds
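run_cmds pops commands off the end of the list and round-robins them across GPUs via the module-level COUNTER and NUM_GPUS globals. A hypothetical driver, assuming those globals and a blocking run_cmd:

NUM_GPUS = 4   # assumed module-level globals expected by run_cmds
COUNTER = 0

pending = ['python train.py --seed {}'.format(i) for i in range(8)]
pending = run_cmds(pending, n=4)  # launches four jobs, returns the four left over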
Example #9
def gen_flash_algo():
    run_cmd([FROMELF, '--bin', ALGO_ELF_PATH, '-o', TMP_DIR_W_TERM])
    try:
        flash_info = FlashInfo(DEV_INFO_PATH)
        ALGO_START = flash_info.get_algo_start()
    except IOError as e:
        print(repr(e), e)
        ALGO_START = 0x20000000
Example #10
def do_bkgonlyfit(configfile,
                  input_file,
                  output_dir,
                  region,
                  lumi,
                  do_validation=False,
                  syst='',
                  use_mc=False,
                  logfile=None,
                  hf_options=None):

    configfile = os.path.abspath(configfile)
    input_file = os.path.abspath(input_file)
    output_dir = os.path.abspath(output_dir)

    if not os.path.exists(output_dir):
        mkdirp(output_dir)

    # move to analysis directory
    old_pwd = os.getenv('PWD')
    susy_dir = os.environ['SUSY_ANALYSIS']
    os.chdir(susy_dir + '/run')

    # Run HistFitter
    opttag = ''
    if use_mc:
        opttag += '_mc'

    if opttag:
        results_dir = 'results/PhotonMetAnalysis_bkgonly%s' % opttag
    else:
        results_dir = 'results/PhotonMetAnalysis_bkgonly'

    options = '-i %s --sr %s --rm --lumi %.2f' % (input_file, region, lumi)
    if do_validation:
        options += ' --val'
    if syst:
        options += syst
    if use_mc:
        options += ' --mc'

    hf_extra_options = ''
    if hf_options is not None:
        hf_extra_options = hf_options

    cmd = 'HistFitter.py -u \'"%s"\' -w -f -V -F bkg %s %s' % (
        options, hf_extra_options, configfile)
    run_cmd(cmd, logfile='hf.log', stdout=True)

    # mv logfile
    if logfile is not None:
        os.system('mv hf.log %s' % os.path.join(old_pwd, logfile))

    # copy from results dir to output dir
    cp_cmd = 'cp %s/* %s/' % (results_dir, output_dir)
    os.system(cp_cmd)

    os.chdir(old_pwd)
Example #11
def convert_file(page_image, page_djvu, bitonal=False, quality=48, force_convert=False):
    """
    Convert an image to DJVU with the given parameters:
    
    page_image      : image filename to convert
    page_djvu       : djvu file of the converted image (target)
    bitonal         : bitonal djvu output?
    quality         : decibel quality 16-50
    force_convert   : always run through imagemagick
    """

    directory = os.path.dirname(page_image)

    # create a temporary PBM/PPM file for intermediate conversions
    if bitonal:
        tempPpm = os.path.join(directory, 'IMG-DJVU-CONVERTER-TEMP-FILE.pbm')
    else:
        tempPpm = os.path.join(directory, 'IMG-DJVU-CONVERTER-TEMP-FILE.ppm')

    root, ext = os.path.splitext( page_image )

    if not bitonal and ext.lower() in ['.jpg', '.jpeg']:

        if force_convert:
            cmd = ['convert', page_image, tempPpm]
            utils.run_cmd(cmd)
            src_file = tempPpm  # renamed from `file`, which shadows a Python 2 builtin
        else:
            src_file = page_image

        # encode the jpeg (or converted ppm) into the djvu file
        cmd = ['c44', '-decibel', str(quality), src_file, page_djvu]

        utils.run_cmd(cmd)

    elif bitonal and ext.lower() in ['.tiff','.tif']:
        # encode the tiff into the bitonal djvu file
        cmd = ['cjb2', page_image, page_djvu]
        
        utils.run_cmd(cmd)

    else: #image needs converting

        cmd = ['convert', page_image, tempPpm]
        #print cmd
        utils.run_cmd(cmd)

        if bitonal:
            cmd = ['cjb2', tempPpm, page_djvu]
        else:
            cmd = ['c44', '-decibel', str(quality), tempPpm, page_djvu]
            
        utils.run_cmd(cmd)


    #Remove any leftover temporary files
    if os.path.exists(tempPpm):
        os.remove(tempPpm)
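A hypothetical call, encoding a scanned JPEG page as a color DJVU at the default quality (bitonal=False selects the c44 path):

convert_file('scans/page_001.jpg', 'scans/page_001.djvu',
             bitonal=False, quality=48)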
Example #12
    def find_best_model(self):
        """
        Compare Bayes factors to find the best fitting model.
        """
        log_file = 'bayes/{}-bayes.log'.format(self.prefix)

        run_cmd(
            ["Rscript", "rscript/bayes_factors.R", self.prefix, str(MCMC_NUM_BURN)],  # argument lists must contain strings
            stdout=open(log_file, 'w'))
Example #13
def run_visualization(config):
    s = 'visualization'
    print_intro(s)

    cmd = 'python visualization_utilities.py '
    cmd += build_cmd(config.items(s), prefix='viz/')
    cmd_dir = os.path.join(ROOTDIR, 'visualization')
    run_cmd(cmd, config.get(s, 'logfile'), cmd_dir=cmd_dir, 
        dry_run=config.dry_run, blocking=False)
Example #14
def run_td_prediction(config):
    s = 'prediction'
    cmd = 'python train.py '
    cmd += build_cmd(config.items(s), prefix='td/')
    cmd_dir = os.path.join(ROOTDIR, 'prediction/td/scripts/training')
    run_cmd(cmd, config.get(s, 'logfile'), cmd_dir=cmd_dir, dry_run=config.dry_run)
    print('Async prediction running in the background...')
    print("Enter 'tmux attach -t a3c' to attach")
    print("Enter 'tmux kill-session -t a3c' to end training manually")
Example #15
    def clone_site(self):
        pwd = os.getcwd()
        os.chdir(self.path_to_store)
        cmd = [
            "wget", "--limit-rate=200k", "--no-clobber", "--convert-links",
            "--random-wait", "-rpEe", "robots=off", "-U mozilla", self.url
        ]
        run_cmd(cmd)
        os.chdir(pwd)
Example #16
def fit_bayes_net(config):
    s = 'generation' 
    cmd = 'julia run_fit_bayes_net.jl '
    cmd += '--input_filepath {} '.format(
        config.get('collection', 'col/output_filepath'))
    cmd += '--output_filepath {} '.format(
        config.get(s, 'base_bn_filepath'))
    cmd_dir = os.path.join(ROOTDIR, 'scene_generation')
    run_cmd(cmd, config.get(s, 'base_bn_logfile'), cmd_dir=cmd_dir, 
            dry_run=config.dry_run)
Example #17
def generate_prediction_data(config):
    s = 'generation'
    cmd = 'julia -p {} run_collect_dataset.jl '.format(config.get(s, 'nprocs'))
    cmd += '--base_bn_filepath {} '.format(
        config.get(s, 'base_bn_filepath'))
    cmd += '--prop_bn_filepath {} '.format(
        config.get(s, 'prop_bn_filepath'))
    cmd += build_cmd(config.items(s), prefix='gen/')
    cmd_dir = os.path.join(ROOTDIR, 'collection')
    run_cmd(cmd, config.get(s, 'generation_logfile'), cmd_dir=cmd_dir, 
        dry_run=config.dry_run)
Example #18
    def setup(self, repo_dir: str, group_id: str, git_url: str):
        project_dir = update_git(root_dir=repo_dir,
                                 group_id=group_id,
                                 git_url=git_url)
        #        try:
        #            copyfile(
        #                ContestantProjectHandler.CONTESTANT_SETTINGS_PATH,
        #                os.path.join(project_dir, ContestantProjectHandler.CONTESTANT_SETTINGS_NAME)
        #            )
        #        except Exception:
        #            logger.log_error("Could not copy django settings for group {}".format(group_id))
        #            raise Exception("Could not copy django settings")
        out, error = run_cmd(cmd="./scripts/remove_extra_files.sh " +
                             project_dir,
                             directory=".")  # TODO handle logs
        if len(error) != 0:
            logger.log_info("error in removing extras: {}".format(str(error)))
        out, error = run_cmd(cmd="./scripts/build_image.sh " + project_dir,
                             directory=".")
        logger.log_log("out: " + str(out) + " err: " + str(error))
        build_msg = out.decode("utf-8")
        logger.log_info(
            "Project for group {} built successfully with message: {}".format(
                group_id, build_msg))
        try:
            image_id = re.search(r"Successfully built ((\w|\d)+)\n",
                                 build_msg).group(1)
        except Exception:
            # re.findall returns a list (never None), so test truthiness instead
            if re.findall(COPY_REQUIREMENTS_REGEX, build_msg) and not re.findall(
                    INSTALL_REQUIREMENTS_REGEX, build_msg):
                logger.log_warn(
                    "Could not find requirements.txt for group {}".format(
                        group_id))
                raise Exception("Could not find requirements.txt file")

#            if re.findall(INSTALL_REQUIREMENTS_REGEX + SUCCESSFUL_STEP_REGEX + ERROR_REGEX, build_msg) is not None:
#                logger.log_warn("Could not install requirements for group {}".format(group_id))
#                raise Exception("Could not install requirements")
            logger.log_warn(
                "Failed to build docker image for group {}.".format(group_id))
            raise Exception(
                "Build error - Build Message Follows\n\n{}".format(build_msg))


#        try:
#            os.remove(os.path.join(project_dir, ContestantProjectHandler.CONTESTANT_SETTINGS_NAME))
#        except Exception:
#            logger.log_error("Could not remove django settings for group {}".format(group_id))
#            raise Exception("Could not remove django settings")

        logger.log_success(
            "Image built for team {} successfully".format(group_id))
        return image_id
Example #19
def run_MLP(nr: NonRedundantization, tt: TestTrain):
    meth = MLMethod.WekaMLP
    print(
        f'Training on {tt.training.name}, testing on {tt.testing.name}, meth={meth.name}, nr={nr.name}')

    utils.run_cmd(['./splitlines_csv2arff_MLP.py', '--train_dir', tt.training.name, '--test_dir', tt.testing.name,
                   '--training_csv', f'{tt.training.name}_{nr.name}_4d.csv',
                   '--testing_csv', f'{tt.testing.name}_{nr.name}_4d.csv', '--input_cols', args.in4d, '--train_set',
                   f'{tt.training.name}_{nr.name}', '--test_set', f'{tt.testing.name}_{nr.name}'], args.dry_run)
    utils.run_cmd(['./extract_data_from_logfiles.py', '--directory', os.path.join(tt.testing.name,
                   f'{tt.testing.name}_{nr.name}_testing_data'), '--output_name',
                   f'{tt.testing.name}/{tt.testing.name}_{nr.name}_{meth.name}'], args.dry_run)
Example #20
def run_job(hadoop_bin, job_conf, date_str):
    """ Run the MapReduce job defined by job_conf for the date date_str """
    output_path = _parse_template(job_conf['mapreduce']['-output'][0], DATE=date_str)
    try:
        logging.info('Deleting directory %s' % output_path)
        cmd = '%s fs -rmr %s' % (hadoop_bin, output_path)
        logging.info('Delete command: %s' % cmd)
        utils.run_cmd(cmd)
    except Exception:
        logging.warning('Deleting directory %s failed' % output_path)
    cmd_str = make_mapreduce_cmd(hadoop_bin, job_conf, DATE=date_str)
    utils.run_cmd(cmd_str)
Example #21
def snapshot_overlay(underlay,
                     overlay,
                     out_file,
                     vmin=.2,
                     vmax=.7,
                     auto_coords=False):
    # first, generate a combined volume (overlay and underlay)
    tmp_vol = tempfile.mktemp(suffix='.nii.gz')
    cmd = 'overlay 0 0 {underlay} -a {overlay} {vmin} {vmax} {output}' \
            .format(underlay=underlay,
                    overlay=overlay,
                    vmin=vmin,
                    vmax=vmax,
                    output=tmp_vol)
    run_cmd(cmd)

    # second, produce snapshot from combined volume
    if auto_coords:
        sx, sy, sz = image_center_of_gravity(overlay)

        # temp image for each direction
        tmp_x, tmp_y, tmp_z = [
            tempfile.mktemp(suffix='.png') for x in range(3)
        ]

        # produce images
        cmd='slicer {input} -x -{x} {tx} -y -{y} {ty} -z -{z} {tz} -t' \
                .format(input = tmp_vol,
                        x     = sx,
                        tx    = tmp_x,
                        y     = sy,
                        ty    = tmp_y,
                        z     = sz,
                        tz    = tmp_z)
        run_cmd(cmd)

        # combine views into single image
        tmp_img = tempfile.mktemp(suffix='.png')
        cmd = 'montage -tile 3x1 -geometry +0+0 {} {} {} {}'.format(
            tmp_x, tmp_y, tmp_z, tmp_img)
        run_cmd(cmd)

    else:
        # take axial, sagittal, coronal views at center slice
        tmp_img = tempfile.mktemp(suffix='.png')
        cmd = 'slicer {input} -a {output} -t'.format(input=tmp_vol,
                                                     output=tmp_img)
        run_cmd(cmd)

    # scale the image (default is too small)
    cmd = 'convert {input} -scale 300% -trim {output}'.format(input=tmp_img,
                                                              output=out_file)
    run_cmd(cmd)
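A hypothetical call, rendering a z-map on a standard template with the default display window and automatically chosen slice coordinates:

snapshot_overlay('MNI152_T1_2mm.nii.gz', 'zmap.nii.gz', 'zmap_snapshot.png',
                 vmin=.2, vmax=.7, auto_coords=True)  # hypothetical file names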
Example #22
def run_fragment_picker(config):
    """
    Run fragment picker protocol. This takes in an amino acid
    sequence and generates two fragment files (3, 9) to be 
    used in the abiinito protocol 
    """
    fragment_flags_file = generate_fragment_flags(config)
    print("Running fragment picker protocol")
    command = "{} @{}".format(config["fragment_picker_script"],
                              fragment_flags_file)
    utils.run_cmd(command, cwd=config["working_dir"], display_stdout=False)
    print("Completed fragment picker protocol")
Example #23
    def step_04_get_gene_reads(self, input_dir):
        """
        Uses FragGeneScan to find reads containing fragments of genes
        :param input_dir: string path to input files
        :return: string path to output directory
        """
        log, output_dir = self.initialize_step()
        start_time = time.time()
        if len(os.listdir(output_dir)) > 0:
            log.warning(
                'output directory "%s" is not empty, this step will be skipped',
                output_dir)
        else:
            #input_fps = glob.glob(f"{input_dir}/*.fastq.gz")
            #TODO CHANGE BACK
            input_fp_list = glob.glob(f"{input_dir}/*.fasta")
            #input_fps = glob.glob(f"{input_dir}/*.fasta")
            if len(input_fp_list) == 0:
                raise PipelineException(
                    f'found no fasta files in directory "{input_dir}"')
            log.info(f"input files = {input_fp_list}")
            #log.info("uncompressing input files")
            #uncompressed_input_fps = ungzip_files(*input_fps, target_dir=input_dir, debug=self.debug)
            #for fp in uncompressed_input_fps:
            for fp in input_fp_list:
                #fasta_fp = re.sub(
                #                    string=fp,
                #                    pattern='\.fastq',
                #                    repl='.fasta')
                #log.info(f"converting fastq {fp} to fasta {fasta_fp}")
                #fastq_to_fasta(fp, fasta_fp)
                #os.remove(fp)
                out_fp = os.path.join(
                    output_dir,
                    re.sub(string=os.path.basename(fp),
                           pattern=r'\.fasta',
                           repl='.frags'))
                log.info(f"writing output of {fp} to {out_fp}")
                run_cmd(
                    [
                        self.frag_executable_fp, f"-genome={fp}",
                        f"-out={out_fp}", "-complete=0",
                        f"-train={self.frag_train_file}",
                        f"-thread={self.threads}"  # original was missing the leading dash
                        # INCLUDE MORE ARGS
                    ],
                    log_file=os.path.join(output_dir, 'log'),
                    debug=self.debug)
        end_time = time.time()
        log.info(f"Time taken for this step: {int((end_time - start_time))}s")
        self.complete_step(log, output_dir)
        return output_dir
Example #24
    def odir_limiter(self, odir, max_odirs=-1):
        '''Function to backup previously run output directory to maintain a
        history of a limited number of output directories. It deletes the output
        directory with the oldest timestamps, if the limit is reached. It returns
        a list of directories that remain after deletion.
        Arguments:
        odir: The output directory to backup
        max_odirs: Maximum output directories to maintain as history.

        Returns:
        dirs: Space-separated list of directories that remain after deletion.
        '''
        try:
            # If output directory exists, back it up.
            if os.path.exists(odir):
                ts = run_cmd("date '+" + self.sim_cfg.ts_format + "' -d \"" +
                             "$(stat -c '%y' " + odir + ")\"")
                os.system('mv ' + odir + " " + odir + "_" + ts)
        except IOError:
            log.error('Failed to back up existing output directory %s', odir)

        dirs = ""
        # Delete older directories.
        try:
            pdir = os.path.realpath(odir + "/..")
            # Fatal out if pdir got set to root.
            if pdir == "/":
                log.fatal(
                    "Something went wrong while processing \"%s\": odir = \"%s\"",
                    self.name, odir)
                sys.exit(1)

            if os.path.exists(pdir):
                find_cmd = "find " + pdir + " -mindepth 1 -maxdepth 1 -type d "
                dirs = run_cmd(find_cmd)
                dirs = dirs.replace('\n', ' ')
                list_dirs = dirs.split()
                num_dirs = len(list_dirs)
                if max_odirs == -1:
                    max_odirs = self.max_odirs
                num_rm_dirs = num_dirs - max_odirs
                if num_rm_dirs > -1:
                    rm_dirs = run_cmd(find_cmd +
                                      "-printf '%T+ %p\n' | sort | head -n " +
                                      str(num_rm_dirs + 1) +
                                      " | awk '{print $2}'")
                    rm_dirs = rm_dirs.replace('\n', ' ')
                    dirs = dirs.replace(rm_dirs, "")
                    os.system("/bin/rm -rf " + rm_dirs)
        except IOError:
            log.error("Failed to delete old run directories!")
        return dirs
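The find/sort/head/awk pipeline above breaks on directory names containing spaces, and the dirs.replace(rm_dirs, "") trick only works because both strings come from the same find invocation. For comparison, a pure-Python sketch of the same "keep only the newest N directories" step; this is an alternative formulation, not the code this project uses:

import os
import shutil

def prune_old_dirs(parent, max_keep):
    """Delete the oldest subdirectories of parent, keeping at most max_keep.

    Illustrative alternative to the shell pipeline above.
    """
    subdirs = sorted((e.path for e in os.scandir(parent) if e.is_dir()),
                     key=os.path.getmtime)      # oldest first
    n_remove = max(0, len(subdirs) - max_keep)
    for path in subdirs[:n_remove]:
        shutil.rmtree(path)
    return subdirs[n_remove:]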
Example #25
def gen_flash_algo(filename_path, dev_info_path, algo_bin_path):

    output_file = os.path.splitext(filename_path)[0]
    output_file += ".txt"

    run_cmd([FROMELF, '--bin', filename_path, '-o', TMP_DIR_W_TERM])
    try:
        flash_info = FlashInfo(dev_info_path)  # use the parameter, not the DEV_INFO_PATH global
        ALGO_START = flash_info.get_algo_start()
        flash_info.printInfo()
    except IOError:
        print("No flash info file found: %s, using default start address." % dev_info_path)
        ALGO_START = 0x20000000
Example #26
    def create(self, x, y, z, radius):
        log.info('Creating spherical seed NAME={} MNI=({},{},{}) RADIUS={}mm'.format(self.name,x,y,z,radius))

        self.file = os.path.join(self.dir, '%s_%dmm.nii.gz' % (self.name, radius,))
        cmd = '3dUndump -prefix {ofile} -xyz -orient LPI -master {std} -srad {rad} <(echo \'{x} {y} {z}\')' \
                .format(ofile=self.file,
                        std=mri_standard,
                        rad=radius,
                        x=x,
                        y=y,
                        z=z)

        run_cmd(cmd) # blocking
Example #27
def test_install_recursive_gobuster():
    recursive_gobuster = Path(tool_paths.get("recursive-gobuster"))

    utils.setup_install_test(recursive_gobuster)

    if recursive_gobuster.parent.exists():
        shutil.rmtree(recursive_gobuster.parent)

    rs = recon_pipeline.ReconShell()

    utils.run_cmd(rs, "install recursive-gobuster")

    assert recursive_gobuster.exists() is True
Example #28
def rules(win: Window):
    wid = win.window.wid
    classes = win.window.get_wm_class()

    if len(classes) == 0:
        proc_name = ''
        proc_id = run_cmd(['xdo', 'pid', str(wid)])
        if proc_id:
            proc_name = run_cmd(['ps', '-p', proc_id, '-o', 'comm=']) or ''
        apply_rules(win, wid, '', proc_name)
    else:
        for c in classes:
            apply_rules(win, wid, c, '')
Example #29
def push_sapmachine_infra(local_repo):
    env = os.environ.copy()
    env['GIT_AUTHOR_NAME'] = 'SapMachine'
    env['GIT_AUTHOR_EMAIL'] = '*****@*****.**'
    env['GIT_COMMITTER_NAME'] = env['GIT_AUTHOR_NAME']
    env['GIT_COMMITTER_EMAIL'] = env['GIT_AUTHOR_EMAIL']
    utils.run_cmd(['git', 'add', jenkins_configuration], cwd=local_repo)
    utils.run_cmd(['git', 'commit', '-m', 'Updated Jenkins configuration.'],
                  cwd=local_repo,
                  env=env)
    utils.run_cmd(['git', 'fetch'], cwd=local_repo, env=env)
    utils.run_cmd(['git', 'rebase'], cwd=local_repo, env=env)
    utils.run_cmd(['git', 'push'], cwd=local_repo, env=env)
Example #30
def gen_flash_algo(filename_path, dev_info_path, algo_bin_path):

    output_file = os.path.splitext(filename_path)[0]
    output_file += ".txt"

    run_cmd([FROMELF, '--bin', filename_path, '-o', TMP_DIR_W_TERM])
    try:
        flash_info = FlashInfo(dev_info_path)  # use the parameter, not the DEV_INFO_PATH global
        ALGO_START = flash_info.get_algo_start()
        flash_info.printInfo()
    except IOError:
        print("No flash info file found: %s, using default start address." % dev_info_path)
        ALGO_START = 0x20000000
Example #31
    def generate_cmake_config(self, extra_configs, for_ctest=False):
        ###############################################################################

        # Ctest only needs config options, and doesn't need the leading 'cmake '
        result = "{}-C {}".format("" if for_ctest else "cmake ",
                                  self.get_machine_file())

        # Netcdf should be available. But if the user is doing a testing session
        # where all netcdf-related code is disabled, he/she should be able to run
        # even if no netcdf is available
        stat, f_path, _ = run_cmd("nf-config --prefix")
        if stat == 0:
            result += " -DNetCDF_Fortran_PATH={}".format(f_path)
        stat, c_path, _ = run_cmd("nc-config --prefix")
        if stat == 0:
            result += " -DNetCDF_C_PATH={}".format(c_path)

        # Test-specific cmake options
        for key, value in extra_configs:
            result += " -D{}={}".format(key, value)

        # The output coming from all tests at the same time will be a mixed-up mess
        # unless we tell test-launcher to buffer all output
        if self._extra_verbose:
            result += " -DEKAT_TEST_LAUNCHER_BUFFER=True "

        # User-requested config options
        custom_opts_keys = []
        for custom_opt in self._custom_cmake_opts:
            expect(
                "=" in custom_opt,
                "Error! Syntax error in custom cmake options. Should be `VAR_NAME=VALUE`."
            )
            if "=" in custom_opt:
                name, value = custom_opt.split("=", 1)
                # Some effort is needed to ensure quotes are preserved
                result += " -D{}='{}'".format(name, value)
                custom_opts_keys.append(name)

        # Common config options (unless already specified by the user)
        if "CMAKE_CXX_COMPILER" not in custom_opts_keys:
            result += " -DCMAKE_CXX_COMPILER={}".format(self._cxx_compiler)
        if "CMAKE_C_COMPILER" not in custom_opts_keys:
            result += " -DCMAKE_C_COMPILER={}".format(self._c_compiler)
        if "CMAKE_Fortran_COMPILER" not in custom_opts_keys:
            result += " -DCMAKE_Fortran_COMPILER={}".format(self._f90_compiler)

        if "SCREAM_DYNAMICS_DYCORE" not in custom_opts_keys:
            result += " -DSCREAM_DYNAMICS_DYCORE=HOMME"

        return result
Example #32
def subselect_data(config, s, dataset_filepath):
    cmd = 'python subselect_dataset.py '
    cmd += '--dataset_filepath {} '.format(dataset_filepath)
    cmd += '--subselect_feature_filepath {} '.format(
        config.get(s, 'subselect_feature_dataset'))
    if config.get(s, 'subselect_proposal') == 'True':
        cmd += '--subselect_proposal '
        cmd += '--subselect_filepath {} '.format(
            config.get(s, 'subselect_dataset'))
        cmd += '--subselect_proposal_filepath {} '.format(
            config.get(s, 'subselect_proposal_dataset'))
    cmd_dir = os.path.join(ROOTDIR, 'collection')
    run_cmd(cmd, config.get(s, 'subselect_logfile'), cmd_dir=cmd_dir, 
        dry_run=config.dry_run)
Example #33
    def test_SUID_sandbox(self):
        log_file = self.new_temp_file('/tmp/chrome.log')
        url = cm.BASE_TEST_URL + "font-face/font-face-names.html"
        unexpected_strs = ('FATAL:browser_main_loop.cc', 'Running without the SUID sandbox')
        expected_strs = ('FPLOG',)

        cmd = 'timeout 10 xvfb-run --auto-servernum %s --disable-setuid-sandbox --enable-logging=stderr --v=1 --vmodule=frame=1 \
            --user-data-dir=/tmp/temp_profile%s --disk-cache-dir=/tmp/tmp_cache%s %s 2>&1 | tee %s' %\
            (cm.CHROME_MOD_BINARY, ut.rand_str(), ut.rand_str(), url, log_file)

        ut.run_cmd(cmd)

        self.assert_all_patterns_not_in_file(log_file, unexpected_strs)
        self.assert_all_patterns_in_file(log_file, expected_strs)
Example #34
def run_graphs(ds: Dataset, name):
    cmd = ['./stats2graph.py',
           '--directory', ds.name, '--csv_input', f'{name}.csv', '--name_normal',
           f'{name}_normal', '--name_outliers', f'{name}_outliers',
           '--name_stats', f'{name}_stats', '--name_graph', name]
    utils.run_cmd(cmd, args.dry_run)
    graphing.sq_error_vs_actual_angle(
        ds.name, f'{name}.csv', f'{name}_sqerror_vs_actual')
    graphing.angle_distribution(
        ds.name, f'{ds.name}_ang.csv', f'{name}_angledistribution')
    graphing.error_distribution(
        ds.name, f'{name}.csv', f'{name}_errordistribution')
Example #35
def to_word(method, in_fname, out_fname):
    if method == 'jieba':
        cmd = 'python -m jieba -d " " {in_file} > {out_file}'
    elif method == 'mecab':
        cmd = 'mecab {in_file} -O wakati > {out_file}'
    elif method == 'kytea':
        cmd = 'kytea -out tok < {in_file} > {out_file}'
    elif method == 'moses':
        cmd = '$HOME/mosesdecoder/scripts/tokenizer/tokenizer.perl < {in_file} > {out_file}'
    else:
        raise ValueError('word tokenizer not supported.')

    print('tokenizing: {} -> {}'.format(in_fname, out_fname), file=sys.stderr)
    run_cmd(cmd.format(in_file=in_fname, out_file=out_fname))
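Every template here uses shell redirection (>, <) or $HOME, so this only works if the project's run_cmd executes the string through a shell; that is an assumption about run_cmd, not something visible in the snippet. A hypothetical call, segmenting Chinese text with jieba:

to_word('jieba', 'corpus/raw_zh.txt', 'corpus/tok_zh.txt')  # hypothetical filenames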
Example #36
def build_asmtools(top_dir, tag=None):
    work_dir = join(top_dir, 'asmtools_work')
    hg_dir = join(work_dir, 'asmtools')
    build_dir = join(hg_dir, 'build')

    mkdir(work_dir)
    chdir(work_dir)

    # clone the asmtools mercurial repository
    hg_clone(asmtools_repo)
    chdir(hg_dir)

    if tag is None:
        # find the latest tag
        tag = get_latest_hg_tag('')

    print(str.format('Using asmtools tag {0}', tag))
    hg_switch_tag(tag)

    chdir(build_dir)

    asmtools_version = tag

    if exists(join(build_dir, 'productinfo.properties')):
        with open('productinfo.properties', 'r') as productinfo_properties:
            lines = productinfo_properties.readlines()
            pattern = re.compile(r'.*PRODUCT_VERSION.*=.*([0-9]+\.[0-9])+.*')
            for line in lines:
                match = pattern.match(line)
                if match is not None:
                    asmtools_version = match.group(1)
    else:
        with open('build.properties', 'r') as build_properties:
            lines = build_properties.readlines()
            pattern = re.compile(
                r'.*BUILD_DIR.*=.*asmtools-([0-9]+\.[0-9])+-build.*')
            for line in lines:
                match = pattern.match(line)
                if match is not None:
                    asmtools_version = match.group(1)

    # run the ant build
    utils.run_cmd(['ant', 'build'])

    # copy the build result
    asmtools_version_string = str.format('asmtools-{0}', asmtools_version)
    copytree(
        join(work_dir, str.format('{0}-build', asmtools_version_string),
             'release'), join(top_dir, 'asmtools-release'))
Example #37
def test_update_recursive_gobuster():
    recursive_gobuster = Path(tool_paths.get("recursive-gobuster"))

    utils.setup_install_test()

    if not recursive_gobuster.parent.exists():
        subprocess.run(
            f"sudo git clone https://github.com/epi052/recursive-gobuster.git {recursive_gobuster.parent}".split()
        )

    rs = recon_pipeline.ReconShell()

    utils.run_cmd(rs, "install recursive-gobuster")

    assert recursive_gobuster.exists() is True
Example #38
    def test_SUID_sandbox(self):
        log_file = self.new_temp_file('/tmp/chrome.log')
        url = cm.BASE_TEST_URL + "font-face/font-face-names.html"
        unexpected_strs = ('FATAL:browser_main_loop.cc',
                           'Running without the SUID sandbox')
        expected_strs = ('FPLOG', )

        cmd = 'timeout 10 xvfb-run --auto-servernum %s --disable-setuid-sandbox --enable-logging=stderr --v=1 --vmodule=frame=1 \
            --user-data-dir=/tmp/temp_profile%s --disk-cache-dir=/tmp/tmp_cache%s %s 2>&1 | tee %s' %\
            (cm.CHROME_MOD_BINARY, ut.rand_str(), ut.rand_str(), url, log_file)

        ut.run_cmd(cmd)

        self.assert_all_patterns_not_in_file(log_file, unexpected_strs)
        self.assert_all_patterns_in_file(log_file, expected_strs)
Example #39
def run_snns(nr: NonRedundantization, meth: MLMethod, tt: TestTrain):
    print(
        f'Training on {tt.training.name}, testing on {tt.testing.name}, meth={meth.name}, nr={nr.name}')

    utils.run_cmd(['./pdb2seq.py', '--directory',
                  tt.training.name], args.dry_run)
    utils.run_cmd(['./pdb2seq.py', '--directory',
                  tt.testing.name], args.dry_run)
    # distinguish between making a new papa and running the old papa
    if meth == MLMethod.OrigPAPA:
        run_papa(nr, meth, tt)
    elif meth == MLMethod.RetrainedPAPA:
        run_newpapa(nr, meth, tt)
    else:
        raise ValueError(f'Handling of meth={meth} not implemented')
Example #40
def _fetch_tasks_envs(app_id: str, task_count: int) -> dict:
    envs = {}
    for task in range(task_count):
        task_name = remove_prefix(app_id, '/').replace('/', '.') + "__node-{}-server__".format(task)
        log.info("Downloading configuration from task: {}".format(task_name))
        _, out, err = run_cmd("{} task exec {} bash -c 'env'".format(DCOS, task_name))
        for env in out.split('\n'):
            key, value = env.split('=', 1)  # env values may themselves contain '='
            if key not in envs:
                envs[key] = [value]
            else:
                envs[key].append(value)
        _, out, err = run_cmd("{} task exec {} bash -c 'cat $MESOS_SANDBOX/new_user_password'".format(DCOS, task_name))
        envs["NEW_USER_PASSWORD"] = out
    return envs
Example #41
    def execute_commands(self, base_infname, base_outfname, n_procs):
        # ----------------------------------------------------------------------------------------
        def get_outfname(iproc):
            return self.subworkdir(iproc, n_procs) + '/' + base_outfname
        # ----------------------------------------------------------------------------------------
        def get_cmd_str(iproc):
            return self.get_vdjalign_cmd_str(self.subworkdir(iproc, n_procs), base_infname, base_outfname, n_procs)

        # start all procs for the first time
        procs, n_tries = [], []
        for iproc in range(n_procs):
            procs.append(utils.run_cmd(get_cmd_str(iproc), self.subworkdir(iproc, n_procs)))
            n_tries.append(1)
            time.sleep(0.1)

        # keep looping over the procs until they're all done
        while procs.count(None) != len(procs):  # we set each proc to None when it finishes
            for iproc in range(n_procs):
                if procs[iproc] is None:  # already finished
                    continue
                if procs[iproc].poll() is not None:  # it's finished
                    utils.finish_process(iproc, procs, n_tries, self.subworkdir(iproc, n_procs), get_outfname(iproc), get_cmd_str(iproc))
            sys.stdout.flush()
            time.sleep(1)

        for iproc in range(n_procs):
            os.remove(self.subworkdir(iproc, n_procs) + '/' + base_infname)

        sys.stdout.flush()
Example #42
def crawl_worker(agent_cfg, url_tuple):
    """Crawl given url. Will work in parallel. Cannot be class method."""
    MAX_SLEEP_BEFORE_JOB = 10 # prevent starting all parallel processes at the same instance
    sleep(random() * MAX_SLEEP_BEFORE_JOB) # sleep for a while
    
    try:
        idx, url = url_tuple
        idx = str(idx)
        
        stdout_log = os.path.join(agent_cfg['job_dir'], fu.get_out_filename_from_url(url, str(idx), '.txt'))

        if url[:5] not in ('data:', 'http:', 'https', 'file:'):
            url = 'http://' + url
        
        proxy_opt = mitm.init_mitmproxy(stdout_log[:-4], agent_cfg['timeout'], agent_cfg['mitm_proxy_logs']) if agent_cfg['use_mitm_proxy'] else ""
        
        if 'chrome_clicker' not in agent_cfg['type']:
            cmd = get_visit_cmd(agent_cfg, proxy_opt, stdout_log, url)
            wl_log.info('>> %s (%s) %s' % (url, idx, cmd))
            status, output = ut.run_cmd(cmd) # Run the command
            if status and status != ERR_CMD_TIMEDOUT:
                wl_log.critical('Error while visiting %s(%s) w/ command: %s: (%s) %s' % (url, idx, cmd, status, output))
            else:
                wl_log.info(' >> ok %s (%s)' % (url, idx))
            
        else:
            cr.crawl_url(agent_cfg['type'], url, proxy_opt)
            
        sleep(2) # this will make sure mitmdump is timed out before we start to process the network dump
        if agent_cfg['post_visit_func']: # this pluggable function will parse the logs and do whatever we want
            agent_cfg['post_visit_func'](stdout_log, crawl_id=agent_cfg['crawl_id'])
            
    except Exception as exc:
        wl_log.critical('Exception in worker function %s %s' % (url_tuple, exc))
Example #43
def append_page(page, main_file):
    """
    Appends a DjVu page to a main file
    
    page        : single page djvu file
    main_file   : file to append to
    """

    if os.path.exists(main_file):
        #Add the djvu file to the collated file
        cmd = ['djvm', '-i', main_file, page]
    else:
        # Create the collated file
        cmd = ['djvm', '-c', main_file, page]

    utils.run_cmd(cmd)
Example #44
    def tar_scm(self, args, should_succeed=True):
        # simulate new temporary outdir for each tar_scm invocation
        mkfreshdir(self.outdir)

        # osc launches source services with cwd as pkg dir
        # (see run_source_services() in osc/core.py)
        print("chdir to pkgdir: %s" % self.pkgdir)
        os.chdir(self.pkgdir)

        cmdargs = args + ['--outdir', self.outdir]
        quotedargs = ["'%s'" % arg for arg in cmdargs]
        cmdstr = 'python2 %s %s 2>&1' % \
                 (self.tar_scm_bin(), " ".join(quotedargs))
        print()
        print(">>>>>>>>>>>")
        print("Running", cmdstr)
        print()
        (stdout, stderr, ret) = run_cmd(cmdstr)
        if stdout:
            print("--v-v-- begin STDOUT from tar_scm --v-v--")
            print(stdout, end='')
            print("--^-^-- end   STDOUT from tar_scm --^-^--")
        if stderr:
            print("\n")
            print("--v-v-- begin STDERR from tar_scm --v-v--")
            print(stderr, end='')
            print("--^-^-- end   STDERR from tar_scm --^-^--")
        succeeded = ret == 0
        self.assertEqual(succeeded, should_succeed,
                         "expected tar_scm to " +
                         ("succeed" if should_succeed else "fail"))
        return (stdout, stderr, ret)
Example #45
    def radio_current_track(self):
        title = ''
        mpc_state = run_cmd('mpc')
        if '[playing]' in mpc_state:
            title = mpc_state.split('\n')[0]
            if '[SomaFM]: ' in title:
                title = title.split('[SomaFM]: ')[1]
        return title
Example #46
    def _run_cmd(self, *cmd):
        cmd = [str(c) for c in cmd]  # map() returns an iterator in Python 3, breaking the slice assignments below
        if self.compose_files is not None:
            for filename in reversed(self.compose_files):
                cmd[1:1] = ["-f", filename]
        if self.project_name is not None:
            cmd[1:1] = ["-p", self.project_name]
        return utils.run_cmd(cmd, workdir=self.dir)
Example #47
def guestunmount(mntdir, guestmount_pidfile):
    run_cmd(['sudo', 'guestunmount', mntdir], "guestunmount")
    logging.info('Waiting for guestmount (pidfile %s) to exit...',
                 guestmount_pidfile)
    sleepcount = 0
    while sleepcount < 100:
        try:
            subprocess.check_output(['pgrep', '-F', guestmount_pidfile],
                                    stderr=subprocess.STDOUT)
        except Exception:
            # If the return code was non-zero it raises a CalledProcessError.
            # The CalledProcessError object will have the return code in the
            # returncode attribute and any output in the output attribute.
            break
        sleepcount += 1
        if sleepcount % 10 == 0:
            logging.info('    still sleeping (count=%d)...', sleepcount)
        time.sleep(1)
Example #48
    def __init__(self, *args, **kwargs):
        self.volume = self.volume_from_mixer()
        self.state_read()

        mpc_state = run_cmd('mpc')
        self.state['radio_on'] = '[playing]' in mpc_state
Example #49
def gen_flash_algo():
    if len(sys.argv) < 2:
        print("usage: >python flash_algo_gen.py <abs_path_w_elf_name>")
        sys.exit()
        
    ALGO_ELF_PATH_NAME = sys.argv[1]
    ALGO_ELF_PATH, ALGO_ELF_NAME = os.path.split(ALGO_ELF_PATH_NAME)
    DEV_INFO_PATH = join(ALGO_ELF_PATH, "DevDscr")
    ALGO_BIN_PATH = join(ALGO_ELF_PATH, "PrgCode")
    # need some work here to name and locate to a collective folder
    ALGO_TXT_PATH = join(ALGO_ELF_PATH, "flash_algo.txt")

    run_cmd([FROMELF, '--bin', ALGO_ELF_PATH_NAME, '-o', ALGO_ELF_PATH])
    try:
        flash_info = FlashInfo(DEV_INFO_PATH)
        ALGO_START = flash_info.get_algo_start()
    except IOError as e:
        print(repr(e), e)
        ALGO_START = 0x20000000
Example #50
def snapshot_overlay(underlay, overlay, out_file, vmin=.2, vmax=.7, auto_coords=False):
    # first, generate a combined volume (overlay and underlay)
    tmp_vol = tempfile.mktemp(suffix='.nii.gz')
    cmd = 'overlay 0 0 {underlay} -a {overlay} {vmin} {vmax} {output}' \
            .format(underlay=underlay,
                    overlay=overlay,
                    vmin=vmin,
                    vmax=vmax,
                    output=tmp_vol)
    run_cmd(cmd)

    # second, produce snapshot from combined volume
    if auto_coords:
        sx,sy,sz = image_center_of_gravity(overlay)

        # temp image for each direction
        tmp_x,tmp_y,tmp_z = [tempfile.mktemp(suffix='.png') for x in range(3)]

        # produce images
        cmd='slicer {input} -x -{x} {tx} -y -{y} {ty} -z -{z} {tz} -t' \
                .format(input = tmp_vol,
                        x     = sx,
                        tx    = tmp_x,
                        y     = sy,
                        ty    = tmp_y,
                        z     = sz,
                        tz    = tmp_z)
        run_cmd(cmd)

        # combine views into single image
        tmp_img = tempfile.mktemp(suffix='.png')
        cmd='montage -tile 3x1 -geometry +0+0 {} {} {} {}'.format(tmp_x,tmp_y,tmp_z, tmp_img)
        run_cmd(cmd)

    else:
        # take axial, sagittal, coronal views at center slice
        tmp_img = tempfile.mktemp(suffix='.png')
        cmd = 'slicer {input} -a {output} -t'.format(input=tmp_vol, output=tmp_img)
        run_cmd(cmd)

    # scale the image (default is too small)
    cmd='convert {input} -scale 300% -trim {output}'.format(input=tmp_img, output=out_file)
    run_cmd(cmd)
Example #51
def gen_flash_algo():
    run_cmd([FROMELF, '--bin', ALGO_ELF_PATH, '-o', TMP_DIR_W_TERM])
    flash_info = FlashInfo(DEV_INFO_PATH)
    ALGO_START = flash_info.get_algo_start()
    print "ALGO_START = 0x%08x\n" % ALGO_START

    #flash_info.printInfo()

    with open(ALGO_BIN_PATH, "rb") as f, open(ALGO_TXT_PATH, mode="w+") as res:
        # Flash Algorithm
        res.write("""
const uint32_t flash_algo_blob[] = {
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,\n
    /*0x020*/ """);

        nb_bytes = ALGO_OFFSET

        bytes_read = f.read(1024)
        while bytes_read:
            bytes_read = unpack(str(len(bytes_read) // 4) + 'I', bytes_read)  # integer division: whole 32-bit words
            for i in range(len(bytes_read)):
                res.write(hex(bytes_read[i]) + ", ")
                nb_bytes += 4
                if (nb_bytes % 0x20) == 0:
                    res.write("\n    /*0x%03X*/ " % nb_bytes)
            bytes_read = f.read(1024)
        
        res.write("\n};\n")
        
        # Address of the functions within the flash algorithm
        stdout, _, _ = run_cmd([FROMELF, '-s', ALGO_ELF_PATH])
        res.write("""
static const TARGET_FLASH flash = {
""")
        for line in stdout.splitlines():
            t = line.strip().split()
            if len(t) != 8: continue
            name, loc = t[1], t[2]
            
            if name in ['Init', 'UnInit', 'EraseChip', 'EraseSector', 'ProgramPage']:
                addr = ALGO_START + ALGO_OFFSET + int(loc, 16)
                res.write("    0x%08X, // %s\n" % (addr,  name))
Example #52
    def fc_voxelwise_groupstats(self, seed, ttest=True):
        zmaps = [s.file_zmap for s in self.seed_stats if s.seed == seed]
        zmaps_str = ' '.join(zmaps)

        # concatenate vols to temp file
        tmp = tempfile.mktemp(suffix='.nii.gz')
        cmd = "fslmerge -t {} {}".format(tmp, zmaps_str)
        run_cmd(cmd)

        # calculate mean z-map
        log.info('creating group mean z-map, roi={}'.format(seed.name))
        outfile = os.path.join(self.dir_grp_vols_mean, '{}_z_mean.nii.gz'.format(seed.name))
        cmd = "fslmaths {} -Tmean {}".format(tmp, outfile)
        run_cmd(cmd)

        ### graphics ###
        log.info('Generating snapshot image for results, roi={}'.format(seed.name))
        snap_img = os.path.join(self.dir_grp_imgs,
                                '{}_pearson_z_snapshot.png'.format(seed.name))
        snapshot_overlay(mri_standard, outfile, snap_img, vmin=.2, vmax=.7)

        # add to report
        self.report_seeds.add_img(seed.name, snap_img,
                                  'Group mean functional connectivity with {} (z(r) > 0.2)'.format(seed.name))

        # run t-test
        if ttest:
            log.info('running group t-test on z-maps, roi={}'.format(seed.name))
            outbase = os.path.join(self.dir_grp_vols_ttest, '{}.nii.gz'.format(seed.name))
            cmd = "3dttest++ -setA {} -prefix {} -mask {}".format(tmp, outbase, mri_brain_mask)
            run_cmd(cmd)

        # remove 4d file
        os.remove(tmp)
Example #53
    def radio_select(self, index):
        if index != self.state['radio_station']:
            self.state['radio_station'] = index
            self.state_save()
            run_cmd('mpc clear')
            run_cmd('mpc load {}'.format(RADIO_STATIONS[self.state['radio_station']][1]))
            if self.state['radio_on']:
                run_cmd('mpc play')
Example #54
    def radio_toggle(self):
        self.state['radio_on'] = not self.state['radio_on']
        self.state_save()
        if self.state['radio_on']:
            run_cmd('mpc stop')
            self.radio_select(self.state['radio_station'])
            run_cmd('mpc play')
        else:
            run_cmd('mpc stop')
Example #55
    def volume_from_mixer(self):
        response = run_cmd('amixer sget \'PCM\'')
        matches = re.findall('[0-9]*%', response)
        if matches:
            percent = int(matches[0].rstrip('%'))
            percent = round((percent/10.0)**2)
            nearest = 0
            distance = 100
            for i in VOLUME_VALUES:
                if abs(percent - i) < distance:
                    nearest = i
                    distance = abs(percent - i)
            print(nearest)
            return nearest
        return 0
Example #56
def add_ocr_text(ocr, djvu_filename, page):
    """
    Adds an OCR text later to the page. Does not bother with position.
    
    ocr             : text layer
    djvu_filename   : the djvu file to add the text to
    page            : the page of the file to add the layer to
    """  

    directory = os.path.dirname(djvu_filename)

    djvu_text = u"(page 0 0 1 1\n"

    ocr_lines = ocr.split('\n')

    for line in ocr_lines:
        #escape \ and " characters
        djvu_text += u'(line 0 0 1 1 "%s")\n' % line.replace('\\', '\\\\').replace('"', '\\"').strip()

    djvu_text += ")\n"

    djvu_text_file = os.path.join(directory, 'DJVU-FORMATTED-OCR-TEXT-TEMP-FILE-%d.txt'%page)
    djvu_text_file_handler = codecs.open(djvu_text_file, 'w', 'utf-8')
    djvu_text_file_handler.write(djvu_text)
    djvu_text_file_handler.close()

    # remove the existing text
    cmd = ['djvused', djvu_filename, '-e', 'select %d; remove-txt' % page, "-s"]
    utils.run_cmd(cmd)

    # set the new text
    cmd = ['djvused', djvu_filename, '-e', 'select %d; set-txt %s'% (page, djvu_text_file), "-s"]
    utils.run_cmd(cmd)

    if os.path.exists(djvu_text_file):
        os.remove(djvu_text_file)
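A hypothetical call, attaching one page of plain OCR output to an existing DJVU file (page numbers are the 1-based positions djvused uses):

with open('ocr/page_003.txt', encoding='utf-8') as f:   # hypothetical inputs
    add_ocr_text(f.read(), 'book.djvu', 3)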
Example #57
    def test_parse(self):
        test_small = os.path.join(TEST_DIR, 'test_small.py')
        out, err, ret = run_cmd('nosetests', test_small)
        results = NoseResults(err)

        self.assertEqual(results.n_tests, 2)
        self.assertEqual(len(results.failed_tests), 2)
        # we know each test takes at least 2 seconds
        self.assertGreaterEqual(results.test_time, 4)

        names = set([t.name for t in results.failed_tests])
        expected_names = set([
            'sample.test_small.SmallTest.test_small1',
            'sample.test_small.SmallTest.test_small2',
        ])
        self.assertEqual(names, expected_names)

        # all tests should have run in one process
        pids = set([t.pid for t in results.failed_tests])
        self.assertEqual(len(pids), 1)
        self.assertGreater(next(iter(pids)), 1023)
Example #58
    def test_nose_output_with_failures(self):
        out, err, ret = run_cmd('nosetests', '-v', TEST_DIR)
        results = Results(err)
        self.assertEqual(results.n_skips, 0)
        self.assertEqual(results.n_failures, 3)
        self.assertEqual(results.n_errors, 0)
        self.assertEqual(results.n_tests, 6)

        short_results_status = [r.status for r in results.shortresults]
        self.assertEqual(short_results_status.count('FAIL'), 3)
        self.assertEqual(short_results_status.count('ok'), 3)

        short_results_names = set([r.name for r in results.shortresults])
        expected_test_list = set([
            "sampletests.v1.test_wumbo.WumboTest.test_set_to_mini",
            "sampletests.v1.test_wumbo.WumboTest.test_set_to_wumbo",
            "sampletests.test_mini.MiniTest.test_failure",
            "sampletests.test_mini.MiniTest.test_set_to_mini",
            "sampletests.test_mini.MiniTest.test_set_to_wumbo",
            "sampletests.test_mini.test_unbound_function",
        ])
        self.assertEqual(short_results_names, expected_test_list)
Example #59
    def tar_scm(self, args, should_succeed=True):
        # simulate new temporary outdir for each tar_scm invocation
        mkfreshdir(self.outdir)
        cmdargs = args + ['--outdir', self.outdir]
        quotedargs = ["'%s'" % arg for arg in cmdargs]
        cmdstr = 'bash %s %s 2>&1' % (self.tar_scm_bin(), " ".join(quotedargs))
        print("\n")
        print("-" * 70)
        print("Running", cmdstr)
        (stdout, stderr, ret) = run_cmd(cmdstr)
        if stdout:
            print("STDOUT:")
            print("------")
            print(stdout, end='')
        if stderr:
            print("STDERR:")
            print("------")
            print(stderr, end='')
        print("-" * 70)
        succeeded = ret == 0
        self.assertEqual(succeeded, should_succeed)
        return (stdout, stderr, ret)