Example #1
def genome_download(name, output_path):
    path = output_path + name.replace(" ", "_")
    os.makedirs(path)
    ngd.download(group="bacteria",
                 genus=name,
                 file_format="fasta",
                 parallel=10,
                 dry_run=True)
    ngd.download(group="bacteria",
                 genus=name,
                 file_format="fasta",
                 parallel=10,
                 dry_run=False,
                 output=path)
    files = []
    for r, d, f in os.walk(path):
        for file in f:
            if '.gz' in file:
                files.append(os.path.join(r, file))

    for f in files:
        sh.gunzip(f)

    files2 = []
    for r, d, f in os.walk(path):
        for file in f:
            if '.fna' in file:
                files2.append(os.path.join(r, file))

    out = output_path + "/" + name.replace(" ", "_") + ".fasta"
    sh.cat(files2, _out=out)
    return path
Example #2
def sh_cat(infiles: typing.List[str],
           outfile: str,
           remove: bool = False,
           error_file: typing.TextIO = sys.stderr,
           verbose: bool = False):
    """Unfortunately, the performance of this code is not very good.
    Apparently the output from the cat command is read into Python
    before being written to the output file. This produces a bottleneck.

    Another option would be to start a subshell and use it to perform output
    redirection for the result of `cat`.  This will be higher performing than
    the present implementation.  It may or may not be the simplest solution,
    depending upon whether it is necessary to outwit features such as .login
    files producing output.

    """
    import sh
    start_time: float = 0
    if verbose:
        print('Using sh_cat.', file=error_file, flush=True)
        start_time = time.time()
    sh.cat(*infiles, _out=outfile)

    if remove:
        if verbose:
            print('Removing files.', file=error_file, flush=True)
        sh.rm(*infiles)

    if verbose:
        print('Done with sh_cat.', file=error_file, flush=True)
        print('Time taken : {}s'.format(time.time() - start_time),
              file=error_file,
              flush=True)
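
The subshell alternative mentioned in the docstring could be sketched roughly as follows (the helper name sh_cat_subshell is hypothetical, and it assumes bash is on PATH; a non-interactive, non-login shell also sidesteps the .login output problem the docstring mentions):

import shlex
import typing

import sh


def sh_cat_subshell(infiles: typing.List[str], outfile: str) -> None:
    # Let a bash subshell perform the output redirection, so the concatenated
    # data never has to pass through the Python process.
    quoted = " ".join(shlex.quote(f) for f in infiles)
    sh.bash("-c", "cat {} > {}".format(quoted, shlex.quote(outfile)))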
Example #3
async def main(urlfile, outfile: IO[str], gateway_node: SSHNode,
               client_node: Sequence[SSHNode], n_clients: int,
               checkpoint_dir: Optional[pathlib.Path],
               gateway_endpoint: Optional[str], **kwargs):
    """Script entry pooint."""
    logging.basicConfig(
        format='[%(asctime)s] %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO)
    _LOGGER.info("Running script.")

    # Either we have one repetition per node, or multiple client nodes
    client_node = list(client_node) or [
        SSHNode(),
    ]
    assert n_clients == 1 or len(client_node) == 1
    if len(client_node) == 1 and n_clients > 1:
        for _ in range(n_clients - 1):
            client_node.append(SSHNode(client_node[0].node_address))

    urls = [url for (url, ) in urlfile]

    gateway_endpoint = gateway_endpoint or gateway_node.public_ip
    experiment = TraceCollectionExperiment(
        gateway_node,
        client_node,
        urls,
        checkpoint_dir=checkpoint_dir,
        gateway_endpoint=gateway_endpoint,
        **kwargs,
    )

    checkpoint_filenames = await experiment.run()
    sh.cat(checkpoint_filenames, _out=outfile)

    experiment.clear_checkpoint()
Example #4
def main():
    logging.debug('start')
    if YANDEX_SEARCH_ID == "":
        logging.warn(
            'to enable search on your site run\n    python3 build3.py "http://website.url/" 123\n    where 123 is yandex search id obtainable on http://site.yandex.ru/searches/new/'
        )

    #create and clear output directory if necessary
    mkdir("-p", "_site/")
    rm("-Rf", glob("_site/*"))
    #copy static content
    cp("-a", glob("_web/*"), "_site/")
    mv("_site/dot_htaccess", "_site/.htaccess")
    #copy optimized css
    cssoptimizer(cat(glob("_css/*")), "-i", "_site/style.css")
    #copy optimized js
    uglifyjs(cat(glob("_js/*")), "-o", "_site/scripts.js")

    #generate content
    materialize_notes(SOURCE)
    materialize_template("Y_Search", "Y_Search", {"title": "Поиск"})

    logging.debug('end.')
    logging.info(
        'To start copy following url into your browser: \n%sindex.html' %
        BASE_URL)
Example #5
def read(temp_files):

    bucket_id = "jfhuete-pycones2021"
    received_temp_path = "/tmp/received"

    rebuilt_temp_file = "sample_received.hdf5"
    rebuilt_temp_file_path = f"{received_temp_path}/{rebuilt_temp_file}"

    # Get split files from S3

    processes = []
    for file in temp_files:
        processes.append(
            sh.aws(
                "s3api",
                "get-object",
                "--bucket",
                bucket_id,
                "--key",
                file,
                f"{received_temp_path}/{file}",
                _bg=True)
            )

    for process in processes:
        process.wait()

    import time
    time.sleep(2)

    temp_files_path = [f"{received_temp_path}/{f}" for f in temp_files]
    sh.cat(*temp_files_path, _out=f"{rebuilt_temp_file_path}")

    df = vaex.open(rebuilt_temp_file_path)
Example #6
def plot(input_file, output_file, nr_of_lines, options = None, label = 'transfer rate [MB per s]'):
    '''Plot the input file, using each odd column n as x-axis values
    and column n+1 as the corresponding y-axis values.'''
    if options is None:
        options = []
    with tempfile.NamedTemporaryFile() as plot_file:
        print >>plot_file, 'set xlabel "time [min]";'
        print >>plot_file, 'set xtic auto;'
        print >>plot_file, 'set ylabel "%s";' % label
        #print >>plot_file, 'set timefmt '%Y-%m-%d %H:%M:%S''
        if MONOCHROME in options:
            print >>plot_file, 'set terminal pdf linewidth 3 monochrome solid font "Helvetica,14" size 16cm,12cm'
        else:
            print >>plot_file, 'set terminal pdf linewidth 3 solid font "Helvetica,14" size 16cm,12cm'
        print >>plot_file, 'set output "%s"' % output_file 
        plot_file.write('plot ')
        print nr_of_lines
        for i in range(nr_of_lines):
            print "line:"+str(i)
            x_axis_col = i*2 + 1
            y_axis_col = i*2 + 2
            plot_file.write('"%s" using %s:%s title column(%s)  w lines ' % (input_file, x_axis_col, y_axis_col, y_axis_col))
            if i+1 != nr_of_lines:
                plot_file.write(',')
        plot_file.flush()
        print "plot file:"
        #print plot_file.name
        print sh.cat(plot_file.name)
        #raw_input("raw_input")
        sh.gnuplot(plot_file.name)
Example #7
    def test_internal_bufsize(self):
        from sh import cat

        output = cat(_in="a" * 1000, _internal_bufsize=100, _out_bufsize=0)
        self.assertEqual(len(output), 100)

        output = cat(_in="a" * 1000, _internal_bufsize=50, _out_bufsize=2)
        self.assertEqual(len(output), 100)
Example #8
File: test.py Project: ahhentz/sh
 def test_internal_bufsize(self):
     from sh import cat
     
     output = cat(_in="a"*1000, _internal_bufsize=100, _out_bufsize=0)
     self.assertEqual(len(output), 100)
     
     output = cat(_in="a"*1000, _internal_bufsize=50, _out_bufsize=2)
     self.assertEqual(len(output), 100)
Example #9
File: maid.py Project: mlawe/t2kdm
 def _do(self):
     with tempfile.TemporaryFile('w+t') as tf:
         # Write tail of logfile into temporary file
         sh.tail(self.path, lines=self.nlines, _in=self.path, _out=tf)
         # Rewind temporary file
         tf.seek(0)
         # Overwrite old file
         sh.cat(_in=tf, _out=self.path)
     return True
Example #10
 def runCommand(input_path, output_path, *extra_arguments, error_path=None):
   # See createNativeCommand for why I'm using cat
   if not error_path:
     return command(
               snapshots(sh.cat(input_path, _piped="direct"), _piped="direct"), 
               *extra_arguments, _out=output_path, _iter="err")
   else:
     return command(
               snapshots(sh.cat(input_path, _piped="direct"), _piped="direct"), 
               *extra_arguments, _out=output_path, _err=error_path, _bg=True)
Example #11
File: sample.py Project: vhnuuh/pyutil
def test_stdin_processing():
    """
    You’re also not limited to using just strings. You may use a file object,
    a Queue, or any iterable (list, set, dictionary, etc):
    :return:
    """
    from sh import cat, tr
    # print "test"
    print cat(_in='test')
    print tr('[:lower:]', '[:upper:]', _in='sh is awesome')
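
A rough sketch of the non-string inputs the docstring mentions (a file object and a plain list; the path is a placeholder):

from sh import cat

with open('/etc/hostname') as f:
    print(cat(_in=f))                              # stdin fed from a file object

print(cat(_in=['one\n', 'two\n', 'three\n']))      # each item of the iterable is written to stdin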
Example #12
File: battery.py Project: xolan/dotfiles
def main(argv=None):
    if argv:
        post = ' '.join(argv[1:])
    else:
        post = ''
    cap = int(cat('/sys/class/power_supply/BAT0/energy_full'))
    now = int(cat('/sys/class/power_supply/BAT0/energy_now'))
    per = (now/cap)*100

    return output(per, post)
Example #13
def my_menu():
    your_resp = 0
    while your_resp != 3:
        try:
            print("""\033[1;31m              
                     [===========[\033[1;37mMENU\033[1;31m]===========] 
                     \033[1;31m||\033[1;34m  |1|\033[1;37m Check icmp reply\033[1;31m    ||
                     \033[1;31m||\033[1;34m  |2|\033[1;37m Show good/bad addr\033[1;31m  ||
                     \033[1;31m||\033[1;34m  |3|\033[1;37m Exit       \033[1;31m         ||
                     [============================]\033[1;m""")
            your_resp = int(
                input(
                    "\n\n\033[1;34m ==> \033[1;37m Enter your choice: \033[1;m"
                ))
            break
        except (ValueError):
            print(
                "\n\n\n\033[1;31m Oops! You didn't enter a valid choice. Try again.\n\n\n\033[1;m"
            )

    if your_resp == 1:
        print("\033[1;37m \n\nHey, wassup breh...\n\n\033[1;m")
        for num in range(5, 10):
            address = "8.8.8." + str(num)
            try:
                sh.ping(address, "-c 1", _out="/dev/null")
                print("Success:", address, "is up")
                with open('gaddresses.txt', 'a') as out:
                    out.write(address + '\n')
                with open('baddresses.txt', 'a') as out:
                    out.write('\n')
            except sh.ErrorReturnCode_1:
                print("Error:", address, "not responding")
                with open('baddresses.txt', 'a') as out:
                    out.write(address + '\n')
                with open('gaddresses.txt', 'a') as out:
                    out.write('\n')
        my_menu()
    elif your_resp == 2:
        print("\033[1;34m \n\nGood Addresses...\n\n\033[1;m")
        print(sh.cat('gaddresses.txt'))
        print("\033[1;31m \n\nBad Addresses...\n\n\033[1;m")
        print(sh.cat('baddresses.txt'))
        my_menu()
    elif your_resp == 3:
        print("\033[1;34m\n\n See you later!\033[1;m")
        sh.rm('gaddresses.txt')
        sh.rm('baddresses.txt')
    else:
        print(
            "\033[1;41m\n\n\n Sorry, I didn't understand your choice...\n\n\n\033[1;m"
        )
        print("\033[1;41m\n\n Please try again.\n\n\n\033[1;m")
        my_menu()
    print()
Example #14
 def runCommand(input_path, output_path, *extra_arguments, error_path=None):
   # Due to a quirk of sh's design, having cat pipe the file contents is
   # *considerably* faster than using _in directly (in which case Python
   # will have to read everything in, and then echo it out. Seriously slow.)
   if not error_path:
     return native_command(sh.cat(input_path, _piped="direct"),
                           *extra_arguments, _out=output_path, _iter="err")
   else:
     return native_command(sh.cat(input_path, _piped="direct"),
                           *extra_arguments, _out=output_path, _err=error_path, 
                           _bg=True)
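
The two approaches contrasted in the comment above could be sketched like this (hypothetical file names; wc simply stands in for any downstream command):

import sh

# Slower: Python reads the whole file itself and feeds it to the command's stdin.
sh.wc("-l", _in=open("big_input.txt"), _out="line_count.txt")

# Faster: cat streams the file straight into the command's stdin.
sh.wc(sh.cat("big_input.txt", _piped="direct"), "-l", _out="line_count.txt")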
Example #15
 def _collect_block(self, sysinfo_path):
     try:
         block = Path("/sys/block")
         results = open(Path(sysinfo_path, "block_params.log"), "a+")
         if block.exists():
             for f in block.rglob("[s,h,v]d*/*"):
                 if f.is_file():
                     sh.echo(f, _out=results)
                     sh.cat(f, _out=results)
     except Exception:
         pass
Example #16
def report_benchmark_results(file_a, file_b, description):
  """Wrapper around report_benchmark_result.py."""
  result = "{0}/perf_results/latest/performance_result.txt".format(IMPALA_HOME)
  with open(result, "w") as f:
    subprocess.check_call(
      ["{0}/tests/benchmark/report_benchmark_results.py".format(IMPALA_HOME),
       "--reference_result_file={0}".format(file_a),
       "--input_result_file={0}".format(file_b),
       '--report_description="{0}"'.format(description)],
      stdout=f)
  sh.cat(result, _out=sys.stdout)
Example #17
def report_benchmark_results(file_a, file_b, description):
  """Wrapper around report_benchmark_result.py."""
  result = "{0}/perf_results/latest/performance_result.txt".format(IMPALA_HOME)
  with open(result, "w") as f:
    subprocess.check_call(
      ["{0}/tests/benchmark/report_benchmark_results.py".format(IMPALA_HOME),
       "--reference_result_file={0}".format(file_a),
       "--input_result_file={0}".format(file_b),
       '--report_description="{0}"'.format(description)],
      stdout=f)
  sh.cat(result, _out=sys.stdout)
Example #18
File: catalog.py Project: laegrim/gitberg
    def parse_rdf(self):
        """ cat|grep's the rdf file for minimum metadata
        """
        # FIXME: make this an rdf parser if I can
        _title = sh.grep(sh.cat(self.rdf_path), 'dcterms:title', _tty_out=False)
        try:
            _author = sh.grep(sh.cat(self.rdf_path), 'name', _tty_out=False)
            self.author = self._clean_properties(_author)
        except sh.ErrorReturnCode_1:
            self.author = "Various"

        self.title = self._clean_properties(_title)
Example #19
File: zconcat.py Project: wyfunique/kgtk
def run(output, gz, bz2, xz, tmpDir, inputs):

    # import modules locally
    import socket
    if isinstance(output, str):
        output = open(output, "wb")
    compress = None
    if gz:
        compress = sh.gzip
    elif bz2:
        compress = sh.bzip2
    elif xz:
        compress = sh.xz

    print(inputs)
    if inputs:
        for inp in inputs:
            catcmd = sh.cat
            file, fileType = determineFileType(inp)
            if fileType == 'gzip':
                catcmd = sh.zcat
            elif fileType == 'bzip2':
                catcmd = sh.bzcat
            elif fileType == 'xz':
                catcmd = sh.xzcat
            try:
                if compress is not None:
                    compress(catcmd(file, _piped=True, _out=output),
                             '-c',
                             _out=output,
                             _tty_out=False)
                else:
                    catcmd(file, _out=output)
            except sh.SignalException_SIGPIPE:
                break
    else:
        print('here')
        try:
            if compress is not None:
                print('compressing')
                compress('-c', _in=sys.stdin, _out=output, _tty_out=False)
                print('compressing done')
            else:
                sh.cat(_in=sys.stdin, _piped=True, _out=output)
        except sh.SignalException_SIGPIPE:
            pass

    # cleanup in case we piped and terminated prematurely:
    try:
        output.flush()
        sys.stdout.flush()
    except:
        pass
Example #20
def write_fasta(haplotype_seqs, outdir):
    fasta_record = collections.namedtuple("fasta_record", "id seq")
    output_files = []
    for idx in range(len(haplotype_seqs)):
        haplotype_id = ''.join(("haplotype", str(idx)))
        seq = fasta_record(id=haplotype_id, seq=haplotype_seqs[idx])
        output_file = os.path.join(outdir, ''.join((haplotype_id, ".fasta")))
        output_files.append(output_file)

        with open(output_file, 'w') as outfile:
            outfile.write(">{}\n{}\n".format(seq.id, seq.seq))

    sh.cat(output_files, _out=os.path.join(outdir, "haplotypes.fasta"))
Example #21
    def parse_rdf(self):
        """ cat|grep's the rdf file for minimum metadata
        """
        # FIXME: make this an rdf parser if I can
        _title = sh.grep(sh.cat(self.rdf_path),
                         'dcterms:title',
                         _tty_out=False)
        try:
            _author = sh.grep(sh.cat(self.rdf_path), 'name', _tty_out=False)
            self.author = self._clean_properties(_author)
        except sh.ErrorReturnCode_1:
            self.author = "Various"

        self.title = self._clean_properties(_title)
Example #22
def test_console_script(cli):
    TEST_COMBINATIONS = (
        # quote_mode, var_name, var_value, expected_result
        ("always", "HELLO", "WORLD", 'HELLO="WORLD"\n'),
        ("never", "HELLO", "WORLD", 'HELLO=WORLD\n'),
        ("auto", "HELLO", "WORLD", 'HELLO=WORLD\n'),
        ("auto", "HELLO", "HELLO WORLD", 'HELLO="HELLO WORLD"\n'),
    )
    with cli.isolated_filesystem():
        for quote_mode, variable, value, expected_result in TEST_COMBINATIONS:
            sh.touch(dotenv_path)
            sh.dotenv('-f', dotenv_path, '-q', quote_mode, 'set', variable, value)
            output = sh.cat(dotenv_path)
            assert output == expected_result
            sh.rm(dotenv_path)

    # should fail for a nonexistent file
    result = cli.invoke(dotenv.cli.set, ['my_key', 'my_value'])
    assert result.exit_code != 0

    # should fail for a nonexistent file
    result = cli.invoke(dotenv.cli.get, ['my_key'])
    assert result.exit_code != 0

    # should fail for a nonexistent file
    result = cli.invoke(dotenv.cli.list, [])
    assert result.exit_code != 0
Example #23
def run_all_SRGP_jobs(placeholder):
    i = 0
    dropbox_trnsfer = TransferData(DROPBOX_KEY)
    random_sleep = random.randint(1, 20)
    time.sleep(random_sleep)
    for finished in range(0, 2):
        print("Looking for a job to run")
        job_ID, job_arguments = get_SRGP_job(finished)
        while job_arguments is not None:
            print("Got a job to run")
            output_db = job_arguments[2]
            if (job_arguments[0] != pySRURGS_dir + '/experiments/SRGP.py'):
                raise Exception("SQL injection?")
            try:
                sh.python(*job_arguments, _err="error.txt")
            except:
                print(sh.cat('error.txt'))
                continue
            dropbox_trnsfer.upload_file(output_db,
                                        '/' + os.path.basename(output_db))
            with SqliteDict(output_db, autocommit=True) as results_dict:
                n_evals = results_dict['n_evals']
            set_job_finished(n_evals, job_ID)
            job_ID, job_arguments = get_SRGP_job(finished)
            print('finished a job', i)
            i = i + 1
Example #24
def email_sh_error(
    sh_error, email_dir, sender_email,
    recipient_emails, subject
):
    """
    Takes an sh.ErrorReturnCode error
    and sends an email (using sendmail) with its contents
    """

    epoch_time = datetime.now().strftime('%s')

    email_filepath = '{0}/{1}.email'.format(email_dir, epoch_time)

    with open(email_filepath, 'w') as email_file:
        email_file.write('from: {0}\n'.format(sender_email))
        email_file.write('subject: {0}\n'.format(subject))
        email_file.write('\n')
        email_file.write('{0}\n'.format(sh_error.message))
        email_file.write('\n')
        email_file.write('Exception properties\n')
        email_file.write('===\n')
        email_file.write('full_cmd: {0}\n'.format(sh_error.full_cmd))
        email_file.write('exit_code: {0}\n'.format(str(sh_error.exit_code)))
        email_file.write('stdout: {0}\n'.format(sh_error.stdout))
        email_file.write('stderr: {0}\n'.format(sh_error.stderr))

    sh.sendmail(sh.cat(email_filepath), recipient_emails)
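
A hypothetical call site, for context (the directory and addresses are placeholders; any sh command that exits non-zero raises sh.ErrorReturnCode):

import sh

try:
    sh.ls("/no/such/directory")
except sh.ErrorReturnCode as err:
    # err carries full_cmd, exit_code, stdout and stderr, which the helper writes out.
    email_sh_error(err, "/tmp/emails", "cron@example.com",
                   "ops@example.com", "nightly backup failed")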
Example #25
    def crack(self,
              dilateiter=4,
              erodeiter=4,
              threshold=200,
              size=(155, 55),
              whitelist_chars=string.ascii_lowercase):  #Take all parameters
        ''':param whitelist_chars: the characters to recognize'''
        resized = resizeImage(self.image,
                              (self.image.width * 6, self.image.height * 6))

        dilateImage(resized, dilateiter)
        erodeImage(resized, erodeiter)
        thresholdImage(resized, threshold, cv.CV_THRESH_BINARY)

        resized = resizeImage(resized, size)

        #Call the tesseract engine
        from tempfile import NamedTemporaryFile
        temp_img_file = NamedTemporaryFile(suffix='.jpg')
        temp_solution_file = NamedTemporaryFile()
        cv.SaveImage(temp_img_file.name, resized)
        tesseract(temp_img_file.name, temp_solution_file.name, '-c',
                  'tessedit_char_whitelist=' + whitelist_chars)
        ret = str(cat(temp_solution_file.name + '.txt'))
        import os
        os.unlink(temp_solution_file.name + '.txt')
        return ret
Example #26
def test_filter_pipe_config():
    output = StringIO()
    python3(cat(program_output), path2main, config=config_file,
            use_config_section='TEST', _out=output)

    with open(program_output_filtered, 'r') as correctly_filtered_output:
        assert correctly_filtered_output.read() == output.getvalue()
Example #27
def normalize_transcription(transcriptions, wsj_root: Path):
    """ Passes the dirty transcription dict to a Kaldi Perl script for cleanup.

    We use the original Perl file, to make sure, that the cleanup is done
    exactly as it is done by Kaldi.

    :param transcriptions: Dirty transcription dictionary
    :param wsj_root: Path to WSJ database

    :return result: Clean transcription dictionary
    """
    assert len(transcriptions) > 0, 'No transcriptions to clean up.'
    with tempfile.TemporaryDirectory() as temporary_directory:
        temporary_directory = Path(temporary_directory).absolute()
        with open(temporary_directory / 'dirty.txt', 'w') as f:
            for key, value in transcriptions.items():
                f.write('{} {}\n'.format(key, value))
        result = sh.perl(
            sh.cat(str(temporary_directory / 'dirty.txt')),
            kaldi_wsj_tools / 'normalize_transcript.pl',
            '<NOISE>'
        )
    result = [line.split(maxsplit=1) for line in result.strip().split('\n')]
    result = {k: v for k, v in result}
    return result
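
A hypothetical call, assuming kaldi_wsj_tools points at the directory containing Kaldi's normalize_transcript.pl:

from pathlib import Path

dirty = {"011c0201": "this is a ~dirty [NOISE] transcript"}
clean = normalize_transcription(dirty, wsj_root=Path("/db/wsj"))
# clean maps the same keys to the text as cleaned by the Kaldi script.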
Example #28
    def get_maintain_list():
        '''
        Get the list of game servers under maintenance.
        '''

        gamename = sh.cat(settings.DIR['game']).strip("\n")
        filepath = settings.DIR['meta'].format(game=gamename)
        meta_filename = '{filepath}/meta.yaml'.format(filepath=filepath)
        section_filename = '{filepath}/section.yaml'.format(filepath=filepath)

        meta_content = Base.get_yaml_content(meta_filename)

        data = []
        if 'maintain' in meta_content:  # maintenance for the whole game
            temp = {'section': 'all'}
            data.append(temp)

        else:  # per-server maintenance

            section_content = Base.get_yaml_content(section_filename)

            if 'sections' in section_content:
                for i in section_content['sections']:
                    temp = {}
                    if 'maintain' in i and i['maintain'] == 1:
                        temp['section'] = i['id']
                        data.append(temp)

        return data
Example #29
def present(unit_path, name, content):
    changed = False

    if not os.path.exists(unit_path):
        with open(unit_path, "w") as f:
            f.write(content)
            changed = True
        logger.info("Created new")
    else:
        current = sh.cat(unit_path)
        if current.strip() != content.strip():
            with open(unit_path, "w") as f:
                f.write(content)
                changed = True
            logger.info("Content changed")

    is_running = False

    try:
        sh.systemctl("is-active", name)
        is_running = True
    except sh.ErrorReturnCode:
        pass

    if changed:
        sh.systemctl("daemon-reload")

    if is_running and changed:
        logger.info("Restarting because changed and is running")
        sh.systemctl("restart", name)

    return changed
Example #30
def get_container_id():
    "get container id"
    id_ = sh.cut(sh.head(sh.cat("/proc/self/cgroup"), "-n", "1"), "-d", "/",
                 "-f4").strip()
    if not id_:
        return "unknown"
    return id_
Example #31
def test_console_script(cli):
    TEST_COMBINATIONS = (
        # quote_mode, var_name, var_value, expected_result
        ("always", "HELLO", "WORLD", 'HELLO="WORLD"\n'),
        ("never", "HELLO", "WORLD", 'HELLO=WORLD\n'),
        ("auto", "HELLO", "WORLD", 'HELLO=WORLD\n'),
        ("auto", "HELLO", "HELLO WORLD", 'HELLO="HELLO WORLD"\n'),
    )
    with cli.isolated_filesystem():
        for quote_mode, variable, value, expected_result in TEST_COMBINATIONS:
            sh.touch(dotenv_path)
            sh.dotenv('-f', dotenv_path, '-q', quote_mode, 'set', variable,
                      value)
            output = sh.cat(dotenv_path)
            assert output == expected_result
            sh.rm(dotenv_path)

    # should fail for a nonexistent file
    result = cli.invoke(dotenv.cli.set, ['my_key', 'my_value'])
    assert result.exit_code != 0

    # should fail for a nonexistent file
    result = cli.invoke(dotenv.cli.get, ['my_key'])
    assert result.exit_code != 0

    # should fail for a nonexistent file
    result = cli.invoke(dotenv.cli.list, [])
    assert result.exit_code != 0
Example #32
  def postClone(self, cloned_files, target_dir, version):
    """
    .. versionadded:: 0.3.0
    """
    # Start by extracting all the files
    for f in cloned_files:
      # GunZIP the file (and remove the archive)
      sh.gunzip(f)

    # Then let's concat them
    target_path = "{}/Genbank.Homo_sapiens.fa".format(target_dir)
    # Remove ".gz" ending to point to extracted files
    cat_args = [f[:-3] for f in cloned_files]

    # Execute the concatenation and write to the target path
    sh.cat(*cat_args, _out=target_path)
Example #33
  def postClone(self, cloned_files, target_dir, version):
    """
    .. versionadded:: 0.3.0
    """
    # Start by extracting all the files
    for f in cloned_files:
      # GunZIP the file (and remove the archive)
      sh.gunzip(f)

    # Then let's concat them
    target_path = "{}/NCBI.Homo_sapiens.fa".format(target_dir)
    # Remove ".gz" ending to point to extracted files
    cat_args = [f[:-3] for f in cloned_files]

    # Execute the concatenation in the background and write to the target path
    sh.cat(*cat_args, _out=target_path, _bg=True)
Example #34
def restore_config(config_file):
    output.itemize(
        'Restoring firewall configuration from \'{}\''.format(config_file)
    )

    sh.iptables_restore(sh.cat(config_file))
    files.remove(os.path.dirname(config_file), _output_level=1)
Example #35
def run_all_SRURGS_jobs(placeholder):
    i = 0
    dropbox_trnsfer = TransferData(DROPBOX_KEY)
    random_sleep = random.randint(1, 20)
    time.sleep(random_sleep)
    for finished in range(0, 2):
        job_ID, job_arguments = get_SRURGS_job(finished)
        SRGP_db = job_ID.replace("SRURGS", "SRGP")
        n_evals = find_matching_SRGP_job_n_evals(SRGP_db)
        if n_evals == -1:
            # this job has not been completed on the SRGP side.
            continue
        while job_arguments is not None:
            output_db = job_arguments[-3]
            if ((job_arguments[0] != pySRURGS_dir + '/pySRURGS.py')
                    or (';' in ''.join(job_arguments))):
                raise Exception("SQL injection?")
            try:
                sh.python(*job_arguments, _err="error.txt")
            except:
                print(sh.cat('error.txt'))
                continue
            dropbox_trnsfer.upload_file(output_db,
                                        '/' + os.path.basename(output_db))
            with SqliteDict(output_db, autocommit=True) as results_dict:
                n_evals = results_dict['n_evals']
            set_job_finished(n_evals, job_ID)
            job_ID, job_arguments = get_SRURGS_job(finished)
            print('finished a job', i)
            i = i + 1
Example #36
def _get_files(file_type, file_index):
    files = awk(grep(cat("/tmp/git_hook"),
                     "-P",
                     "(A|M).*.%s$" % file_type,
                     _ok_code=[0, 1]),
                "{print $%s}" % file_index,
                _iter=True)

    if not files:
        return None

    exten = ".%s" % file_type
    files = [
        file_path[:file_path.rindex(exten) + len(exten)] for file_path in files
    ]
    if not except_paths:
        return files

    except_files = []
    for file_path in files:
        for except_path in except_paths:
            if file_path not in except_files and file_path.startswith(
                    except_path):
                except_files.append(file_path)

    return [file_path for file_path in files if file_path not in except_files]
Example #37
def test_two_images():
    sh.docker(sh.cat('empty.tar'), 'import', '-', 'footest')
    sh.docker('build', '-t', 'bartest', '.')
    f = StringIO()
    Docktree(restrict='footest', file=f).draw_tree()
    assert re.match(
        u'└─ [a-f0-9]{12} footest:latest\n' +
        u'   └─ [a-f0-9]{12} bartest:latest\n', f.getvalue())
Example #38
def info(ctx):
    '''
    Show Aagard's config.
    '''

    click.echo(click.style(CONFIG_FILE + ' :', fg='yellow'))
    click.echo()
    click.echo(cat(CONFIG_FILE))
Example #39
File: pstor.py Project: rrader/pstor
def mounted(dir=None):
    try:
        cwd = dir if dir else os.path.join(os.getcwd(), 'files')
        cwd = os.path.realpath(cwd)
        return any(cwd == path.strip() for path in 
                list(sh.awk(sh.cat("/proc/mounts"), "{print $2}")))
    except sh.ErrorReturnCode:
        return False
Example #40
def test_two_images():
    sh.docker(sh.cat('empty.tar'), 'import', '-', 'footest')
    sh.docker('build', '-t', 'bartest', '.')
    f = StringIO()
    Docktree(restrict='footest', file=f).draw_tree()
    assert re.match(
        u'└─ sha256:[a-f0-9]{5} footest:latest\n' +
        u'   └─ sha256:[a-f0-9]{5} bartest:latest\n', f.getvalue())
Example #41
def raxml_consensus(gene_trees, model, outgroup, list_of_genes):
    '''Generates consensus trees from bootstraps and puts support values on
    best trees'''

    from sh import raxmlHPC as raxml
    os.chdir(gene_trees)
    for gene in list_of_genes:
        raxml("-m", model, "-p", "12345", "-f", "b", "-t", "RAxML_bestTree." +
            gene + '.' + model, "-z", "RAxML_bootstrap." + gene + '.boot', "-n",
            gene + ".cons.tre", "-o", outgroup)
    from sh import cat
    consensus_trees = glob('RAxML_bipartitions.*.cons.tre')
    cat(consensus_trees, _out='all_consensus_gene_trees.tre')
    # NEED TO ADD, ETE2 OR SOMETHING TO PUT NAMES ON TREES
    os.chdir('../../')
    print "# RAXML bootstrap search finished"
    return
Example #42
    def _runTest(self, shards, max_threads):
        for threads in range(1, max_threads + 1):
            for shard in range(0, shards):
                with sh.sudo:
                    outfile = output_file_name(shards, shard, threads)
                    zmap(p=80,
                         T=threads,
                         shards=shards,
                         shard=shard,
                         _out="tempfile")
                    parse("tempfile", _out=outfile)
                    dup_lines = int(wc(uniq(cat(outfile), "-d"), "-l"))
                    self.assertEqual(dup_lines, 0)
                    shard_file = shard_file_name(shards, threads)
                    if shard == 0:
                        cat(outfile, _out=shard_file)
                    else:
                        cat(shard_file, outfile, _out="tempfile")
                        mv("tempfile", shard_file)

        for threads in range(1, max_threads + 1):
            shard_file = shard_file_name(shards, threads)
            num_lines = int(wc(cat(shard_file), "-l"))
            self.assertEqual(num_lines, TestSharding.NUM_IPS)
            dup_lines = int(
                wc(uniq(sh.sort(cat(shard_file), "-n"), "-d"), "-l"))
            self.assertEqual(dup_lines, 0)
Example #43
 def get_text(self, filename: str, _iter=None):
     """Get the text from filename on the node.  The argument _iter
     is the same as in the sh library.
     """
     if self.is_localhost():
         return sh.cat(filename, _iter=_iter)
     return sh.scp(f"{self.node_address}:{filename}",
                   "/dev/stdout",
                   _iter=_iter)
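
A small usage sketch (assumes an SSHNode instance named node; the path is a placeholder):

for line in node.get_text("/var/log/syslog", _iter=True):
    print(line, end="")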
Example #44
def test_basic_filter_pipe():
    output = StringIO()
    python3(cat(program_output), path2main,
            start_pattern=start_pattern,
            error_pattern=error_pattern,
            _out=output)

    with open(program_output_filtered, 'r') as correctly_filtered_output:
        assert correctly_filtered_output.read() == output.getvalue()
Example #45
def parse(filename, **kwargs):
    # cat outfile | grep ip | cut -d '|' -f 2 | cut -d ' ' -f 3 | cut -d '.' -f 4 | sort -n | wc -l
    return sh.sort(cut(cut(cut(grep(cat(filename), "ip"), d="|", f=2),
                           d=" ",
                           f=3),
                       d=".",
                       f=4),
                   "-n",
                   _out=kwargs.get("_out"))
Example #46
def test_basic_filter_pipe():
    output = StringIO()
    python3(cat(program_output),
            path2main,
            start_pattern=start_pattern,
            error_pattern=error_pattern,
            _out=output)

    with open(program_output_filtered, 'r') as correctly_filtered_output:
        assert correctly_filtered_output.read() == output.getvalue()
Example #47
File: cat_cpu_info.py Project: Swind/clif
def cat_sys_info_cmd(**kwargs):
    """
    Usage:
        cat sys info --cpu --memory --repeat <repeat>

    Options:
        -c,--cpu               Show CPU information
        -m,--memory            Show memory usage
        -r,--repeat <repeat>   Repeat time [default: 1]
    """
    result = ""

    if "--cpu" in kwargs and kwargs["--cpu"]:
        print "CPU:"
        print cat("/proc/cpuinfo")

    if "--memory" in kwargs and kwargs["--memory"]:
        print "Memory:"
        print cat("/proc/meminfo")
Example #48
def test_filter_pipe_config():
    output = StringIO()
    python3(cat(program_output),
            path2main,
            config=config_file,
            use_config_section='TEST',
            _out=output)

    with open(program_output_filtered, 'r') as correctly_filtered_output:
        assert correctly_filtered_output.read() == output.getvalue()
Example #49
def get_commit_files(file_type):
  system("git diff --cached --name-status > /tmp/git_hook")

  files = awk(
    grep(
     cat("/tmp/git_hook"), "-P", "A|M.*.%s$" % file_type,
     _ok_code = [0, 1]
    ), "{print $2}", _iter = True
  )

  exten = ".%s" % file_type
  return [path[:path.rindex(exten) + len(exten)] for path in files]
Example #50
def get_cmd_from_ps(needle):
    result = sh.grep(sh.cat(sh.ps('-wwaeopid,cmd')), needle)
    if result.exit_code == 0:
        for line in result.stdout.split('\n'):
            line = line.strip()
            if not line:
                continue
            match = re.search(r'^(\d*)\s*(.*)', line)
            if match.group(2).startswith('grep'):
                continue
            return match.group(2)
    raise KeyError('Failed to find: %s' % needle)
Example #51
File: build3.py Project: dmi3/notes
def main():
  logging.debug('start')
  if YANDEX_SEARCH_ID=="":
    logging.warn('to enable search on your site run\n    python3 build3.py "http://website.url/" 123\n    where 123 is yandex search id obtainable on http://site.yandex.ru/searches/new/')

  #create and clear output directory if necessary
  mkdir("-p","_site/")
  rm("-Rf",glob("_site/*"))
  #copy static content
  cp("-a",glob("_web/*"),"_site/")
  mv("_site/dot_htaccess","_site/.htaccess")
  #copy optimized css
  cssoptimizer(cat(glob("_css/*")),"-i","_site/style.css")
  #copy optimized js
  uglifyjs(cat(glob("_js/*")),"-o","_site/scripts.js")

  #generate content
  materialize_notes(SOURCE)
  materialize_template("Y_Search","Y_Search",{"title":"Поиск"})

  logging.debug('end.') 
  logging.info('To start copy following url into your browser: \n%sindex.html' % BASE_URL)
Example #52
    def _wait_for_pid_file(self, filename, wait_time):
        count = 0
        while not os.path.exists(filename):
            if count == wait_time:
                break
            time.sleep(1)
            count += 1

        if os.path.isfile(filename):
            self.twistd_pid = cat(filename)
            print 'self.twistd_pid: ', self.twistd_pid
        else:
            raise ValueError("%s isn't a file!" % filename)
Example #53
    def findPointer(self):
        for i in range(self.cfg.pointer, self.cfg.pointer + 100):

            pattern = '(' + str(i) + ','
            matches = grep(cat(self.infile), '-c', pattern)

            if int(matches.strip()) == 1:
                # Update the pointer
                self.cfg.pointer = i

                return i

        # If we get here, then we didn't find our pointer
        raise LogServerError
Example #54
def get_container_info():
    container_info = {}

    try:
        container_info['container_name'] = tail(sed(grep(cat('/proc/self/cgroup'), 'docker'), 's/^.*\///'), '-n1')
    except:
        container_info['container_name'] = 'unknown'

    try:
        container_info['app_version'] =  open('version', 'r').read()
    except:
        container_info['app_version'] = 'unknown'


    return container_info
Example #55
def get_wc(content_dir):
    """
    """
    filetype = "*.markdown"
    cd(content_dir)
    files_list = find(".", "-name", "*.markdown")
    files_arr = files_list.split('\n')
    word_count = 0
    for f in files_arr:
        if f:
            try:
                file_word_count = int(wc(cat(content_dir + f), "-w"))
                word_count += file_word_count
            except:
                pass
    return word_count
Example #56
File: test.py Project: ahhentz/sh
 def test_stringio_input(self):
     from sh import cat
     
     if IS_PY3:
         from io import StringIO
         from io import BytesIO as cStringIO
     else:
         from StringIO import StringIO
         from cStringIO import StringIO as cStringIO
         
     input = StringIO()
     input.write("herpderp")
     input.seek(0)
     
     out = cat(_in=input)
     self.assertEqual(out, "herpderp")
Example #57
def awesome_test(backend, user_rate, duration, load=False, notes=None):
    if load:
        load_db(backend)

    tsung_build(backend, user_rate, duration)

    backend = _get_backend(backend)
    if not backend.is_running():
        backend.restart()

    log_dir = None
    test_file = "build/{}".format(backend.tsung_test_template[:-3])
    args = ("-f", test_file, "start")
    try:
        for line in sh.tsung(*args, _iter=True):
            sys.stdout.write(line)
            if 'Log directory' in line:
                log_dir = line.split(':')[1].strip()
                log_dir = log_dir.replace('"', '')
    except Exception as e:
        if hasattr(e, 'stderr'):
            print(e.stderr)
        else:
            raise

    if log_dir:
        print("Creating README in log directory")
        context = {
            'notes': notes,
            'settings': get_settings_for_readme(),
            'user_rate': user_rate,
            'duration': duration
        }
        with open(os.path.join(log_dir, 'README.md'), 'w') as f:
            f.write(_render_template('README.md.j2', context))

        print("Generating report")
        title = 'Awesome Test: backend={}, user_rate={}, duration={}'.format(
            backend.name, user_rate, duration
        )
        with cd(log_dir):
            sh.Command('/usr/lib/tsung/bin/tsung_stats.pl')('--title', title)

            print(sh.cat('README.md'))
Example #58
    def switch_to_custom_manifest(cls, manifest_body):
        """
        Helper to overwrite the original manifest with a custom manifest
        :param manifest_body:
        :return: None
        """

        with open("/var/tmp/netapp_test_suite_tmp_site.pp", 'w') as temp_site_pp:
            temp_site_pp.write(manifest_body)

        if os.geteuid() != 0:
            sh.sudo('/bin/mv', '/var/tmp/netapp_test_suite_tmp_site.pp', cls.manifest_path + "/site.pp")
            sh.sudo('/bin/chmod', '664', cls.manifest_path + "/site.pp")
        else:
            sh.mv('/var/tmp/netapp_test_suite_tmp_site.pp', cls.manifest_path + "/site.pp")
            sh.chmod('664', cls.manifest_path + "/site.pp")

        # Show what site.pp looks like now
        cls.log.debug("What site.pp looks like now (via 'cat {0}'):".format(cls.manifest_path + "/site.pp"))
        cls.log.debug(sh.cat(cls.manifest_path + "/site.pp"))
Example #59
 def crack(self,dilateiter=4, erodeiter=4, threshold=200, size=(155,55), whitelist_chars=string.ascii_lowercase): #Take all parameters
     ''':param whitelist_chars: the characters to recognize'''
     resized = resizeImage(self.image, (self.image.width*6, self.image.height*6))
 
     dilateImage(resized, dilateiter)
     erodeImage(resized, erodeiter)
     thresholdImage(resized,threshold, cv.CV_THRESH_BINARY)
     
     resized = resizeImage(resized, size)
     
     #Call the tesseract engine
     from tempfile import NamedTemporaryFile
     temp_img_file = NamedTemporaryFile(suffix='.jpg') 
     temp_solution_file = NamedTemporaryFile() 
     cv.SaveImage(temp_img_file.name,resized)
     tesseract(temp_img_file.name, temp_solution_file.name, '-c', 'tessedit_char_whitelist='+whitelist_chars)
     ret = str(cat(temp_solution_file.name+'.txt'))
     import os
     os.unlink(temp_solution_file.name+'.txt')
     return ret
Example #60
def test_filter_end_file():
    file = 'test/program_output_end_file'
    filtered_output_file = 'test/program_output_filtered_end_file'
    # Test with the file passed as a parameter.
    output = StringIO()
    python3(path2main, file,
            start_pattern=start_pattern,
            error_pattern=error_pattern,
            _out=output)

    with open(filtered_output_file, 'r') as correctly_filtered_output:
        assert correctly_filtered_output.read() == output.getvalue()

    # Test with stdout
    output = StringIO()
    python3(cat(file), path2main,
        start_pattern=start_pattern,
        error_pattern=error_pattern,
        _out=output)

    with open(filtered_output_file, 'r') as correctly_filtered_output:
        assert correctly_filtered_output.read() == output.getvalue()