Exemple #1
0
def run_sim_commands(command_list, test, bin_dir, use_lsf):
    '''Run every simulation command in command_list.

    command_list is a list of (desc, cmd, dirname) tuples where desc is a
    human-readable description of the test, cmd is the command to run and
    dirname is the directory to run it in (created if it does not exist).

    test is the dictionary for the TEST entry matched from testlist.yaml and
    bin_dir is the general path to the binary directory; both are forwarded
    to cp_compiled_test.

    If use_lsf is true, the commands in command_list already begin with
    something like 'bsub -Is'. Interactive bsub gives us a local process per
    job, which run_parallel_cmd tracks.
    '''
    if use_lsf:
        # LSF submits everything 'at once', so every output directory must
        # already exist before any job starts.
        for _, _, out_dir in command_list:
            os.makedirs(out_dir, exist_ok=True)
        run_parallel_cmd([c for _, c, _ in command_list],
                         600, check_return_code=True)
        return

    # Local mode: create each directory just before its command runs, which
    # makes it easier to see how far we got if something failed.
    for desc, cmd, out_dir in command_list:
        os.makedirs(out_dir, exist_ok=True)
        cp_compiled_test(test, bin_dir)
        logging.info("Running " + desc)
        run_cmd(cmd, 300, check_return_code=True)
Exemple #2
0
def gen_cov(base_dir, simulator, lsf_cmd):
    """Generate a merged coverage directory.

    Args:
        base_dir:   the base simulation output directory (default: out/)
        simulator:  the chosen RTL simulator
        lsf_cmd:    command to run on LSF (prefixed onto the urg call), or
                    None to run locally

    Exits with RET_SUCCESS when no coverage databases are found and with
    RET_FAIL for an unsupported simulator.
    """
    # Compile a list of all output seed-###/rtl_sim/test.vdb directories
    dir_list = []
    for entry in os.scandir(base_dir):
        # Only seed-### directories can hold a per-test coverage database,
        # so skip everything else before building the candidate path.
        if 'seed' not in entry.name:
            continue
        logging.info("Searching %s/%s for coverage database" %
                     (base_dir, entry.name))
        vdb_path = "%s/%s/rtl_sim/test.vdb" % (base_dir, entry.name)
        if os.path.exists(vdb_path):
            dir_list.append(vdb_path)
    if not dir_list:
        logging.info("No coverage data available, exiting...")
        sys.exit(RET_SUCCESS)

    if simulator == 'vcs':
        cov_cmd = "urg -full64 -format both -dbname test.vdb " \
                  "-report %s/rtl_sim/urgReport -dir" % base_dir
        for cov_dir in dir_list:
            cov_cmd += " %s" % cov_dir
        logging.info("Generating merged coverage directory")
        if lsf_cmd is not None:
            cov_cmd = lsf_cmd + ' ' + cov_cmd
        run_cmd(cov_cmd)
    else:
        # Fixed typo in the original message ("unsuported").
        logging.error("%s is an unsupported simulator! Exiting..." % simulator)
        sys.exit(RET_FAIL)
Exemple #3
0
def rtl_compile(compile_cmds, output_dir, lsf_cmd, opts):
    """Compile the testbench RTL.

    compile_cmds is a list of command strings in which <out> and <cmp_opts>
    placeholders are substituted before running; executing them in order
    should compile the testbench.

    output_dir is the directory in which to generate the testbench (usually
    something like 'out/rtl_sim'); it replaces <out> in each command.

    lsf_cmd, when not None, is a string prefixed onto every command so it
    runs through LSF. This is not used for parallelism here, but might
    still be needed for licence servers.

    opts is a string of extra compilation options, substituted for
    <cmp_opts> in the commands.
    """
    logging.info("Compiling TB")
    substitutions = {'out': output_dir, 'cmp_opts': opts}
    for raw_cmd in compile_cmds:
        full_cmd = subst_vars(raw_cmd, substitutions)

        if lsf_cmd is not None:
            full_cmd = lsf_cmd + ' ' + full_cmd

        logging.debug("Compile command: %s" % full_cmd)

        # The compile steps depend on each other, so they are run serially
        # here rather than through run_parallel_cmd.
        run_cmd(full_cmd)
Exemple #4
0
 def _delete_from_registry(self):
     """
     Delete marked images from the registry by invoking the built-in
     docker-distribution garbage collector.

     Runs `registry garbage-collect` against the default
     docker-distribution config; no_shell is the inverse of
     self._verbose.
     """
     # NOTE(review): an identical re-definition of this method appears
     # just below and will shadow this one -- confirm which is intended.
     cmd = [
         "registry", "garbage-collect",
         "/etc/docker-distribution/registry/config.yml"
     ]
     lib.run_cmd(cmd, no_shell=not self._verbose)
 def _delete_from_registry(self):
     """
     Delete marked images from the registry by invoking the built-in
     docker-distribution garbage collector.

     Runs `registry garbage-collect` against the default
     docker-distribution config; no_shell is the inverse of
     self._verbose.
     """
     # NOTE(review): this duplicates the method defined immediately above;
     # in a class body this later definition wins -- confirm and drop one.
     cmd = [
         "registry",
         "garbage-collect",
         "/etc/docker-distribution/registry/config.yml"
     ]
     lib.run_cmd(cmd, no_shell=not self._verbose)
Exemple #6
0
def rtl_sim(sim_cmd, test_list, seed_gen, opts,
            output_dir, bin_dir, lsf_cmd):
    """Run the testbench in the simulator.

    sim_cmd is the base command (as returned by get_simulator_cmd). It still
    has placeholders for test-specific arguments. test_list is a list of
    test objects read from the testlist YAML file which gives the tests to
    run.

    seed_gen supplies the seed substituted into the simulation command
    (controls things like random delays on the bus). opts is a string of
    plusargs to give to the simulator.

    output_dir is the output directory for simulation files (and the
    directory in which the simulator gets run). bin_dir is the directory
    containing binaries to be run.

    If lsf_cmd is not None it is forwarded to get_test_sim_cmd, which
    prefixes it onto each command.
    """
    # NOTE: this rewrite removes an unterminated triple-quoted string and
    # mixed tab/space indentation from the original, both of which broke
    # the file from here on; the live behavior is unchanged.
    logging.info("Running RTL simulation...")

    sim_cmd = subst_vars(sim_cmd,
                         {
                             'out': output_dir,
                             'sim_opts': opts,
                             'cwd': _CORE,
                             'seed': str(seed_gen)
                         })

    for test in test_list:
        for i in range(test['iterations']):
            # get_test_sim_cmd returns a (desc, cmd, dirname) tuple -- the
            # original indexed it as cmd[0..2].
            _, cmd, dirname = get_test_sim_cmd(sim_cmd, test, i,
                                               output_dir, bin_dir, lsf_cmd)
            # Create the directory holding this iteration's simulation log.
            os.makedirs(dirname, exist_ok=True)
            cp_compiled_test(test, bin_dir, i)
            run_cmd(cmd, 300, check_return_code=True)
Exemple #7
0
def make_diamond_database(protein_file, dbfile=False, threads=False):
    """Build a DIAMOND database from protein_file and return its path.

    When dbfile is not given, the name is derived from protein_file by
    dropping its six-character extension and appending '.dmnd'. threads,
    when set, is passed to diamond via -p.
    """
    if not dbfile:
        dbfile = '%s.dmnd' % protein_file[:-6]
    cmd = ['diamond', 'makedb', '--in', protein_file, '-d', dbfile]
    if threads:
        cmd.extend(['-p', str(threads)])
    logger.debug(' '.join(cmd))
    _ = run_cmd(cmd)
    return dbfile
Exemple #8
0
    def __restore_metadata(self):
        """
        Restore the metadata (DDL) and, for a full restore, the global
        objects from the HDFS backup identified by self.backup_id.
        :return:
        """
        # Source the hawq executable path
        # NOTE(review): run_cmd presumably starts a fresh shell, so this
        # sourced environment may not persist into later commands -- confirm.
        self.logger.info("Source the hawq executable path")
        run_cmd("source $GPHOME/greenplum_path.sh")

        # Backup file names on HDFS, derived from the backup ID.
        ddl_file = self.metadata_backup_dir + '/hdb_dump_' + self.backup_id + '_ddl.dmp'
        global_file = self.metadata_backup_dir + '/hdb_dump_' + self.backup_id + '_global.dmp'

        # If request to create database then create it with default encoding if not provided.
        if self.create_target_db:
            create_db_cmd = 'createdb ' + self.to_dbname + ' -E ' + self.target_db_encoding
            run_cmd(create_db_cmd)

        # Metadata restore command creator
        pg_restore_cmd = self.__get_args("pg_restore", "--schema-only")
        pg_restore_cmd = ' '.join(pg_restore_cmd)
        # The DDL dump lives on HDFS; it is streamed into pg_restore via a pipe.
        metadata_file = 'hdfs dfs -cat ' + ddl_file + ' | '

        # If generate list is requested: only write the restore list to the
        # requested location, then exit without restoring anything.
        if self.generate_list:
            pg_restore_cmd += ' > ' + self.generate_list_location
            # NOTE(review): a bare $PIPESTATUS expands to the FIRST pipe
            # element's status in bash (the hdfs cat) -- confirm this
            # reports the intended command's failures.
            pg_restore_cmd = metadata_file + pg_restore_cmd + ' ; exit $PIPESTATUS;'
            run_cmd(pg_restore_cmd, self.ignore)
            self.logger.info(
                "Backup List for the backup ID \"{0}\" is generated at location: \"{1}\""
                .format(self.backup_id, self.generate_list_location))
            sys.exit(0)

        # Else then this a full restore or user list restore
        else:
            pg_restore_cmd = metadata_file + pg_restore_cmd + ' ; exit $PIPESTATUS;'
            run_cmd(pg_restore_cmd, self.ignore)

        # If full restore or if requested to restore the global dump then
        # replay the global dump with psql against the target database.
        if self.global_restore or not (self.generate_list or self.user_list):
            psql_cmd = 'psql -d ' + self.to_dbname + ' -f ' + global_file + ' -U ' + self.username
            run_cmd(psql_cmd)
Exemple #9
0
    def __backup_metadata(self):
        """
        Backup DDLs and metadata before dumping the data.
        :return
        """

        # Source the hawq executable path
        # NOTE(review): run_cmd presumably starts a fresh shell, so this
        # sourced environment may not persist into later commands -- confirm.
        self.logger.info("Source the hawq executable path")
        run_cmd("source $GPHOME/greenplum_path.sh")

        # Backup file names on HDFS, derived from the backup ID.
        ddl_file = self.metadata_backup_dir + '/hdb_dump_' + self.backup_id + '_ddl.dmp'
        global_file = self.metadata_backup_dir + '/hdb_dump_' + self.backup_id + '_global.dmp'

        # Metadata backup command creator
        pg_dump_cmd, pg_dumpall_cmd = self.__get_args("pg_dump",
                                                      "--schema-only",
                                                      "--format=c")
        pg_dump_cmd = ' '.join(pg_dump_cmd)
        # Stream the schema-only dump straight onto HDFS through a pipe.
        pg_dump_cmd += ' | hdfs dfs -put - {0}'.format(ddl_file)
        # NOTE(review): a bare $PIPESTATUS expands to the FIRST pipe
        # element's status in bash -- confirm this reports pg_dump failures.
        pg_dump_cmd += ' ; exit $PIPESTATUS;'
        self.logger.info(
            "Executing DDL backup, metadata backup file: \"{0}\"".format(
                ddl_file))
        run_cmd(pg_dump_cmd)

        # Global objects are dumped only when __get_args also returned a
        # pg_dumpall command.
        if pg_dumpall_cmd:
            pg_dumpall_cmd += ' | hdfs dfs -put - {0}'.format(global_file)
            pg_dumpall_cmd += ' ; exit $PIPESTATUS;'
            self.logger.info(
                "Executing global object backup, global backup file: \"{0}\"".
                format(global_file))
            run_cmd(pg_dumpall_cmd)
Exemple #10
0
def makeblastdb(infile, dbtype='prot', title=False, outfile=False):
    """Make an NCBI BLAST database from infile and return the db path.

    dbtype is forwarded to makeblastdb -dbtype (default 'prot'); title,
    when set, is forwarded via -title. When outfile is not given, the
    input filename without its extension is used.
    """
    current_path = os.getcwd()
    # Changing directory to prevent some errors. Fall back to '.' when
    # infile has no directory component: rpartition would yield '' and
    # os.chdir('') raises FileNotFoundError.
    os.chdir(infile.rpartition(os.sep)[0] or '.')
    try:
        if not outfile:
            outfile = infile.rpartition('.')[0]
        commands = ["makeblastdb", "-in", '"%s"' % infile,
                    "-dbtype", dbtype, "-out", outfile]
        if title:
            commands += ["-title", title]
        _ = run_cmd(commands)
    finally:
        # Always restore the caller's working directory, even when the
        # external command raises.
        os.chdir(current_path)
    return outfile
Exemple #11
0
def gen_cov(base_dir, simulator, lsf_cmd):
    """Generate a merged coverage directory.

    Args:
        base_dir:   the base simulation output directory (default: out/)
        simulator:  the chosen RTL simulator
        lsf_cmd:    command to run on LSF

    """
    # Every generated coverage database is a directory named "test.vdb";
    # walk the whole output tree and collect each one we find.
    vdb_dir_name = "test.vdb"
    dir_list = []
    for path, dirs, _ in os.walk(base_dir):
        if vdb_dir_name not in dirs:
            continue
        vdb_path = os.path.join(path, vdb_dir_name)
        logging.info("Found coverage database at %s" % vdb_path)
        dir_list.append(vdb_path)

    if not dir_list:
        logging.info("No coverage data available, exiting...")
        sys.exit(RET_SUCCESS)

    if simulator != 'vcs':
        logging.error("%s is an unsuported simulator! Exiting..." % simulator)
        sys.exit(RET_FAIL)

    # VCS: merge all databases into a single urg report under base_dir.
    cov_cmd = ("urg -full64 -format both -dbname test.vdb "
               "-report %s/rtl_sim/urgReport -dir" % base_dir)
    for cov_dir in dir_list:
        cov_cmd += " %s" % cov_dir
    logging.info("Generating merged coverage directory")
    if lsf_cmd is not None:
        cov_cmd = lsf_cmd + ' ' + cov_cmd
    run_cmd(cov_cmd)
Exemple #12
0
def run():
    """Provision a single controller node and run the kompose test suite."""
    controller = get_nodes(count=1)[0]

    setup_controller(controller)
    kompose_setup(controller)

    # Flush all iptables rules on the controller.
    run_cmd('iptables -F', host=controller)

    # Fetch the test repository and execute its run script, streaming output.
    run_cmd('git clone ' + str(repo_url) + ' /tmp/kompose-tests/',
            host=controller)
    run_cmd('cd /tmp/kompose-tests/ && ./run.sh', host=controller, stream=True)
Exemple #13
0
def setup_controller(controller):
    """Provision the controller host: install build/test packages, copy the
    docker settings script over and execute it there."""
    # provision controller
    run_cmd(
        "yum install -y git &&"
        "yum install -y gcc libffi-devel python-devel openssl-devel && "
        "yum install -y docker && "
        "yum install -y golang && "
        "yum install -y python-requests",
        host=controller,
        stream=True)

    # Copy the docker settings script to the controller; host key checks are
    # disabled because these are throwaway test nodes.
    run_cmd("scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "
            "docker_settings.sh root@%s:/tmp" % controller,
            stream=True)

    # ...then run it on the controller.
    run_cmd("/tmp/docker_settings.sh", host=controller, stream=True)
Exemple #14
0
    def __get_data_location(self):
        """
        Get all the schema and table names that this directory holds the
        backup for.
        :return: list of all relations that it has the backup
        """

        cmd = "hdfs dfs -ls " + self.data_backup_dir + '/*'
        output = run_cmd(cmd)
        backup_object_list = []
        for line in output.split('\n'):
            # Skip the "Found N items" header lines and anything blank.
            if (line.startswith('Found') and line.endswith('items')) \
                    or not line.strip():
                continue
            # Path layout is .../<schema>/<table>; quote both identifiers.
            parts = line.split('/')
            backup_object_list.append(
                '"' + parts[-2] + '"' + '.' + '"' + parts[-1] + '"')

        return backup_object_list
Exemple #15
0
def blast_worker(query, out, db_file, nr, queue, name, parse_function,
                 parse_function_args, evalue, outfmt, kwargs):
    """Run blastp on this worker's slice of the query and report via queue.

    Builds a blastp command for query against db_file, writing results to
    out; entries in kwargs are forwarded as extra '-<key> <value>'
    arguments. The BLAST run is skipped when out already exists. If
    parse_function is given, out (plus parse_function_args, when provided,
    with out prepended) is passed through it and the parsed result is put
    on queue; otherwise the output path itself is queued. nr identifies
    the worker in log messages.
    """
    # Blast this process' part of the query
    logger.debug('Blasting process %s' % nr)
    commands = ['blastp', '-query', query, '-db', db_file, '-out', out,
                '-evalue', str(evalue), '-outfmt', outfmt]
    # Idiomatic emptiness check (was: kwargs != {}).
    if kwargs:
        for key, value in kwargs.items():
            commands += ['-' + str(key), str(value)]
    if not os.path.isfile(out):
        _ = run_cmd(commands)
    else:
        logger.debug('%s already found. Not running BLAST' % out)
    if parse_function:
        logger.debug('Parsing function process: %s' % nr)
        if parse_function_args:
            parse_function_args = [out] + list(parse_function_args)
            res = parse_function(*parse_function_args)
        else:
            res = parse_function(out)
    else:
        res = out
    logger.debug('Process %s: blasting done!' % nr)
    queue.put(res)
Exemple #16
0
def run_diamond(protein_file, database, outname, replacetabs=False,
                tmpdir='/dev/shm/', maxhits=100, sens=False, moresens=False,
                threads=False, evalue=False, quiet=False):
    """Run 'diamond blastp' for protein_file against database.

    Writes a 12-column tabular result to outname. threads/evalue, when set,
    are forwarded to diamond; moresens takes precedence over sens for the
    sensitivity flag. With replacetabs, '|' characters in the output are
    rewritten to tabs into a sibling '.tab' file, which then becomes the
    result. Returns the (possibly rewritten) output filename.
    NOTE(review): quiet is currently unused -- confirm intent.
    """
    commands = ['diamond', 'blastp', '--query', protein_file,
                '--db', database, '--max-target-seqs', str(maxhits),
                '--out', outname, '--tmpdir', tmpdir, '--outfmt',
                '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch',
                'gapopen', 'qlen', 'slen', 'evalue', 'bitscore']
    if threads:
        commands += ['-p', str(threads)]
    if evalue:
        commands += ['--evalue', str(evalue)]
    if moresens:
        commands.append('--more-sensitive')
    elif sens:
        commands.append('--sensitive')

    _ = run_cmd(commands)

    if replacetabs:
        # Bug fix: the original referenced an undefined name 'out_clean'
        # (NameError); derive the cleaned base name from outname instead.
        out_clean = outname.rpartition('.')[0] or outname
        with open(outname) as f, open(out_clean + '.tab', 'w') as fout:
            for l in f:
                fout.write(l.replace('|', '\t'))
        outname = out_clean + '.tab'
    return outname
        elif opt in ('--qps',):
            qps_flag = 1
            qps_output = arg
        elif opt in ('-i',):
            input_file = arg
        elif opt in ('-e',):
            cdc.set_codec_id(arg.strip())
        elif opt[1] in help.get_opt():
            help.parse_opt(opt)
        else:
            assert False, "unknown option"

    if bits_flag == 1:
        cmd = cdc.get_bits_fcmd(input_file)
        #print cmd
        lib.run_cmd(cmd, bits_output,help.get_do_execute(),0)

    if qp_flag == 1:
        cmd = cdc.get_qp_fcmd(input_file)
        #print cmd
        lib.run_cmd(cmd, qp_output,help.get_do_execute(),0)
        #if help.get_do_execute()==1:
        #   os.system(cmd)


#obtain_data()
#parse_cl()
#cdc=Codec_analysis("ashevcd")

#input_file="F:\\tmp\\2015.04.13\\cons.log"
#output_str=cdc.get_qp_vals(lib.format_file_path("F:\\tmp\\2015.04.13\\cons.log"))
Exemple #18
0
    qp_output = ''
    qps_output = ''
    for opt, arg in opts:
        if opt in ('--bits', '-b'):
            bits_flag = 1
            bits_output = arg
        elif opt in ('--qp', '-q'):
            qp_flag = 1
            qp_output = arg
        elif opt in ('--qps', ):
            qps_flag = 1
            qps_output = arg
        elif opt in ('-i', ):
            input_file = arg
        elif opt in ('-e', ):
            cdc.set_id(arg.strip())
        #elif opt[1] in help.get_opt():
        #    help.parse_opt(opt)
        else:
            help.parse_opt(opt)

    if bits_flag == 1:
        cmd = cdc.get_bits_cmd(input_file)
        lib.run_cmd(cmd, help.get_do_execute(), bits_output)

    if qp_flag == 1:
        cmd = cdc.get_qp_cmd(input_file)
        lib.run_cmd(cmd, help.get_do_execute(), qp_output)
        #if help.get_do_execute()==1:
        #   os.system(cmd)
Exemple #19
0
def run_hmmsearch(in_path, out_path, thr, hmm_db_path, outfile_base='domains'):
    """Run hmmsearch on in_path against hmm_db_path with thr CPUs, writing
    .out/.tbl/.domtbl result files named after outfile_base under out_path."""
    base = out_path + outfile_base
    command = ['hmmsearch',
               '-o', base + '.out',
               '--tblout', base + '.tbl',
               '--domtblout', base + '.domtbl',
               '--acc', '--cut_ga',
               '--cpu', str(thr),
               hmm_db_path, in_path]
    _ = run_cmd(command)
Exemple #20
0
# param_list['encder_exe']=lib.get_encoder_exe()
#param_list['output_path']="F:/encoder_test_output/as265_output/"
#param_list['input_path']="E:/sequences/"
#param_list['i_frame_num_to_encode']=100

#param_list['input_filename']="BlowingBubbles_416x240_50.yuv"
#seq_name="BlowingBubbles_416x240_50"
#param_list['e_rctype']=1

# Default console log name; configure_enc_param below may replace it.
cons = "tmp_cons.log"
#if len(sys.argv)> 1:
cons, extra_cls,do_execute = lib.configure_enc_param(enc, param_list)

# Build the full encoder/decoder command line and append any extra
# command-line switches returned by the configuration step.
cmd_line = lib.get_full_cdec_cmd(enc, param_list)
cmd_line += " " + extra_cls

#reg_file = None
#if lib.determin_sys() == "cygwin":
#    cmd_line += (" 2>&1 |tee -a " + cons)
#    pf = open(cons, "w")
#    print >> pf, "%s" % cmd_line
#    pf.close()
#else:
#    reg_file = open(cons, 'w')
#
##os.system(cmd_line)
#print cmd_line
#subprocess.call(cmd_line, shell=True, stdout=reg_file, stderr=reg_file)
# NOTE(review): argument order here is (cmd, do_execute, cons, 1);
# confirm against lib.run_cmd's signature.
lib.run_cmd(cmd_line,do_execute,cons,1)

Exemple #21
0
# Fixed encoder parameters for this sweep.
param_list['i_aq_mode'] = 0
param_list['i_lookahead'] = 10
param_list['b_cutree'] = 0
param_list['i_lowres'] = 1

# Encoders to compare; commented variants limit the sweep to one encoder.
encoder_list = ("as265", "x265")
# encoder_list = ("as265", )
# encoder_list = ("x265", )
#lib.Encoder_prop.SET_PATH("as265","")
#lib.Encoder_prop.SET_PATH("x265","")

lib.remove_some_tmp_files(param_list['output_path'])

# NOTE(review): do_execute is 0, so run_cmd presumably only prints the
# commands instead of executing them -- confirm against lib.run_cmd.
do_execute = 0
# Sweep every (encoder, sequence, bitrate) combination.
for encoder_id in encoder_list:
    enc.set_id(encoder_id)
    for seq_name in seq_list:
        for bitrate in br_list:
            # Tag output files with the encoder and bitrate of this run.
            tag_str = "_" + encoder_id + "_bitrate" + str(bitrate)
            lib.configure_seq_param(param_list, seq_name, tags=tag_str)
            lib.check_params(param_list)
            lib.set_rc_full_param(param_list, bitrate)
            cmd = lib.get_full_cdec_cmd(enc, param_list)
            print cmd
            #os.system(cmd)
            # Console log path for this run's output.
            reg_file_name = param_list[
                'output_path'] + seq_name + tag_str + "_cons.log"
            #regression_file = open(reg_file_name, 'w')
            #subprocess.call(cmd, stdout=regression_file, shell=True)
            lib.run_cmd(cmd, do_execute, reg_file_name, 1)
Exemple #22
0
def host_clean_up(controller):
    """Remove the kompose binary and the test checkout from the controller."""
    cleanup_cmd = "rm -rf /usr/bin/kompose && rm -rf /tmp/kompose-tests"
    run_cmd(cleanup_cmd, host=controller)
Exemple #23
0
def buildAndTest(submissionpath, sourceTestPath, no_remove):
    """Build the student's simpleC compiler in submissionpath with make,
    run it on every *.simplec case under sourceTestPath, diff the output
    against the matching *.ast ground truth, and return (points, output).

    Unless no_remove is set, stale *.o files are deleted before building.
    """

    points = 0
    script_path = os.path.dirname(os.path.realpath(__file__))

    # create temporary directory so that previous students' results will not affect subsequent tests
    testCasePath = sourceTestPath

    testCases = glob.glob(os.path.join(testCasePath, "*.simplec"))
    #print(f"testCases {testCases}")
    if not no_remove:
        for i in glob.glob(os.path.join(submissionpath, "*.o")):
            if os.path.exists(i):
                os.remove(i)
    progname = os.path.join(submissionpath, "simplec")
    if os.path.exists(progname):
        os.remove(progname)

    if len(testCases) == 0:
        print("# no tests found.  double-check your path: " + testCasePath)
        sys.exit()

    # NOTE(review): this duplicates the progname removal above -- confirm
    # whether both are needed.
    if os.path.exists(submissionpath + "/simplec"):
        os.remove(submissionpath + "/simplec")
    out = subprocess.run(['make'],
                         cwd=submissionpath,
                         stdout=subprocess.DEVNULL,
                         stderr=subprocess.DEVNULL)

    output = ""
    err = ""
    if out.returncode != 0:
        output += "Make failed."
        print(output +
              " Do you have a Makefile?")  # can't even compile the compiler
        return 0, output
    else:
        print("Build succeeded!")
        points += build_points  # points to build. tentative

    # simpleC compilers lives so lets go through every test case now
    for case in testCases:
        base_name = os.path.basename(case)
        ground_truth = case.replace(".simplec", ".ast")
        output_file = base_name.replace(".simplec", ".out")
        diff_file = base_name.replace(".simplec", ".diff")
        print(f"Testing {base_name}:", end=" ")

        # Run the student compiler on the case, then diff against the
        # ground truth (ignoring whitespace and blank lines).
        with cd(submissionpath):
            cmd = f"cat \"{case}\" | ./simplec > {output_file}"
            #print(f"Running command: {cmd}")
            return_code, stdout_, stderr_ = run_cmd(cmd)
            cmd = f"diff -w -B \"{ground_truth}\" {output_file}"
            #print(f"Running command: {cmd}")
            return_code, stdout_, stderr_ = run_cmd(cmd, False)
            if return_code == 0 and len(stdout_) == 0:
                print("Success!")
                points += test_case_points
            if return_code == 1 and len(stdout_) > 0:
                print(
                    f"Failure. See {diff_file} for diff and {output_file} for output."
                )
                diff_out = open(diff_file, "w")
                diff_out.write(stdout_)
                diff_out.close()

                # NOTE(review): the diff is re-run here and its result
                # overwrites the earlier one -- confirm this is intended.
                return_code, stdout_, stderr_ = run_cmd(cmd, False)
            if return_code > 1:
                print(
                    f"diff exited with an unknown return code. This shouldn't happen. Here is the stderr: {stderr_}"
                )
    print(
        f"{int((points - build_points) / test_case_points)} / {len(testCases)} test casing passing. "
    )
    return points, output
Exemple #24
0
# Start from the library's default encoder parameter set.
param_list = lib.get_default_enc_param_list()
# param_list['encder_exe']=lib.get_encoder_exe()
#param_list['output_path']="F:/encoder_test_output/as265_output/"
#param_list['input_path']="E:/sequences/"
#param_list['i_frame_num_to_encode']=100

#param_list['input_filename']="BlowingBubbles_416x240_50.yuv"
#seq_name="BlowingBubbles_416x240_50"
#param_list['e_rctype']=1

# Default console log name; configure_enc_param below may replace it.
cons = "tmp_cons.log"
#if len(sys.argv)> 1:
cons, extra_cls, do_execute = lib.configure_enc_param(enc, param_list)

# Build the full encoder/decoder command line and append any extra
# command-line switches returned by the configuration step.
cmd_line = lib.get_full_cdec_cmd(enc, param_list)
cmd_line += " " + extra_cls

#reg_file = None
#if lib.determin_sys() == "cygwin":
#    cmd_line += (" 2>&1 |tee -a " + cons)
#    pf = open(cons, "w")
#    print >> pf, "%s" % cmd_line
#    pf.close()
#else:
#    reg_file = open(cons, 'w')
#
##os.system(cmd_line)
#print cmd_line
#subprocess.call(cmd_line, shell=True, stdout=reg_file, stderr=reg_file)
# NOTE(review): argument order here is (cmd, do_execute, cons, 1);
# confirm against lib.run_cmd's signature.
lib.run_cmd(cmd_line, do_execute, cons, 1)
Exemple #25
0
        extra_cmd += ' -vf scale=%s:%s' % (width, height)

    if len(output_tag) == 0:  # =='':
        output_tag = "out"

    cmd_list = []
    output_list = []
    for input_file in input_list:
        print input_file
        if not os.path.isfile(input_file):
            logging.error('"%s" is not a file, but is in the input_list' %
                          input_file)
        cmd_line, output_file = get_cmd_line(input_file, extension,
                                             output_path, output_tag,
                                             prepared_cmd, extra_cmd)
        print cmd_line
        cmd_list.append(cmd_line)
        output_list.append(output_file)
    if len(merged_file) > 0:
        concat_str = "|".join(output_list)
        print concat_str
        cmd_line = FFMPEG_BIN
        cmd_line += ' -i "concat:%s" -c copy -bsf:a aac_adtstoasc %s' % (
            concat_str, merged_file)
        print cmd_line
        cmd_list.append(cmd_line)

    logging.info('FINAL COMMANDS:')
    for cmd in cmd_list:
        lib.run_cmd(cmd, help.get_do_execute())
Exemple #26
0
def kompose_setup(controller):
    """Install the kompose binary and the oc client on the controller, then
    restart the docker service there."""
    _print("Installing the kompose binary on the controller")
    run_cmd(
        "export GOPATH=/usr && go get github.com/kubernetes-incubator/kompose",
        host=controller)

    _print("Installing the oc binaries on the controller")
    # NOTE(review): picking assets[2] of the latest origin release is
    # fragile -- GitHub does not guarantee asset ordering; confirm.
    oc_download_url = requests.get(
        "https://api.github.com/repos/openshift/origin/releases/latest").json(
        )['assets'][2]['browser_download_url']
    run_cmd("yum install wget -y", host=controller)
    run_cmd("wget " + oc_download_url + " -O /tmp/oc.tar.gz", host=controller)
    run_cmd("mkdir /tmp/ocdir && cd /tmp/ocdir && tar -xvvf /tmp/oc.tar.gz",
            host=controller)
    # The tarball unpacks into a versioned directory; copy oc from it.
    run_cmd("cp /tmp/ocdir/*/oc /usr/bin/", host=controller)
    run_cmd("service docker restart", host=controller)