Example #1
def sketchupl(firmware):
    sketchpath = os.path.join(sdir, "sketches", firmware)
    if os.path.exists(sketchpath):
        errorkey = int(time.time())
        cuser = g.user.nickname
        if host == "win":
            output = subprocess.call(
                [rfpath, "-h", host, "-s", sketchpath, "-r", "-e", str(errorkey), "-c", cuser], shell=True
            )
            print "********************************************************************"
            print output
            print "********************************************************************"
        else:
            output = subprocess.call(
                ["%s -h %s -s %s -r -e %s -c %s" % (rfpath, host, sketchpath, errorkey, cuser)], shell=True
            )
            print "********************************************************************"
            print output
            print "********************************************************************"
        if output == 0:
            print "Subprocess call complete with " + str(output) + " errors"
            return messagereturn(cuser, errorkey, None, "fullcycle")
        else:
            print "Error uploading firmware to devices"
            print "ERROR 4: Subprocess call complete with " + str(output) + " errors"
            return messagereturn(cuser, errorkey, None, "fullcycle")
    else:
        # Firmware specified does not exist, explicitly handled through messagereturn
        print sketchpath
        return messagereturn(None, None, 34, None)
def runPkcombu(a_path, b_path, oam_path):
    # Open devnull in a with-block so the handle is closed after the call.
    with open(os.devnull, 'w') as FNULL:
        cmds = ["pkcombu",
                "-A", a_path,
                "-B", b_path,
                "-oam", oam_path]
        subprocess32.call(cmds, stdout=FNULL, stderr=subprocess32.STDOUT)
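A minimal usage sketch, assuming the pkcombu binary is on the PATH; the file names below are placeholders:

# Hypothetical inputs for illustration only.
runPkcombu("ligand_a.pdb", "ligand_b.pdb", "alignment.oam")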
Example #3
def trimc_trim(file_fw, file_rv, outdir, trimc_dir=None):
	if not trimc_dir: trimc_dir = '/home/anna/bioinformatics/bioprograms/Trimmomatic-0.32/'
	trim_out = outdir + 'trim_out/'
	if not os.path.exists(trim_out):
		os.makedirs(trim_out)

	trimlog = trim_out +'trimlog'
	paired_out_fw = trim_out + 'paired_out_fw' + '.fastq'
	unpaired_out_fw = trim_out + 'unpaired_out_fw' + '.fastq'
	paired_out_rv = trim_out + 'paired_out_rv' + '.fastq'
	unpaired_out_rv = trim_out + 'unpaired_out_rv' + '.fastq'

	adapters_file = trimc_dir + 'adapters/'+ "illumina.fasta"

	trimmomatic = ['java', '-jar', trimc_dir + 'trimmomatic-0.32.jar']
	# trim_options = ['PE', '-threads', str(THREADS), '-phred33', '-trimlog', trimlog, file_fw, file_rv, 
	# 				paired_out_fw, unpaired_out_fw, paired_out_rv, unpaired_out_rv,
	# 				'ILLUMINACLIP:'+ adapters_file + ':2:15:15:8:true', 'LEADING:3', 'TRAILING:3', 'SLIDINGWINDOW:4:5',  
	# 				'MAXINFO:200:0.2', 'MINLEN:5' ] 
	trim_options = ['PE', '-threads', str(THREADS), '-phred33', '-trimlog', trimlog, file_fw, file_rv, 
				paired_out_fw, unpaired_out_fw, paired_out_rv, unpaired_out_rv, 'TRAILING:3', 'SLIDINGWINDOW:4:25', 'MINLEN:100' ] 

	trim = trimmomatic + trim_options
	print ' '.join(trim)
	call(trim)
	return trim_out
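A hedged usage sketch; the read files are placeholders, and outdir must end with a slash because the function builds paths by string concatenation:

# Placeholder file names; THREADS must already be defined at module level.
trim_dir = trimc_trim('reads_fw.fastq', 'reads_rv.fastq', 'out/')
print trim_dir  # -> 'out/trim_out/'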
Example #4
def timeout_func():
    self.logger.debug("%s: time limit exceeded; killing docker container" % (user,))
    ran_to_completion[0] = False
    try:
        subprocess.call(['docker', 'rm', '-f', docker_name])
    except Exception:
        pass
Example #5
def spades_assemble(outdir, test=None, reads=None, file_fw=None, file_rv=None, spades_dir=None, bbduk_out=None, trimc_out=None, RAM=6):
	if not spades_dir: spades_dir = '/home/anna/bioinformatics/bioprograms/SPAdes-3.1.1-Linux/bin/'

	spades_out = outdir + 'spades_out/'
	spades = spades_dir + 'spades.py'

	if test:
		spades_assemble = [spades, '--test']  # Test the SPAdes installation

	elif trimc_out:
		files = {'PEfw': 'paired_out_fw.fastq', 'PErv': 'paired_out_rv.fastq',
				 'UPfw': 'unpaired_out_fw.fastq', 'UPrv': 'unpaired_out_rv.fastq'}
		for key in files:
			files[key] = trimc_out + files[key]
		# '-m' and its value must be separate argv elements, and the command
		# only needs to be built once, after all paths are expanded.
		spades_options = ['-1', files['PEfw'], '-2', files['PErv'], '-s', files['UPfw'], '-s', files['UPrv'],
						  '-o', spades_out, '-m', str(RAM), '--careful']
		spades_assemble = [spades] + spades_options

	elif file_fw and file_rv:
		spades_options = ['-o', spades_out, '-m', str(RAM), '--careful', '--only-assembler']
		spades_assemble = [spades, '-1', file_fw, '-2', file_rv] + spades_options

	elif reads:
		spades_options = ['-o', spades_out, '-m', str(RAM), '--only-assembler']
		spades_assemble = [spades, '-s', reads] + spades_options

	else:
		print "Error: spades_assemble did not receive the input files it needs"
		return None

	if not os.path.exists(spades_out): os.makedirs(spades_out)
	call(spades_assemble)

	return spades_out
def run_apoc(self):
    subprocess32.call(["mkdir", "-p", self._mydir()])
    paths = [_.output().path for _ in self.requires()]
    cmd = [APOC_BIN] + paths
    print " ".join(cmd)
    stdout = subprocess32.check_output(cmd)
    return stdout
Example #7
def test_case(testname, timeout, prog_name, checker):
  import subprocess32
  try:
    q=subprocess32.call(
                          ("./"+prog_name,),
                          stdin=open(testname+".in", "r"),
                          stdout=open(testname+".test", "w"),
                          timeout=timeout
                        )
  except:
    return (testname, 2)
  if q!=0:
    return (testname, "program returned code {}.".format(q))
  if checker=="diff":
    return (
            testname,
            subprocess32.call(
                                ("diff", "-qb", testname+".out", testname+".test"),
                                stdout=open("/dev/null", "w")
                              )
            )
  else:
    return (
            testname,
            subprocess32.call(
                                ("./"+checker, testname+".out", testname+".test", testname+".in"),
                                stdout=open("/dev/null", "w")
                              )
            )
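The bare open() calls above leak file descriptors; a sketch of the same invocation wrapped in with-blocks, under the same .in/.test naming convention:

with open(testname + ".in", "r") as fin:
    with open(testname + ".test", "w") as fout:
        q = subprocess32.call(("./" + prog_name,), stdin=fin, stdout=fout, timeout=timeout)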
Example #8
    def run(self, logdir):
        """
        Execute the command as a subprocess and log its output in logdir.

        :param logdir: Path to a log directory.
        """
        env = os.environ.copy()
        if "PATH" not in env:
            env["PATH"] = "/usr/bin:/bin"
        locale = settings.get_value("sysinfo.collect", "locale", str, None)
        if locale:
            env["LC_ALL"] = locale
        logf_path = os.path.join(logdir, self.logf)
        stdin = open(os.devnull, "r")
        stdout = open(logf_path, "w")
        try:
            subprocess.call(self.cmd, stdin=stdin, stdout=stdout,
                            stderr=subprocess.STDOUT, shell=True, env=env)
        finally:
            for f in (stdin, stdout):
                f.close()
            if self._compress_log and os.path.exists(logf_path):
                process.run('gzip -9 "%s"' % logf_path,
                            ignore_status=True,
                            verbose=False)
Example #9
def perform_git_install(use_pyqt5):
    """
    Performs a git-based install.
    """
    if not IS_ROOT:
        root_warning()
    if use_pyqt5:
        if PYTHON3_OK:
            run_cmd = ("make", "PYQT=5", "PYTHON=python3")
        else:
            run_cmd = ("make", "PYQT=5")
    else:
        if PYTHON3_OK:
            run_cmd = ("make", "PYTHON=python3")
        else:
            run_cmd = ("make",)  # one-element tuple; ("make") would be a plain string
    try:
        code = subprocess.call(run_cmd)
    except OSError as errmsg:
        if errmsg.errno == errno.ENOENT:
            print("\nError: 'make' not found. It's either not installed or "
                  "not in the PATH environment variable like expected.")
            return
        raise
    if code == 0:
        print("\nCustomizer has been built from git.")
    else:
        print("\nCustomizer could not build properly. If this is caused"
              " by edits you have made to the code you can try the repair"
              " option from the Maintenance submenu")
    if not IS_ROOT:
        code = subprocess.call(("sudo", "make", "install"))
        if code == 0:
            print("\nCustomizer has been installed from git.")
        else:
            print("The installation has failed.")
Example #10
    def run_experiment(self, timeout, filename):
        status, value, db = self.get_status(filename)
        if status == Experiment.DONE: return
        if status == Experiment.TIMEOUT and value >= int(timeout): return

        # remove output and timeout files
        if os.path.isfile(filename): os.unlink(filename)
        timeout_filename = "{}.timeout".format(filename)
        if os.path.isfile(timeout_filename): os.unlink(timeout_filename)

        print("Performing {}... ".format(self.name), end='')
        sys.stdout.flush()

        try:
            with open(filename, 'w+') as out:
                call(self.call, stdout=out, stderr=out, timeout=timeout)
        except KeyboardInterrupt:
            os.unlink(filename)
            print("interrupted!")
            sys.exit()
        except OSError:
            os.unlink(filename)
            print("OS failure! (missing executable?)")
            sys.exit()
        except TimeoutExpired:
            with open(timeout_filename, 'w') as to: to.write(str(timeout))
            print("timeout!")
        else:
            status, value, db = self.get_status(filename)
            if status == Experiment.DONE: print("done; {}!".format(value))
            else: print("not done!")
        time.sleep(2)
Example #11
def timeout_func():
    self.logger.debug("%s: time limit exceeded; killing docker container" % (user,))
    ran_to_completion[0] = False
    try:
        subprocess.call('docker rm -f %s' % (docker_name,), shell=True)
    except Exception:
        pass
    def superpose(self):
        """superpose and copy the referenced protein and ligand
        """
        result = json.loads(QueryVinaResultOnBioLipFixedPocket(
            self.lig_pdb).output().open('r').read())
        dset = read2Df(result, self.lig_pdb)
        dset = similarPocketsLigands(clean(dset), minimum_Tc=0.2)
        work_dir = os.path.join(self.workdir(), 'superpose')
        try:
            os.makedirs(work_dir)
        except Exception:
            pass

        mob_pdb = self.append_ligand()
        for template_pdb in dset.index:
            ref_pdb = Path(template_pdb).prtPdb
            lig_code = Path(template_pdb).lig_code
            ref_lig = Path(lig_code).ligPdb() + '.pdb'
            sup_pdb = os.path.join(work_dir, lig_code + '.sup.pdb')
            cmd = shlex.split("perl %s all %s %s %s" %
                              (self.superpose_perl, ref_pdb, mob_pdb, sup_pdb))
            subprocess32.call(cmd)

            shutil.copy(ref_pdb, work_dir)
            shutil.copy(ref_lig, work_dir)
Example #13
def do_cmd(cmd, tmp_file, cargs):

    out = []

    if cargs.verbose:
        print "    CMD: %s" % cmd

    fh = None
    if tmp_file:
        fh = open(tmp_file, 'w')
    else:
        fh = open(os.devnull, 'w')

    if cargs.verbose and not tmp_file:
        subprocess.call(cmd, stdin=None, shell=True)
    else:
        subprocess.call(cmd, stdin=None,
                stdout=fh, stderr=subprocess.STDOUT, shell=True)

    fh.close()

    if tmp_file:
        with open(tmp_file, 'r') as f:
            for line in f:
                out.append(line.rstrip('\n'))

    return out
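A hedged usage sketch: do_cmd only reads cargs.verbose, so any object with that attribute works; a namedtuple stands in here, and the command and tmp file are placeholders:

from collections import namedtuple

Cargs = namedtuple('Cargs', ['verbose'])
lines = do_cmd('ls -1 /tmp', '/tmp/do_cmd.out', Cargs(verbose=False))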
Example #14
def execute_link(link_cmd_args, record_byproducts):
  """
  <Purpose>
    Executes the passed command plus arguments in a subprocess and returns
    the return value of the executed command. If specified, the standard
    output and standard error of the command are recorded and also returned
    to the caller.

  <Arguments>
    link_cmd_args:
            A list where the first element is a command and the remaining
            elements are arguments passed to that command.
    record_byproducts:
            A bool that specifies whether to redirect standard output and
            standard error to a temporary file which is returned to the
            caller (True) or not (False).

  <Exceptions>
    TBA (see https://github.com/in-toto/in-toto/issues/6)

  <Side Effects>
    Executes passed command in a subprocess and redirects stdout and stderr
    if specified.

  <Returns>
    - A dictionary containing standard output and standard error of the
      executed command, called by-products.
      Note: If record_byproducts is False, the dict values are empty strings.
    - The return value of the executed command.
  """
  # XXX: The first approach only redirects the stdout/stderr to a tempfile
  # but we actually want to duplicate it, ideas
  #  - Using a pipe won't work because processes like vi will complain
  #  - Wrapping stdout/stderr in Python does not work because the subprocess
  #    will only take the fd and then uses it natively
  #  - Reading from /dev/stdout|stderr, /dev/tty is *NIX specific

  # Until we come up with a proper solution we use a flag and let the user
  # decide if s/he wants to see or store stdout/stderr
  # btw: we ignore them in the layout anyway

  if record_byproducts:
    # XXX: Use SpooledTemporaryFile if we expect very large outputs
    stdout_file = tempfile.TemporaryFile()
    stderr_file = tempfile.TemporaryFile()

    return_value = subprocess.call(link_cmd_args,
        stdout=stdout_file, stderr=stderr_file)

    stdout_file.seek(0)
    stderr_file.seek(0)

    stdout_str = stdout_file.read()
    stderr_str = stderr_file.read()

  else:
    return_value = subprocess.call(link_cmd_args)
    stdout_str = stderr_str = ""

  return {"stdout": stdout_str, "stderr": stderr_str}, return_value
Example #15
    def run(self):
        null_file = open('/dev/null', 'w')
        logging.info("Starting FFmpeg: %s" % Config.FFMPEG_COMMAND)
        subprocess32.call(Config.FFMPEG_COMMAND, stdout=null_file, stderr=null_file)
        null_file.close()
        logging.info("FFmpeg ended!")
Example #16
def cli(force):
    """
    Update AerisCloud
    """
    if not force and config.get('github', 'enabled', default=False) == 'true':
        client = Github().gh
        repo = client.repository('aeriscloud', 'aeriscloud')
        latest_release = repo.iter_releases().next()
        latest_version = latest_release.tag_name[1:]

        if semver.compare(version, latest_version) != -1:
            click.secho('AerisCloud is already up to date!', fg='green')
            sys.exit(0)

        click.echo('A new version of AerisCloud is available: %s (%s)' % (
            click.style(latest_version, fg='green', bold=True),
            click.style(latest_release.name, bold=True)
        ))

    # retrieve install script in a tmpfile
    tmp = tempfile.NamedTemporaryFile()
    r = requests.get('https://raw.githubusercontent.com/' +
                     'AerisCloud/AerisCloud/develop/scripts/install.sh')
    if r.status_code != 200:
        fatal('error: update server returned %d (%s)' % (
            r.status_code, r.reason))

    tmp.write(r.content)
    tmp.flush()

    os.environ['INSTALL_DIR'] = aeriscloud_path
    call(['bash', tmp.name])

    tmp.close()
Example #17
def settings(data):
    config_path = data['home_dir'] + '/.ssh/config'
    call(['touch', config_path])
    if os.path.isfile(config_path):
        config_file = open(config_path, 'wt')
        config_file.write('Host *\n\tStrictHostKeyChecking no\n')
        config_file.close()
Example #18
def abacas_scaffold(contigs, reference, outdir, abacas_dir=None):
	# abacas_dir must be the directory; 'abacas.pl' is appended below
	if not abacas_dir: abacas_dir = '/home/anna/bioinformatics/bioprograms/Abacas/'
	abacas_out = outdir + 'abacas_out/'
	abacas = ['perl', abacas_dir + 'abacas.pl']
	abacas_options = ['-r', reference, '-q', contigs, '-b', '-c', '-m', '-p', 'nucmer']
	call(abacas + abacas_options)
	return abacas_out
Example #19
def test_app_is_uninstalled(appiumSetup, moduleSetup, testSetup):
    print 'in test app uninstalled'
    #setup.setup_appium_driver(app=config.app)
    # time.sleep(5)
    subprocess32.call(['adb', 'uninstall', config.app_package])
    # adb forwards the pipe to the device shell; grep exits non-zero when it
    # finds nothing, which makes check_output raise, so treat that as "absent".
    try:
        app_package = subprocess32.check_output(
            ['adb', '-s', config.udid, 'shell',
             'pm list packages | grep ' + config.app_package])
    except subprocess32.CalledProcessError:
        app_package = ''
    assert not app_package, 'app is not uninstalled, %s still present' % config.app_package
Example #20
def run_command(self, thefile):
    new_command = []
    for item in self.command:
        if item == "%f":
            item = thefile
        new_command.append(item)
    subprocess.call(new_command)
Example #21
def up_node3():
    subproc.check_call(['vagrant', 'destroy', '-f', 'node3'])
    subproc.check_call(['vagrant', 'up', 'node3'])
    yield "node3 is ready"

    print("Destroying node3...")
    subproc.call(['vagrant', 'destroy', '-f', 'node3'])
    print("Node3 is destroyed.")
Example #22
def launch_folder(full_path, cleanup=False):
    if not cleanup:
        subprocess.call(['open', '-a', 'Finder', full_path], close_fds=True)
    else:
        open_folder_in_finder(full_path, cleanup)
Example #23
def up_node1():
    subproc.check_call(['vagrant', 'destroy', '-f', 'node1'])
    subproc.check_call(['vagrant', 'up', 'node1', '--no-provision'])
    yield "node1 is ready"

    print("Destroying node1...")
    subproc.call(['vagrant', 'destroy', '-f', 'node1'])
    print("Node1 is destroyed.")
Example #24
def use_fuzznuc(reads, pattern, outdir, max_mismatch=0, indels=False, name=''):
	fuzznuc_file = outdir + 'fuzznuc_report' + name
	fuzznuc = ['fuzznuc', '-sequence', reads, '-pattern', pattern, '-outfile', fuzznuc_file]
	fuzznuc_options = ['-pmismatch', str(max_mismatch), '-complement', '-snucleotide1', '-squick1', 
					   '-rformat2', 'excel', '-stdout']
	fuzznuc = fuzznuc + fuzznuc_options
	call(fuzznuc)
	return fuzznuc_file
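A hedged usage sketch; fuzznuc comes from the EMBOSS suite and must be on the PATH, and the reads file and pattern are placeholders:

report = use_fuzznuc('reads.fastq', 'GATC', 'out/', max_mismatch=1, name='_gatc')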
Example #25
def prokka_annotate(sequence, outdir, prokka_dir=None):
	if not prokka_dir: prokka_dir = '/home/anna/bioinformatics/bioprograms/prokka-1.10/bin/'
	prokka_out = outdir + 'prokka_out/'
	prokka = prokka_dir + './prokka'
	prokka_options = ['--centre', 'XXX', '--kingdom', 'Bacteria',  '--gram', 'neg', '--addgenes',  '--outdir', prokka_out, '--force', sequence, '--genus', 'Pandoraea']
	prokka_annotate = [prokka] + prokka_options
	call(prokka_annotate)
	return prokka_out
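A hedged usage sketch; the contigs file is a placeholder and outdir is assumed to end with a slash:

annotation_dir = prokka_annotate('contigs.fasta', 'out/')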
Example #26
def gen_correct(testname, correct_prog):
  import subprocess32
  subprocess32.call(
                  ("./"+correct_prog,),
                  stdin=open(testname+".in", "r"),
                  stdout=open(testname+".out", "w"),
                  )
  return testname
Example #27
def use_quast(contigs, outdir, reference=None, quast_dir=None):
	if not quast_dir: quast_dir = '/home/anna/bioinformatics/bioprograms/quast-2.3/'
	quast_out = outdir + 'quast_out/'
	quast = quast_dir + './quast.py'
	quast_options = ['-o', quast_out]
	if reference: quast_options = quast_options + ['-R', reference]
	use_quast = [quast] + quast_options + [contigs]
	call(use_quast)
	return quast_out
Example #28
def compile(source_file, target_file, lazy=True):

    compile_return = 0
    if os.path.splitext(source_file)[1] == '.c':
        compile_return = subprocess.call(["gcc", "-std=c99", source_file, "-o", target_file])
    else:
        compile_return = subprocess.call(["g++", "-std=c++11", source_file, "-o", target_file])

    return compile_return
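A brief usage sketch (source and target paths are placeholders):

rc = compile('solution.c', 'solution')
if rc != 0:
    print "compilation failed with exit code %d" % rc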
Example #29
def upload_image(self, filename):
    """Upload the specified image to imgur."""
    self.log.debug("calling imgurbash.sh %s", filename)
    # TODO use pure python here
    # Note: the timeout kwarg requires subprocess32 (or Python 3).
    subprocess.call([
        'bash',
        './imgurbash.sh',
        filename
    ], timeout=5)
Example #30
def run_command(self, thefile):
    if self.run_once:
        if os.path.exists(thefile) and os.path.getmtime(thefile) < self.last_run:
            return
    new_command = []
    for item in self.command:
        new_command.append(item.replace('%f', thefile))
    subprocess.call(new_command, shell=(len(new_command) == 1))
    self.last_run = time.time()
Example #31
if len(sys.argv) < 2:
	print "need order (or min and max order) of cases to generate"
	quit()

verbose = False

n1 = int(sys.argv[1])
if len(sys.argv) > 2:
	n2 = int(sys.argv[2])
else:
	n2 = n1

inname = "input/comp/%s.in"
assumname = "input/comp/%s.assum"
archivename = "archives/comp/%s.zip"
compname = "input/comp/%s.comp"

if not os.path.exists("input"): call(["mkdir", "input"])
if not os.path.exists("input/comp"): call(["mkdir", "input/comp"])
if not os.path.exists("archives"): call(["mkdir", "archives"])
if not os.path.exists("archives/comp"): call(["mkdir", "archives/comp"])

def runstr(n, c, k):
	if k == -1 and c == -1:
		return "%d" % n
	elif k == -1:
		return "%d.%d" % (n, c)
	elif k == "*" and c == "*":
		return "%d.*.*" % n
	elif k == "*":
		return "%d.%d.*" % (n, c)
	else:
		return "%d.%d.%d" % (n, c, k)
Example #32
def test_exit_unknown_option(arg):
    if platform.system() == 'Windows':
        tool_name = TOOL + '.exe'
    else:
        tool_name = TOOL
    assert subprocess.call([tool_name, arg]) == 1
Example #33
    def kill_job(self):
        """
        Attempt to kill a job using qdel.
        """

        return call(self.qdel_cmd)
Example #34
import subprocess32 as subprocess
import shlex
import sys

fonts = [
    "/Library/Fonts/Tahoma.ttf",
    "/Users/jason/Library/Fonts/FreeSans.ttf",
    "/Users/jason/Library/Fonts/FreeSansBold.ttf",
    "/Users/jason/Library/Fonts/FreeMono.ttf",
    "/Users/jason/Library/Fonts/FreeMonoBold.ttf",
    "/Users/jason/Library/Fonts/VCR_OSD_MONO_1.001.ttf",
    "/Users/jason/Library/Fonts/Retron2000.ttf",
    "/Users/jason/Library/Fonts/pt-mono.regular.ttf",
    "/Users/jason/Library/Fonts/pt-mono.bold.ttf",
]

sizes = [6, 8, 10, 12, 14, 13, 16, 18, 22, 24, 26, 32, 36, 40, 42, 46, 60]

source = open("generate.py").read().replace("\n", "\n// ")
print("// Generated with generate.py:")
print("// " + source)
print()
sys.stdout.flush()
for font in fonts:
    for size in sizes:
        cmd = "./fontconvert %s %d 64 32 176" % (font, size)
        cmd = shlex.split(cmd)
        subprocess.call(cmd)
Example #35
def test_exit_unknown_option(arg):
    assert subprocess.call([TOOL, arg]) == 1
Example #36
    else:
        psl_option = '-j managed'

    job_type_option = ''
    if job_type:
        job_type_option = '-i {0}'.format(job_type)

    # wrapper_params = '-a {0} -s {1} -r {2} -q {3} {4} {5} {6}'.format(WORK_DIR, panda_site, panda_queue, panda_queue,
    wrapper_params = '-s {0} -r {1} -q {2} {3} {4} {5}'.format(
        panda_site, panda_queue, panda_queue, resource_type_option, psl_option,
        job_type_option)

    if submit_mode == 'PUSH':
        # job configuration files need to be copied, because k8s configmap mounts as read-only file system
        # and therefore the pilot cannot execute in the same directory
        copy_files_in_dir(CONFIG_DIR, WORK_DIR)

    pilot_url = "https://raw.githubusercontent.com/yesw2000/Harvester/master/pilot2-gcs.tgz"
    command = "/tmp/runpilot2-wrapper.sh {0} -i PR --piloturl {1} -w generic --pilot-user generic --url=https://ai-idds-01.cern.ch -d --harvester-submit-mode {2} --allow-same-user=False -t | tee /tmp/wrapper-wid.log". \
        format(wrapper_params, pilot_url, submit_mode)
    try:
        subprocess.call(command, shell=True)
    except Exception:
        logging.error(traceback.format_exc())
    logging.debug('[main] pilot wrapper done...')

    # upload logs to e.g. panda cache or similar
    upload_logs(logs_frontend_w, '/tmp/wrapper-wid.log', destination_name,
                proxy_path)
    logging.debug('[main] FINISHED')
Example #37
    tmp_filename = "tmp_{}.nc".format(EXP)
    tmp_file = os.path.join(tmp_dir, tmp_filename)
    try:
        os.remove(tmp_file)
    except OSError:
        pass

    # Check if request variables are present
    nc = CDF(infile, "r")
    for m_var in pism_copy_vars:
        if m_var not in nc.variables:
            print(("Requested variable '{}' missing".format(m_var)))
    nc.close()
    print(("Copy {} to {}".format(infile, tmp_file)))
    cmd = ["ncks", "-O", "-d", "time,1,", "-v", "{}".format(",".join(pism_copy_vars)), infile, tmp_file]
    sub.call(cmd)

    # Make the file ISMIP6 conforming
    make_spatial_vars_ismip6_conforming(tmp_file, ismip6_vars_dict)
    # Should be temporary until new runs
    ncatted_cmd = [
        "ncatted",
        "-a",
        """bounds,lat,o,c,lat_bnds""",
        "-a",
        """bounds,lon,o,c,lon_bnds""",
        "-a",
        """coordinates,lat_bnds,d,,""",
        "-a",
        """coordinates,lon_bnds,d,,""",
        tmp_file,
Example #38
def host_cmd(args, cmd_args):
    if not cmd_args:
        raise TestFormatError('Must specify command to run on the host.')
    elif args.deploy_chrome and not args.path_to_outdir:
        raise TestFormatError(
            '--path-to-outdir must be specified if --deploy-chrome is passed.')

    cros_run_test_cmd = [
        CROS_RUN_TEST_PATH,
        '--board',
        args.board,
        '--cache-dir',
        os.path.join(CHROMIUM_SRC_PATH, args.cros_cache),
    ]
    if args.use_vm:
        cros_run_test_cmd += [
            '--start',
            # Don't persist any filesystem changes after the VM shuts down.
            '--copy-on-write',
        ]
    else:
        cros_run_test_cmd += [
            '--device', args.device if args.device else LAB_DUT_HOSTNAME
        ]
    if args.verbose:
        cros_run_test_cmd.append('--debug')
    if args.flash:
        cros_run_test_cmd.append('--flash')
        if args.public_image:
            cros_run_test_cmd += ['--public-image']

    if args.logs_dir:
        for log in SYSTEM_LOG_LOCATIONS:
            cros_run_test_cmd += ['--results-src', log]
        cros_run_test_cmd += [
            '--results-dest-dir',
            os.path.join(args.logs_dir, 'system_logs')
        ]

    test_env = setup_env()
    if args.deploy_chrome or args.deploy_lacros:
        # Mounting ash-chrome gives it enough disk space to not need stripping.
        cros_run_test_cmd.extend([
            '--deploy-lacros', '--lacros-launcher-script',
            LACROS_LAUNCHER_SCRIPT_PATH
        ] if args.deploy_lacros else ['--deploy', '--mount', '--nostrip'])

        cros_run_test_cmd += [
            '--build-dir',
            os.path.join(CHROMIUM_SRC_PATH, args.path_to_outdir)
        ]

    cros_run_test_cmd += [
        '--host-cmd',
        '--',
    ] + cmd_args

    logging.info('Running the following command:')
    logging.info(' '.join(cros_run_test_cmd))

    return subprocess.call(cros_run_test_cmd,
                           stdout=sys.stdout,
                           stderr=sys.stderr,
                           env=test_env)
Example #39
save_times = [-20000, -15000, -12500, -11700]

if options.no_refreeze:
    pism_config = 'init_config_norefreeze'
else:
    pism_config = 'init_config'
pism_config_nc = '.'.join([pism_config, 'nc'])
pism_config_cdl = os.path.join('../config', '.'.join([pism_config, 'cdl']))
# Anaconda libssl problem on chinook
if system in ('chinook',):
    ncgen = '/usr/bin/ncgen'
else:
    ncgen = 'ncgen'
cmd = [ncgen, '-o',
       pism_config_nc, pism_config_cdl]
sub.call(cmd)
if not os.path.isdir(odir):
    os.mkdir(odir)
state_dir = 'state'
scalar_dir = 'scalar'
spatial_dir = 'spatial'
snap_dir = 'snap'
for tsdir in (scalar_dir, spatial_dir, snap_dir, state_dir):
    if not os.path.isdir(os.path.join(odir, tsdir)):
        os.mkdir(os.path.join(odir, tsdir))
odir_tmp = '_'.join([odir, 'tmp'])
if not os.path.isdir(odir_tmp):
    os.mkdir(odir_tmp)

# ########################################################
# set up model initialization
Example #40
import os
import sys
import subprocess
import subprocess32

thread_timeout = 30

includeos_src = os.environ.get(
    'INCLUDEOS_SRC',
    os.path.realpath(os.path.join(
        os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
sys.path.insert(0, includeos_src)

from vmrunner import vmrunner

# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]


def cleanup():
    # Call the cleanup script - let python do the printing to get it synced
    print subprocess.check_output(["./fat32_disk.sh", "clean"])


# Setup disk
subprocess32.call(["./fat32_disk.sh"], shell=True, timeout=thread_timeout)

# Clean up on exit
vm.on_exit(cleanup)

# Boot the VM
vm.cmake().boot(thread_timeout).clean()
Example #41
def place_file(self, id, source_path):
    target_dir = '{}/{}'.format(IMAGERY_PATH, id)
    if not os.path.exists(target_dir):
        os.mkdir(target_dir)
    output_file = '{}/index.tif'.format(target_dir)

    # rewrite with gdal_translate
    gdal_translate = [
        'gdal_translate',
        source_path,
        output_file,
        '-co',
        'TILED=yes',
        '-co',
        'COMPRESS=DEFLATE',
        '-co',
        'PREDICTOR=2',
        '-co',
        'SPARSE_OK=yes',
        '-co',
        'BLOCKXSIZE=256',
        '-co',
        'BLOCKYSIZE=256',
        '-co',
        'INTERLEAVE=band',
        '-co',
        'NUM_THREADS=ALL_CPUS',
    ]

    started_at = datetime.utcnow()

    self.update_state(state='RUNNING',
                      meta={
                          'name': 'preprocess',
                          'started_at': started_at.isoformat(),
                          'status': 'Rewriting imagery'
                      })

    try:
        returncode = subprocess.call(gdal_translate, timeout=60 * 5)
    except subprocess.TimeoutExpired:
        # returncode is unbound when the call times out, so don't report it
        raise Exception(
            json.dumps({
                'name': 'preprocess',
                'started_at': started_at.isoformat(),
                'command': ' '.join(gdal_translate),
                'status': 'Timed out'
            }))

    if returncode != 0:
        raise Exception(
            json.dumps({
                'name': 'preprocess',
                'started_at': started_at.isoformat(),
                'command': ' '.join(gdal_translate),
                'return_code': returncode,
                'status': 'Failed'
            }))

    # delete original
    os.unlink(source_path)

    return {
        'name': 'preprocess',
        'completed_at': datetime.utcnow().isoformat(),
        'started_at': started_at.isoformat(),
        'status': 'Image pre-processing completed'
    }
Example #42
def node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
               kubernetes_version, rkt, libnetwork_image):
    """
    Create the calico-node container and establish Calico networking on this
    host.

    :param ip:  The IPv4 address of the host.
    :param node_image:  The calico-node image to use.
    :param ip6:  The IPv6 address of the host (or None if not configured)
    :param as_num:  The BGP AS Number to use for this node.  If not specified
    the global default value will be used.
    :param detach: True to run in Docker's "detached" mode, False to run
    attached.
    :param kubernetes_version: The version of the calico-kubernetes plugin to
     install, or None if the plugin should not be installed.
    :param rkt: True to install the rkt plugin, False otherwise.
    :param libnetwork_image: The name of the Calico libnetwork driver image to
    use.  None, if not using libnetwork.
    :return:  None.
    """
    # Normally, Felix will load the modules it needs, but when running inside a
    # container it might not be able to do so. Ensure the required modules are
    # loaded each time the node starts.
    # This is just a best-effort attempt, as the modules might be built in.
    # We'll warn during the check_system() if the modules are unavailable.
    try:
        call(["modprobe", "-a"] + REQUIRED_MODULES)
    except OSError:
        pass

    # Print warnings for any known system issues before continuing
    using_docker = True if runtime == 'docker' else False
    (_, _, etcd_ok) = \
        check_system(quit_if_error=False, libnetwork=libnetwork_image,
                     check_docker=using_docker)

    if not etcd_ok:
        sys.exit(1)

    # We will always want to setup IP forwarding
    _setup_ip_forwarding()

    # Ensure log directory exists
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Get IP address of host, if none was specified
    if not ip:
        ips = get_host_ips(exclude=["^docker.*", "^cbr.*"])
        try:
            ip = ips.pop()
        except IndexError:
            print "Couldn't autodetect a management IP address. Please provide" \
                  " an IP by rerunning the command with the --ip=<IP_ADDRESS> flag."
            sys.exit(1)
        else:
            print "No IP provided. Using detected IP: %s" % ip

    # Verify that IPs are not already in use by another host.
    error_if_bgp_ip_conflict(ip, ip6)

    # Verify that the chosen IP exists on the current host
    warn_if_unknown_ip(ip, ip6)

    # Warn if this hostname conflicts with an existing host
    warn_if_hostname_conflict(ip)

    # Install Kubernetes plugin
    if kubernetes_version:
        # Build a URL based on the provided Kubernetes_version.
        url = KUBERNETES_BINARY_URL % kubernetes_version
        try:
            # Attempt to install to the default Kubernetes directory
            install_plugin(KUBERNETES_PLUGIN_DIR, url)
        except OSError:
            # Use the backup directory
            install_plugin(KUBERNETES_PLUGIN_DIR_BACKUP, url)

    # Install rkt plugin
    if rkt:
        try:
            # Attempt to install to the default rkt directory
            install_plugin(RKT_PLUGIN_DIR, RKT_BINARY_URL)
        except OSError:
            # Use the backup directory
            install_plugin(RKT_PLUGIN_DIR_BACKUP, RKT_BINARY_URL)

    # Set up etcd
    ipv4_pools = client.get_ip_pools(4)
    ipv6_pools = client.get_ip_pools(6)

    # Create default pools if required
    if not ipv4_pools:
        client.add_ip_pool(4, DEFAULT_IPV4_POOL)
    if not ipv6_pools:
        client.add_ip_pool(6, DEFAULT_IPV6_POOL)

    client.ensure_global_config()
    client.create_host(hostname, ip, ip6, as_num)

    # Always try to convert the address(hostname) to an IP. This is a noop if
    # the address is already an IP address.  Note that the format of the authority
    # string has already been validated.
    etcd_authority = os.getenv(ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT)
    etcd_authority_address, etcd_authority_port = etcd_authority.split(':')
    etcd_authority = '%s:%s' % (socket.gethostbyname(etcd_authority_address),
                                etcd_authority_port)

    # Get etcd SSL environment variables if they exist
    etcd_scheme = os.getenv(ETCD_SCHEME_ENV, ETCD_SCHEME_DEFAULT)
    etcd_key_file = os.getenv(ETCD_KEY_FILE_ENV, None)
    etcd_cert_file = os.getenv(ETCD_CERT_FILE_ENV, None)
    etcd_ca_cert_file = os.getenv(ETCD_CA_CERT_FILE_ENV, None)

    etcd_volumes = []
    etcd_binds = {}
    etcd_envs = [
        "ETCD_AUTHORITY=%s" % etcd_authority,
        "ETCD_SCHEME=%s" % etcd_scheme
    ]
    felix_envs = [
        "FELIX_ETCDADDR=%s" % etcd_authority,
        "FELIX_ETCDSCHEME=%s" % etcd_scheme
    ]

    if etcd_ca_cert_file and etcd_key_file and etcd_cert_file:
        etcd_volumes.append(ETCD_CA_CERT_NODE_FILE)
        etcd_binds[etcd_ca_cert_file] = {
            "bind": ETCD_CA_CERT_NODE_FILE,
            "ro": True
        }
        etcd_envs.append("ETCD_CA_CERT_FILE=%s" % ETCD_CA_CERT_NODE_FILE)
        felix_envs.append("FELIX_ETCDCAFILE=%s" % ETCD_CA_CERT_NODE_FILE)

        etcd_volumes.append(ETCD_KEY_NODE_FILE)
        etcd_binds[etcd_key_file] = {"bind": ETCD_KEY_NODE_FILE, "ro": True}
        etcd_envs.append("ETCD_KEY_FILE=%s" % ETCD_KEY_NODE_FILE)
        felix_envs.append("FELIX_ETCDKEYFILE=%s" % ETCD_KEY_NODE_FILE)

        etcd_volumes.append(ETCD_CERT_NODE_FILE)
        etcd_binds[etcd_cert_file] = {"bind": ETCD_CERT_NODE_FILE, "ro": True}
        etcd_envs.append("ETCD_CERT_FILE=%s" % ETCD_CERT_NODE_FILE)
        felix_envs.append("FELIX_ETCDCERTFILE=%s" % ETCD_CERT_NODE_FILE)

    if runtime == 'docker':
        _start_node_container(ip, ip6, log_dir, node_image, detach, etcd_envs,
                              felix_envs, etcd_volumes, etcd_binds)
        if libnetwork_image:
            _start_libnetwork_container(libnetwork_image, etcd_envs,
                                        etcd_volumes, etcd_binds)
Example #43
'''
PreCluster submission.

Joblist is created and the first job is send out to test whether the pipeline
runs successfully.
'''

# 1) Create joblist
process = Popen(['jt', 'joblist'], stdin=PIPE, stdout=PIPE, stderr=PIPE)

(stdoutdata, stderrdata) = process.communicate()
print stdoutdata
print stderrdata

if process.returncode > 0 or re.search('Failed', stderrdata):
    raise Exception('\n--> Building joblist failed!')

if not os.path.exists('lsf'):
    os.mkdir('lsf')

# 2) Run 'PreCluster'
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
lsf = os.path.abspath(os.path.join('lsf', '%.5d_%s.precluster' % (1, st)))
if not os.path.exists(lsf):
    print('jt - PreCluster submission:')
    call([
        'bsub', '-W', '8:00', '-o', lsf, '-N', '-R',
        'rusage[mem=4000,scratch=4000]', 'jt', 'run', '--job', '1'
    ])
Example #44
time_dir = join(output_dir, "time_forcing")
if not os.path.isdir(time_dir):
    os.makedirs(time_dir)

# use the actual path of the uq directory
uq_dir = join(output_dir, "uq")
if not os.path.isdir(uq_dir):
    os.makedirs(uq_dir)

# generate the config file *after* creating the output directory
pism_config = "pism"
pism_config_nc = join(output_dir, pism_config + ".nc")

cmd = "ncgen -o {output} {input_dir}/config/{config}.cdl".format(
    output=pism_config_nc, input_dir=input_dir, config=pism_config)
sub.call(shlex.split(cmd))

# these Bash commands are added to the beginning of the run scripts
run_header = """# stop if a variable is not defined
set -u
# stop on errors
set -e

# path to the config file
config="{config}"
# path to the input directory (input data sets are contained in this directory)
input_dir="{input_dir}"
# output directory
output_dir="{output_dir}"
# temporary directory for spatial files
spatial_tmp_dir="{spatial_tmp_dir}"
Example #45
import os
import cPickle
import subprocess32

DAT_DIR = "/work/jaydy/dat/fda_pdb_mb"
WORK_DIR = "/work/jaydy/working/clean_pdb"


def readDat(ifn):
    with open(ifn, 'r') as f:
        return cPickle.load(f)


dat_ifn = "../dat/drugid_checked_pdb.dat"
dat = readDat(dat_ifn)

for drug_id, ligs in dat.iteritems():
    pdb_ids = [name.split('.')[0] for name in ligs]
    for pdb_id in pdb_ids:
        my_work_dir = ""
        try:
            work_dir = os.path.join(WORK_DIR, pdb_id[1:3])
            my_work_dir = work_dir
            os.makedirs(work_dir)
        except OSError:
            pass
        pdb_fn = os.path.join(DAT_DIR, pdb_id[1:3], pdb_id + '.pdb')
        if os.path.exists(pdb_fn):
            os.chdir(my_work_dir)
            ctrip_cmd = ['ctrip', '-prm', '2', pdb_fn]
            subprocess32.call(ctrip_cmd)
Example #46
def get_file_dic(path, options):
    '''
    This function opens supplied VCF files and saves metadata and variants in the dictionary spec_data.
    '''
    global sample_list

    spec_data = [
    ]  # create empty list that will be populated which dictionaries of VCF file information
    file_list = [f for f in os.listdir(path) if options.file_pattern in f
                 ]  # get file list from input path and fname pattern

    for fname in file_list:

        if not options.inverse_deletions:  # retroseq results

            # DO VCF to BED conversion
            if fname.endswith(".vcf"):
                subprocess32.call(["vcf2bed", "-d"],
                                  stdin=open(os.path.join(path, fname)),
                                  stdout=open(
                                      os.path.join(
                                          options.out_path,
                                          fname.replace(".vcf", ".bed")), "w"))
                fname = fname.replace(".vcf", ".bed")
            else:
                copyfile(os.path.join(path, fname),
                         os.path.join(options.out_path, fname))

            fname_splitted = fname.split(".")

            species_dict = {
                "sample": fname_splitted[0],
                "fname": fname,
                "ftype": fname_splitted[-1],
                "meitype": fname_splitted[3],
                "f_bedtools": BedTool(os.path.join(options.out_path, fname)),
                "fname_sm": fname.replace(".gq.bed", ".sorted.merged.gq.bed")
            }

        elif options.inverse_deletions:

            fname_splitted = fname.split(".")
            species_dict = {
                "sample": fname_splitted[0],
                "fname": fname,
                "ftype": fname_splitted[-1],
                "meitype": fname_splitted[-2],  # fname_splitted[2]
                "f_bedtools": BedTool(os.path.join(path, fname)).saveas(
                    os.path.join(options.out_path, fname)),
                "fname_sm": fname.replace(".bed", ".sorted.merged.bed")
            }

        print "\n loading %s" % fname,
        print "\t BedTool object length: %i" % (len(
            species_dict.get("f_bedtools"))),

        if len(species_dict.get("f_bedtools")) < 3 or species_dict.get(
                "meitype"
        ) == "DNA":  # filter out empty BedTool object and DNA insetions
            continue
        print "\t performing analyses: ",
        for analyses in prep_analyses:  # perform initial analyses
            print "\t %s" % analyses.__name__,
            species_dict["f_bedtools"] = analyses(
                species_dict.get("f_bedtools")
            ).saveas(
                os.path.join(options.out_path, species_dict.get("fname_sm"))
            )  #.saveas(os.path.join(options.out_path, species_dict.get("fname_sm"))) # save again to dictionary

        # species_dict.get("f_bedtools").saveas(
        #     os.path.join(options.out_path, species_dict.get("fname_sm")))  # save to file
        spec_data.append(species_dict)  # append to list
    sample_list = set([l.get("sample") for l in spec_data])

    return spec_data
Example #47
    else:
        failed = False
    f.close()
    return failed


# 1) Check results of 'PreCluster' step
lsf_file = glob.glob('lsf/*.precluster')
lsf_file = lsf_file[-1]  # take the latest

failed = check_precluster(lsf_file)
if failed:
    raise Exception('\n--> PreCluster step failed!')
else:
    print('jt - PreCluster step successfully completed')

# 2) Run 'JTCluster' step
print('jt - JTCluster submission:')
joblist_filename = glob.glob(os.path.join(os.getcwd(), '*.jobs'))[0]
joblist = yaml.load(open(joblist_filename))

for job in joblist.itervalues():
    print('jt - Submitting job # %d' % job['jobID'])
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
    lsf = os.path.abspath(os.path.join('lsf',
                          '%.5d_%s.jtcluster' % (job['jobID'], st)))
    call(['bsub', '-W', '8:00', '-o', lsf,
         '-R', 'rusage[mem=4000,scratch=4000]',
         'jt', 'run', '--job', '%s' % job['jobID']])
Example #48
def start_redis_daemon():
    cmd = [
        'redis-server', '--unixsocket', '/tmp/redis.sock', '--daemonize', 'yes'
    ]
    call(cmd)
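A hedged follow-up sketch: once the daemon is up, a client can connect through the same unix socket; this assumes the third-party redis-py package is installed:

import redis  # third-party package, assumed available

start_redis_daemon()
r = redis.Redis(unix_socket_path='/tmp/redis.sock')
print r.ping()  # True once the daemon accepts connections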
Example #49
def test_exit_no_option():
    # When run by itself, 'sfntedit' prints the usage
    assert subprocess.call([TOOL]) == 1
Example #50
def clearCache():
    command = "rm -rf /home/bishal/.cache/mozilla/firefox/mw7he6gs.default/cache2/entries/*"
    result = call(command, shell=True)
    time.sleep(1)
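A sketch of the same cleanup without invoking a shell, expanding the wildcard with glob instead; the profile path is machine-specific and kept from the original:

import glob
import os
import shutil

def clear_cache(profile='/home/bishal/.cache/mozilla/firefox/mw7he6gs.default'):
    for entry in glob.glob(os.path.join(profile, 'cache2', 'entries', '*')):
        if os.path.isdir(entry):
            shutil.rmtree(entry, ignore_errors=True)
        else:
            os.remove(entry)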
Example #51
def node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
               libnetwork_image):
    """
    Create the calico-node container and establish Calico networking on this
    host.

    :param ip:  The IPv4 address of the host.
    :param node_image:  The calico-node image to use.
    :param ip6:  The IPv6 address of the host (or None if not configured)
    :param as_num:  The BGP AS Number to use for this node.  If not specified
    the global default value will be used.
    :param detach: True to run in Docker's "detached" mode, False to run
    attached.
    :param libnetwork_image: The name of the Calico libnetwork driver image to
    use.  None, if not using libnetwork.
    :return:  None.
    """
    # Normally, Felix will load the modules it needs, but when running inside a
    # container it might not be able to do so. Ensure the required modules are
    # loaded each time the node starts.
    # We only make a best effort attempt because the command may fail if the
    # modules are built in.
    # We'll warn during the check_system() if the modules are unavailable.
    try:
        call(["modprobe", "-a"] + REQUIRED_MODULES)
    except OSError:
        pass

    # Print warnings for any known system issues before continuing
    using_docker = True if runtime == 'docker' else False
    (_, _, etcd_ok) = \
        check_system(quit_if_error=False, libnetwork=libnetwork_image,
                     check_docker=using_docker)

    if not etcd_ok:
        sys.exit(1)

    # We will always want to setup IP forwarding
    _setup_ip_forwarding()

    # Ensure log directory exists
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Get IP address of host, if none was specified
    if not ip:
        ips = get_host_ips(exclude=[
            "^docker.*", "^cbr.*", "virbr.*", "lxcbr.*", "veth.*", "cali.*",
            "tunl.*"
        ])
        try:
            ip = ips.pop()
        except IndexError:
            print "Couldn't autodetect a management IP address. Please provide" \
                  " an IP by rerunning the command with the --ip=<IP_ADDRESS> flag."
            sys.exit(1)
        else:
            print "No IP provided. Using detected IP: %s" % ip

    # Verify that IPs are not already in use by another host.
    error_if_bgp_ip_conflict(ip, ip6)

    # Verify that the chosen IP exists on the current host
    warn_if_unknown_ip(ip, ip6)

    # Warn if this hostname conflicts with an existing host
    warn_if_hostname_conflict(ip)

    # Set up etcd
    ipv4_pools = client.get_ip_pools(4)
    ipv6_pools = client.get_ip_pools(6)

    # Create default pools if required
    if not ipv4_pools:
        client.add_ip_pool(4, DEFAULT_IPV4_POOL)
    if not ipv6_pools:
        client.add_ip_pool(6, DEFAULT_IPV6_POOL)

    client.ensure_global_config()
    client.create_host(hostname, ip, ip6, as_num)

    # If IPIP is enabled, the host requires an IP address for its tunnel
    # device, which is in an IPIP pool.  Without this, a host can't originate
    # traffic to a pool address because the response traffic would not be
    # routed via the tunnel (likely being dropped by RPF checks in the fabric).
    ipv4_pools = client.get_ip_pools(4)
    ipip_pools = [p for p in ipv4_pools if p.ipip]
    if ipip_pools:
        # IPIP is enabled, make sure the host has an address for its tunnel.
        _ensure_host_tunnel_addr(ipv4_pools, ipip_pools)
    else:
        # No IPIP pools, clean up any old address.
        _remove_host_tunnel_addr()

    # The format of the authority string has already been validated.
    etcd_authority = os.getenv(ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT)

    # Get etcd SSL environment variables if they exist
    etcd_scheme = os.getenv(ETCD_SCHEME_ENV, ETCD_SCHEME_DEFAULT)
    etcd_key_file = os.getenv(ETCD_KEY_FILE_ENV, None)
    etcd_cert_file = os.getenv(ETCD_CERT_FILE_ENV, None)
    etcd_ca_cert_file = os.getenv(ETCD_CA_CERT_FILE_ENV, None)

    etcd_volumes = []
    etcd_binds = {}
    etcd_envs = [
        "ETCD_AUTHORITY=%s" % etcd_authority,
        "ETCD_SCHEME=%s" % etcd_scheme
    ]
    felix_envs = [
        "FELIX_ETCDADDR=%s" % etcd_authority,
        "FELIX_ETCDSCHEME=%s" % etcd_scheme
    ]

    if etcd_ca_cert_file and etcd_key_file and etcd_cert_file:
        etcd_volumes.append(ETCD_CA_CERT_NODE_FILE)
        etcd_binds[etcd_ca_cert_file] = {
            "bind": ETCD_CA_CERT_NODE_FILE,
            "ro": True
        }
        etcd_envs.append("ETCD_CA_CERT_FILE=%s" % ETCD_CA_CERT_NODE_FILE)
        felix_envs.append("FELIX_ETCDCAFILE=%s" % ETCD_CA_CERT_NODE_FILE)

        etcd_volumes.append(ETCD_KEY_NODE_FILE)
        etcd_binds[etcd_key_file] = {"bind": ETCD_KEY_NODE_FILE, "ro": True}
        etcd_envs.append("ETCD_KEY_FILE=%s" % ETCD_KEY_NODE_FILE)
        felix_envs.append("FELIX_ETCDKEYFILE=%s" % ETCD_KEY_NODE_FILE)

        etcd_volumes.append(ETCD_CERT_NODE_FILE)
        etcd_binds[etcd_cert_file] = {"bind": ETCD_CERT_NODE_FILE, "ro": True}
        etcd_envs.append("ETCD_CERT_FILE=%s" % ETCD_CERT_NODE_FILE)
        felix_envs.append("FELIX_ETCDCERTFILE=%s" % ETCD_CERT_NODE_FILE)

    if runtime == 'docker':
        _start_node_container(ip, ip6, log_dir, node_image, detach, etcd_envs,
                              felix_envs, etcd_volumes, etcd_binds)
        if libnetwork_image:
            _start_libnetwork_container(libnetwork_image, etcd_envs,
                                        etcd_volumes, etcd_binds)
Example #52
def setta(bot, update, **optional_args):
    # build the curl command to run
    comando = 'curl "192.168.43.84:5555/setta?token=386429372:AAG7xgNjODVqBFg517NpbmhDNDbpVEdtO5M&chat=%s&soglia=' % update.message.chat_id
    comando = comando + '%s"' % optional_args['args'][0]
    # send the HTTP request to the Arduino
    subprocess32.call(comando, shell=True)
import sys, os.path
from subprocess32 import call
from mathcheck_common import sq

if len(sys.argv) < 2:
    print "need order (or min and max order) of cases to generate"
    quit()

n = int(sys.argv[1])

inname = "input/compstring/%d.in"

if not os.path.exists("input"): call(["mkdir", "input"])
if not os.path.exists("input/compstring"): call(["mkdir", "input/compstring"])

indices = [
    0 * (n / 2 + 1) + 1, 1 * (n / 2 + 1) + 1, 2 * (n / 2 + 1) + 1,
    3 * (n / 2 + 1) + 1
]
f = open(inname % n, "w")
if n % 2 == 1:
    f.write("p cnf {0} {1}\n".format(4 * (n / 2) + 4, 8 * (n / 2) + 4))
    for j in range(1, n / 2 + 1):
        #f.write("cx%d %d %d %d 0\n" % (-(indices[0]+j), (indices[1]+j), (indices[2]+j), (indices[3]+j)))
        f.write("%d %d %d %d 0\n" % (-(indices[0] + j), -(indices[1] + j),
                                     -(indices[2] + j), -(indices[3] + j)))
        f.write("%d %d %d %d 0\n" % (-(indices[0] + j), -(indices[1] + j),
                                     (indices[2] + j), (indices[3] + j)))
        f.write("%d %d %d %d 0\n" % (-(indices[0] + j),
                                     (indices[1] + j), -(indices[2] + j),
                                     (indices[3] + j)))
def remove_veth(ep):
    # The veth removal is best effort. If it fails then just log.
    rc = call(['ip', 'link', 'del', ep.name], timeout=IP_CMD_TIMEOUT)
    if rc != 0:
        app.logger.warn("Failed to delete veth %s", ep.name)
        return "%d.%d" % (n, c)
    elif k == "*" and c == "*":
        return "%d.*.*" % n
    elif k == "*":
        return "%d.%d.*" % (n, c)
    else:
        return "%d.%d.%d" % (n, c, k)

inname = "input/comp/%s.in"
compname = "input/comp/%s.comp"
assumname = "input/comp/%s.assum"
#prodvarname = "input/prodvar/%d.prodvars"
resultname = "results/compprog/%s.out"
logname = "output/compprog/%s.log"

if not os.path.exists("output"): call(["mkdir", "output"])
if not os.path.exists("output/compprog"): call(["mkdir", "output/compprog"])
if not os.path.exists("results"): call(["mkdir", "results"])
if not os.path.exists("results/compprog"): call(["mkdir", "results/compprog"])

for n in range(n1, n2+1):
    # Generate SAT instance
    #command = "python generate_compression_instances.py {0}".format(n)
    #call(command.split(" "))

    files = glob.glob(compname % runstr(n, "*", "*"))
    count = 0
    tottime = 0

    for f in files:
        k = int(f.split(".")[-2])
Example #56
    if qemu_use_rr:
        qemu_args = ['rr', 'record', project['qemu'], '-replay', isoname]
        subprocess32.check_call(qemu_args)
    else:
        raise

replay_time = tock()
print("taint analysis complete %.2f seconds" % replay_time)
sys.stdout.flush()

tick()

progress("Trying to create database {}...".format(project['name']))
createdb_args = ['createdb', '-U', 'postgres', project['db']]
createdb_result = subprocess32.call(createdb_args,
                                    stdout=sys.stdout,
                                    stderr=sys.stderr)

print()
if createdb_result == 0:  # Created new DB; now populate
    progress("Database created. Initializing...")
    # psql_args = ['psql', '-U', 'postgres', '-d', project['db'],
    # '-f', join(join(lavadir, 'include'), 'lava.sql')]
    psql_args = [
        'psql', '-U', 'postgres', '-d', project['db'], '-f',
        join(join(lavadir, 'fbi'), 'lava.sql')
    ]
    dprint("psql invocation: [%s]" % (" ".join(psql_args)))
    subprocess32.check_call(psql_args, stdout=sys.stdout, stderr=sys.stderr)
else:
    progress("Database already exists.")
Example #57
def create_warped_vrt(self, id):
    raster_path = '{}/{}/index.tif'.format(IMAGERY_PATH, id)
    vrt_path = '{}/{}/index.vrt'.format(IMAGERY_PATH, id)
    # initialize Flask
    # TODO Celery's @worker_init.connect decorator _should_ work for this
    app.config['SERVER_NAME'] = SERVER_NAME
    meta = get_metadata(id)
    approximate_zoom = meta['meta']['approximateZoom']

    # create a warped VRT to reproject on the fly
    gdalwarp = [
        'gdalwarp',
        raster_path,
        vrt_path,
        '-r',
        'cubic',
        '-t_srs',
        'epsg:3857',
        '-overwrite',
        '-of',
        'VRT',
        '-te',
        '-20037508.34',
        '-20037508.34',
        '20037508.34',
        '20037508.34',
        '-ts',
        str(2**approximate_zoom * 256),
        str(2**approximate_zoom * 256),
    ]

    # add an alpha band (for NODATA) if one wasn't already included
    if meta['meta']['bandCount'] < 4:
        gdalwarp.append('-dstalpha')

    started_at = datetime.utcnow()

    self.update_state(state='RUNNING',
                      meta={
                          'name': 'warped-vrt',
                          'started_at': started_at.isoformat(),
                          'status': 'Creating warped VRT'
                      })

    try:
        returncode = subprocess.call(gdalwarp, timeout=60 * 5)
    except subprocess.TimeoutExpired:
        # returncode is unbound when the call times out, so don't report it
        raise Exception(
            json.dumps({
                'name': 'warped-vrt',
                'started_at': started_at.isoformat(),
                'command': ' '.join(gdalwarp),
                'status': 'Timed out'
            }))

    if returncode != 0:
        raise Exception(
            json.dumps({
                'name': 'warped-vrt',
                'started_at': started_at.isoformat(),
                'command': ' '.join(gdalwarp),
                'return_code': returncode,
                'status': 'Failed'
            }))

    return {
        'completed_at': datetime.utcnow().isoformat(),
        'started_at': started_at.isoformat(),
        'status': 'Warped VRT creation completed'
    }
Example #58
def run_autorestart(args=None):
    """
    Run B3 in auto-restart mode.
    """
    restart_num = 0

    if main_is_frozen():
        # if we are running the frozen application we do not
        # need to run any script, just the executable itself
        script = ''
    else:
        # if we are running from sources, then sys.executable is set to `python`
        script = os.path.join(modulePath[:-3], 'b3_run.py')
        if not os.path.isfile(script):
            # must be running from the wheel, so there is no b3_run
            script = os.path.join(modulePath[:-3], 'b3', 'run.py')
        if os.path.isfile(script + 'c'):
            script += 'c'

    if args:
        script = '%s %s %s --autorestart' % (sys.executable, script,
                                             ' '.join(args))
    else:
        script = '%s %s --autorestart' % (sys.executable, script)

    while True:

        try:

            try:
                import subprocess32 as subprocess
            except ImportError:
                import subprocess

            status = subprocess.call(script, shell=True)

            sys.stdout.write('Exited with status: %s ... ' % status)
            sys.stdout.flush()
            sleep(2)

            if status == 221:
                restart_num += 1
                sys.stdout.write('restart requested (%s)\n' % restart_num)
                sys.stdout.flush()
            elif status == 222:
                sys.stdout.write('shutdown requested!\n')
                sys.stdout.flush()
                break
            elif status == 220 or status == 223:
                sys.stdout.write('B3 error (check log file)\n')
                sys.stdout.flush()
                break
            elif status == 224:
                sys.stdout.write('B3 error (check console)\n')
                sys.stdout.flush()
                break
            elif status == 256:
                sys.stdout.write('python error, (check log file)\n')
                sys.stdout.flush()
                break
            elif status == 0:
                sys.stdout.write('normal shutdown\n')
                sys.stdout.flush()
                break
            elif status == 1:
                sys.stdout.write('general error (check console)\n')
                sys.stdout.flush()
                break
            else:
                restart_num += 1
                sys.stdout.write(
                    'unknown exit code (%s), restarting (%s)...\n' %
                    (status, restart_num))
                sys.stdout.flush()

            sleep(4)

        except KeyboardInterrupt:
            print('Quit')
            break
Example #59
def create_overviews(self, id):
    raster_path = '{}/{}/index.tif'.format(IMAGERY_PATH, id)
    # initialize Flask
    # TODO Celery's @worker_init.connect decorator _should_ work for this
    app.config['SERVER_NAME'] = SERVER_NAME
    meta = get_metadata(id)
    approximate_zoom = meta['meta']['approximateZoom']

    # create external overviews
    gdaladdo = [
        'gdaladdo',
        '-r',
        'cubic',
        '--config',
        'GDAL_TIFF_OVR_BLOCKSIZE',
        '256',
        '--config',
        'TILED_OVERVIEW',
        'yes',
        '--config',
        'COMPRESS_OVERVIEW',
        'DEFLATE',
        '--config',
        'PREDICTOR_OVERVIEW',
        '2',
        '--config',
        'SPARSE_OK_OVERVIEW',
        'yes',
        '--config',
        'BLOCKXSIZE_OVERVIEW',
        '256',
        '--config',
        'BLOCKYSIZE_OVERVIEW',
        '256',
        '--config',
        'INTERLEAVE_OVERVIEW',
        'band',
        '--config',
        'NUM_THREADS_OVERVIEW',
        'ALL_CPUS',
        '-ro',
        raster_path,
    ]

    # generate a list of overview values
    gdaladdo.extend([str(2**(x + 1)) for x in range(approximate_zoom)])

    started_at = datetime.utcnow()

    self.update_state(state='RUNNING',
                      meta={
                          'name': 'overviews',
                          'started_at': started_at.isoformat(),
                          'status': 'Creating external overviews'
                      })

    try:
        returncode = subprocess.call(gdaladdo, timeout=60 * 5)
    except subprocess.TimeoutExpired:
        # returncode is unbound when the call times out, so don't report it
        raise Exception(
            json.dumps({
                'name': 'overviews',
                'started_at': started_at.isoformat(),
                'command': ' '.join(gdaladdo),
                'status': 'Timed out'
            }))

    if returncode != 0:
        raise Exception(
            json.dumps({
                'name': 'overviews',
                'started_at': started_at.isoformat(),
                'command': ' '.join(gdaladdo),
                'return_code': returncode,
                'status': 'Failed'
            }))

    return {
        'name': 'overviews',
        'completed_at': datetime.utcnow().isoformat(),
        'started_at': started_at.isoformat(),
        'status': 'Overview addition completed'
    }
Example #60
def host_cmd(args, unknown_args):
    if not args.cmd:
        raise TestFormatError('Must specify command to run on the host.')
    elif unknown_args:
        raise TestFormatError(
            'Args "%s" unsupported. Is your host command correctly formatted?'
            % (' '.join(unknown_args)))
    elif args.deploy_chrome and not args.path_to_outdir:
        raise TestFormatError(
            '--path-to-outdir must be specified if --deploy-chrome is passed.')

    cros_run_test_cmd = [
        CROS_RUN_TEST_PATH,
        '--board',
        args.board,
        '--cache-dir',
        args.cros_cache,
    ]
    if args.use_vm:
        cros_run_test_cmd += [
            '--start',
            # Don't persist any filesystem changes after the VM shuts down.
            '--copy-on-write',
            '--device',
            'localhost',
        ]
    else:
        cros_run_test_cmd += [
            '--device', args.device if args.device else LAB_DUT_HOSTNAME
        ]
    if args.verbose:
        cros_run_test_cmd.append('--debug')

    if args.logs_dir:
        for log in SYSTEM_LOG_LOCATIONS:
            cros_run_test_cmd += ['--results-src', log]
        cros_run_test_cmd += ['--results-dest-dir', args.logs_dir]

    test_env = setup_env()
    if args.deploy_chrome:
        cros_run_test_cmd += [
            '--deploy',
            # Mounting the browser gives it enough disk space to not need stripping.
            '--mount',
            '--nostrip',
            '--build-dir',
            os.path.abspath(args.path_to_outdir),
        ]

    cros_run_test_cmd += [
        '--host-cmd',
        '--',
    ] + args.cmd

    logging.info('Running the following command:')
    logging.info(' '.join(cros_run_test_cmd))

    return subprocess.call(cros_run_test_cmd,
                           stdout=sys.stdout,
                           stderr=sys.stderr,
                           env=test_env)