Example #1
def get_cpuinfo(platform='linux'):
    vendor_string = ''
    feature_string = ''
    if platform == "darwin":
        vendor_string = utils.run_command(['sysctl',
                                           '-n',
                                           'machdep.cpu.vendor'])
        feature_string = utils.run_command(['sysctl',
                                            '-n',
                                            'machdep.cpu.features'])
        # osx reports AVX1.0 while linux reports it as AVX
        feature_string = feature_string.replace("AVX1.0", "AVX")
    elif os.path.isfile('/proc/cpuinfo'):
        with open('/proc/cpuinfo') as f:
            cpuinfo = f.readlines()
        for line in cpuinfo:
            if 'vendor_id' in line:
                vendor_string = line.split(':')[1].strip()
            elif 'flags' in line:
                feature_string = line.split(':')[1].strip()
            if vendor_string and feature_string:
                break
    else:
        raise ValueError("Unknown platform, could not find CPU information")
    return (vendor_string.strip(), feature_string.strip())
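These listings all assume a project-local run_command helper rather than anything in the standard library, and its contract varies from project to project: some variants return captured stdout as text, others return a (returncode, stdout, stderr) tuple, and some take a shell command string instead of an argument list. A minimal sketch of the list-based, stdout-returning variant this example uses, assuming UTF-8 output; the real helpers add logging, environment handling, and error reporting:

import subprocess

def run_command(command):
    # Run `command`, given as a list of argument strings, and return
    # its captured stdout as text. check=True raises CalledProcessError
    # on a non-zero exit status.
    result = subprocess.run(command, capture_output=True, check=True)
    return result.stdout.decode('utf-8')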
Example #2
def unf(pathin, destdir):
    fname = os.path.split(pathin)[1]
    outname = os.path.splitext(fname)[0] + '.mzML'
    outpath = os.path.join(destdir, outname)
    cmd = "%s %s > %s" % (exec_unf, pathin, outpath)
    ut.run_command(cmd)
    return outpath
Example #3
    def install_source(self, source_path):
        prefix = self.get('mingw prefix')
        # an earlier flag set (prescott arch, combined threads) kept for reference:
        # confflags = "--prefix=%s --host=i586-mingw32msvc --with-gcc-arch=prescott --enable-portable-binary --with-our-malloc16 --with-windows-f77-mangling --enable-shared --disable-static --enable-threads --with-combined-threads" % (unwin(prefix))
        confflags = "--prefix=%s --host=i586-mingw32msvc --with-gcc-arch=native --enable-portable-binary --with-our-malloc16 --with-windows-f77-mangling --enable-shared --disable-static" % (unwin(prefix))
        wd = os.path.join(source_path, 'double-mingw32')
        # always start from a clean build directory
        shutil.rmtree(wd, ignore_errors=True)
        if not os.path.isdir(wd):
            os.makedirs(wd)
        conf = unwin(os.path.join(source_path, 'configure'))
        bash = self.get('mingw bash')
        make = self.get('mingw make')
        if ' ' in conf:
            raise RuntimeError("The path of fftw3 configure script cannot contain spaces: %r" % (conf))

        r = run_command('%s %s %s --enable-sse2' % (bash, conf, confflags), cwd=wd, env=self.environ,
                        verbose=True)
        if r[0]:
            return False
        r = run_command(make+' -j4', cwd=wd, env=self.environ, verbose=True)
        if r[0]:
            return False
        r = run_command(make+' install', cwd=wd, env=self.environ, verbose=True)
        return not r[0]
Example #4
def main():
    distro = get_distro()
    print()
    packages = get_packages(distro)
    dependencies = get_dependencies(distro, packages)

    if dependencies:
        install_command = install_commands[distro] + list(dependencies)

        print()
        print('Installing packages...')
        print(' '.join(install_command))
        run_command(install_command)
    else:
        print()
        print('No packages to install')

    print()
    print('Fetching submodules')
    for package in packages:
        if package.has_submodules:
            run_command(['git', 'submodule', 'update', '--init', '--recursive', package.path])

    print()
    for package in packages:
        print('Setting up {}'.format(package))
        package.setup(distro)
Example #5
    def allocation_lrc_file(self, song, lrc_path):
        if os.path.exists(lrc_path):
            if self.vaild_lrc(lrc_path):
                save_lrc_path = self.get_lrc_filepath(song)
                if os.path.exists(save_lrc_path):
                    os.unlink(save_lrc_path)
                utils.run_command("cp %s %s" % (lrc_path, save_lrc_path))
                Dispatcher.reload_lrc(song)
Example #6
def get_all_wmtdata():
    thread_mono = Process(target = get_all_wmt_monolingual)
    thread_para = Process(target = get_all_wmt_parallel)
    thread_mono.start(); thread_para.start()
    thread_mono.join(); thread_para.join()
    homedir = os.path.expanduser("~")
    run_command('mv wmt-data '+homedir)
Example #7
def exec_latexmk(tex_filename, src_path):
    """
    Execute latexmk. With the -pdf option, this runs pdflatex and bibtex
    as many times as needed until all cross-references are resolved.
    
    :param tex_filename: File name of the .tex to compile.
    :param src_path: Path from which pdflatex will be called (this should
        make most figures work).
    :return: PDF file name.
    """

    tex_path = os.path.dirname(tex_filename)
    if tex_path == '':
        tex_path = '.'

    aux_filename = os.path.splitext(tex_filename)[0] + ".aux"
    pdf_filename = os.path.splitext(tex_filename)[0] + ".pdf"
    
    # We enter the folder of the source to get proper relative paths to 
    # figures
    starting_dir = os.getcwd()
    if src_path != '':
        os.chdir(src_path)
    
    # Run pdflatex and bibtex a bunch of times
    try:
        run_command("latexmk -pdf -output-directory={} {}".format(tex_path, tex_filename))
        logger.info("Ran latexmk on {} outputting to {}".format(tex_filename, tex_path))
    except Exception:
        logger.debug("Problem building pdf file.")
    
    # Return to original directory
    os.chdir(starting_dir)
    return pdf_filename
Example #8
def has_dependencies_installed():
    try:
        import z3
        import z3.z3util
        z3_version = z3.get_version_string()
        tested_z3_version = '4.5.1'
        if compare_versions(z3_version, tested_z3_version) > 0:
            logging.warning("You are using an untested version of z3. %s is the officially tested version" % tested_z3_version)
    except:
        logging.critical("Z3 is not available. Please install z3 from https://github.com/Z3Prover/z3.")
        return False

    if not cmd_exists("evm"):
        logging.critical("Please install evm from go-ethereum and make sure it is in the path.")
        return False
    else:
        cmd = "evm --version"
        out = run_command(cmd).strip()
        evm_version = re.findall(r"evm version (\d+\.\d+\.\d+)", out)[0]
        tested_evm_version = '1.7.3'
        if compare_versions(evm_version, tested_evm_version) > 0:
            logging.warning("You are using evm version %s. The supported version is %s" % (evm_version, tested_evm_version))

    if not cmd_exists("solc"):
        logging.critical("solc is missing. Please install the solidity compiler and make sure solc is in the path.")
        return False
    else:
        cmd = "solc --version"
        out = run_command(cmd).strip()
        solc_version = re.findall(r"Version: (\d+\.\d+\.\d+)", out)[0]
        tested_solc_version = '0.4.19'
        if compare_versions(solc_version, tested_solc_version) > 0:
            logging.warning("You are using solc version %s, The latest supported version is %s" % (solc_version, tested_solc_version))

    return True
Example #9
def pkgconfig_get_link_args(pkg, ucp='', system=True, static=True):
  havePcFile = pkg.endswith('.pc')
  pcArg = pkg
  if not havePcFile:
    if system:
      # check that pkg-config knows about the package in question
      run_command(['pkg-config', '--exists', pkg])
    else:
      # look for a .pc file
      if ucp == '':
        ucp = default_uniq_cfg_path()
      pcfile = pkg + '.pc' # maybe needs to be an argument later?

      pcArg = os.path.join(get_cfg_install_path(pkg, ucp), 'lib',
                           'pkgconfig', pcfile)

      if not os.access(pcArg, os.R_OK):
        error("Could not find '{0}'".format(pcArg), ValueError)

  static_arg = [ ]
  if static:
    static_arg = ['--static']

  libs_line = run_command(['pkg-config', '--libs'] + static_arg + [pcArg])
  libs = libs_line.split()
  return libs
Example #10
def get_cpuinfo(platform_val='linux'):
    vendor_string = ''
    feature_string = ''
    if platform_val == "darwin":
        vendor_string = run_command(['sysctl', '-n', 'machdep.cpu.vendor'])
        feature_string = run_command(['sysctl', '-n', 'machdep.cpu.features'])
        # osx reports AVX1.0 while linux reports it as AVX
        feature_string = feature_string.replace("AVX1.0", "AVX")
        feature_string = feature_string.replace("SSE4.", "SSE4")
    elif os.path.isfile('/proc/cpuinfo'):
        with open('/proc/cpuinfo') as f:
            cpuinfo = f.readlines()
        # Compensate for missing vendor in ARM /proc/cpuinfo
        if get_native_machine() == 'aarch64':
            vendor_string = "arm"
        for line in cpuinfo:
            if 'vendor_id' in line:
                vendor_string = line.split(':')[1].strip()
            elif 'flags' in line:
                feature_string = line.split(':')[1].strip()
            elif line.startswith('Features'):
                feature_string = line.split(':')[1].strip()
            if vendor_string and feature_string:
                feature_string = feature_string.replace("sse4_", "sse4")
                break
    else:
        raise ValueError("Unknown platform, could not find CPU information")
    return (vendor_string.strip(), feature_string.strip())
Example #11
    def _luks_header_backup_restore(self, create_fn):
        succ = create_fn(self.loop_dev, PASSWD, None)
        self.assertTrue(succ)

        backup_file = os.path.join(self.backup_dir, "luks-header.txt")

        succ = BlockDev.crypto_luks_header_backup(self.loop_dev, backup_file)
        self.assertTrue(succ)
        self.assertTrue(os.path.isfile(backup_file))

        # now completely destroy the luks header
        ret, out, err = run_command("cryptsetup erase %s -q && wipefs -a %s" % (self.loop_dev, self.loop_dev))
        if ret != 0:
            self.fail("Failed to erase LUKS header from %s:\n%s %s" % (self.loop_dev, out, err))

        _ret, fstype, _err = run_command("blkid -p -ovalue -sTYPE %s" % self.loop_dev)
        self.assertFalse(fstype)  # false == empty

        # header is destroyed, should not be possible to open
        with self.assertRaises(GLib.GError):
            BlockDev.crypto_luks_open(self.loop_dev, "libblockdevTestLUKS", PASSWD, None)

        # and restore the header back
        succ = BlockDev.crypto_luks_header_restore(self.loop_dev, backup_file)
        self.assertTrue(succ)

        _ret, fstype, _err = run_command("blkid -p -ovalue -sTYPE %s" % self.loop_dev)
        self.assertEqual(fstype, "crypto_LUKS")

        # opening should now work
        succ = BlockDev.crypto_luks_open(self.loop_dev, "libblockdevTestLUKS", PASSWD)
        self.assertTrue(succ)

        succ = BlockDev.crypto_luks_close("libblockdevTestLUKS")
        self.assertTrue(succ)
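Note that the libblockdev tests in Examples 11 and 12 follow a different convention: run_command takes a shell command string and returns a (returncode, stdout, stderr) tuple, so the tests can assert on the exit status. A minimal sketch of that variant, assuming UTF-8 output:

import subprocess

def run_command(command):
    # Run a shell command string and return (returncode, stdout, stderr),
    # matching the tuple unpacking used in the tests above.
    proc = subprocess.Popen(command, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return proc.returncode, out.decode('utf-8'), err.decode('utf-8')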
Example #12
    def test_get_device_symlinks(self):
        """Verify that getting device symlinks works as expected"""

        with self.assertRaises(GLib.GError):
            BlockDev.utils_get_device_symlinks("no_such_device")

        symlinks = BlockDev.utils_get_device_symlinks(self.loop_dev)
        # there should be at least 2 symlinks for something like "/dev/sda" (in /dev/disk/by-id/)
        self.assertGreaterEqual(len(symlinks), 2)

        symlinks = BlockDev.utils_get_device_symlinks(self.loop_dev[5:])
        self.assertGreaterEqual(len(symlinks), 2)

        # create an LV to get a device with more symlinks
        ret, _out, _err = run_command ("pvcreate %s" % self.loop_dev)
        self.assertEqual(ret, 0)
        self.addCleanup(run_command, "pvremove %s" % self.loop_dev)

        ret, _out, _err = run_command ("vgcreate utilsTestVG %s" % self.loop_dev)
        self.assertEqual(ret, 0)
        self.addCleanup(run_command, "vgremove -y utilsTestVG")

        ret, _out, _err = run_command ("lvcreate -n utilsTestLV -L 12M utilsTestVG")
        self.assertEqual(ret, 0)
        self.addCleanup(run_command, "lvremove -y utilsTestVG/utilsTestLV")

        symlinks = BlockDev.utils_get_device_symlinks("utilsTestVG/utilsTestLV")
        # there should be at least 4 symlinks for an LV
        self.assertGreaterEqual(len(symlinks), 4)
Example #13
def extract_forms(url, follow="false", cookie_jar=None, filename="forms.json"):
    utils.remove_file(os.path.join(os.path.dirname(__file__), filename))

    if cookie_jar is None:
        try:
            out = utils.run_command('{} && {}'.format(
                utils.cd(os.path.dirname(os.path.abspath(__file__))),
                'scrapy crawl form -o {} -a start_url="{}" -a follow={} -a proxy={}'.format(filename, url, follow, HTTP_PROXY)), EXTRACT_WAIT_TIME)
        except Exception:
            out = utils.run_command('{} && {}'.format(
                utils.cd(os.path.dirname(os.path.abspath(__file__))),
                'scrapy crawl form -o {} -a start_url="{}" -a follow={}'.format(filename, url, follow)), EXTRACT_WAIT_TIME)
    else:
        cookie_jar_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename.replace('.json', '.txt'))
        cookie_jar.save(cookie_jar_path)
        out = utils.run_command('{} && {}'.format(
            utils.cd(os.path.dirname(os.path.abspath(__file__))),
            'scrapy crawl form_with_cookie -o {} -a start_url="{}" -a cookie_jar={}'.format(filename, url, cookie_jar_path)), EXTRACT_WAIT_TIME)

    with open(os.path.join(os.path.dirname(__file__), filename)) as json_forms:
        forms = json.load(json_forms)

    utils.remove_file(os.path.join(os.path.dirname(__file__), filename))

    return forms
Example #14
def _seed(torrent_path, seed_cache_path, torrent_seed_duration,
          torrent_listen_port_start, torrent_listen_port_end):
    plugin_path = os.path.dirname(inspect.getabsfile(inspect.currentframe()))
    seeder_path = os.path.join(plugin_path, SEEDER_PROCESS)
    # materialize the list so it works under Python 3, where map() is lazy
    seed_cmd = list(map(str, [seeder_path, torrent_path, seed_cache_path,
                              torrent_seed_duration, torrent_listen_port_start,
                              torrent_listen_port_end]))
    utils.run_command(seed_cmd)
Example #15
def make_partition(session, dev, partition_start, partition_end):
    dev_path = utils.make_dev_path(dev)

    if partition_end != "-":
        raise pluginlib.PluginError("Can only create unbounded partitions")

    utils.run_command(['sfdisk', '-uS', dev_path],
                      '%s,;\n' % (partition_start))
Example #16
    def render(self):
        filename = "temp_" + self.output_file + ".gnuplot"
        with open(filename, "w") as f:
            f.write(self.script)

        utils.run_command(["gnuplot", filename])
        os.remove(filename)
Example #17
    def download(self, temp_ver, store_metadata=True):
        """
        Retrieve the given template version

        Args:
            temp_ver (TemplateVersion): template version to retrieve
            store_metadata (bool): If set to ``False``, will not refresh the
                local metadata with the retrieved one

        Returns:
            None
        """
        dest = self._prefixed(temp_ver.name)
        temp_dest = '%s.tmp' % dest

        with lockfile.LockFile(dest):
            # Image was downloaded while we were waiting
            if os.path.exists(dest):
                return

            temp_ver.download(temp_dest)
            if store_metadata:
                with open('%s.metadata' % dest, 'w') as f:
                    utils.json_dump(temp_ver.get_metadata(), f)

            sha1 = hashlib.sha1()
            with open(temp_dest, 'rb') as f:  # hash the raw bytes, not decoded text
                while True:
                    chunk = f.read(65536)
                    if not chunk:
                        break
                    sha1.update(chunk)
            if temp_ver.get_hash() != sha1.hexdigest():
                raise RuntimeError(
                    'Image %s does not match the expected hash %s' % (
                        temp_ver.name,
                        sha1.hexdigest(),
                    )
                )

            with open('%s.hash' % dest, 'w') as f:
                f.write(sha1.hexdigest())

            with log_utils.LogTask('Convert image', logger=LOGGER):
                utils.run_command(
                    [
                        'qemu-img',
                        'convert',
                        '-O',
                        'raw',
                        temp_dest,
                        dest,
                    ],
                )

            os.unlink(temp_dest)

            self._init_users(temp_ver)
Example #18
def check_installed_moses():
    os.chdir(os.path.expanduser("~")+ '/mosesdecoder')
    if not os.path.exists('sample-models.tgz'):
        run_command('wget '+ moses_sample_model)
        run_command('tar xzf sample-models.tgz')
    os.chdir('sample-models')
    proc = run_command('~/mosesdecoder/bin/moses -f '
                       'phrase-model/moses.ini < phrase-model/in > out')
    with open('out', 'r') as f:
        print(f.read().strip())
Example #19
    def sync_server(self, path):
        LOG.info('Syncing server ...')
        command = '{} && {} && unset DJANGO_SETTINGS_MODULE && python manage.py syncdb --noinput'.format(
            utils.to_env(self.base_path), utils.cd(path))
        output = utils.run_command(command)
        if 'Unknown command' in output[2]:
            command = '{} && {} && unset DJANGO_SETTINGS_MODULE && python manage.py migrate --noinput'.format(
                utils.to_env(self.base_path), utils.cd(path))
        return utils.run_command(command)
Example #20
def msconvert(fin):
    fout = fin.replace('.mzML','.mzXML')
    out_newdir = os.path.splitext(fout)[0]
    cmd = "%s %s -o %s" % (exec_msconvert, fin, out_newdir)
    ut.run_command(cmd)
    match_output = os.path.join(out_newdir, '*.mzXML')
    ut.run_command('mv %s %s' % (match_output, fout))
    os.rmdir(out_newdir)
    return fout
Example #21
    def submit_polysel2_batch(self, generator, params):
        batch_size = params["batch_size"]
        batch_file = str(os.path.join(params["outputdir"], "polysel2.sh"))
        task_commands = []
        for i in range(batch_size):
            try:
                task_commands.append(next(generator))
            except StopIteration:
                break
        logger.info("Submitting %d additional polysel2 tasks", len(task_commands))

        outputfiles = []
        workdir = params.get('workdir')

        i = 0
        num_submitted = 0
        for command, outputfile in task_commands:
            with open(batch_file, 'w') as f:
                outputfiles.append(outputfile)
                f.write("#!/bin/sh\n")
                f.write("#SBATCH -p factor\n")
                f.write("#SBATCH -J %s\n" % outputfile.split('/')[-1])
                f.write("#SBATCH -n 1\n")
                f.write("#SBATCH -c 2\n")
                f.write("#SBATCH -s\n")
                f.write("#SBATCH --requeue\n")
                f.write("#SBATCH --output=/dev/null\n")
                f.write("srun -s -c 2 -n 1 -p factor -J polysel2 " + command + " 2>&1 &\n")
                f.write("wait\n")
            os.chmod(batch_file, 0o755)
            # TODO: use the stdout of sbatch to determine job IDs, and only continue when all of the jobs have finished.
            stdout = utils.run_command("sbatch " + batch_file)

            # rate-limit job submission so slurm is not overwhelmed
            if i >= 100:
                logger.debug("Submitted %d/%d polysel2 jobs", num_submitted, len(task_commands))
                #time.sleep(1)
                i = 0
            num_submitted += 1
            i += 1

        while True:
            # squeue prints a header line, so an empty queue leaves exactly one line
            jobs_out = utils.run_command("squeue -t PENDING,RUNNING,COMPLETING").strip().split('\n')
            if len(jobs_out) - 1 <= 0:
                break
            logger.info("Number of queued jobs: %d", len(jobs_out) - 1)
            sleep(10)

        polys = []
        for filen in outputfiles:
            try:
                with open(filen) as f:
                    polys += list(utils.parse_poly(f.read()))
            except Exception as e:
                logger.error("%s. Is workdir NFS-shared?", e)
                raise
        return polys
Example #22
def start_training():
  cmd = []
  cmd.append(os.path.join(CAFFE_BIN_DIR, 'caffe'))
  cmd.append('train')
  cmd.append('--solver=' + SOLVER_PROTOTXT)
  cmd.append('--weights=' + WEIGHTS_FILE)
  if is_gpu():
    cmd.append('--gpu=' + str(DEVICE_ID))
  utils.run_command(cmd)
Example #23
def exec_diff(old_filename, new_filename, diff_filename):
    """ Exec Latexdiff

        :param old_filename: path of the old .tex file.
        :param new_filename: path of the new .tex file.
        :param diff_filename: path for the latexdiff output.

    """
    run_command("latexdiff %s %s > %s" % (old_filename, new_filename, diff_filename))
Example #24
def exec_diff(old_filename, new_filename, diff_filename, latexdiff_args=""):
    """ Exec Latexdiff

        :param old_filename: path of the old .tex file.
        :param new_filename: path of the new .tex file.
        :param diff_filename: path for the latexdiff output.
        :param latexdiff_args: extra command-line arguments passed to latexdiff.

    """
    run_command("latexdiff %s %s %s > %s" % (latexdiff_args, old_filename, new_filename, diff_filename))
Example #25
def download_wmt_parallel(corpus_name):    
    corpusdir = 'wmt-data/parallel/' + corpus_name + '/'
    os.makedirs(corpusdir, exist_ok=True)
    os.chdir(corpusdir) 
    url = wmt_data.parallel[corpus_name]
    parallelized_download('wget', [url])
    run_command('tar zxvf *.tgz')
    run_command('tar -xvf *.tar')
    #run_command('tar -xvf *.tar.gz')
    os.chdir('../../..')
Example #26
    def __init__(self, package_name=None, clone_url=None, dest_path=None,
                 branch='master', commit_id=None):
        self.repo_name = package_name
        self.repo_url = clone_url
        self.local_path = os.path.join(dest_path, package_name)
        self.repo = None

        # Load if it is an existing git repo
        if os.path.exists(self.local_path):
            try:
                self.repo = pygit2.Repository(self.local_path)
                LOG.info("Found existent repository at destination path %s" % (
                         self.local_path))
                # Reset hard repository so we clean up any changes that may
                # prevent checkout on the right point of the three.
                self.repo.reset(self.repo.head.get_object().oid,
                                pygit2.GIT_RESET_HARD)

            except KeyError:
                raise exception.RepositoryError(package=package_name,
                                                repo_path=dest_path)

        else:
            LOG.info("Cloning into %s..." % self.local_path)
            self.repo = pygit2.clone_repository(self.repo_url,
                                                self.local_path,
                                                checkout_branch=branch)
        try:
            for remote in self.repo.remotes:
                remote.fetch()
                LOG.info("Fetched changes for %s" % remote.name)
            # NOTE(maurosr): Get references and rearrange local master's HEAD
            # we are always **assuming a fastforward**
            remote = self.repo.lookup_reference('refs/remotes/origin/%s' % (
                branch))
            master = self.repo.lookup_reference('refs/heads/%s' % branch)
            master.set_target(remote.target)
            self.repo.head.set_target(master.target)
            LOG.info("%(package_name)s Repository updated" % locals())

            if commit_id:
                LOG.info("Checking out into %s" % commit_id)
                obj = self.repo.git_object_lookup_prefix(commit_id)
                self.repo.checkout_tree(obj)
            else:
                LOG.info("Checking out into %s" % branch)
                self.repo.checkout('refs/heads/' + branch)
        except ValueError:
            ref = commit_id if commit_id else branch
            raise exception.RepositoryError(message="Could not find reference "
                                            "%s at %s repository" % (ref,
                                            package_name))

        cmd = "git submodule init; git submodule update"
        utils.run_command(cmd, cwd=self.local_path)
Example #27
    def run_sieving(self):    
        logger.info("Starting Sieving...")
        self.start_time = time.time()

        if os.path.isfile(self.msieve_dat_file):
            logger.info("Removing existing .dat file for msieve")
            os.remove(self.msieve_dat_file)

        # (re)create the .dat file so it contains only N
        with open(self.msieve_dat_file, 'wt', encoding='utf-8') as f:
            f.write(str(self.parameters.myparams({'N': int}, [])['N']))
            f.write('\n')

        # check if we should import relations
        import_relation_file = self.parameters.myparams({'import': None}, ['tasks', 'sieve']).get('import')
        if import_relation_file:
            logger.info("Importing relations from file '%s'", import_relation_file)
            imported_files = self.import_relations(import_relation_file)
            self.queue_extend(imported_files)
            logger.info("Found %d relation files in file '%s'", len(imported_files), self.reldir) 

        if not os.path.exists(self.reldir):
            # create the directory for relations if it does not yet exist
            logger.info("Creating directory for relations %s", self.reldir)
            os.makedirs(self.reldir)
        else:
            # check if there are relations in the directory already and add them to the import queue.
            # We do this check once outside the loop so that we know which tasks not to regenerate
            logger.info("Importing relations files from directory '%s;", self.reldir)
            imported_files = self.import_relations(self.reldir)
            self.queue_extend(imported_files)
            logger.info("Found %d relation files in directory '%s'", len(imported_files), self.reldir) 

        # generate sieving task commands, skipping files that have already been generated
        self.generator = self.generate_sieving_task_commands()

        # spawn a thread to launch sieving tasks until sieving is finished
        slurm_thread = threading.Thread(target=self.run_slurm_thread)
        slurm_thread.start()

        # spawn a thread to print out a status message periodically
        status_thread = threading.Thread(target=self.run_status_thread)
        status_thread.start()

        # spawn a thread to filter periodically until sieving is finished
        filter_thread = threading.Thread(target=self.run_filter_thread)
        filter_thread.start()

        # wait until filter thread completes
        try:
            filter_thread.join()
            slurm_thread.join()
            status_thread.join()
        finally:
            utils.run_command("scancel -p factor")
Example #28
def _create_iso(mkisofs_cmd, filename, path):
    logging.debug("Creating ISO '%s'..." % filename)
    orig_dir = os.getcwd()
    os.chdir(path)
    try:
        utils.run_command([mkisofs_cmd, '-quiet', '-l', '-o', filename,
                           '-c', 'boot.cat', '-b', 'isolinux.bin',
                           '-no-emul-boot', '-boot-load-size', '4',
                           '-boot-info-table', '.'])
    finally:
        os.chdir(orig_dir)
Example #29
def _unbundle_iso(sr_path, filename, path):
    logging.debug("Unbundling ISO '%s'" % filename)
    read_only_path = utils.make_staging_area(sr_path)
    try:
        utils.run_command(['mount', '-o', 'loop', filename, read_only_path])
        try:
            shutil.copytree(read_only_path, path)
        finally:
            utils.run_command(['umount', read_only_path])
    finally:
        utils.cleanup_staging_area(read_only_path)
Example #30
def get_compiler_version(compiler):
    version_string = '0'
    if compiler == 'aarch64-gnu':
        version_string = run_command(['aarch64-unknown-linux-gnu-gcc', '-dumpversion'])
    elif 'gnu' in compiler:
        # Assuming the 'compiler' version matches the gcc version
        # e.g., `mpicc -dumpversion == gcc -dumpversion`
        version_string = run_command(['gcc', '-dumpversion'])
    elif 'cray-prgenv-cray' == compiler:
        version_string = os.environ.get('CRAY_CC_VERSION', '0')
    return CompVersion(version_string)
Example #31
    def toggle(self):
        run_command('playerctl', 'play-pause')
Example #32
    def play(self):
        run_command('playerctl', 'play')
Example #33
    def pause(self):
        run_command('playerctl', 'pause')
Example #34
    def stop(self):
        run_command('playerctl', 'stop')
Example #35
    def next(self):
        run_command('playerctl', 'next')
Example #36
    def get_artist(self):
        return run_command('playerctl', 'metadata', 'artist')
Example #37
    def get_title(self):
        return run_command('playerctl', 'metadata', 'title')
Example #38
    def previous(self):
        run_command('playerctl', 'previous')
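The playerctl wrappers in Examples 31 through 38 pass the program and its arguments as separate positional parameters, and the metadata getters expect stdout back, so the helper behind them presumably has a *args signature. A minimal sketch under those assumptions:

import subprocess

def run_command(*args):
    # Accept the program and its arguments as separate parameters, run
    # the command, and return its stripped stdout (used by get_artist and
    # get_title above; the control methods ignore the return value).
    result = subprocess.run(args, capture_output=True)
    return result.stdout.decode('utf-8').strip()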
Example #39
    def _get_ndctl_version(self):
        _ret, out, _err = run_command("ndctl --version")
        m = re.search(r"([\d\.]+)", out)
        if not m or len(m.groups()) != 1:
            raise RuntimeError("Failed to determine ndctl version from: %s" % out)
        return Version(m.groups()[0])
Example #40
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', action='store', dest='MODE')
    params = parser.parse_args()

    info = utils.read_json(PREPARED_INFO_FILE)
    image_count = int(info['lmdb_image_count'])
    if params.MODE == 'F32':
        src_prototxt_file = info['test_prototxt_f32']
    else:
        src_prototxt_file = info['test_prototxt_i8']
    dst_prototxt_file = os.path.join(CUR_DIR, 'tmp.prototxt')

    print('\nPreparing {} ...'.format(src_prototxt_file))
    utils.prepare_test_prototxt(src_file=src_prototxt_file,
                                dst_file=dst_prototxt_file,
                                lmdb_dir=info['lmdb_dir'],
                                batch_size=BATCH_SIZE,
                                label_map_file=info['label_map_file'],
                                name_size_file=info['name_size_file'],
                                image_count=image_count)

    print('\nTest {} model...'.format(params.MODE))
    cmd = []
    cmd.append(os.path.join(CAFFE_BIN_DIR, 'caffe'))
    cmd.append('test')
    cmd.append('--model=' + dst_prototxt_file)
    cmd.append('--weights=' + info['weights'])
    cmd.append('--iterations=' + str(image_count // BATCH_SIZE))  # integer division: whole iterations only
    cmd.append('--detection')
    utils.run_command(cmd, 'test_' + params.MODE + '.log')