def main():
    my_config = get_config()

    new_synapse_config = generate_configuration(
        my_config, get_zookeeper_topology(), get_all_namespaces()
    )

    with tempfile.NamedTemporaryFile() as tmp_file:
        new_synapse_config_path = tmp_file.name
        with open(new_synapse_config_path, 'w') as fp:
            json.dump(new_synapse_config, fp, sort_keys=True, indent=4, separators=(',', ': '))

        # Match permissions that puppet expects
        os.chmod(new_synapse_config_path, 0o644)

        # Restart synapse if the config files differ
        should_restart = not filecmp.cmp(new_synapse_config_path, my_config['config_file'])

        # Always swap new config file into place.  Our monitoring system
        # checks the config['config_file'] file age to ensure that it is
        # continually being updated.
        shutil.copy(new_synapse_config_path, my_config['config_file'])

        if should_restart:
            subprocess.check_call(SYNAPSE_RESTART_COMMAND)
Example #2
def sh(cmd):
    msg_trace('sh(%r)' % cmd)
    try:
        subprocess.check_call(cmd, shell=True)
    except:
        msg_trace('FAILED!')
        raise
Example #3
    def shell(self, name, *args):
        self.parser.set_usage(self.envshell_usage)
        self.parser.prog = 'envshell'
        options, args = self.parser.parse_args(list(args))

        if len(args) == 0:
            raise Response("%s\nError: incorrect number of arguments" %
                           (self.parser.get_usage()), 2)

        sys.stdout.write("Launching envshell for %s. "
                         "Type 'exit' or 'Ctrl+D' to return.\n" %
                         self.path(args[0]))
        sys.stdout.flush()
        self.open(args[0], 2)

        shell = os.environ['SHELL']

        try:
            subprocess.check_call([shell],
                                  universal_newlines=True,
                                  bufsize=0,
                                  close_fds=not is_windows,
                                  **params)
        except OSError as err:
            if err.errno == 2:
                raise Response("Unable to find shell %s" % shell, err.errno)
            else:
                raise Response("An error occurred: %s" % err,
                               status=err.errno)

        raise Response()
Example #4
 def MakeCoreSdist(self):
   os.chdir(args.grr_src)
   subprocess.check_call([self.PYTHON_BIN64, "setup.py", "sdist",
                          "--dist-dir=%s" % self.BUILDDIR, "--no-make-docs",
                          "--no-make-ui-files", "--no-sync-artifacts"])
   return glob.glob(os.path.join(self.BUILDDIR,
                                 "grr-response-core-*.zip")).pop()
Example #5
def initEnKFrun(callback=None,basedir=None,site=None,siteparam=None):
    if not site:
        raise ValueError("site is a required parameter")
    if siteparam:
        param = ast.literal_eval(siteparam)
    else:
        raise ValueError("Parameters are required. Please submit parameters with the task submission")
    if not basedir:
        raise ValueError("Basedir is required")
    #Create working directory
    newDir = basedir + "celery_data/" + str(initEnKFrun.request.id)
    check_call(["mkdir",newDir])
    os.chdir(newDir)
    #copy matlab code to working directory
    codeDir = basedir + 'enfk_matlab/'
    for file in glob.glob(codeDir + '*'): 
        shutil.copy(file, newDir) 
    #check_call(["cp","-r",codeDir + '*',newDir])
    # set initial parameter files
    setup_param(newDir,param)
    if callback:
        result=subtask(callback).delay(task_id=str(initEnKFrun.request.id),wkdir=newDir)
        return {'task_id':result.task_id,'task_name':result.task_name}
    else:
        return newDir
Example #6
 def run(self):
     if subprocess.call(["git", "--version"]) != 0:
         print("ERROR:\n\tPlease install git.")
         exit(1)
     status_lines = subprocess.check_output(["git", "status"]).splitlines()
     current_branch = status_lines[0].strip().split()[-1].decode()
     print("On branch {}.".format(current_branch))
     if current_branch != self.branch:
         print("ERROR:\n\tNew tags can only be made from branch \"{}\"."
               "".format(self.branch))
         print("\tYou can use \"git checkout {}\" to switch the branch."
               "".format(self.branch))
         exit(1)
     tags_output = subprocess.check_output(["git", "tag"])
     tags = [tag.strip().decode() for tag in tags_output.splitlines()]
     tag = "v" + __version__
     if tag in tags:
         print("Warning: \n\tTag {} already exists.".format(tag))
         print("\tEdit the version information in {}".format(
                 os.path.join(HERE, PACKAGE_NAME, "__init__.py")
             ))
     else:
         print("Creating tag \"{}\".".format(tag))
         subprocess.check_call(["git", "tag", tag])
     print("Pushing tag \"{}\" to remote \"{}\".".format(tag, self.remote))
     subprocess.check_call(["git", "push", self.remote, tag])
Example #7
def archive(conn, root, source, **kwargs):
  source = path.abspath(source)
  dir = path.dirname(source)
  base = path.basename(source)
  cursor = conn.cursor()
  cursor.execute("INSERT OR IGNORE INTO versions VALUES (:name, 0)", {"name": base})
  cursor.execute("UPDATE versions SET version = version + 1 WHERE name = :name", {"name": base})
  cursor.execute("SELECT version FROM versions WHERE name = :name", {"name": base})
  version = cursor.fetchone()["version"]
  conn.commit()
  archive = "{base}-{version}.tar.gz".format(**locals())
  fullPath = path.join(root, archive)
  commands = ["tar", "--checkpoint=.10000", "-C", dir, "-czf", fullPath, base]
  log(Level.Info, "Running command ", " ".join(commands))
  subprocess.check_call(commands)
  print
  cursor.execute(
      """INSERT INTO items VALUES (
             :name,
             :version,
             CURRENT_TIMESTAMP,
             'Archived',
             :source,
             :archive);
      """,
      {
        "name": base,
        "version": version,
        "source": source,
        "archive": archive,
      })
  conn.commit()
Example #8
def generate_skeleton(package, path):
    """
    Use conda skeleton pypi to generate a recipe for a package and
    save it to path.

    Parameters
    ----------

    package: Package
        The package for which a recipe is to be generated.

    path: str
        Path to which the recipe should be written.
    """

    additional_arguments = ['--version', str(package.required_version),
                            '--output-dir', path]

    if package.setup_options:
        additional_arguments.extend(['--setup-options={}'.format(package.setup_options)])

    if package.numpy_compiled_extensions:
        additional_arguments.append('--pin-numpy')

    subprocess.check_call(["conda", "skeleton", "pypi", package.pypi_name] +
                          additional_arguments)
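A hedged usage sketch for generate_skeleton() above: the Package argument only needs the attributes the function reads, so a namedtuple stand-in is used here for illustration; the field values and the "recipes" output directory are assumptions, not part of the original module, and conda-build must be installed for the underlying `conda skeleton pypi` call to succeed.

from collections import namedtuple

# Stand-in for the real Package object; only the attributes read above are needed.
Package = namedtuple(
    "Package",
    ["pypi_name", "required_version", "setup_options", "numpy_compiled_extensions"])

pkg = Package(
    pypi_name="astropy",             # hypothetical package
    required_version="5.3.4",        # hypothetical version pin
    setup_options=None,
    numpy_compiled_extensions=True)  # triggers the --pin-numpy flag

# Writes a conda recipe for astropy 5.3.4 under ./recipes/
generate_skeleton(pkg, "recipes")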
Example #9
    def build_dist(self):
        for sdir in self.staging_dirs:
            if os.path.exists(sdir):
                shutil.rmtree(sdir)
        main_stage, ninja_stage = self.staging_dirs
        modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob(os.path.join('mesonbuild/modules/*'))]
        modules = ['mesonbuild.modules.' + x for x in modules if not x.startswith('_')]
        modules += ['distutils.version']
        modulestr = ','.join(modules)
        python = shutil.which('python')
        cxfreeze = os.path.join(os.path.dirname(python), "Scripts", "cxfreeze")
        if not os.path.isfile(cxfreeze):
            print("ERROR: This script requires cx_freeze module")
            sys.exit(1)

        subprocess.check_call([python,
                               cxfreeze,
                               '--target-dir',
                               main_stage,
                               '--include-modules',
                               modulestr,
                               'meson.py'])
        if not os.path.exists(os.path.join(main_stage, 'meson.exe')):
            sys.exit('Meson exe missing from staging dir.')
        os.mkdir(ninja_stage)
        shutil.copy(shutil.which('ninja'), ninja_stage)
        if not os.path.exists(os.path.join(ninja_stage, 'ninja.exe')):
            sys.exit('Ninja exe missing from staging dir.')
Example #10
File: build.py Project: teto/ycmd
def BuildYcmdLibs( args ):
  build_dir = mkdtemp( prefix = 'ycm_build.' )

  try:
    full_cmake_args = [ '-G', GetGenerator( args ) ]
    if OnMac():
      full_cmake_args.extend( CustomPythonCmakeArgs() )
    full_cmake_args.extend( GetCmakeArgs( args ) )
    full_cmake_args.append( p.join( DIR_OF_THIS_SCRIPT, 'cpp' ) )

    os.chdir( build_dir )
    subprocess.check_call( [ 'cmake' ] + full_cmake_args )

    build_target = ( 'ycm_support_libs' if 'YCM_TESTRUN' not in os.environ else
                     'ycm_core_tests' )

    build_command = [ 'cmake', '--build', '.', '--target', build_target ]
    if OnWindows():
      build_command.extend( [ '--config', 'Release' ] )
    else:
      build_command.extend( [ '--', '-j', str( NumCores() ) ] )

    subprocess.check_call( build_command )

    if 'YCM_TESTRUN' in os.environ:
      RunYcmdTests( build_dir )
  finally:
    os.chdir( DIR_OF_THIS_SCRIPT )
    rmtree( build_dir, ignore_errors = OnTravisOrAppVeyor() )
Example #11
 def create_profile_F(self):
     """Function to create the SSHTunnel Firefox profile
     All firefox instances must be closed
     (or they can be left open, but the function will killall firefox)        
     """
    
     
     subprocess.call(["killall", "firefox"]) #all firefox instance must be killed
     subprocess.check_call(["firefox","-CreateProfile","SSHTunnel"]) #Create a new Profile named SSHTunnel
   
     
     #Navigate to the profile folder:
     os.chdir('/home/'+ str(LOGNAME) +'/.mozilla/firefox/')
     list_of_items = os.listdir(os.getcwd())
     for folder in list_of_items:
         if 'SSHTunnel' in folder:
             os.chdir(folder)
             break
     else:
         raise Exception("Create new profile for firefox failed")
                     
     
     write_prefs_js()       
     
     self.launch_firefox_instance()
Example #12
    def sendCommand(self, jbossHome, controller, user, password, cluster):
        self.fillParameters(jbossHome, controller, user, password)
        startCommand =  self._clisg+cluster+":start-servers"

        print("eseguo: "+self._complPath+" "+self._cliconn+" "+self._complContr+" "+self._complUser+" "+self._complPwd+" "+startCommand)

        subprocess.check_call([self._complPath,self._cliconn,self._complContr,self._complUser,self._complPwd,startCommand])
Example #13
def compare_review(review_spec, branch, remote, rebase=False):
    new_ps = None    # none means latest

    if '-' in review_spec:
        review_spec, new_ps = review_spec.split('-')
    review, old_ps = parse_review_number(review_spec)

    if old_ps is None or old_ps == new_ps:
        raise InvalidPatchsetsToCompare(old_ps, new_ps)

    old_review = build_review_number(review, old_ps)
    new_review = build_review_number(review, new_ps)

    old_branch = fetch_review(old_review, branch, remote)
    checkout_review(old_branch)

    if rebase:
        print('Rebasing %s' % old_branch)
        rebase = rebase_changes(branch, remote, False)
        if not rebase:
            print('Skipping rebase because of conflicts')
            run_command_exc(CommandFailed, 'git', 'rebase', '--abort')

    new_branch = fetch_review(new_review, branch, remote)
    checkout_review(new_branch)

    if rebase:
        print('Rebasing also %s' % new_branch)
        if not rebase_changes(branch, remote, False):
            print("Rebasing of the new branch failed, "
                  "diff can be messed up (use -R to not rebase at all)!")
            run_command_exc(CommandFailed, 'git', 'rebase', '--abort')

    subprocess.check_call(['git', 'diff', old_branch])
Example #14
def start_dcvoip_process():
    try:
        logger.info("Starting dcvoip process - Thanks Jonas Karlsson!")
        with open(os.devnull, 'wb') as devnull:    
            subprocess.check_call(["sudo", "service", "dcvoip", "start"], stdout=devnull)    
    except (subprocess.CalledProcessError, IOError):
        logging.warning("Unable to start the dcvoip process")
Example #15
def stop_dcvoip_process():
    try:
        logger.info("Stopping dcvoip process")
        with open(os.devnull, 'wb') as devnull:
            subprocess.check_call(["sudo", "service", "dcvoip", "stop"], stdout=devnull)
    except (subprocess.CalledProcessError, IOError):
        logging.warning("Unable to stop the dcvoip process")    
Example #16
def create_pool(service, name, replicas=2):
    ''' Create a new RADOS pool '''
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return
    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 / replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'create',
        name, str(pgnum)
    ]
    check_call(cmd)
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'set', name,
        'size', str(replicas)
    ]
    check_call(cmd)
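A worked illustration of the placement-group heuristic above; the OSD names, service id and pool name are made up. Note that the original helper targets Python 2, where `len(osds) * 100 / replicas` is integer division on ints; on Python 3 the same expression yields a float, so `//` is used in this sketch.

osds = ["osd.0", "osd.1", "osd.2", "osd.3", "osd.4", "osd.5"]   # pretend get_osds() found 6 OSDs
replicas = 2
pgnum = len(osds) * 100 // replicas   # 6 * 100 // 2 == 300 placement groups
cmd = ['ceph', '--id', 'my-service', 'osd', 'pool', 'create', 'volumes', str(pgnum)]
print(cmd)   # ['ceph', '--id', 'my-service', 'osd', 'pool', 'create', 'volumes', '300']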
Example #17
def archive_logs(log_dir):
    """Compress log files in given log_dir using gzip."""
    log_files = []
    for r, ds, fs in os.walk(log_dir):
        log_files.extend(os.path.join(r, f) for f in fs if is_log(f))
    if log_files:
        subprocess.check_call(['gzip', '--best', '-f'] + log_files)
Example #18
def do_build(package):
    print("Build started for %s" % package.name)

    sphinx_args = [package.path, package.build_path]
    copy_env = os.environ.copy()

    if package.devhelp:
        sphinx_args = ["-b", "devhelpfork"] + sphinx_args
        copy_env["PGIDOCGEN_TARGET_PREFIX"] = DEVHELP_PREFIX
    else:
        sphinx_args = ["-b", "html"] + sphinx_args
        copy_env["PGIDOCGEN_TARGET_PREFIX"] = ""

    copy_env["PGIDOCGEN_TARGET_BASE_PATH"] = \
        os.path.dirname(package.build_path)

    subprocess.check_call(
        [sys.executable, "-m", "sphinx", "-n", "-q", "-a", "-E"] + sphinx_args,
        env=copy_env)

    # we don't rebuild, remove all caches
    shutil.rmtree(os.path.join(package.build_path, ".doctrees"))
    os.remove(os.path.join(package.build_path, ".buildinfo"))

    # remove some pages we don't need
    os.remove(os.path.join(package.build_path, "genindex.html"))
    os.remove(os.path.join(package.build_path, "search.html"))

    if os.name != "nt":
        for d in ["structs", "unions", "interfaces", "iface-structs",
                  "class-structs"]:
            os.symlink("classes", os.path.join(package.build_path, d))

    return package
Example #19
def snapshot(source, destination, name=None):
    """Snapshot one directory to another. Specify names to snapshot small, named differences."""
    source = source + sep
    destination = destination + sep

    if not path.isdir(source):
        raise RuntimeError("source is not a directory")

    if path.exists(destination):
        if not path.isdir(destination):
            raise RuntimeError("destination is not a directory")

        if name is None:
            raise RuntimeError("can't snapshot base snapshot if destination exists")

    snapdir = path.join(destination, ".snapdir")
    
    if path.exists(path.join(source, ".snapdir")):
        raise RuntimeError("snapdir exists in source directory")

    if name is None:
        check_call(["rsync", "--del", "-av", source, destination])
        makedirs(snapdir)
    else:
        if not path.exists(snapdir):
            raise RuntimeError("No snapdir in destination directory")

        check_call(["rsync", "--del", "-av", "--only-write-batch={}".format(path.join(snapdir, name)), source, destination])
Example #20
 def __exit__(self, exc_type, _value, _traceback):
   for k in self._config_dict:
     if self._previous_values.get(k):
       subprocess.check_call(
           ['git', 'config', '--local', k, self._previous_values[k]])
     else:
       subprocess.check_call(['git', 'config', '--local', '--unset', k])
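For context, a minimal sketch (not the original project's code) of the __enter__ half such a context manager presumably pairs with: it captures each key's previous value with `git config --get` before overriding it, which is what makes the restore logic above work; the class name is hypothetical.

import subprocess

class ScopedGitConfig(object):   # hypothetical name
    def __init__(self, config_dict):
        self._config_dict = config_dict
        self._previous_values = {}

    def __enter__(self):
        for k, v in self._config_dict.items():
            try:
                previous = subprocess.check_output(
                    ['git', 'config', '--local', '--get', k]).strip().decode()
            except subprocess.CalledProcessError:
                previous = None   # the key was not set before
            self._previous_values[k] = previous
            subprocess.check_call(['git', 'config', '--local', k, v])
        return self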
Example #21
def _exec(app, args, work_dir, extra_env, save_output):
    all_args = [app]
    if args:
        all_args.extend(args)

    all_env = os.environ.copy()
    if extra_env:
        all_env.update(extra_env)

    cmd = ' '.join(all_args)

    if _output_file and (not save_output):
        with open(_output_file, 'a') as f:
            f.write('\n')
            if extra_env:
                for var in sorted(extra_env.iterkeys()):
                    f.write('%s = %s\n' % (var, extra_env[var]))
                f.write('\n')
            f.write('%s\n\n' % cmd)
        cmd = '%s >>%s 2>&1' % (cmd, _output_file)

    if save_output:
        return subprocess.check_output(cmd, cwd=work_dir, env=all_env, shell=True)
    else:
        subprocess.check_call(cmd, cwd=work_dir, env=all_env, shell=True)
Example #22
def text_mof(pdf_part, pdf_year, pdf_number, pdf_name):

    s3_url = "https://mgax-mof.s3.amazonaws.com"

    pdf_url = s3_url + "/" + pdf_name

    with temp_dir() as tmp:
        pdf_local_path = tmp / pdf_name
        text_path = tmp / 'plain.txt'

        with pdf_local_path.open('wb') as f:
            resp = requests.get(pdf_url, stream=True)
            assert resp.status_code == 200
            for chunk in FileWrapper(resp.raw):
                f.write(chunk)

        subprocess.check_call(['pdftotext', pdf_local_path, text_path])

        with text_path.open('r') as f:
            raw_text = f.read()

        json = dict([('part', int(pdf_part)),
                     ('year', int(pdf_year)),
                     ('number', int(pdf_number)),
                     ('slug', pdf_name.split('.')[0]),
                     ('text', raw_text)])

        resp = requests.put(flask.current_app.config['ELASTIC_SEARCH_URL']
                            + pdf_name.split('.')[0],
                            data=flask.json.dumps(json))
        assert 200 <= resp.status_code < 300, repr(resp)
Example #23
def get_compressor(ctools=COMPRESSING_TOOLS):
    global UPTO

    am_dir_pattern = "/usr/share/automake-*"
    am_files_pattern = am_dir_pattern + "/am/*.am"

    if len(glob.glob(am_dir_pattern)) == 0:  # automake is not installed.
        UPTO = STEP_PRECONFIGURE

        logging.warn("Automake not found. The process will go up to the step: " + UPTO)

        return ctools[-1]  # fallback to the default (gzip).

    for ct in ctools:
        # NOTE: bzip2 tries compressing input from stdin even if it is invoked
        # with the --version option, so give it null input (/dev/null).
        # subprocess.call (not check_call) is used here so that a missing tool
        # or a non-matching grep just yields a non-zero status instead of
        # raising CalledProcessError and aborting the probe.
        c_exists = 0 == subprocess.call(
            "%(command)s --version > /dev/null 2> /dev/null < /dev/null" % ct, shell=True
        )

        if c_exists:
            am_support_c = 0 == subprocess.call(
                "grep -q -e '^dist-%s:' %s 2> /dev/null" % (ct.command, am_files_pattern), shell=True
            )
            if am_support_c:
                return ct

    # gzip must exist at least and not reached here:
    raise RuntimeError("No compressor found! Aborting...")
Example #24
def run_script(prefix, dist, action='post-link', env_prefix=None):
    """
    call the post-link (or pre-unlink) script, and return True on success,
    False on failure
    """
    path = join(prefix, 'Scripts' if on_win else 'bin', '.%s-%s.%s' % (
            name_dist(dist),
            action,
            'bat' if on_win else 'sh'))
    if not isfile(path):
        return True
    if on_win:
        try:
            args = [os.environ['COMSPEC'], '/c', path]
        except KeyError:
            return False
    else:
        args = ['/bin/bash', path]
    env = os.environ
    env['PREFIX'] = env_prefix or prefix
    env['PKG_NAME'], env['PKG_VERSION'], env['PKG_BUILDNUM'] = str(dist).rsplit('-', 2)
    try:
        subprocess.check_call(args, env=env)
    except subprocess.CalledProcessError:
        return False
    return True
Example #25
 def pull_file(self, device_path, destination_path, remove_original=False):
   """Copies or moves the specified file on the device to the host."""
   subprocess.check_call(self._adb_command([
       'pull', device_path, destination_path]))
   if remove_original:
     subprocess.check_call(self._adb_command([
         'shell', 'rm', device_path]))
Example #26
def set_versions(base_version, build_number, label):
    print("mvers: " + check_string_output(["agvtool", "mvers", "-terse1"]))
    print("vers:  " + check_string_output(["agvtool", "vers", "-terse"]))
    marketing_version = "%s.%s %s" % (base_version, build_number, label)
    build_version = "%s.%s" % (base_version, build_number)
    subprocess.check_call(["agvtool", "new-marketing-version", marketing_version])
    subprocess.check_call(["agvtool", "new-version", "-all", build_version])
Example #27
def prepare_release(build_number, image_source_path):
    release_dir = "release"
    try:
        os.makedirs(release_dir)
    except FileExistsError:
        pass

    tag = 'builds/%s/%s' % (base_version, build_number)
    subprocess.check_call(['git', 'tag', tag])

    import appcast
    appcast_text = appcast.generate_appcast(image_source_path, updates_template_file, build_number, updates_signing_key_file)
    with open(os.path.join(release_dir, updates_appcast_file), 'w') as appcast_file:
        appcast_file.write(appcast_text)

    import shutil
    copied_image = os.path.join(release_dir, os.path.basename(image_source_path))
    unversioned_image = os.path.join(release_dir, artifact_prefix + ".dmg")
    shutil.copyfile(image_source_path, copied_image)
    shutil.copyfile(image_source_path, unversioned_image)

    publish_release_notes_file = os.path.join(release_dir, os.path.basename(release_notes_file))
    shutil.copyfile(release_notes_file, publish_release_notes_file)
    publish_release_notes_filebase, publish_release_notes_ext = os.path.splitext(publish_release_notes_file)
    publish_release_notes_version_file = "%s-%s%s" % (publish_release_notes_filebase, build_number, publish_release_notes_ext)
    shutil.copyfile(release_notes_file, publish_release_notes_version_file)
Example #28
def sklearn2pmml(estimator, mapper, pmml, with_repr = False, verbose = False):
	if(not isinstance(estimator, BaseEstimator)):
		raise TypeError("The estimator object is not an instance of " + BaseEstimator.__name__)
	if((mapper is not None) and (not isinstance(mapper, DataFrameMapper))):
		raise TypeError("The mapper object is not an instance of " + DataFrameMapper.__name__)
	cmd = ["java", "-cp", os.pathsep.join(_classpath()), "org.jpmml.sklearn.Main"]
	dumps = []
	try:
		estimator_pkl = _dump(estimator)
		cmd.extend(["--pkl-estimator-input", estimator_pkl])
		dumps.append(estimator_pkl)
		if(with_repr):
			estimator_repr = repr(estimator)
			cmd.extend(["--repr-estimator", estimator_repr])
		if(mapper):
			mapper_pkl = _dump(mapper)
			cmd.extend(["--pkl-mapper-input", mapper_pkl])
			dumps.append(mapper_pkl)
			if(with_repr):
				mapper_repr = repr(mapper)
				cmd.extend(["--repr-mapper", mapper_repr])
		cmd.extend(["--pmml-output", pmml])
		if(verbose):
			print(cmd)
		subprocess.check_call(cmd)
	finally:
		for dump in dumps:
			os.remove(dump)
Example #29
    def _commit_and_push(self):
        """ Commit all the files and push. """

        deploy = self._deploy_branch
        source = self._source_branch
        remote = self._remote_name

        source_commit = uni_check_output(['git', 'rev-parse', source])
        commit_message = (
            'Nikola auto commit.\n\n'
            'Source commit: %s'
            'Nikola version: %s' % (source_commit, __version__)
        )

        commands = [
            ['git', 'add', '-A'],
            ['git', 'commit', '-m', commit_message],
            ['git', 'push', '-f', remote, '%s:%s' % (deploy, deploy)],
            ['git', 'checkout', source],
        ]

        for command in commands:
            self.logger.info("==> {0}".format(command))
            try:
                subprocess.check_call(command)
            except subprocess.CalledProcessError as e:
                self.logger.error(
                    'Failed GitHub deployment — command {0} '
                    'returned {1}'.format(e.cmd, e.returncode)
                )
                sys.exit(e.returncode)
Example #30
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, iface=None):
    """Send packets at layer 2 using tcpreplay for performance
    pps:  packets per second
    mpbs: MBits per second
    realtime: use packet's timestamp, bending time with realtime value
    loop: number of times to process the packet list
    iface: output interface """
    if iface is None:
        iface = conf.iface
    argv = [conf.prog.tcpreplay, "--intf1=%s" % iface ]
    if pps is not None:
        argv.append("--pps=%i" % pps)
    elif mbps is not None:
        argv.append("--mbps=%i" % mbps)
    elif realtime is not None:
        argv.append("--multiplier=%i" % realtime)
    else:
        argv.append("--topspeed")

    if loop:
        argv.append("--loop=%i" % loop)

    f = get_temp_file()
    argv.append(f)
    wrpcap(f, x)
    try:
        subprocess.check_call(argv)
    except KeyboardInterrupt:
        log_interactive.info("Interrupted by user")
    except Exception as e:
        log_interactive.error("while trying to exec [%s]: %s" % (argv[0],e))
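A hedged usage sketch for sendpfast() above: replay a captured packet list at a fixed rate. The capture file and interface name are illustrative, and Scapy plus a tcpreplay binary configured in conf.prog.tcpreplay are assumed to be available.

# Assumes Scapy names (rdpcap, sendpfast, conf) are in scope, e.g. from scapy.all import *
pkts = rdpcap("capture.pcap")                      # hypothetical capture file
sendpfast(pkts, pps=300, loop=10, iface="eth0")    # ~300 packets/s, 10 passes over the list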
Example #31
def generate_and_upload_docs():
    old_cwd = os.getcwd()
    tmp = tempfile.TemporaryDirectory()
    os.chdir(tmp.name)

    log_info('Entered temporary directory', os.getcwd(), 'cloning repository now')

    clone_url = f'https://{username}:{access_token}@github.com/{repository}.git'
    subprocess.check_call(['git', 'clone', '--single-branch', '--branch', 'gh-pages', clone_url, '.'])
    subprocess.check_call(['git', 'config', 'user.name', username])
    subprocess.check_call(['git', 'config', 'user.email', f'{username}@users.noreply.github.com'])

    for project in changed_projects:
        project_docs_out = os.path.join(tmp.name, project.cmake_project_name)
        project_docs_in = project.generate_docs()
        shutil.rmtree(project_docs_out, ignore_errors=True)
        copy_tree(project_docs_in, project_docs_out)

    if subprocess.check_output(['git', 'status', '-s']).decode(sys.getdefaultencoding()):
        with LogGroup('Uploading'):
            log_info('Creating a commit')
            subprocess.check_call(['git', 'add', '.'])
            subprocess.check_call(['git', 'status'])
            subprocess.check_call(['git', 'commit', '-a', '-m', f'Generated docs for {sha[0:8]}'])
            log_info('Pushing')
            subprocess.check_call(['git', 'push', clone_url, 'gh-pages'])
    else:
        log_info('There are no changes to documentation')

    os.chdir(old_cwd)
Example #32
def toeps(counter, ext):
    global dirname
    subprocess.check_call(['convert',\
      '%s/plot-%s.%s' % (dirname, counter, ext), \
      '%s/plot-%s.eps' % (dirname, counter)])
Example #33
    def build_extension(self, ext):
        # self.parallel = 4
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        # required for auto-detection & inclusion of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep

        debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
        cfg = "Debug" if debug else "Release"

        # CMake lets you override the generator - we need to check this.
        # Can be set with Conda-Build, for example.
        cmake_generator = os.environ.get("CMAKE_GENERATOR", "")

        # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir),
            "-DPYTHON_EXECUTABLE={}".format(sys.executable),
            "-DCMAKE_BUILD_TYPE={}".format(cfg),  # not used on MSVC, but no harm
            "-DCNIS_WITH_PYTHON_API=ON",
        ]
        build_args = []
        # Adding CMake arguments set as environment variable
        # (needed e.g. to build for ARM OSx on conda-forge)
        if "CMAKE_ARGS" in os.environ:
            cmake_args += [item for item in os.environ["CMAKE_ARGS"].split(" ") if item]

        if self.compiler.compiler_type != "msvc":
            # Using Ninja-build since it a) is available as a wheel and b)
            # multithreads automatically. MSVC would require all variables be
            # exported for Ninja to pick it up, which is a little tricky to do.
            # Users can override the generator with CMAKE_GENERATOR in CMake
            # 3.15+.
            if not cmake_generator:
                try:
                    import ninja  # noqa: F401

                    cmake_args += ["-GNinja"]
                except ImportError:
                    pass

        else:

            # Single config generators are handled "normally"
            single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})

            # CMake allows an arch-in-generator style for backward compatibility
            contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})

            # Specify the arch if using MSVC generator, but only if it doesn't
            # contain a backward-compatibility arch spec already in the
            # generator name.
            if not single_config and not contains_arch:
                cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]

            # Multi-config generators have a different way to specify configs
            if not single_config:
                cmake_args += [
                    "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
                ]
                build_args += ["--config", cfg]

        if sys.platform.startswith("darwin"):
            # Cross-compile support for macOS - respect ARCHFLAGS if set
            archs = re.findall(r"-arch (\S+)", os.environ.get("ARCHFLAGS", ""))
            if archs:
                cmake_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(";".join(archs))]

        # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
        # across all generators.

        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            # self.parallel is a Python 3 only way to set parallel jobs by hand
            # using -j in the build_ext call, not supported by pip or PyPA-build.
            if hasattr(self, "parallel") and self.parallel:
                # CMake 3.12+ only.
                build_args += ["-j{}".format(self.parallel)]

        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        subprocess.check_call(
            ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
        )
        subprocess.check_call(
            ["cmake", "--build", "."] + build_args, cwd=self.build_temp
        )
Example #34
def main():
    # Read arguments
    args = argparser.parse_args()

    architecture = args.architecture
    particle_diameter = args.particle_diameter
    trainging_dir = args.training_dir
    annot_dir = args.annot_dir
    input_size = args.input_size

    output_dir = args.output_directory
    if subprocess.os.path.exists(output_dir):
        sp_global_def.ERROR("Output folder already exists. Stop execution.")
    else:
        subprocess.os.makedirs(output_dir)
        sp_global_def.write_command(output_dir)

    num_patches = args.num_patches
    overlap_patches = args.overlap_patches
    train_times = args.train_times
    saved_weights_name = args.saved_weights_name
    saved_weights_name = subprocess.os.path.join(output_dir, saved_weights_name)
    pretrained_weights_name = args.pretrained_weights_name
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    np_epoch = args.np_epoch
    object_scale = args.object_scale
    no_object_scale = args.no_object_scale
    coord_scale = args.coord_scale
    valid_image_dir = args.valid_image_dir
    valid_annot_dir = args.valid_annot_dir
    fine_tune = args.fine_tune
    gpu_fraction = args.gpu_fraction
    num_cpu = args.num_cpu
    cryolo_train_path = args.cryolo_train_path

    skiplowpass = args.skiplowpass
    cutoff = args.cutoff
    filtered_dir = args.filtered_dir

    warmup = args.warmup
    early_stop = int(args.early)

    str_gpus = [str(entry).strip() for entry in args.gpu.split(",")]
    arg_gpu = " ".join(str_gpus)

    # Create config file
    model_dict = {
        "architecture": architecture,
        "input_size": input_size,
        "anchors": [particle_diameter, particle_diameter],
        "overlap_patches": overlap_patches,
        "max_box_per_image": 1000,
        "num_patches": num_patches,
    }

    if not skiplowpass:
        model_dict["filter"] = [cutoff, filtered_dir]
    else:
        if args.usejanni:
            model_dict["filter"] = [
                args.janni_model,
                args.janni_overlap,
                args.janni_batches,
                filtered_dir,
            ]

    train_dict = {
        "train_image_folder": trainging_dir,
        "train_annot_folder": annot_dir,
        "train_times": train_times,
        "pretrained_weights": pretrained_weights_name,
        "batch_size": batch_size,
        "learning_rate": learning_rate,
        "nb_epoch": np_epoch,
        "warmup_epochs": 0,
        "object_scale": object_scale,
        "no_object_scale": no_object_scale,
        "coord_scale": coord_scale,
        "class_scale": 1.0,
        "saved_weights_name": saved_weights_name,
        "debug": True,
        "log_path": "cryolo_logs/",
    }

    valid_dict = {
        "valid_image_folder": valid_image_dir,
        "valid_annot_folder": valid_annot_dir,
        "valid_times": 1,
    }
    dict = {"model": model_dict, "train": train_dict, "valid": valid_dict}

    path = subprocess.os.path.join(output_dir, "config_yolo.json")
    with open(path, "w") as f:
        json.dump(dict, f, ensure_ascii=False, indent=4)

    # Run the training
    config_argument = "-c=" + path
    warmup_argument = "-w=" + str(warmup)
    gpu_argument = "-g " + arg_gpu
    early_stop = "-e=" + str(early_stop)
    fine_tune_argument = ""
    if fine_tune:
        fine_tune_argument = "--fine_tune"
    gpu_fraction_arg = "--gpu_fraction=1.0"
    if gpu_fraction < 1.0 and gpu_fraction > 0.0:
        gpu_fraction_arg = "--gpu_fraction=" + str(gpu_fraction)

    num_cpu_arg = ""
    if num_cpu != -1:
        num_cpu_arg = "--num_cpu=" + str(num_cpu)
    cryolo_ex_pth = "cryolo_train.py"
    if cryolo_train_path:
        cryolo_ex_pth = cryolo_train_path
    """Multiline Comment0"""

    if fine_tune:
        warmup_argument = "-w=0"
    command = [
        cryolo_ex_pth,
        config_argument,
        warmup_argument,
        gpu_argument,
        early_stop,
        gpu_fraction_arg,
    ]
    if fine_tune:
        command.append(fine_tune_argument)
    if num_cpu != -1:
        command.append(num_cpu_arg)

    subprocess.check_call(command)
Example #35
 def test1_01_version(self):
     subprocess.check_call(['gcat_workflow_cloud', '--version'])
Example #36
    def test_simple(self):
        platforms = []

        # Xilinx Spartan6
        platforms += [("official",  "minispartan6")]
        platforms += [("community", "sp605")]

        # Xilinx Artix7
        platforms += [("official",  "arty")]
        platforms += [("official",  "nexys4ddr")]
        platforms += [("official",  "nexys_video")]
        platforms += [("partner",   "netv2")]
        platforms += [("community", "ac701")]

        # Xilinx Kintex7
        platforms += [("official",  "kc705")]
        platforms += [("official",  "genesys2")]
        platforms += [("community", "kx2")]

        # Xilinx Kintex Ultrascale
        platforms += [("official",  "kcu105")]

        # Intel Cyclone4
        platforms += [("official",  "de0nano")]
        platforms += [("community", "de2_115")]

        # Intel Cyclone5
        platforms += [("community", "de1soc")]

        # Intel Max10
        platforms += [("community", "de10lite")]

        # Lattice iCE40
        platforms += [("partner",   "tinyfpga_bx")]
        platforms += [("partner",   "fomu_evt")]
        platforms += [("partner",   "fomu_hacker")]
        platforms += [("partner",   "fomu_pvt")]

        # Lattice MachXO2
        platforms += [("official",  "machxo3")]

        # Lattice ECP3
        platforms += [("official",  "versa_ecp3")]

        # Lattice ECP5
        platforms += [("official",  "versa_ecp5")]
        platforms += [("partner",   "ulx3s")]
        platforms += [("partner",   "trellisboard")]
        platforms += [("community", "ecp5_evn")]

        # Microsemi PolarFire
        platforms += [("official",  "avalanche")]

        for s, p in platforms:
            with self.subTest(platform=p):
                cmd = """\
litex_boards/official/targets/simple.py litex_boards.{s}.platforms.{p} \
    --cpu-type=vexriscv     \
    --no-compile-software   \
    --no-compile-gateware   \
    --uart-name="stub"      \
""".format(s=s, p=p)
                subprocess.check_call(cmd, shell=True)
Example #37
H0 uniform 0 %e\n\
PHI0 uniform 0 %f\n\
COSIOTA uniform -1 1\n\
PSI uniform 0 %f" % (h0ul, np.pi, np.pi/2.)

  priorf = 'test.prior'
  f = open(priorf, 'w')
  f.write(priorfile)
  f.close()

  # run code
  commandline="\
%s --detectors %s --par-file %s --input-files %s --outfile %s --prior-file %s --Nlive %s --Nmcmcinitial %s --sampleprior %s" \
% (execu, dets, parf, datafile, outfile, priorf, Nlive, Nmcmcinitial, priorsamples)

  sp.check_call(commandline, shell=True)

  # read in prior samples
  f = h5py.File(outfile, 'r')
  a = f['lalinference']
  h0samps = a['lalinference_nest']['nested_samples']['H0'][:]

  # get normed histogram of samples
  [n, nedges] = np.histogram(h0samps, bins=20, range=(0., h0ul), density=True)
  nc = np.cumsum(n)*(nedges[1]-nedges[0])

  stat, p = ss.kstest(nc, 'uniform')

  print "K-S test p-value for upper range of %e = %f" % (h0ul, p)

  if p < 0.005:
    return 1

Example #38

def check_stab_of_linear_props(rounds_start, rounds_end, samples_num):
    for rounds in range(rounds_start, rounds_end + 1):
        if check_linear_props(rounds, rounds, samples_num) != 0:
            print("Transformation is linear at {0}-th round".format(rounds))
            return 1
    return 0
        

if __name__ == '__main__':
    rounds_start = 1
    rounds_end = 100
    samples_num = 15

    # create directory samples/
    create_samples_dir()

    # creating samples in dir samples/
    generate_samples_for_checking_linear_props(samples_num)

    # encrypting and decrypting
    check_call(["clefia_exec.exe", str(rounds_start), str(rounds_end)])

    # check linear properties
    check_linear_props(rounds_start, rounds_end, samples_num)

    # check stability of linear properties
    #check_stab_of_linear_props(rounds_start, rounds_end, samples_num)

Example #39
def main():
    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.join(os.path.abspath(os.path.dirname(__file__)), "..",
                              "config.ini")
    config.read_file(open(configfile))

    src_dir = config["environment"]["SRCDIR"]
    build_dir = config["environment"]["BUILDDIR"]
    tests_dir = os.path.join(src_dir, 'test', 'functional')

    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(
        add_help=False,
        usage='%(prog)s [test_runner.py options] [script options] [scripts]',
        description=__doc__,
        epilog='''
    Help text and arguments for individual test script:''',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '--coverage',
        action='store_true',
        help='generate a basic coverage report for the RPC interface')
    parser.add_argument(
        '--exclude',
        '-x',
        help=
        'specify a comma-separated-list of scripts to exclude. Do not include the .py extension in the name.'
    )
    parser.add_argument(
        '--extended',
        action='store_true',
        help='run the extended test suite in addition to the basic tests')
    parser.add_argument('--cutoff',
                        type=int,
                        default=DEFAULT_EXTENDED_CUTOFF,
                        help='set the cutoff runtime for what tests get run')
    parser.add_argument(
        '--force',
        '-f',
        action='store_true',
        help=
        'run tests even on platforms where they are disabled by default (e.g. windows).'
    )
    parser.add_argument('--help',
                        '-h',
                        '-?',
                        action='store_true',
                        help='print help text and exit')
    parser.add_argument(
        '--jobs',
        '-j',
        type=int,
        default=DEFAULT_JOBS,
        help='how many test scripts to run in parallel. Default=4.')
    parser.add_argument(
        '--keepcache',
        '-k',
        action='store_true',
        help=
        'the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.'
    )
    parser.add_argument('--quiet',
                        '-q',
                        action='store_true',
                        help='only print results summary and failure logs')
    parser.add_argument('--tmpdirprefix',
                        '-t',
                        default=tempfile.gettempdir(),
                        help="Root directory for datadirs")
    parser.add_argument(
        '--junitouput',
        '-ju',
        default=os.path.join(build_dir, 'junit_results.xml'),
        help="file that will store JUnit formatted test results.")

    args, unknown_args = parser.parse_known_args()

    # Create a set to store arguments and create the passon string
    tests = set(arg for arg in unknown_args if arg[:2] != "--")
    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
    passon_args.append("--configfile={}".format(configfile))

    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)

    # Create base test directory
    tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format(
        args.tmpdirprefix, datetime.datetime.now())
    os.makedirs(tmpdir)

    logging.debug("Temporary test directory at {}".format(tmpdir))

    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_utils = config["components"].getboolean("ENABLE_UTILS")
    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
        print(
            "Tests currently disabled on Windows by default. Use --force option to enable"
        )
        sys.exit(0)

    if not (enable_wallet and enable_utils and enable_bitcoind):
        print(
            "No functional tests to run. Wallet, utils, and bitcoind must all be enabled"
        )
        print(
            "Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make"
        )
        sys.exit(0)

    # Build list of tests
    all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)

    # Check all tests with parameters actually exist
    for test in TEST_PARAMS:
        if not test in all_scripts:
            print("ERROR: Test with parameter {} does not exist, check it has "
                  "not been renamed or deleted".format(test))
            sys.exit(1)

    if tests:
        # Individual tests have been specified. Run specified tests that exist
        # in the all_scripts list. Accept the name with or without .py
        # extension.
        test_list = [
            t for t in all_scripts
            if (t in tests or re.sub(".py$", "", t) in tests)
        ]

        # Allow for wildcard at the end of the name, so a single input can
        # match multiple tests
        for test in tests:
            if test.endswith('*'):
                test_list.extend(
                    [t for t in all_scripts if t.startswith(test[:-1])])
        # Make the list unique
        test_list = list(set(test_list))

        # do not cut off explicitly specified tests
        cutoff = sys.maxsize
    else:
        # No individual tests have been specified.
        # Run all tests that do not exceed
        test_list = all_scripts
        cutoff = args.cutoff
        if args.extended:
            cutoff = sys.maxsize

    # Remove the test cases that the user has explicitly asked to exclude.
    if args.exclude:
        for exclude_test in args.exclude.split(','):
            if exclude_test + ".py" in test_list:
                test_list.remove(exclude_test + ".py")

    # Use and update timings from build_dir only if separate
    # build directory is used. We do not want to pollute source directory.
    build_timings = None
    if (src_dir != build_dir):
        build_timings = Timings(os.path.join(build_dir, 'timing.json'))

    # Always use timings from src_dir if present
    src_timings = Timings(
        os.path.join(src_dir, "test", "functional", 'timing.json'))

    # Add test parameters and remove long running tests if needed
    test_list = get_tests_to_run(test_list, TEST_PARAMS, cutoff, src_timings,
                                 build_timings)

    if not test_list:
        print(
            "No valid test scripts specified. Check that your test is in one "
            "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests"
        )
        sys.exit(0)

    if args.help:
        # Print help for test_runner.py, then print help of the first script
        # and exit.
        parser.print_help()
        subprocess.check_call([os.path.join(tests_dir, test_list[0]), '-h'])
        sys.exit(0)

    if not args.keepcache:
        shutil.rmtree(os.path.join(build_dir, "test", "cache"),
                      ignore_errors=True)

    run_tests(test_list, build_dir, tests_dir, args.junitouput,
              config["environment"]["EXEEXT"], tmpdir, args.jobs,
              args.coverage, passon_args, build_timings)
Example #40
    def sendSig(self, interruptSignal):
        hit = 0
        # try signalling the process group
        if not hit and self.useProcGroup and runtime.platformType == "posix":
            sig = getattr(signal, "SIG" + interruptSignal, None)

            if sig is None:
                log.msg("signal module is missing SIG{0}".format(interruptSignal))
            elif not hasattr(os, "kill"):
                log.msg("os module is missing the 'kill' function")
            elif self.process.pgid is None:
                log.msg("self.process has no pgid")
            else:
                log.msg("trying to kill process group {0}".format(
                        self.process.pgid))
                try:
                    os.killpg(self.process.pgid, sig)
                    log.msg(" signal {0} sent successfully".format(sig))
                    self.process.pgid = None
                    hit = 1
                except OSError:
                    log.msg('failed to kill process group (ignored): {0}'.format(
                            (sys.exc_info()[1])))
                    # probably no-such-process, maybe because there is no process
                    # group
                    pass

        elif runtime.platformType == "win32":
            if interruptSignal is None:
                log.msg("interruptSignal==None, only pretending to kill child")
            elif self.process.pid is not None:
                if interruptSignal == "TERM":
                    log.msg("using TASKKILL PID /T to kill pid {0}".format(
                            self.process.pid))
                    subprocess.check_call(
                        "TASKKILL /PID {0} /T".format(self.process.pid))
                    log.msg("taskkill'd pid {0}".format(self.process.pid))
                    hit = 1
                elif interruptSignal == "KILL":
                    log.msg("using TASKKILL PID /F /T to kill pid {0}".format(
                            self.process.pid))
                    subprocess.check_call(
                        "TASKKILL /F /PID {0} /T".format(self.process.pid))
                    log.msg("taskkill'd pid {0}".format(self.process.pid))
                    hit = 1

        # try signalling the process itself (works on Windows too, sorta)
        if not hit:
            try:
                log.msg("trying process.signalProcess('{0}')".format(
                        interruptSignal))
                self.process.signalProcess(interruptSignal)
                log.msg(" signal {0} sent successfully".format(interruptSignal))
                hit = 1
            except OSError:
                log.err("from process.signalProcess:")
                # could be no-such-process, because they finished very recently
                pass
            except error.ProcessExitedAlready:
                log.msg("Process exited already - can't kill")
                # the process has already exited, and likely finished() has
                # been called already or will be called shortly
                pass

        return hit
Example #41
 def test_import(self):
     subprocess.check_call([sys.executable, '-c', 'import youtube_dl'],
                           cwd=rootDir)
Example #42
 t_r_ok = T_R_COMPILER.findall(filename)
 relax_ok = RELAX_COMPILER.findall(filename)
 e_t_ok = E_T_COMPILER.findall(filename)
 acc_data_path = os.path.join(acc_input_dir, filename)
 emg_data_path = os.path.join(emg_input_dir, filename)
 if not os.access(acc_data_path, os.R_OK) or not os.access(
         emg_data_path, os.R_OK):
     print("error", filename)
     continue
 output_name = re.sub(".txt", ".json", filename)
 output_path = os.path.join(output_dir, output_name)
 if len(extra_ok) > 0:
     log_name = re.sub(".txt", "_log.csv", filename)
     log_path = os.path.join(log_input_dir, log_name)
     subprocess.check_call([
         "python", "mix_reader.py", '2', acc_data_path, emg_data_path,
         '--csv', log_path, '--output_dir', output_path
     ])
 elif len(t_r_ok) > 0:
     log_name = re.sub(".txt", "_log.csv", filename)
     log_path = os.path.join(log_input_dir, log_name)
     subprocess.check_call([
         "python", "mix_reader.py", '1', acc_data_path, emg_data_path,
         '--csv', log_path, '--output_dir', output_path
     ])
 elif len(e_t_ok) > 0:
     log_name = re.sub(".txt", "_log.csv", filename)
     log_path = os.path.join(log_input_dir, log_name)
     subprocess.check_call([
         "python", "mix_reader.py", '3', acc_data_path, emg_data_path,
         '--csv', log_path, '--output_dir', output_path
     ])
Example #43
def main(argv=sys.argv):
    parser = argparse.ArgumentParser(
        description='Generate and setup Grunt infrastructure, '
        'then compile JS/LESS bundles for Plone.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i',
                        '--instance',
                        dest='instance',
                        help='path to instance executable. If not provided, '
                        'will look in bin this was executed from for '
                        'instance or client1')
    parser.add_argument(
        '-s',
        '--site-id',
        dest='site_id',
        default='Plone',
        help='ID of the Plone site to fetch the configuration from. '
        'Used only while Gruntfile generation.')
    parser.add_argument(
        '-b',
        '--bundle',
        dest='bundle',
        default='all',
        help='Name of bundle to compile. Used while compile step.')
    parser.add_argument(
        '--compile-dir',
        dest='compile_dir',
        default='',
        help='Output directory for the compiled bundle files. '
        'Used only while Gruntfile generation. '
        'If not given the directory is looked up from Plone registry. ')
    parser.add_argument('-d',
                        '--base-dir',
                        dest='base_dir',
                        default='.',
                        help='Base directory for this script '
                        '(by default current working directory).')
    parser.add_argument('-G',
                        '--skip-generate-gruntfile',
                        dest='skip_gruntfile',
                        action='store_true',
                        help='Skip generation of Gruntfile.js')
    parser.add_argument(
        '-I',
        '--skip-npm-install',
        dest='skip_npminstall',
        action='store_true',
        help='Skip npm install step',
    )
    parser.add_argument(
        '-C',
        '--skip-compile',
        dest='skip_compile',
        action='store_true',
        help='Skip compile step (running grunt)',
    )

    args = parser.parse_args()

    base_path = args.base_dir
    if base_path == '.':
        base_path = os.getcwd()

    # generates only if not already there
    generate_package_json(base_path)
    if not args.skip_npminstall:
        cmd = [NPM_CMD, 'install']
        print('Setup npm env')
        print('Running command: %s' % ' '.join(cmd))
        subprocess.check_call(cmd)

    # Generate Gruntfile
    grunt = os.path.join(base_path, 'node_modules', 'grunt-cli', 'bin',
                         'grunt')
    if not args.skip_gruntfile:
        generate_gruntfile(base_path, args.instance, args.site_id,
                           args.compile_dir)
    gruntfile = os.path.join(base_path, 'Gruntfile.js')
    if not os.path.exists(gruntfile):
        print("Error, no Gruntfile.js generated at {0} where expected".format(
            gruntfile))

    if not args.skip_compile:
        print('Compile {0}'.format(args.bundle))
        cmd = [
            grunt, '--gruntfile={0}'.format(gruntfile),
            'compile-{0}'.format(args.bundle)
        ]
        print('Running command: %s' % ' '.join(cmd))
        subprocess.check_call(cmd)

    print('Done.')
Example #44
 def test_main_exec(self):
     subprocess.check_call(
         [sys.executable, 'youtube_dl/__main__.py', '--version'],
         cwd=rootDir,
         stdout=_DEV_NULL)
Example #45
def hgadd(dest, directories):
    """Inform hg of the files in |directories|."""
    print("hg addremoving...")
    for d in directories:
        subprocess.check_call(["hg", "addremove", "%s/%s" % (dest, d)])
Example #46
                    f.write(r.content)
                os.execl(sys.executable, sys.executable, *sys.argv)
except:
    pass

# Repositories cloning
if "init" in sys.argv[1:]:
    for name in repos.keys():
        os.chdir(os.path.join(current_path))
        if not os.path.exists(name):
            url, need_recursive, need_develop, sha1 = repos[name]
            # clone repo (recursive if needed)
            print("[cloning " + name + "]...")
            full_url = url + name
            opts = "--recursive" if need_recursive else ""
            subprocess.check_call("git clone " + full_url + " " + opts, shell=True)
            if sha1 is not None:
                os.chdir(os.path.join(current_path, name))
                os.system("git checkout {:7x}".format(sha1))

# Repositories update
if "update" in sys.argv[1:]:
    for name in repos.keys():
        os.chdir(os.path.join(current_path))
        url, need_recursive, need_develop, sha1 = repos[name]
        print(url)
        if not os.path.exists(name):
            raise Exception("{} not initialized, please (re)-run init and install first.".format(name))
        # update
        print("[updating " + name + "]...")
        os.chdir(os.path.join(current_path, name))
Example #47
def makeTempDirectory():
    tmpDirectory = getDataLocation('tmp')
    check_call(['mkdir', '-p', tmpDirectory])
Example #48
def run_script(args):
  fnull = open(os.devnull, 'w')
  subprocess.check_call(args, stdout=fnull, stderr=fnull)
Example #49
def real_main():
    (opts, action, cluster_name) = parse_args()

    # Input parameter validation
    if opts.wait is not None:
        # NOTE: DeprecationWarnings are silent in 2.7+ by default.
        #       To show them, run Python with the -Wdefault switch.
        # See: https://docs.python.org/3.5/whatsnew/2.7.html
        warnings.warn(
            "This option is deprecated and has no effect. "
            "spark-ec2 automatically waits as long as necessary for clusters to startup.",
            DeprecationWarning)

    if opts.ebs_vol_num > 8:
        print >> stderr, "ebs-vol-num cannot be greater than 8"
        sys.exit(1)

    try:
        conn = ec2.connect_to_region(opts.region)
    except Exception as e:
        print >> stderr, (e)
        sys.exit(1)

    # Select an AZ at random if it was not specified.
    if opts.zone == "":
        opts.zone = random.choice(conn.get_all_zones()).name

    if action == "launch":
        if opts.slaves <= 0:
            print >> sys.stderr, "ERROR: You have to start at least 1 slave"
            sys.exit(1)
        if opts.resume:
            (master_nodes,
             slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        else:
            (master_nodes,
             slave_nodes) = launch_cluster(conn, opts, cluster_name)
        wait_for_cluster_state(cluster_instances=(master_nodes + slave_nodes),
                               cluster_state='ssh-ready',
                               opts=opts)
        setup_cluster(conn, master_nodes, slave_nodes, opts, True)

    elif action == "destroy":
        print "Are you sure you want to destroy the cluster %s?" % cluster_name
        print "The following instances will be terminated:"
        (master_nodes, slave_nodes) = get_existing_cluster(conn,
                                                           opts,
                                                           cluster_name,
                                                           die_on_error=False)
        for inst in master_nodes + slave_nodes:
            print "> %s" % inst.public_dns_name

        msg = "ALL DATA ON ALL NODES WILL BE LOST!!\nDestroy cluster %s (y/N): " % cluster_name
        response = raw_input(msg)
        if response == "y":
            print "Terminating master..."
            for inst in master_nodes:
                inst.terminate()
            print "Terminating slaves..."
            for inst in slave_nodes:
                inst.terminate()

            # Delete security groups as well
            if opts.delete_groups:
                print "Deleting security groups (this will take some time)..."
                group_names = [
                    cluster_name + "-master", cluster_name + "-slaves"
                ]
                wait_for_cluster_state(cluster_instances=(master_nodes +
                                                          slave_nodes),
                                       cluster_state='terminated',
                                       opts=opts)
                attempt = 1
                while attempt <= 3:
                    print "Attempt %d" % attempt
                    groups = [
                        g for g in conn.get_all_security_groups()
                        if g.name in group_names
                    ]
                    success = True
                    # Delete individual rules in all groups before deleting groups to
                    # remove dependencies between them
                    for group in groups:
                        print "Deleting rules in security group " + group.name
                        for rule in group.rules:
                            for grant in rule.grants:
                                success &= group.revoke(
                                    ip_protocol=rule.ip_protocol,
                                    from_port=rule.from_port,
                                    to_port=rule.to_port,
                                    src_group=grant)

                    # Sleep for AWS eventual-consistency to catch up, and for instances
                    # to terminate
                    time.sleep(30)  # Yes, it does have to be this long :-(
                    for group in groups:
                        try:
                            conn.delete_security_group(group.name)
                            print "Deleted security group " + group.name
                        except boto.exception.EC2ResponseError:
                            success = False
                            print "Failed to delete security group " + group.name

                    # Unfortunately, group.revoke() returns True even if a rule was not
                    # deleted, so this needs to be rerun if something fails
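                    # (see the retry sketch after this example)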
                    if success:
                        break

                    attempt += 1

                if not success:
                    print "Failed to delete all security groups after 3 tries."
                    print "Try re-running in a few minutes."

    elif action == "login":
        (master_nodes,
         slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        master = master_nodes[0].public_dns_name
        print "Logging into master " + master + "..."
        proxy_opt = []
        if opts.proxy_port is not None:
            proxy_opt = ['-D', opts.proxy_port]
        subprocess.check_call(
            ssh_command(opts) + proxy_opt +
            ['-t', '-t', "%s@%s" % (opts.user, master)])

    elif action == "reboot-slaves":
        response = raw_input("Are you sure you want to reboot the cluster " +
                             cluster_name + " slaves?\n" +
                             "Reboot cluster slaves " + cluster_name +
                             " (y/N): ")
        if response == "y":
            (master_nodes,
             slave_nodes) = get_existing_cluster(conn,
                                                 opts,
                                                 cluster_name,
                                                 die_on_error=False)
            print "Rebooting slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    print "Rebooting " + inst.id
                    inst.reboot()

    elif action == "get-master":
        (master_nodes,
         slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print master_nodes[0].public_dns_name

    elif action == "stop":
        response = raw_input(
            "Are you sure you want to stop the cluster " + cluster_name +
            "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
            "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
            "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
            "All data on spot-instance slaves will be lost.\n" +
            "Stop cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes,
             slave_nodes) = get_existing_cluster(conn,
                                                 opts,
                                                 cluster_name,
                                                 die_on_error=False)
            print "Stopping master..."
            for inst in master_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            print "Stopping slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    if inst.spot_instance_request_id:
                        inst.terminate()
                    else:
                        inst.stop()

    elif action == "start":
        (master_nodes,
         slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print "Starting slaves..."
        for inst in slave_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        print "Starting master..."
        for inst in master_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        wait_for_cluster_state(cluster_instances=(master_nodes + slave_nodes),
                               cluster_state='ssh-ready',
                               opts=opts)
        setup_cluster(conn, master_nodes, slave_nodes, opts, False)

    else:
        print >> stderr, "Invalid action: %s" % action
        sys.exit(1)
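
Stripped of the EC2 specifics, the delete-security-groups branch above is a bounded retry loop: attempt the cleanup, give eventual consistency time to catch up, and stop early on success. A standalone sketch of that shape, with try_cleanup standing in for a hypothetical revoke-and-delete pass:

import time

def retry(try_cleanup, attempts=3, delay=30):
    """Call try_cleanup() until it returns True or the attempts run out."""
    for attempt in range(1, attempts + 1):
        print("Attempt %d" % attempt)
        if try_cleanup():
            return True
        time.sleep(delay)  # wait for AWS eventual consistency before retrying
    return False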
Example #50
0
    def do_install(self, name, data):
        if name in data:
            utils.makedirs(self.output_dir)
            LOGGER.notice('Downloading: ' + data[name])
            zip_file = BytesIO()
            zip_file.write(requests.get(data[name]).content)
            LOGGER.notice('Extracting: {0} into plugins'.format(name))
            utils.extract_all(zip_file, 'plugins')
            dest_path = os.path.join('plugins', name)
        else:
            try:
                plugin_path = utils.get_plugin_path(name)
            except:
                LOGGER.error("Can't find plugin " + name)
                return False

            utils.makedirs(self.output_dir)
            dest_path = os.path.join(self.output_dir, name)
            if os.path.exists(dest_path):
                LOGGER.error("{0} is already installed".format(name))
                return False

            LOGGER.notice('Copying {0} into plugins'.format(plugin_path))
            shutil.copytree(plugin_path, dest_path)

        reqpath = os.path.join(dest_path, 'requirements.txt')
        print(reqpath)
        if os.path.exists(reqpath):
            LOGGER.notice('This plugin has Python dependencies.')
            LOGGER.notice('Installing dependencies with pip...')
            try:
                subprocess.check_call(('pip', 'install', '-r', reqpath))
            except subprocess.CalledProcessError:
                LOGGER.error('Could not install the dependencies.')
                print('Contents of the requirements.txt file:\n')
                with codecs.open(reqpath, 'rb', 'utf-8') as fh:
                    print(indent(fh.read(), 4 * ' '))
                print('You have to install those yourself or through a '
                      'package manager.')
            else:
                LOGGER.notice('Dependency installation succeeded.')
        reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')
        if os.path.exists(reqnpypath):
            LOGGER.notice('This plugin has third-party '
                          'dependencies you need to install '
                          'manually.')
            print('Contents of the requirements-nonpy.txt file:\n')
            with codecs.open(reqnpypath, 'rb', 'utf-8') as fh:
                for l in fh.readlines():
                    i, j = l.split('::')
                    print(indent(i.strip(), 4 * ' '))
                    print(indent(j.strip(), 8 * ' '))
                    print()

            print('You have to install those yourself or through a package '
                  'manager.')
        confpypath = os.path.join(dest_path, 'conf.py.sample')
        if os.path.exists(confpypath):
            LOGGER.notice('This plugin has a sample config file.')
            print('Contents of the conf.py.sample file:\n')
            with codecs.open(confpypath, 'rb', 'utf-8') as fh:
                print(
                    indent(
                        pygments.highlight(fh.read(), PythonLexer(),
                                           TerminalFormatter()), 4 * ' '))
        return True
Example #51
0
    def run(self):
        """Run a command to compile and build assets."""
        subprocess.check_call('sh ./asreview/webapp/compile_assets.sh',
                              shell=True)
def _call(args):
    DEVNULL = open(os.devnull, 'w')
    subprocess.check_call(" ".join(args),
                          shell=True, stdout=DEVNULL, stderr=DEVNULL)
Example #53
0
def systemctl(action: str, *units: str) -> int:
    """Invokes systemctl on the respective units."""

    systemctl_ = CONFIG.get('MollyGuard', 'systemctl', fallback=SYSTEMCTL)
    return check_call((systemctl_, action, *units))
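
Hypothetical usage of the wrapper above (the unit names are illustrative); check_call raises CalledProcessError on a non-zero exit status:

from subprocess import CalledProcessError

try:
    systemctl('restart', 'nginx.service', 'php-fpm.service')
except CalledProcessError as err:
    print('systemctl exited with status', err.returncode)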
Example #54
0
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
    active_master = master_nodes[0].public_dns_name

    num_disks = get_num_disks(opts.instance_type)
    hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
    mapred_local_dirs = "/mnt/hadoop/mrlocal"
    spark_local_dirs = "/mnt/spark"
    if num_disks > 1:
        for i in range(2, num_disks + 1):
            hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
            mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
            spark_local_dirs += ",/mnt%d/spark" % i

    cluster_url = "%s:7077" % active_master

    if "." in opts.spark_version:
        # Pre-built spark & shark deploy
        (spark_v, shark_v) = get_spark_shark_version(opts)
    else:
        # Spark-only custom deploy
        spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
        shark_v = ""
        modules = filter(lambda x: x != "shark", modules)

    template_vars = {
        "master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
        "active_master": active_master,
        "slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
        "cluster_url": cluster_url,
        "hdfs_data_dirs": hdfs_data_dirs,
        "mapred_local_dirs": mapred_local_dirs,
        "spark_local_dirs": spark_local_dirs,
        "swap": str(opts.swap),
        "modules": '\n'.join(modules),
        "spark_version": spark_v,
        "shark_version": shark_v,
        "hadoop_major_version": opts.hadoop_major_version,
        "spark_worker_instances": "%d" % opts.worker_instances,
        "spark_master_opts": opts.master_opts
    }

    if opts.copy_aws_credentials:
        template_vars["aws_access_key_id"] = conn.aws_access_key_id
        template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
    else:
        template_vars["aws_access_key_id"] = ""
        template_vars["aws_secret_access_key"] = ""

    # Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
    tmp_dir = tempfile.mkdtemp()
    for path, dirs, files in os.walk(root_dir):
        if path.find(".svn") == -1:
            dest_dir = os.path.join('/', path[len(root_dir):])
            local_dir = tmp_dir + dest_dir
            if not os.path.exists(local_dir):
                os.makedirs(local_dir)
            for filename in files:
                if filename[0] not in '#.~' and filename[-1] != '~':
                    dest_file = os.path.join(dest_dir, filename)
                    local_file = tmp_dir + dest_file
                    with open(os.path.join(path, filename)) as src:
                        with open(local_file, "w") as dest:
                            text = src.read()
                            for key in template_vars:
                                text = text.replace("{{" + key + "}}",
                                                    template_vars[key])
                            dest.write(text)
                            dest.close()
    # rsync the whole directory over to the master machine
    command = [
        'rsync', '-rv', '-e',
        stringify_command(ssh_command(opts)),
        "%s/" % tmp_dir,
        "%s@%s:/" % (opts.user, active_master)
    ]
    subprocess.check_call(command)
    # Remove the temp directory we created above
    shutil.rmtree(tmp_dir)
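
The nested loop above fills in {{key}} placeholders with one str.replace call per key; the same substitution can be written as a single regular-expression pass. A sketch, with template_vars as built above (unknown keys are left untouched):

import re

def render(text, template_vars):
    # Replace every {{key}} occurrence with its value from template_vars
    return re.sub(r"\{\{(\w+)\}\}",
                  lambda m: template_vars.get(m.group(1), m.group(0)),
                  text)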
Example #55
0
    def run(self):  # noqa
        """Run a command to compile and build assets."""
        subprocess.check_call('./airflow/www/compile_assets.sh')
Example #56
0
def cryptsetup(action: str, *args: str) -> int:
    """Runs cryptsetup."""

    cryptsetup_ = CONFIG.get('MollyGuard', 'cryptsetup', fallback=CRYPTSETUP)
    return check_call((cryptsetup_, action, *args))
#08.04.2020 - Holoflow 0.1.

import subprocess
import argparse

#Argument parsing
parser = argparse.ArgumentParser(description='Runs holoflow pipeline.')
parser.add_argument('-hostrg', help="host reference genome", dest="host_ref_gen", required=True)
parser.add_argument('-ibam', help="all bam file", dest="all_bam", required=True)
parser.add_argument('-1', help="path1", dest="read1", required=True)
parser.add_argument('-2', help="path2", dest="read2", required=True)
parser.add_argument('-obam', help="host bam file", dest="host_bam", required=True)
args = parser.parse_args()

all_bam=args.all_bam
host_ref_gen=args.host_ref_gen
host_bam=args.host_bam
read1=args.read1
read2=args.read2

# Run
hostbam1Cmd = 'module load tools samtools/1.9 && samtools view -T'+host_ref_gen+' -b -F12 '+all_bam+' > '+host_bam+''
subprocess.check_call(hostbam1Cmd, shell=True)
hostbam2Cmd = 'module load tools samtools/1.9 && samtools view -T '+host_ref_gen+' -b -f12 '+all_bam+' | samtools fastq -1 '+read1+' -2 '+read2+' -'
subprocess.check_call(hostbam2Cmd, shell=True)
rmAllbamCmd = 'rm '+all_bam
subprocess.check_call(rmAllbamCmd, shell=True)
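
Because these commands are assembled by string concatenation and run with shell=True, a path containing spaces or shell metacharacters would break them. A minimal hardening sketch for the removal step, using the standard-library shlex module:

import shlex

rmAllbamCmd = 'rm ' + shlex.quote(all_bam)
subprocess.check_call(rmAllbamCmd, shell=True)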
Example #58
0
    def setUpClass(cls):
        """Run before all tests:
        Creates an auth configuration"""
        cls.port = QGIS_POSTGRES_SERVER_PORT
        cls.username = '******'
        cls.password = '******'
        cls.dbname = 'test_password'
        cls.tempfolder = QGIS_PG_TEST_PATH
        cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'),
                                          'certs_keys')
        cls.hostname = 'localhost'
        cls.data_path = os.path.join(cls.tempfolder, 'data')
        os.mkdir(cls.data_path)

        # Disable SSL entirely for the setup operations below
        env = dict(os.environ)
        env['PGSSLMODE'] = 'disable'

        cls.setUpAuth()
        subprocess.check_call([
            os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'initdb'), '-D',
            cls.data_path
        ])

        cls.server = subprocess.Popen([
            os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'postgres'), '-D',
            cls.data_path, '-c',
            "config_file=%s" % cls.pg_conf
        ],
                                      env=env,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
        # Wait max 10 secs for the server to start
        end = time.time() + 10
        while True:
            line = cls.server.stderr.readline()
            print(line)
            if line.find(b"database system is ready to accept") != -1:
                break
            if time.time() > end:
                raise Exception("Timeout connecting to PostgreSQL")
        # Create a DB
        subprocess.check_call([
            os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'createdb'), '-h',
            'localhost', '-p', cls.port, 'test_password'
        ],
                              env=env)
        # Inject test SQL from test path
        test_sql = os.path.join(unitTestDataPath('provider'),
                                'testdata_pg.sql')
        subprocess.check_call([
            os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h',
            'localhost', '-p', cls.port, '-f', test_sql, cls.dbname
        ],
                              env=env)
        # Create a role
        subprocess.check_call([
            os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH,
                         'psql'), '-h', 'localhost', '-p', cls.port, '-c',
            'CREATE ROLE "%s" WITH SUPERUSER LOGIN PASSWORD \'%s\'' %
            (cls.username, cls.password), cls.dbname
        ],
                              env=env)
Example #59
0
    def run(self):
        """Run twine to upload to test.pypi.org."""
        subprocess.check_call(  # nosec
            ("twine upload --repository-url https://test.pypi.org/legacy/" +
             " --verbose dist/*").split())
def twitter_send(data):
    dt_now = datetime.datetime.now()
    date = dt_now.strftime('%m月%d日 %H時%M分%S秒')
    img_ok = True
    send_status = "動作なし\n"
    img_path = "start.jpg"
    rospy.loginfo("status : %d",data.status)
    if data.status == 1:
        send_status = "ただいまスタートゾーンから出発しました。\n"
        img_path = "start.jpg"
    elif data.status == 5:
        img_ok = False
        if data.towel1:
            send_status = "バスタオル1枚目を干しに行きます。\n"
        else:
            send_status = "バスタオル2枚目を干しに行きます。\n"
    elif data.status == 10:
        img_ok = False
        send_status = "シーツを干しに行きます。\n"
    elif data.status == 7:
        img_path = "bathtowel.jpg"
        if data.towel1:
            send_status = "バスタオル1枚目を干し終わりました。\n"
        else:
            send_status = "バスタオル2枚目を干し終わりました。\n"
    elif data.status == 12:
        img_ok = False
        send_status = "シーツを干し中です。\n"
    elif data.status == 15:
        img_path = "sheets.jpg"
        send_status = "シーツを干し終わりました。\n"
    elif data.status == 18:
        img_ok = False
        send_status = "スタートゾーンに戻ります。\n"
    elif data.status == 21:
        img_path = "arrival.jpg"
        send_status = "スタートゾーンに到着しました。\n"
    else:
        return

    text = date + "\n明石高専自動機からお伝えします。\n練習中\n" + coat_msg[data.coat] + fight_msg[data.fight] + send_status + "#ロボコン\n" + "#明石高専\n" + "#AkashiSuperDry"
    print(text)
    # Print the raw post data
    media_id = 0
    param = {'status' : text}
    start_time = time.time()
    while not os.path.isfile(img_path) and img_ok:
        if time.time() - start_time > 2:
            print("画像ファイルなし")
            img_ok = False
            break
    
    if img_ok:
        img_data = {'media' : open(img_path, 'rb')}
        req_media = twitter.post(url_media, files = img_data)
        if req_media.status_code != 200:
            print("画像アップロード失敗 : "+ req_media.text)
        else:
            print("画像アップロード成功 : "+ req_media.text)
            media_id = json.loads(req_media.text)['media_id']
            print('media_id:%d'% media_id)
            param['media_ids'] = [media_id]

    req_media = twitter.post(url_text, params = param)
    args = ['rm','-f',img_path]
    try:
        res = subprocess.check_call(args)
        print(res)
    except:
        print("cannot remove : " + img_path)