Example #1
    def do_patch(self):
        info('Patching package sources...', 3)

        filesdir = join(self.builddir, 'File')

        for child in self.package.xpath('b:RemoveFile', namespaces=XP_NSMAP):
            if isabs(child.get('Id')):
                error('Invalid RemoveFile action: Id attribute should not be an absolute path ("%s")' % child.get('Id'), 4)

            f = abspath(join(filesdir, child.get('Id')))

            if isfile(f):
                os.unlink(f)
            else:
                error('Invalid RemoveFile action: "%s" does not exist in "%s"' % (child.get('Id'), filesdir), 4)

        for child in self.package.xpath('b:CopyFile', namespaces=XP_NSMAP):
            if isabs(child.get('Src')):
                error('Invalid CopyFile action: Src attribute should not be an absolute path ("%s")' % child.get('Src'), 4)

            if isabs(child.get('Dest')):
                error('Invalid CopyFile action: Dest attribute should not be an absolute path ("%s")' % child.get('Dest'), 4)

            src = abspath(join(WIXDIR, child.get('Src'), child.get('Id')))
            dest = abspath(join(filesdir, child.get('Dest'), child.get('Id')))

            if isfile(src):
                copyfile(src, dest)
            else:
                error('Invalid CopyFile action: "%s" does not exist in "%s"' % (child.get('Src'), WIXDIR), 4)
Example #2
    def __init__(self, docker_client, tester, tester_name,
                 tester_address, force_rm, nb_jobs, testsuite, 
                 testresults, use_fedora_selinux_policy):
        assert path.isabs(testsuite.path), 'testsuite needs to be an absolute path'
        assert path.isabs(testresults), 'testresults needs to be an absolute path'
        self.docker_client = docker_client
        self.force_rm = force_rm
        self.environment={"CGAL_TESTER" : tester,
                          "CGAL_TESTER_NAME" : tester_name,
                          "CGAL_TESTER_ADDRESS": tester_address,
                          "CGAL_NUMBER_OF_JOBS" : nb_jobs
        }
        self.host_config = docker.utils.create_host_config(binds={
            testsuite.path:
            {
                'bind': '/mnt/testsuite',
                'ro': True
            },
            testresults:
            {
                'bind': '/mnt/testresults',
                'ro': False
            }
        })

        if use_fedora_selinux_policy:
            self.host_config['Binds'][0] += 'z'
            self.host_config['Binds'][1] += 'z'
Example #3
def SpawnPatchTarget(destination_folder, payload_path):
    try:
        if payload_path and not path.isabs(payload_path):
            payload_path = path.join(RoverSettings._LaunchedFromDirectory, payload_path)

        if not path.isabs(destination_folder):
            destination_folder = path.join(RoverSettings._LaunchedFromDirectory, destination_folder)

        if not path.exists(str(payload_path)):
            fallback_url = 'https://dotnetcli.blob.core.windows.net/dotnet/Sdk/rel-1.0.0/dotnet-dev-debian-x64.latest.tar.gz'               

            payload_filename    = 'dotnet.latest.tar.gz'
            payload_path        = path.join(RoverSettings._objDirectory, payload_filename)
            
            if not path.exists(payload_path):
                RoverPrint(RoverMods.Blue('is downloading latest .NET CLI for bootstrapping (%s)'%(payload_filename)))

            urlretrieve(fallback_url, payload_path)
        
        # let's force the path to be made absolute - assuming that the payload path is relative to the directory we launched the script from.
        # otherwise if we have an abs path already - fantastic.

        RoverShellCall('tar xf %s -C %s'%(payload_path, destination_folder))    
    except:
        RoverSettings._DevMode = True
        UnexpectedRoverException(sys.exc_info())
    
    RoverSettings.SetPatchTargetPath(path.join(RoverSettings._ScriptDirectory, destination_folder))
Example #4
def startstop(action=None, stdout=os.devnull, stderr=None, stdin=os.devnull,
              pidfile='pid.txt', startmsg='started with pid %s'):
    '''
        This is the "front-end" method for starting, stopping, and
        restarting the daemon.
    '''
    if len(action) > 1:
        setup()
        from os import path
        if not path.isabs(stdout):
            stdout = path.join(getAngelVarPath(), stdout)
        if not path.isabs(stderr):
            stderr = path.join(getAngelVarPath(), stderr)
        if not path.isabs(stdin):
            stdin = path.join(getAngelVarPath(), stdin)
        if not path.isabs(pidfile):
            pidfile = path.join(getAngelVarPath(), pidfile)
        try:
            pf = open(pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None
        if 'stop' == action or 'restart' == action:
            if not pid:
                mess = "Could not stop, pid file '%s' missing.%s"
                sys.stderr.write(mess % (pidfile, os.linesep))
                sys.exit(1)
            try:
                countSIGTERM = 0
                sleepTime = 1
                while 1:
                    if countSIGTERM > 3:
                        sys.stderr.write("Process not responding, sending SIGKILL to process with PID %i.%s" % (pid, os.linesep))
                        os.kill(pid,SIGKILL)
                    else:
                        os.kill(pid,SIGTERM)
                    countSIGTERM += 1
                    time.sleep(sleepTime)
                    # send signal 0 straight away, no need to re-enter the loop
                    os.kill(pid, 0)
                    sleepTime = sleepTime + 1
            except OSError as err:
                err = str(err)
                if err.find("No such process") > 0:
                    os.remove(pidfile)
                    if 'stop' == action:
                        sys.exit(0)
                    action = 'start'
                    pid = None
                else:
                    print(str(err))
                    sys.exit(1)
        if 'start' == action:
            if pid:
                mess = "Start aborted since pid file '%s' exists.%s"
                sys.stderr.write(mess % (pidfile, os.linesep))
                sys.exit(1)
            daemonize(stdout,stderr,stdin,pidfile,startmsg)
            return
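
The stop branch above escalates from SIGTERM to SIGKILL while polling with signal 0. A minimal, self-contained sketch of that pattern on POSIX (the function name stop_pid and its limits are assumptions, not part of the original module):

import os
import time
from signal import SIGTERM, SIGKILL

def stop_pid(pid, term_attempts=3, poll_interval=1.0):
    """Send SIGTERM up to term_attempts times, then SIGKILL; return once the process is gone."""
    sent = 0
    while True:
        try:
            os.kill(pid, SIGKILL if sent >= term_attempts else SIGTERM)
            sent += 1
            time.sleep(poll_interval)
            os.kill(pid, 0)  # probe: raises OSError once the process has exited
        except OSError:
            return
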
Example #5
File: server.py  Project: radziksh/floe
def correct_resources_path(previewdir):
    res_dir_path = join(previewdir, "resources")
    guard_slashes = blend4web.exporter.guard_slashes
    if exists(res_dir_path):
        shutil.rmtree(res_dir_path)
    os.mkdir(res_dir_path)
    root = bpy.context.user_preferences.addons[__package__].preferences.b4w_src_path
    json_path = join(previewdir, "preview.json")

    json_parsed = json.loads(blend4web.exporter.get_main_json_data())

    if "images" in json_parsed:
        for img in json_parsed["images"]:
            if isabs(img["filepath"]) or len(img["filepath"].split("..")) > 3:
                file_name = copy_resource(img["filepath"], previewdir)
                img["filepath"] = guard_slashes(join("resources", file_name))
    if "sounds" in json_parsed:
        for sound in json_parsed["sounds"]:
            if isabs(sound["filepath"]) or len(sound["filepath"].split("..")) > 3:
                file_name = copy_resource(sound["filepath"], previewdir)
                sound["filepath"] = guard_slashes(join("resources", file_name))
    if len(os.listdir(res_dir_path)):
        try:
            f  = open(json_path, "w", encoding="utf-8")
        except IOError as exp:
            raise FileError("Permission denied")
        else:
            f.write(json.dumps(json_parsed))
            f.close()
Example #6
 def sanityChecks(self, cliargs):
     passed = HSN2Service.sanityChecks(self, cliargs)
     if path.isdir(cliargs.nugget):
         logging.error("'%s' is a directory" % cliargs.nugget)
         passed = False
     if not access(cliargs.nugget, X_OK):
         logging.error("'%s' isn't executable or does not exist!" % cliargs.nugget)
         passed = False
     if not path.isabs(cliargs.inputmapping):
         cliargs.inputmapping = self.mappings + cliargs.inputmapping
     if path.isdir(cliargs.inputmapping):
         logging.error("'%s' is a directory" % cliargs.inputmapping)
         passed = False
     elif not path.isfile(cliargs.inputmapping):
         logging.error("'%s' does not exist!" % cliargs.inputmapping)
         passed = False
     if not path.isabs(cliargs.outputmapping):
         cliargs.outputmapping = self.mappings + cliargs.outputmapping
     if path.isdir(cliargs.outputmapping):
         logging.error("'%s' is a directory" % cliargs.outputmapping)
         passed = False
     elif not path.isfile(cliargs.outputmapping):
         logging.error("'%s' does not exist!" % cliargs.outputmapping)
         passed = False
     if passed is True:
         cliargs.inputmapping = self.importMapping(cliargs.inputmapping)
         cliargs.outputmapping = self.importMapping(cliargs.outputmapping)
         if cliargs.inputmapping is None or cliargs.outputmapping is None:
             passed = False
     return passed
Example #7
def read_settings(path=None, override=None):
    if path:
        local_settings = get_settings_from_file(path)
        # Make the paths relative to the settings file
        for p in ['PATH', 'OUTPUT_PATH', 'THEME']:
            if p in local_settings and local_settings[p] is not None \
                    and not isabs(local_settings[p]):
                absp = os.path.abspath(os.path.normpath(os.path.join(
                    os.path.dirname(path), local_settings[p])))
                if p not in ('THEME',) or os.path.exists(absp):
                    local_settings[p] = absp

        if isinstance(local_settings['PLUGIN_PATH'], six.string_types):
            logger.warning("Defining %s setting as string has been deprecated (should be a list)" % 'PLUGIN_PATH')
            local_settings['PLUGIN_PATH'] = [local_settings['PLUGIN_PATH']]
        else:
            if 'PLUGIN_PATH' in local_settings and local_settings['PLUGIN_PATH'] is not None:
                local_settings['PLUGIN_PATH'] = [os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(path), pluginpath)))
                                    if not isabs(pluginpath) else pluginpath for pluginpath in local_settings['PLUGIN_PATH']]
    else:
        local_settings = copy.deepcopy(DEFAULT_CONFIG)

    if override:
        local_settings.update(override)

    parsed_settings = configure_settings(local_settings)
    # This is because there doesn't seem to be a way to pass extra
    # parameters to docutils directive handlers, so we have to have a
    # variable here that we'll import from within Pygments.run (see
    # rstdirectives.py) to see what the user defaults were.
    global PYGMENTS_RST_OPTIONS
    PYGMENTS_RST_OPTIONS = parsed_settings.get('PYGMENTS_RST_OPTIONS', None)
    return parsed_settings
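
The path handling above applies one idiom repeatedly: a relative path found in a settings file is resolved against the directory of the settings file, not the current working directory. A small sketch of that idiom in isolation (resolve_relative_to is a hypothetical helper name, not part of the original code):

import os
from os.path import isabs

def resolve_relative_to(config_path, value):
    """Return value unchanged if absolute, otherwise anchor it at the config file's directory."""
    if isabs(value):
        return value
    return os.path.abspath(os.path.normpath(
        os.path.join(os.path.dirname(config_path), value)))

# e.g. resolve_relative_to('/site/pelicanconf.py', 'content') -> '/site/content'
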
Example #8
    def __init__(self, name, test_definition):
        self.name = name
        test_definition = deep_merge(default_config, test_definition)
        # quick shortcuts
        self.test_env = test_definition['environment']
        self.test_meta = test_definition['meta']
        self.test_commands = test_definition.get('test_commands', [])
        # take care of commands ...
        self.test_commands = _build_exec_array(self.test_commands)
        self.test_meta['test_before'] = \
            _build_exec_array(self.test_meta.get('test_before', None))
        self.test_meta['test_after'] = \
            _build_exec_array(self.test_meta.get('test_after', None))

        # okay.
        # let's keep all file references relative to the configuration
        # file. easy to remember.
        configfilepath = realpath(dirname(self.test_meta.get('_configfile',
                                                             './dummy')))
        # self.TEMPLATE / .TEMPLATE_NAME
        tmp = self.test_meta['docker_compose_template']
        if not isabs(tmp):
            tmp = realpath(join(configfilepath, tmp))
        self.template = tmp
        self.template_name = basename(self.template)
        # self.BASEDIR
        tmp = self.test_meta.get('test_basedir', configfilepath)
        if not isabs(tmp):
            tmp = realpath(join(configfilepath, tmp))
        self.base_dir = tmp
        # self.SANITIZED_NAME, .TEST_DIR
        self.sanitized_name = resub("[^a-zA-Z0-9_]", "-", self.name)
        self.test_dir = dbg_tr_get_testdir(self.base_dir, self.sanitized_name)
        # extend SELF.TEST_ENV with TEST_DIR
        self.test_env['test_dir'] = self.test_dir
        # create SELF.COMMANDLINE
        self.commandline = copy.copy(default_commandline_start)
        for param in self.test_meta['docker_compose_params']:
            self.commandline.append(param)
        for key, val in self.test_env.items():
            self.commandline.append("-e")
            self.commandline.append("%s=%s" % (key, val))
        self.commandline.append("--rm")
        self.commandline.extend(copy.copy(default_commandline_end))
        self.commandline.append(self.test_meta['test_service'])
        # create .STATE, .RESULT, .EXCEPTION, .REASON
        self.state = self.NOTRUN
        self.results = []
        self.exception = None
        self.reason = None
        # log setup
        # NO LOGGING BEFORE HERE
        log_filename = join(self.base_dir, basename(self.test_dir)) + ".log"
        self.log = get_logger("t-%s" % self.name, filename=log_filename)
        # some debug output
        self.log.info("base commandline '%s'" % " ".join(self.commandline))
        self.log.debug("test directory '%s'" % self.test_dir)
        self.log.debug("template path '%s'" % self.template)
        for key, val in self.test_env.items():
            self.log.debug("env %s=%s" % (key, val))
Example #9
def load_pipeline_from_template(name, func=None, args=None, kwargs=None):
    """Given a name, loads that pipeline from datalad.crawler.pipelines

    and later from other locations

    Parameters
    ----------
    name: str
        Name of the pipeline defining the filename. Or full path to it (TODO)
    args: dict, optional
        Positional args for the pipeline, passed as *args into the pipeline call
    kwargs: dict, optional
        Keyword args for the pipeline, passed as **kwargs into the pipeline call
    """

    if isabs(name) or exists(name):
        raise NotImplementedError("Don't know how to import straight path %s yet" % name)

    # explicit isabs since might not exist
    filename = name \
        if (isabs(name) or exists(name)) \
        else _find_pipeline(name)

    if filename:
        if not exists(filename):
            raise IOError("Pipeline file %s is N/A" % filename)
    else:
        raise ValueError("could not find pipeline for %s" % name)

    return load_pipeline_from_module(filename, func=func, args=args, kwargs=kwargs)
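
A call might look like the following; the pipeline name and keyword arguments are purely illustrative, and per the guard above only a bare name (not an absolute or existing path) is accepted:

# hypothetical usage; 'openfmri' and the kwargs are illustrative only
pipeline = load_pipeline_from_template('openfmri', kwargs={'dataset': 'ds000001'})
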
Example #10
 def __init__(self, protocol, path, mp,
              host=None,
              user=None,
              pw=None,
              fs_type=FsType.UNKNOWN,
              read_only=False,
              no_atime_updates=False,
              direct_attribute_access=False,
              domain=None):
     assert is_valid_protocol(protocol), "%s is not a valid protocol code" % protocol
     self.protocol = protocol
     self.path = path
     assert isabs(mp), "Mount point %s not absolute" % mp
     self.mp = mp
     if ' ' in self.mp:
         logger.warn("'%s': Mount points with spaces are not supported by get_mount_info()"
                     % self.mp)
     assert isabs(self.mp)
     self.host = host
     self.user = user
     self.pw = pw
     assert is_valid_fstype(fs_type), "%s is not a valid fs_type code" % fs_type
     self.fs_type = fs_type
     self.read_only = read_only
     self.no_atime_updates = no_atime_updates
     self.direct_attribute_access = direct_attribute_access
     self.domain = domain
Example #11
def run_bam_to_bam(subread_set_file, barcode_set_file, output_file_name,
                   nproc=1):
    bc = BarcodeSet(barcode_set_file)
    if len(bc.resourceReaders()) > 1:
        raise NotImplementedError("Multi-FASTA BarcodeSet input is not supported.")
    barcode_fasta = bc.toExternalFiles()[0]
    with SubreadSet(subread_set_file) as ds:
        # TODO(nechols)(2016-03-15): replace with BarcodedSubreadSet
        ds_new = SubreadSet(strict=True)
        for ext_res in ds.externalResources:
            subreads_bam = ext_res.bam
            scraps_bam = ext_res.scraps
            assert subreads_bam is not None
            if scraps_bam is None:
                raise TypeError("The input SubreadSet must include scraps.")
            new_prefix = op.join(op.dirname(output_file_name),
                re.sub(".subreads.bam", "_barcoded", op.basename(subreads_bam)))
            if not op.isabs(subreads_bam):
                subreads_bam = op.join(op.dirname(subread_set_file),
                    subreads_bam)
            if not op.isabs(scraps_bam):
                scraps_bam = op.join(op.dirname(subread_set_file), scraps_bam)
            args = [
                "bam2bam",
                "-j", str(nproc),
                "-b", str(nproc),
                "-o", new_prefix,
                "--barcodes", barcode_fasta,
                subreads_bam, scraps_bam
            ]
            print(args)
            log.info(" ".join(args))
            result = run_cmd(" ".join(args),
                             stdout_fh=sys.stdout,
                             stderr_fh=sys.stderr)
            if result.exit_code != 0:
                return result.exit_code
            subreads_bam = new_prefix + ".subreads.bam"
            scraps_bam = new_prefix + ".scraps.bam"
            assert op.isfile(subreads_bam), "Missing {f}".format(f=subreads_bam)
            # FIXME we need a more general method for this
            ext_res_new = ExternalResource()
            ext_res_new.resourceId = subreads_bam
            ext_res_new.metaType = 'PacBio.SubreadFile.SubreadBamFile'
            ext_res_new.addIndices([subreads_bam + ".pbi"])
            ext_res_inner = ExternalResources()
            ext_res_scraps = ExternalResource()
            ext_res_scraps.resourceId = scraps_bam
            ext_res_scraps.metaType = 'PacBio.SubreadFile.ScrapsBamFile'
            ext_res_scraps.addIndices([scraps_bam + ".pbi"])
            ext_res_inner.append(ext_res_scraps)
            ext_res_new.append(ext_res_inner)
            ds_new.externalResources.append(ext_res_new)
        ds._filters.clearCallbacks()
        ds_new._filters = ds._filters
        ds_new._populateMetaTypes()
        ds_new.updateCounts()
        ds_new.write(output_file_name)
    return 0
Example #12
def _validate_paths(virtualenv_path, setup_or_req_file):
    """
    Validate that both the virtualenv folder path and the "other file" are
    absolute paths, additionally checking that the "other file" exists and is
    a file.
    """
    assert path.isabs(virtualenv_path)
    assert path.isabs(setup_or_req_file) and path.isfile(setup_or_req_file)
Example #13
File: plugin.py  Project: baraserg/topology
def pytest_configure(config):
    """
    pytest hook to configure plugin.
    """
    # Get registered options
    platform = config.getoption('--topology-platform')
    plot_format = config.getoption('--topology-plot-format')
    plot_dir = config.getoption('--topology-plot-dir')
    nml_dir = config.getoption('--topology-nml-dir')
    injection_file = config.getoption('--topology-inject')

    # Determine plot directory and create it if required
    if plot_dir:
        if not isabs(plot_dir):
            plot_dir = join(abspath(getcwd()), plot_dir)
        if not exists(plot_dir):
            makedirs(plot_dir)

    # Determine NML export directory and create it if required
    if nml_dir:
        if not isabs(nml_dir):
            nml_dir = join(abspath(getcwd()), nml_dir)
        if not exists(nml_dir):
            makedirs(nml_dir)

    # Parse attributes injection file
    from ..injection import parse_attribute_injection
    injected_attr = None
    if injection_file is not None:

        # Get a list of all testing directories
        search_paths = [
            abspath(arg) for arg in config.args if isdir(arg)
        ]

        injected_attr = parse_attribute_injection(
            injection_file,
            search_paths=search_paths
        )

    # Create and register plugin
    config._topology_plugin = TopologyPlugin(
        platform, plot_dir, plot_format.lstrip('.'), nml_dir, injected_attr
    )
    config.pluginmanager.register(config._topology_plugin)

    # Add test_id marker
    config.addinivalue_line(
        'markers',
        'test_id(id): assign a test identifier to the test'
    )

    # Add platform_incompatible marker
    config.addinivalue_line(
        'markers',
        'platform_incompatible(platforms): '
        'mark a test as incompatible with a list of platform engines'
    )
Example #14
File: __main__.py  Project: cykerway/blrm
def safe(arg, blacklist, matcher):
    '''
    Check whether an argument is safe to delete.

    Parameters
    ----------
    arg
        Input argument.
    blacklist
        A list of files that should be kept.
    matcher : str
        Name of the matcher: `str`, `fnmatch` or `re`.

    Returns
    -------
    bool
        True if argument is safe to delete, False otherwise.

    Raises
    ------
    Exception
        The matcher is unknown.
    '''
    path = abspath(expanduser(arg))

    if matcher == 'str':
        for pattern in blacklist:
            pattern = expanduser(pattern)
            if isabs(pattern):
                if pattern == path:
                    return False
            else:
                if pattern in path:
                    return False
        return True
    elif matcher == 'fnmatch':
        for pattern in blacklist:
            pattern = expanduser(pattern)
            if isabs(pattern):
                if fnmatch(path, pattern):
                    return False
            else:
                if fnsearch(path, pattern):
                    return False
        return True
    elif matcher == 're':
        for pattern in blacklist:
            pattern = expanduser(pattern)
            if isabs(pattern):
                if re.match(pattern, path):
                    return False
            else:
                if re.search(pattern, path):
                    return False
        return True
    else:
        raise Exception('unknown matcher {}'.format(matcher))
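
With the 're' matcher, an absolute pattern must match from the start of the resolved path (re.match) while a relative pattern may match anywhere in it (re.search). A short illustration; the blacklist entries and paths are made up:

blacklist = ['/etc/passwd', r'\.git$']
print(safe('/tmp/scratch.txt', blacklist, 're'))       # True: nothing matches
print(safe('/etc/passwd', blacklist, 're'))            # False: absolute pattern matches the path
print(safe('/home/user/repo/.git', blacklist, 're'))   # False: relative pattern found in the path
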
Example #15
def main():
    parser = create_parser()
    try:
        args = parser.parse_args()
    except argparse.ArgumentError as exc:
        logging.exception('Error parsing arguments.')
        parser.error(str(exc))
        sys.exit(1)

    conf_file = args.config

    # load config file
    with open(conf_file, 'r') as yml_file:
        config = yaml.load(yml_file)

    # read config values
    bot_name = config.get("BOT_NAME",    '')
    daemon   = config.get("DAEMON",      False)
    token    = config.get("SLACK_TOKEN", '')
    implants = config.get("IMPLANTS",    '')
    logfile  = config.get("LOGFILE",     '')
    debug    = config.get("DEBUG",       True)

    # logging setup
    loglvl = logging.DEBUG if debug else logging.INFO
    if not op.isabs(logfile):
        logfile = op.join(op.dirname(op.abspath(conf_file)), logfile)
    setup_logging(logfile, loglvl)

    # add implants folders to sys.path and correct relative paths to be relative to the config file
    config_dir = op.dirname(op.abspath(conf_file))
    for name in implants:
        im_file = implants[name].get('file', '')
        if im_file and not op.isabs(im_file):
            im_file = implants[name]['file'] = op.join(config_dir, im_file)

        if op.exists(im_file):
            im_dir = op.dirname(im_file)
            if im_dir not in sys.path:
                sys.path.insert(0, im_dir)

    # setup bot
    setup_bot_and_guardian(bot_name, token, implants)

    # start
    if daemon:
        log.info('Launching daemon.')
        import daemon
        try:
            with daemon.DaemonContext(pidfile="daemon.pid",
                                      stdout=open("daemon_out.log", "wb"),
                                      stderr=open("daemon-err.log", "wb")):
                bot_loop()
        except:
            raise
    else:
        bot_loop()
Example #16
File: troop.py  Project: ooici/ape
 def __init__(self, clobber=False, target=WORK_DIRECTORY, template=TEMPLATE_DIRECTORY, weather='sunny'):
     self.configuration = { }
     self.service_by_name = None
     self.clobber = clobber
     self.base_directory = self._get_base_directory()
     self.target_directory = target if isabs(target) else join(self.base_directory, target)
     self.template_directory = template if isabs(template) else join(self.base_directory, template)
     self.state = CREATED
     self.weather = weather
Example #17
	def __init__(self, dir=None, target='t000_', prefix=None, overwrite=False, rundir=None ):

#the name of the target
		self.name=target

#overwrite files if they are re-added despite different checksum ?
		self.overwrite=overwrite

		self.files=TrackChangeList()
#figure out the target-directory
		self.fn_target_dir=""
		if dir: #it is a one-off directory
			self.fn_target_dir=dir
		elif prefix: #it is in the targetlibrary
			self.fn_target_dir=path.normpath(prefix+"/"+target).replace('//','/')
		else:
			raise MissingInput("require either directory or directory-prefix to figure out target directory" )

	#	if not path.exists( self.fn_target_dir ):
	#		library.mkdirp( self.fn_target_dir )
		#this prefix is the relative position from the final run-directory if relative paths are used
		self.target_dir_prefix_for_run=""
		self.fn_target_dir=path.normpath(self.fn_target_dir).replace('//','/')
#		print "TARGET_DIR: ", rundir, self.fn_target_dir
		if rundir and not path.isabs(self.fn_target_dir):
			if path.isabs(rundir):
				self.fn_target_dir=path.abspath(self.fn_target_dir)
			else:
				self.rel_path_target_dir=path.normpath(path.relpath(self.fn_target_dir, rundir)).replace('//','/')
				com=(path.commonprefix([rundir, self.fn_target_dir ])+"/").replace('//','/')
#				if com: print "Common-Prefix: ", com, self.fn_target_dir, rundir
				if not com=="." and not com=="./" and not com=="/":
					target_dir_alone=self.fn_target_dir.replace(com,"")
				else:
					target_dir_alone=self.fn_target_dir
#				print 'target_dir_alone: ',target_dir_alone
				self.target_dir_prefix_for_run=self.rel_path_target_dir.replace(target_dir_alone,"./")
				print("relative path from run-dir to input dir", self.rel_path_target_dir, self.target_dir_prefix_for_run)
#				com=path.commonprefix([rundir, self.fn_target_dir ])
#				If com: print "Common-Prefix: ", com
#				target_dir_alone=self.fn_target_dir.replace(com,"")
#				rundir_alone=rundir.replace(com,"")
#				print "correcte paths", target_dir_alone, rundir_alone


		#keep track of the original_paths  -- keep the hostname
		self.hostname=socket.gethostname()
		self.original_paths={}

		#secondary data -- generated from the stored file-list at each instantiation
		self.absolute_paths={}

    #the master list of files stored in this directory (with add_files)
		self.directory_file=self.fn_target_dir+'/file_directory.txt'
		if path.exists( self.directory_file ):
			self.read_file_database()
Example #18
File: keyfs.py  Project: zerotired/devpi
 def _rename(self, sourcekey, destkey):
     assert not isabs(sourcekey), sourcekey
     assert not isabs(destkey), destkey
     source = join(str(self.basedir), sourcekey)
     dest = join(str(self.basedir), destkey)
     try:
         os.rename(source, dest)
     except OSError as e:
         os.makedirs(os.path.dirname(dest))
         os.rename(source, dest)
Example #19
def PyNLLoc(obsFile, obsFileType, ctrlFile, ttFileRoot, outPath, iSwapBytes=1,
            delayFile=None):
    """
    Locate an earthquake by calling NLLoc program and try to save the output
    files with the same name as input file (e.g. gfzYYYYxxxx.loc.hyp).

    :param str obsFile: Path to/name of phase observation file.
    :param str obsFileType: Observation file format {'NLLOC_OBS', 'GFN'}.
    :param str ctrlFile:
        Path to/name of NLLoc control file statements.
        IMPORTANT: it should not include `LOCFILES` line.
    :param str ttFileRoot:
        Path to/`root` name (no extension) of input time grids.
    :param str outPath: Directory path to save output location files.
    :param str delayFile: Path to/name of station delays file.
    """
    # IMPORTANT: `outPath` must be the full path.
    # necessary for renaming out file.
    if not op.isabs(outPath):
        outPath = op.abspath(outPath)
    outNameRoot = op.basename(obsFile).split(".")[0]
    outFileRoot = op.join(outPath, outNameRoot)

    with tempfile.NamedTemporaryFile(mode='a+t', delete=True) as tmpCtrlFile:
        shutil.copy(ctrlFile, tmpCtrlFile.name)
        params = (obsFile, obsFileType, ttFileRoot, outFileRoot, iSwapBytes)
        tmpCtrlFile.write("\nLOCFILES {} {} {} {} {}\n".format(*params))

        if delayFile:
            if not op.isabs(delayFile):
                delayFile = op.abspath(delayFile)
            tmpCtrlFile.write("\nINCLUDE {}\n".format(delayFile))

        # IMPORTANT: return the pointer to the top of the file before reading
        # it. Otherwise, just an empty string will be read!
        tmpCtrlFile.seek(0)

        # Run NLLoc program
        subprocess.call(['NLLoc', tmpCtrlFile.name])

    # IMPORTANT: delete redundant summary and (tmp) control files
    for sumfile in glob.glob(outFileRoot + '.sum.*'):
        os.remove(sumfile)
    for tmpfile in glob.glob(outFileRoot + '*_tmp*'):
        os.remove(tmpfile)

    try:
        # Location completed. Rename output files.
        for ext in ['hyp', 'hdr', 'scat']:
            oldfile = glob.glob(outFileRoot + '.[0-9]*.[0-9]*.loc.' + ext)[0]
            newfile = outFileRoot + '.loc.' + ext
            os.rename(oldfile, newfile)
    except IndexError:
        # No location completed.
        pass
예제 #20
0
def run_bam_to_bam(subread_set_file, barcode_set_file, output_file_name,
                   nproc=1, score_mode="symmetric"):
    if score_mode not in ["asymmetric", "symmetric"]:
        raise ValueError("Unrecognized score mode '{m}'".format(m=score_mode))
    bc = BarcodeSet(barcode_set_file)
    if len(bc.resourceReaders()) > 1:
        raise NotImplementedError("Multi-FASTA BarcodeSet input is not supported.")
    barcode_fasta = bc.toExternalFiles()[0]
    with SubreadSet(subread_set_file) as ds:
        ds_new = SubreadSet(strict=True)
        for ext_res in ds.externalResources:
            subreads_bam = ext_res.bam
            scraps_bam = ext_res.scraps
            assert subreads_bam is not None
            if scraps_bam is None:
                raise TypeError("The input SubreadSet must include scraps.")
            new_prefix = op.join(op.dirname(output_file_name),
                re.sub(".subreads.bam", "_barcoded", op.basename(subreads_bam)))
            if not op.isabs(subreads_bam):
                subreads_bam = op.join(op.dirname(subread_set_file),
                    subreads_bam)
            if not op.isabs(scraps_bam):
                scraps_bam = op.join(op.dirname(subread_set_file), scraps_bam)
            args = [
                "bam2bam",
                "-j", str(nproc),
                "-b", str(nproc),
                "-o", new_prefix,
                "--barcodes", barcode_fasta,
                "--scoreMode", score_mode,
                subreads_bam, scraps_bam
            ]
            log.info(" ".join(args))
            result = run_cmd(" ".join(args),
                             stdout_fh=sys.stdout,
                             stderr_fh=sys.stderr)
            if result.exit_code != 0:
                return result.exit_code
            subreads_bam = new_prefix + ".subreads.bam"
            scraps_bam = new_prefix + ".scraps.bam"
            assert op.isfile(subreads_bam), "Missing {f}".format(f=subreads_bam)
            add_subread_resources(ds_new,
                subreads=subreads_bam,
                scraps=scraps_bam,
                barcodes=barcode_set_file)
        ds._filters.clearCallbacks()
        ds_new._filters = ds._filters
        ds_new._populateMetaTypes()
        ds_new.metadata = ds.metadata
        ds_new.name = ds.name + " (barcoded)"
        ds_new.updateCounts()
        ds_new.newUuid()
        ds_new.write(output_file_name)
    return 0
Example #21
    def _normalize_filenames(self):
        """ sanitize filenames, normalizing paths
            TODO think about using forward slashes for crossplatform issues
                 (diff/patch were born as a unix utility after all)
            return True on success
        """
        errors = 0
        for i, p in enumerate(self.items):
            if p.type in (HG, GIT):
                # TODO: figure out how to deal with /dev/null entries
                debug("stripping a/ and b/ prefixes")
                if p.source != '/dev/null':
                    if not p.source.startswith("a/"):
                        warning("invalid source filename")
                    else:
                        p.source = p.source[2:]
                if p.target != '/dev/null':
                    if not p.target.startswith("b/"):
                        warning("invalid target filename")
                    else:
                        p.target = p.target[2:]

            p.source = normpath(p.source)
            p.target = normpath(p.target)

            # references to parent are not allowed
            if p.source.startswith(".." + os.sep):
                warning(
                    "error: stripping parent path for source file patch no.%d" % (i + 1))
                errors += 1
                while p.source.startswith(".." + os.sep):
                    p.source = p.source.partition(os.sep)[2]
            if p.target.startswith(".." + os.sep):
                warning(
                    "error: stripping parent path for target file patch no.%d" % (i + 1))
                errors += 1
                while p.target.startswith(".." + os.sep):
                    p.target = p.target.partition(os.sep)[2]

            # absolute paths are not allowed
            if isabs(p.source) or isabs(p.target):
                errors += 1
                warning("error: absolute paths are not allowed for file patch no.%d" % (
                i + 1))
                if isabs(p.source):
                    p.source = p.source.partition(os.sep)[2]
                if isabs(p.target):
                    p.target = p.target.partition(os.sep)[2]

            self.items[i].source = p.source
            self.items[i].target = p.target

        return (errors == 0)
Example #22
    def test_get_template_path(self):
        default_path = configme.resolve_dir()
        self.assertTrue(path.isabs(default_path))
        self.assertTrue(path.exists(default_path))

        custom_path = configme.resolve_dir('/usr/local/bin')
        self.assertTrue(path.isabs(custom_path))
        self.assertTrue(path.exists(custom_path))

        rel_path = configme.resolve_dir('../tests')
        self.assertTrue(path.isabs(rel_path))
        self.assertTrue(path.exists(rel_path))
Example #23
File: path.py  Project: winksaville/craftr
def glob(patterns, parent=None, excludes=(), include_dotfiles=False):
    """
  Wrapper for :func:`glob2.glob` that accepts an arbitrary number of
  patterns and matches them. The paths are normalized with :func:`norm`.

  Relative patterns are automatically joined with *parent*. If the
  parameter is omitted, it defaults to the currently executed build
  script's project directory.

  If *excludes* is specified, it must be a string or a list of strings
  that is/contains glob patterns or filenames to be removed from the
  result before returning.

  .. note::

    Every file listed in *excludes* will only remove **one** item from
    the result list that was generated from *patterns*. Thus, if you
    want to exclude some files with a pattern except for a specific file
    that would also match that pattern, simply list that file another
    time in the *patterns*.

  :param patterns: A list of glob patterns or filenames.
  :param parent: The parent directory for relative paths.
  :param excludes: A list of glob patterns or filenames.
  :param include_dotfiles: If True, ``*`` and ``**`` can also capture
    file or directory names starting with a dot.
  :return: A list of filenames.
  """

    argspec.validate("patterns", patterns, {"type": [list, tuple]})
    argspec.validate("excludes", excludes, {"type": [list, tuple]})
    argspec.validate("parent", parent, {"type": [None, str]})

    if not parent:
        parent = getcwd()

    result = []
    for pattern in patterns:
        if not isabs(pattern):
            pattern = join(parent, pattern)
        result += glob2.glob(norm(pattern))

    for pattern in excludes:
        if not isabs(pattern):
            pattern = join(parent, pattern)
        pattern = norm(pattern)
        if not isglob(pattern):
            result.remove(pattern)
        else:
            for item in glob2.glob(pattern):
                result.remove(item)

    return result
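
An illustrative call, following the docstring's note that each entry in *excludes* removes only one matching item; the directory layout is hypothetical:

# suppose /project/src contains a.c, b.c and generated.c
sources = glob(['src/*.c'], parent='/project', excludes=['src/generated.c'])
# -> the normalized paths of a.c and b.c; generated.c is removed exactly once
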
Example #24
File: install.py  Project: ankaan/dotfiles
    def __init__(self, install, rel_src, dst, generated):
        """
        Create an entity that should be installed.

        :param install: The parent install object.
        :param rel_src: Relative path from dotdir to the entity that should be
                        installed. Must not be a dangling directory.
        :param dst: Where the entity should be installed. If given as a
                    dangling directory, the basename of the installed entity
                    will be added to the end.
        :param generated: If the entity should be executed and generate
                          something to install instead of being installed
                          directly.

        A dangling directory is a path where the last segment is empty, that
        is, it ends with a slash.

        When generated is True, the files will be run with a path as an
        argument. This is a path into a generated directory where a file or
        directory must be created by the script. The installer will then
        automatically symlink to this generated location instead of the
        executable directly.
        """
        self.parent = install
        self.generated = generated

        if dangling_dir(rel_src):
            raise DotFilesException(
                'Source directory may not be dangling (trailing slash.)')

        if isabs(rel_src):
            raise DotFilesException('Source must be a relative path.')

        self.rel_src = rel_src
        self.src = join(self.parent.dot_dir, self.rel_src)

        if not exists(self.src):
            raise DotFilesException('Source does not exist.')

        dst = expanduser(dst)
        if not isabs(dst):
            raise DotFilesException('Destination must be an absolute path.')

        if dangling_dir(dst):
            self.dst_dir = dst
            self.dst = join(dst, basename(self.rel_src))
        else:
            self.dst_dir = dirname(dst)
            self.dst = dst

        if same_path(self.dst, '~'):
            raise DotFilesException(
                'Trying to replace the whole home directory.')
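
The dangling_dir helper is not shown here; a plausible one-liner consistent with the docstring ("a path where the last segment is empty, that is, it ends with a slash") might be:

from os.path import basename

def dangling_dir(p):
    # a dangling directory path ends in a separator, so its final segment is empty
    return basename(p) == ''
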
Example #25
 def _get_filepath(self):
     if not self.is_pickled:
         fpath = self.specification.get('path', "")
     else:
         fpath = self.pickled_args['filepath_or_buffer']
     if isinstance(fpath, list):
         for path in fpath:
             if not (op.exists(path) and op.isabs(path)):
                 raise TraitError("filepaths must be absolute.")
     elif isinstance(fpath, str):
         if not (op.exists(fpath) and op.isabs(fpath)):
             raise TraitError("filepaths must be absolute.")
     return fpath
Example #26
File: keyfs.py  Project: kenatbasis/devpi
 def _rename(self, rel_source, rel_dest):
     assert not isabs(rel_source), rel_source
     assert not isabs(rel_dest), rel_dest
     source = join(str(self.basedir), rel_source)
     dest = join(str(self.basedir), rel_dest)
     try:
         os.rename(source, dest)
     except OSError:
         destdir = os.path.dirname(dest)
         if not os.path.exists(destdir):
             os.makedirs(destdir)
         if sys.platform == "win32" and os.path.exists(dest):
             os.remove(dest)
         os.rename(source, dest)
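
The retry in both keyfs examples exists because os.rename fails when the destination's parent directory does not exist yet. On Python 3 the same intent can be written without the retry, roughly as follows (a sketch, not the devpi code):

import os

def rename_with_parents(source, dest):
    # create the destination's parent first, then move; os.replace also overwrites an existing dest
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    os.replace(source, dest)
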
Example #27
def _samefile(a, b):
    """Return true iff paths 'a' and 'b' refer to the same file."""
    if sys.platform == "win32":
        #XXX Will not properly handle LONGNAME == SHORTNAME. Nor will it
        #    handle multiple paths to the same file with different drives
        #    or UNC shares.
        norma  = normcase(normpath(a))
        normb = normcase(normpath(b))
        if isabs(norma) or isabs(normb):
            norma = abspath(norma)
            normb = abspath(normb)
        return norma == normb
    else:
        return os.path.samefile(a, b)
Example #28
File: test_base.py  Project: hanke/datalad
def test_get_contentlocation(tdir):
    repo = AnnexRepo(tdir, create=True, init=True)
    repo.add('file.dat')
    repo.commit('added file.dat')

    key = repo.get_file_key('file.dat')
    cr = AnnexCustomRemote(tdir)
    key_path = cr.get_contentlocation(key, absolute=False)
    assert not isabs(key_path)
    key_path_abs = cr.get_contentlocation(key, absolute=True)
    assert isabs(key_path_abs)
    assert cr._contentlocations == {key: key_path}
    repo.drop('file.dat', options=['--force'])
    assert not cr.get_contentlocation(key, absolute=True)
Example #29
def expand_path(src, dst):
    if src.startswith('~'):
        src = path.expanduser(src)
    elif not path.isabs(src):
        src = path.abspath(src)
    if dst.startswith('~'):
        dst = path.expanduser(dst)
    elif not path.isabs(dst):
        dst = path.abspath(path.normpath(dst))
    if not path.basename(src) == path.basename(dst):
        dst = path.join(dst, path.basename(src))
    src = path.normcase(src)
    dst = path.normcase(dst)
    return (src, dst)
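
A quick illustration of the normalization above (the home directory and working directory shown in the comments are illustrative):

# with HOME=/home/user:
expand_path('~/notes.txt', '/backup/')
# -> ('/home/user/notes.txt', '/backup/notes.txt')   dst gains the source basename
expand_path('data/raw.csv', '~/archive/raw.csv')
# -> ('<cwd>/data/raw.csv', '/home/user/archive/raw.csv')
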
Example #30
    def symlink(cls, target, link):
        assert isabs(target)
        assert isabs(link)

        if islink(link):
            unlink(link)
        if exists(link):
            print('Error: {} exists and is not a symlink!'.format(link))
            return

        rel_target = relpath(target, dirname(link))
        #print('{} -> {}'.format(link, rel_target))
        symlink(rel_target, link)
        if basename(link).startswith('.'):
            cls.hide_file(link)
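
The relpath call is what makes the link relative: the target stored in the symlink is computed relative to the directory containing the link, so the pair keeps working if the whole tree is moved. A standalone sketch of just that computation (paths are illustrative):

from os.path import relpath, dirname

target = '/home/user/dotfiles/vimrc'
link = '/home/user/.vimrc'
print(relpath(target, dirname(link)))  # 'dotfiles/vimrc' -- what gets stored in the symlink
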
Example #31
def results_from_annex_noinfo(ds, requested_paths, respath_by_status, dir_fail_msg,
                              noinfo_dir_msg, noinfo_file_msg, noinfo_status='notneeded',
                              **kwargs):
    """Helper to yield results based on what information git annex did not give us.

    The helper assumes that the annex command returned without an error code,
    and interprets which of the requested paths we have heard nothing about,
    and assumes that git annex was happy with their current state.

    Parameters
    ==========
    ds : Dataset
      All results have to be concerning this single dataset (used to resolve
      relpaths).
    requested_paths : list
      List of path arguments sent to `git annex`
    respath_by_status : dict
      Mapping of 'success' or 'failure' labels to lists of result paths
      reported by `git annex`. Everything that is not in here, we assume
      that `git annex` was happy about.
    dir_fail_msg : str
      Message template to inject into the result for a requested directory where
      a failure was reported for some of its content. The template contains two
      string placeholders that will be expanded with 1) the path of the
      directory, and 2) the content failure paths for that directory
    noinfo_dir_msg : str
      Message template to inject into the result for a requested directory that
      `git annex` was silent about (incl. any content). There must be one string
      placeholder that is expanded with the path of that directory.
    noinfo_file_msg : str
      Message to inject into the result for a requested file that `git
      annex` was silent about.
    noinfo_status : str
      Status to report when annex provides no information
    **kwargs
      Any further kwargs are included in the yielded result dictionary.
    """
    for p in requested_paths:
        # any relpath is relative to the currently processed dataset
        # not the global reference dataset
        p = p if isabs(p) else normpath(opj(ds.path, p))
        if any(p in ps for ps in respath_by_status.values()):
            # we have a report for this path already
            continue
        common_report = dict(path=p, **kwargs)
        if isdir(p):
            # `annex` itself will not report on directories, but if a
            # directory was requested, we want to say something about
            # it in the results.  we are inside a single, existing
            # repo, hence all directories are already present, if not
            # we had an error
            # do we have any failures in a subdir of the requested dir?
            failure_results = [
                fp for fp in respath_by_status.get('failure', [])
                if path_is_subpath(fp, p)]
            if failure_results:
                # we were not able to process all requested_paths, let's label
                # this 'impossible' to get a warning-type report
                # after all we have the directory itself, but not
                # (some) of its requested_paths
                yield get_status_dict(
                    status='impossible', type='directory',
                    message=(dir_fail_msg, p, failure_results),
                    **common_report)
            else:
                # otherwise cool, but how cool?
                success_results = [
                    fp for fp in respath_by_status.get('success', [])
                    if path_is_subpath(fp, p)]
                yield get_status_dict(
                    status='ok' if success_results else noinfo_status,
                    message=None if success_results else (noinfo_dir_msg, p),
                    type='directory', **common_report)
            continue
        else:
            # not a directory, and we have had no word from `git annex`,
            # yet no exception, hence the file was most probably
            # already in the desired state
            yield get_status_dict(
                status=noinfo_status, type='file',
                message=noinfo_file_msg,
                **common_report)
Example #32
def read_montage(kind, ch_names=None, path=None, unit='m', transform=False):
    """Read a generic (built-in) montage.

    Individualized (digitized) electrode positions should be
    read in using :func:`read_dig_montage`.

    In most cases, you should only need the `kind` parameter to load one of
    the built-in montages (see Notes).

    Parameters
    ----------
    kind : str
        The name of the montage file without the file extension (e.g.
        kind='easycap-M10' for 'easycap-M10.txt'). Files with extensions
        '.elc', '.txt', '.csd', '.elp', '.hpts', '.sfp' or '.loc' ('.locs' and
        '.eloc') are supported.
    ch_names : list of str | None
        If not all electrodes defined in the montage are present in the EEG
        data, use this parameter to select subset of electrode positions to
        load. If None (default), all defined electrode positions are returned.

        .. note:: ``ch_names`` are compared to channel names in the montage
                  file after converting them both to upper case. If a match is
                  found, the letter case in the original ``ch_names`` is used
                  in the returned montage.

    path : str | None
        The path of the folder containing the montage file. Defaults to the
        mne/channels/data/montages folder in your mne-python installation.
    unit : 'm' | 'cm' | 'mm'
        Unit of the input file. If not 'm' (default), coordinates will be
        rescaled to 'm'.
    transform : bool
        If True, points will be transformed to Neuromag space.
        The fiducials 'nasion', 'lpa', 'rpa' must be specified in
        the montage file. Useful for points captured using Polhemus FastSCAN.
        Default is False.

    Returns
    -------
    montage : instance of Montage
        The montage.

    See Also
    --------
    DigMontage
    Montage
    read_dig_montage

    Notes
    -----
    Built-in montages are not scaled or transformed by default.

    Montages can contain fiducial points in addition to electrode
    locations, e.g. ``biosemi64`` contains 67 total channels.

    The valid ``kind`` arguments are:

    ===================   =====================================================
    Kind                  description
    ===================   =====================================================
    standard_1005         Electrodes are named and positioned according to the
                          international 10-05 system.
    standard_1020         Electrodes are named and positioned according to the
                          international 10-20 system.
    standard_alphabetic   Electrodes are named with LETTER-NUMBER combinations
                          (A1, B2, F4, etc.)
    standard_postfixed    Electrodes are named according to the international
                          10-20 system using postfixes for intermediate
                          positions.
    standard_prefixed     Electrodes are named according to the international
                          10-20 system using prefixes for intermediate
                          positions.
    standard_primed       Electrodes are named according to the international
                          10-20 system using prime marks (' and '') for
                          intermediate positions.

    biosemi16             BioSemi cap with 16 electrodes
    biosemi32             BioSemi cap with 32 electrodes
    biosemi64             BioSemi cap with 64 electrodes
    biosemi128            BioSemi cap with 128 electrodes
    biosemi160            BioSemi cap with 160 electrodes
    biosemi256            BioSemi cap with 256 electrodes

    easycap-M10           Brainproducts EasyCap with electrodes named
                          according to the 10-05 system
    easycap-M1            Brainproduct EasyCap with numbered electrodes

    EGI_256               Geodesic Sensor Net with 256 channels

    GSN-HydroCel-32       HydroCel Geodesic Sensor Net with 32 electrodes
    GSN-HydroCel-64_1.0   HydroCel Geodesic Sensor Net with 64 electrodes
    GSN-HydroCel-65_1.0   HydroCel Geodesic Sensor Net with 64 electrodes + Cz
    GSN-HydroCel-128      HydroCel Geodesic Sensor Net with 128 electrodes
    GSN-HydroCel-129      HydroCel Geodesic Sensor Net with 128 electrodes + Cz
    GSN-HydroCel-256      HydroCel Geodesic Sensor Net with 256 electrodes
    GSN-HydroCel-257      HydroCel Geodesic Sensor Net with 256 electrodes + Cz
    ===================   =====================================================

    .. versionadded:: 0.9.0
    """
    if path is None:
        path = op.join(op.dirname(__file__), 'data', 'montages')
    if not op.isabs(kind):
        supported = ('.elc', '.txt', '.csd', '.sfp', '.elp', '.hpts', '.loc',
                     '.locs', '.eloc')
        montages = [op.splitext(f) for f in os.listdir(path)]
        montages = [m for m in montages if m[1] in supported and kind == m[0]]
        if len(montages) != 1:
            raise ValueError('Could not find the montage. Please provide the '
                             'full path.')
        kind, ext = montages[0]
    else:
        kind, ext = op.splitext(kind)
    fname = op.join(path, kind + ext)

    if ext == '.sfp':
        # EGI geodesic
        with open(fname, 'r') as f:
            lines = f.read().replace('\t', ' ').splitlines()

        ch_names_, pos = [], []
        for ii, line in enumerate(lines):
            line = line.strip().split()
            if len(line) > 0:  # skip empty lines
                if len(line) != 4:  # name, x, y, z
                    raise ValueError("Malformed .sfp file in line " + str(ii))
                this_name, x, y, z = line
                ch_names_.append(this_name)
                pos.append([float(cord) for cord in (x, y, z)])
        pos = np.asarray(pos)
    elif ext == '.elc':
        # 10-5 system
        ch_names_ = []
        pos = []
        with open(fname) as fid:
            # Default units are meters
            for line in fid:
                if 'UnitPosition' in line:
                    units = line.split()[1]
                    scale_factor = dict(m=1., mm=1e-3)[units]
                    break
            else:
                raise RuntimeError('Could not detect units in file %s' % fname)
            for line in fid:
                if 'Positions\n' in line:
                    break
            pos = []
            for line in fid:
                if 'Labels\n' in line:
                    break
                pos.append(list(map(float, line.split())))
            for line in fid:
                if not line or not set(line) - set([' ']):
                    break
                ch_names_.append(line.strip(' ').strip('\n'))
        pos = np.array(pos) * scale_factor
    elif ext == '.txt':
        # easycap
        try:  # newer version
            data = np.genfromtxt(fname, dtype='str', skip_header=1)
        except TypeError:
            data = np.genfromtxt(fname, dtype='str', skiprows=1)
        ch_names_ = list(data[:, 0])
        az = np.deg2rad(data[:, 2].astype(float))
        pol = np.deg2rad(data[:, 1].astype(float))
        pos = _sph_to_cart(np.array([np.ones(len(az)) * 85., az, pol]).T)
    elif ext == '.csd':
        # CSD toolbox
        dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                 ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                 ('off_sph', 'f8')]
        try:  # newer version
            table = np.loadtxt(fname, skip_header=2, dtype=dtype)
        except TypeError:
            table = np.loadtxt(fname, skiprows=2, dtype=dtype)
        ch_names_ = table['label']
        az = np.deg2rad(table['theta'])
        pol = np.deg2rad(90. - table['phi'])
        pos = _sph_to_cart(np.array([np.ones(len(az)), az, pol]).T)
    elif ext == '.elp':
        # standard BESA spherical
        dtype = np.dtype('S8, S8, f8, f8, f8')
        try:
            data = np.loadtxt(fname, dtype=dtype, skip_header=1)
        except TypeError:
            data = np.loadtxt(fname, dtype=dtype, skiprows=1)

        ch_names_ = data['f1'].astype(np.str)
        az = data['f2']
        horiz = data['f3']
        radius = np.abs(az / 180.)
        az = np.deg2rad(
            np.array([h if a >= 0. else 180 + h for h, a in zip(horiz, az)]))
        pol = radius * np.pi
        pos = _sph_to_cart(np.array([np.ones(len(az)) * 85., az, pol]).T)
    elif ext == '.hpts':
        # MNE-C specified format for generic digitizer data
        dtype = [('type', 'S8'), ('name', 'S8'), ('x', 'f8'), ('y', 'f8'),
                 ('z', 'f8')]
        data = np.loadtxt(fname, dtype=dtype)
        ch_names_ = data['name'].astype(np.str)
        pos = np.vstack((data['x'], data['y'], data['z'])).T
    elif ext in ('.loc', '.locs', '.eloc'):
        ch_names_ = np.loadtxt(fname, dtype='S4',
                               usecols=[3]).astype(np.str).tolist()
        dtype = {'names': ('angle', 'radius'), 'formats': ('f4', 'f4')}
        topo = np.loadtxt(fname, dtype=float, usecols=[1, 2])
        sph = _topo_to_sph(topo)
        pos = _sph_to_cart(sph)
        pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1]
    else:
        raise ValueError('Currently the "%s" template is not supported.' %
                         kind)
    selection = np.arange(len(pos))

    if unit == 'mm':
        pos /= 1e3
    elif unit == 'cm':
        pos /= 1e2
    elif unit != 'm':
        raise ValueError("'unit' should be either 'm', 'cm', or 'mm'.")
    if transform:
        names_lower = [name.lower() for name in list(ch_names_)]
        if ext == '.hpts':
            fids = ('2', '1', '3')  # Alternate cardinal point names
        else:
            fids = ('nasion', 'lpa', 'rpa')

        missing = [name for name in fids if name not in names_lower]
        if missing:
            raise ValueError("The points %s are missing, but are needed "
                             "to transform the points to the MNE coordinate "
                             "system. Either add the points, or read the "
                             "montage with transform=False. " % missing)
        nasion = pos[names_lower.index(fids[0])]
        lpa = pos[names_lower.index(fids[1])]
        rpa = pos[names_lower.index(fids[2])]

        neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
        pos = apply_trans(neuromag_trans, pos)

    if ch_names is not None:
        # Ensure channels with differing case are found.
        upper_names = [ch_name.upper() for ch_name in ch_names]
        sel, ch_names_ = zip(
            *[(i, ch_names[upper_names.index(e)])
              for i, e in enumerate([n.upper() for n in ch_names_])
              if e in upper_names])
        sel = list(sel)
        pos = pos[sel]
        selection = selection[sel]
    else:
        ch_names_ = list(ch_names_)
    kind = op.split(kind)[-1]
    return Montage(pos=pos, ch_names=ch_names_, kind=kind, selection=selection)
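The fiducial-based transform step above can be exercised on its own. A minimal sketch, assuming `get_ras_to_neuromag_trans` and `apply_trans` are importable from `mne.transforms` (as in the module this snippet comes from) and using made-up fiducial coordinates:

import numpy as np
from mne.transforms import get_ras_to_neuromag_trans, apply_trans

# Made-up fiducials in metres, purely for illustration.
nasion = np.array([0.0, 0.09, 0.0])
lpa = np.array([-0.08, 0.0, 0.0])
rpa = np.array([0.08, 0.0, 0.0])

neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
# Apply the resulting 4x4 transform to a couple of example electrode positions.
pos = np.array([[0.0, 0.08, 0.04], [0.05, 0.02, 0.06]])
pos_head = apply_trans(neuromag_trans, pos)
print(pos_head.shape)  # (2, 3)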
Example #33
File: base.py Project: yaccz/GitPython
    def _clone(cls,
               git,
               url,
               path,
               odb_default_type,
               progress,
               multi_options=None,
               **kwargs):
        if progress is not None:
            progress = to_progress_instance(progress)

        odbt = kwargs.pop('odbt', odb_default_type)

        # when pathlib.Path or other classbased path is passed
        if not isinstance(path, str):
            path = str(path)

        ## A bug in cygwin's Git: when `--bare` or `--separate-git-dir` is given,
        #  it prepends the cwd or(?) the `url` to the `path`, so::
        #        git clone --bare  /cygwin/d/foo.git  C:\\Work
        #  becomes::
        #        git clone --bare  /cygwin/d/foo.git  /cygwin/d/C:\\Work
        #
        clone_path = (Git.polish_url(path)
                      if Git.is_cygwin() and 'bare' in kwargs else path)
        sep_dir = kwargs.get('separate_git_dir')
        if sep_dir:
            kwargs['separate_git_dir'] = Git.polish_url(sep_dir)
        multi = None
        if multi_options:
            multi = ' '.join(multi_options).split(' ')
        proc = git.clone(multi,
                         Git.polish_url(url),
                         clone_path,
                         with_extended_output=True,
                         as_process=True,
                         v=True,
                         universal_newlines=True,
                         **add_progress(kwargs, git, progress))
        if progress:
            handle_process_output(proc,
                                  None,
                                  progress.new_message_handler(),
                                  finalize_process,
                                  decode_streams=False)
        else:
            (stdout, stderr) = proc.communicate()
            log.debug("Cmd(%s)'s unused stdout: %s", getattr(proc, 'args', ''),
                      stdout)
            finalize_process(proc, stderr=stderr)

        # our git command could have a different working dir than our actual
        # environment, hence we prepend its working dir if required
        if not osp.isabs(path) and git.working_dir:
            path = osp.join(git._working_dir, path)

        repo = cls(path, odbt=odbt)

        # retain env values that were passed to _clone()
        repo.git.update_environment(**git.environment())

        # adjust remotes - there may be operating systems which use backslashes.
        # These might be given as initial paths, but when handling the config file
        # that contains the remote from which we were cloned, git stops liking it
        # as it will escape the backslashes. Hence we undo the escaping just to be
        # sure.
        if repo.remotes:
            with repo.remotes[0].config_writer as writer:
                writer.set_value('url', Git.polish_url(repo.remotes[0].url))
        # END handle remote repo
        return repo
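In day-to-day use this classmethod is reached through `Repo.clone_from`. A short usage sketch, assuming GitPython is installed; the destination path and `--depth` option below are illustrative:

from git import Repo

# Clone a repository; `multi_options` entries are forwarded to `git clone`.
repo = Repo.clone_from(
    'https://github.com/gitpython-developers/GitPython.git',
    '/tmp/GitPython-clone',            # illustrative destination
    multi_options=['--depth 1'],
)
print(repo.working_dir)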
Example #34
    def __init__(self,
                 pdf_fname,
                 config_fname='config',
                 head_shape_fname='hs_file',
                 rotation_x=None,
                 translation=(0.0, 0.02, 0.11),
                 ecg_ch='E31',
                 eog_ch=('E63', 'E64'),
                 verbose=None):

        if not op.isabs(pdf_fname):
            pdf_fname = op.abspath(pdf_fname)

        if not op.isabs(config_fname):
            config_fname = op.join(op.dirname(pdf_fname), config_fname)

        if not op.exists(config_fname):
            raise ValueError('Could not find the config file %s. Please check'
                             ' whether you are in the right directory '
                             'or pass the full name' % config_fname)

        if not op.isabs(head_shape_fname):
            head_shape_fname = op.join(op.dirname(pdf_fname), head_shape_fname)

        if not op.exists(head_shape_fname):
            raise ValueError('Could not find the head_shape file %s. You '
                             'should check whether you are in the right '
                             'directory or pass the full file name.'
                             % head_shape_fname)

        logger.info('Reading 4D PDF file %s...' % pdf_fname)
        bti_info = _read_bti_header(pdf_fname, config_fname)

        # XXX the index is an informed guess. Normally only one transform is stored.
        dev_ctf_t = bti_info['bti_transform'][0].astype('>f8')
        bti_to_nm = bti_to_vv_trans(adjust=rotation_x,
                                    translation=translation,
                                    dtype='>f8')

        use_hpi = False  # hard coded, but marked as later option.
        logger.info('Creating Neuromag info structure ...')
        info = Info()
        info['bads'] = []
        info['meas_id'] = None
        info['file_id'] = None
        info['projs'] = list()
        info['comps'] = list()
        date = bti_info['processes'][0]['timestamp']
        info['meas_date'] = [date, 0]
        info['sfreq'] = 1e3 / bti_info['sample_period'] * 1e-3
        info['nchan'] = len(bti_info['chs'])

        # browse processing info for filter specs.
        hp, lp = 0.0, info['sfreq'] * 0.4  # find better default
        for proc in bti_info['processes']:
            if 'filt' in proc['process_type']:
                for step in proc['processing_steps']:
                    if 'high_freq' in step:
                        hp, lp = step['high_freq'], step['low_freq']
                    elif 'hp' in step['process_type']:
                        hp = step['freq']
                    elif 'lp' in step['process_type']:
                        lp = step['freq']

        info['highpass'] = hp
        info['lowpass'] = lp
        info['acq_pars'], info['acq_stim'] = None, None
        info['filename'] = None
        info['filenames'] = []
        chs = []

        ch_names = [ch['name'] for ch in bti_info['chs']]
        info['ch_names'] = _rename_channels(ch_names)
        ch_mapping = zip(ch_names, info['ch_names'])
        logger.info('... Setting channel info structure.')
        for idx, (chan_4d, chan_vv) in enumerate(ch_mapping):
            chan_info = dict(zip(FIFF_INFO_CHS_FIELDS, FIFF_INFO_CHS_DEFAULTS))
            chan_info['ch_name'] = chan_vv
            chan_info['logno'] = idx + BTI.FIFF_LOGNO
            chan_info['scanno'] = idx + 1
            chan_info['cal'] = bti_info['chs'][idx]['scale']

            if any([chan_vv.startswith(k) for k in ('MEG', 'RFG', 'RFM')]):
                t, loc = bti_info['chs'][idx]['coil_trans'], None
                if t is not None:
                    t, loc = _convert_coil_trans(t.astype('>f8'), dev_ctf_t,
                                                 bti_to_nm)
                    if idx == 1:
                        logger.info('... putting coil transforms in Neuromag '
                                    'coordinates')
                chan_info['coil_trans'] = t
                if loc is not None:
                    chan_info['loc'] = loc.astype('>f4')

            if chan_vv.startswith('MEG'):
                chan_info['kind'] = FIFF.FIFFV_MEG_CH
                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_MAG
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
                chan_info['unit'] = FIFF.FIFF_UNIT_T

            elif chan_vv.startswith('RFM'):
                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
                chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_MAG
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
                chan_info['unit'] = FIFF.FIFF_UNIT_T

            elif chan_vv.startswith('RFG'):
                chan_info['kind'] = FIFF.FIFFV_REF_MEG_CH
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_DEVICE
                chan_info['unit'] = FIFF.FIFF_UNIT_T_M
                if chan_4d in ('GxxA', 'GyyA'):
                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA
                elif chan_4d in ('GyxA', 'GzxA', 'GzyA'):
                    chan_info['coil_type'] = FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF

            elif chan_vv.startswith('EEG'):
                chan_info['kind'] = FIFF.FIFFV_EEG_CH
                chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
                chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
                chan_info['unit'] = FIFF.FIFF_UNIT_V

            elif chan_vv == 'STI 013':
                chan_info['kind'] = FIFF.FIFFV_RESP_CH
            elif chan_vv == 'STI 014':
                chan_info['kind'] = FIFF.FIFFV_STIM_CH
            elif chan_vv.startswith('EOG'):
                chan_info['kind'] = FIFF.FIFFV_EOG_CH
            elif chan_vv == 'ECG 001':
                chan_info['kind'] = FIFF.FIFFV_ECG_CH
            elif chan_vv.startswith('EXT'):
                chan_info['kind'] = FIFF.FIFFV_MISC_CH
            elif chan_vv.startswith('UTL'):
                chan_info['kind'] = FIFF.FIFFV_MISC_CH

            chs.append(chan_info)

        info['chs'] = chs

        logger.info('... Reading digitization points from %s' %
                    head_shape_fname)
        logger.info('... putting digitization points in Neuromag '
                    'coordinates')
        info['dig'], ctf_head_t = _setup_head_shape(head_shape_fname, use_hpi)
        logger.info('... Computing new device to head transform.')
        dev_head_t = _convert_dev_head_t(dev_ctf_t, bti_to_nm, ctf_head_t)

        info['dev_head_t'] = dict()
        info['dev_head_t']['from'] = FIFF.FIFFV_COORD_DEVICE
        info['dev_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
        info['dev_head_t']['trans'] = dev_head_t
        info['dev_ctf_t'] = dict()
        info['dev_ctf_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_DEVICE
        info['dev_ctf_t']['to'] = FIFF.FIFFV_COORD_HEAD
        info['dev_ctf_t']['trans'] = dev_ctf_t
        info['ctf_head_t'] = dict()
        info['ctf_head_t']['from'] = FIFF.FIFFV_MNE_COORD_CTF_HEAD
        info['ctf_head_t']['to'] = FIFF.FIFFV_COORD_HEAD
        info['ctf_head_t']['trans'] = ctf_head_t
        logger.info('Done.')

        if False:  # XXX : reminds us to support this as we go
            # include digital weights from reference channel
            comps = info['comps'] = list()
            weights = bti_info['weights']
            by_name = lambda x: x[1]
            chn = dict(ch_mapping)
            columns = [chn[k] for k in weights['dsp_ch_names']]
            rows = [chn[k] for k in weights['ch_names']]
            col_order, col_names = zip(
                *sorted(enumerate(columns), key=by_name))
            row_order, row_names = zip(*sorted(enumerate(rows), key=by_name))
            # for some reason the C code would invert the signs, so we follow.
            mat = -weights['dsp_wts'][row_order, :][:, col_order]
            comp_data = dict(data=mat,
                             col_names=col_names,
                             row_names=row_names,
                             nrow=mat.shape[0],
                             ncol=mat.shape[1])
            comps += [
                dict(
                    data=comp_data,
                    ctfkind=101,
                    #  no idea how to calibrate, just ones.
                    rowcals=np.ones(mat.shape[0], dtype='>f4'),
                    colcals=np.ones(mat.shape[1], dtype='>f4'),
                    save_calibrated=0)
            ]
        else:
            logger.warning('Warning. Currently direct inclusion of 4D weight '
                           'tables is not supported. For critical use cases '
                           '\nplease consider using the MNE command '
                           '\'mne_create_comp_data\' to include weights as '
                           'printed out \nby the 4D \'print_table\' routine.')

        # check that the info is complete
        assert not set(RAW_INFO_FIELDS) - set(info.keys())

        # check nchan is correct
        assert len(info['ch_names']) == info['nchan']

        cals = np.zeros(info['nchan'])
        for k in range(info['nchan']):
            cals[k] = info['chs'][k]['range'] * info['chs'][k]['cal']

        self.verbose = verbose
        self.cals = cals
        self.rawdir = None
        self.proj = None
        self.comp = None
        self.fids = list()
        self._preloaded = True
        self._projector_hashes = [None]
        self.info = info

        logger.info('Reading raw data from %s...' % pdf_fname)
        self._data = _read_data(bti_info)
        self.first_samp, self.last_samp = 0, self._data.shape[1] - 1

        assert len(self._data) == len(self.info['ch_names'])
        self._times = np.arange(self.first_samp,
                                self.last_samp + 1) / info['sfreq']
        self._projectors = [None]
        logger.info('    Range : %d ... %d =  %9.3f ... %9.3f secs' %
                    (self.first_samp, self.last_samp, float(self.first_samp) /
                     info['sfreq'], float(self.last_samp) / info['sfreq']))

        logger.info('Ready.')
Example #35
def full_path(path, base_path):
  from os.path import isabs, join, dirname
  if isabs(path) or base_path is None:
    return path
  return join(dirname(base_path), path)
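A quick check of the two branches (the paths below are made up):

print(full_path('/etc/hosts', '/srv/config/app.yaml'))  # '/etc/hosts' (already absolute)
print(full_path('data.csv', '/srv/config/app.yaml'))    # '/srv/config/data.csv'
print(full_path('data.csv', None))                      # 'data.csv' (no base path to join against)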
Example #36
 def generated_filepath(self, f_path):
     return f_path if path.isabs(f_path) else path.join(
         self.attr.get('ARCHIVE_DIRECTORY'), f_path)
Example #37
def _get_cassette_path(path):
    """Return a path to the cassette within our unified 'storage'"""
    if not isabs(path):  # so it was given as a name
        return "fixtures/vcr_cassettes/%s.yaml" % path
    return path
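Both branches in a short sketch (the cassette name is made up):

print(_get_cassette_path('github_user'))
# -> 'fixtures/vcr_cassettes/github_user.yaml'
print(_get_cassette_path('/tmp/custom/cassette.yaml'))
# -> '/tmp/custom/cassette.yaml' (absolute paths pass through unchanged)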
Example #38
 def test_get_filepath_or_buffer_with_path(self):
     filename = '~/sometest'
     filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
     assert filepath_or_buffer != filename
     assert isabs(filepath_or_buffer)
     assert os.path.expanduser(filename) == filepath_or_buffer
Example #39
    def run(self):

        # load settings
        settings = sublime.load_settings('SublimePrint.sublime-settings')

        # check whether plain text or file name and line numbers must be printed
        isFileNameAndLineNumberRequested = settings.get(
            "print_file_name_and_line_numbers_for_selection", 0)

        # START: get selected text to be printed
        # taken from plug-in copy_with_line_numbers
        textToPrint = ""

        view = sublime.Window.active_view(sublime.active_window())

        # set file name
        if isFileNameAndLineNumberRequested == 1:
            if view.file_name():
                textToPrint = "File: " + view.file_name() + "\n"
            else:
                textToPrint = "File: <unsaved>\n"

        # handle text
        isFollowupSelection = None
        for selection in view.sel():
            if isFollowupSelection:
                # split multi selections with ---
                textToPrint += "---\n"
            else:
                # but not the first one
                isFollowupSelection = True
            # for each selection
            for line in view.lines(selection):
                if isFileNameAndLineNumberRequested == 1:
                    textToPrint += str(view.rowcol(line.begin())[0] +
                                       1) + ": " + view.substr(line) + "\n"
                else:
                    textToPrint += view.substr(line) + "\n"
        # END: get selected text to be printed

        # check whether the print command has an absolute path
        printcommand = settings.get("command")
        # if not absolute, search it
        if not isabs(printcommand):
            if isfile("/usr/bin/" + printcommand):
                printcommand = "/usr/bin/" + printcommand
            elif isfile("/usr/local/bin/" + printcommand):
                printcommand = "/usr/local/bin/" + printcommand
            # and save in user settings for the next time
            settings.set("command", printcommand)
            sublime.save_settings('SublimePrint.sublime-settings')
        else:
            if not isfile(printcommand):
                sublime.error_message(
                    "Program '" + printcommand +
                    "' not found! Please review documentation.")
                return

        # additional options but ignore "line-numbers" because this is from clipboard
        options = [
            "--%s=%s" % (k, v) for k, v in settings.get("options").iteritems()
            if v != "" and k != "line-numbers"
        ]
        options += [
            "--%s" % k for k, v in settings.get("options").iteritems()
            if v == ""
        ]

        # create printer list in user settings if not defined
        printer = settings.get("used_printer", "DEFAULT")
        if printer is None or printer == "DEFAULT":
            # where is lpstat to list all printers?
            if isfile("/usr/bin/lpstat"):
                lpstatcommand = "/usr/bin/lpstat"
            elif isfile("/usr/local/bin/lpstat"):
                lpstatcommand = "/usr/local/bin/lpstat"
            # get default printer
            p = subprocess.Popen([lpstatcommand] + ["-d"],
                                 stdout=subprocess.PIPE)
            ret = p.wait()
            # Example:
            # $ lpstat -d
            # System-Standardzielort: Samsung_CLP_310_Series__SAMSUNG_CLP310N_
            # $ lpstat -d
            # system default destination: Cups-PDF
            if not ret:
                defaultPrinter = p.stdout.read().split(":")[1].strip()
                settings.set("used_printer", defaultPrinter)
                sublime.save_settings('SublimePrint.sublime-settings')
            # get all printers
            p = subprocess.Popen([lpstatcommand] + ["-a"],
                                 stdout=subprocess.PIPE)
            ret = p.wait()
            # Example:
            # $ lpstat -a
            # Samsung_CLP_310_Series__SAMSUNG_CLP310N_ akzeptiert Anfragen seit Sa 29 Sep 23:41:57 2012
            # $ lpstat -a
            # Cups-PDF accepting requests since Sun 30 Sep 2012 01:14:33 AM CEST
            # DEMUC001 accepting requests since Wed 05 Sep 2012 06:28:30 PM CEST
            # HP_Color_LaserJet_4700 accepting requests since Fri 28 Sep 2012 01:24:25 PM CEST
            # SamsungCLP310 accepting requests since Sun 30 Sep 2012 01:16:19 AM CEST
            if not ret:
                printerCount = 0
                for line in p.stdout:
                    printerCount += 1
                    availablePrinterName = "printer_%d" % (printerCount)
                    settings.set(availablePrinterName, line.split()[0])
                sublime.save_settings('SublimePrint.sublime-settings')
        # use the printer
        if printer is not None and printer != "DEFAULT":
            options += ["-P", printer]

        # print
        cmd = [printcommand] + options

        p = subprocess.Popen(cmd,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        p.communicate(textToPrint)
        ret = p.wait()

        if ret:
            raise EnvironmentError((cmd, ret, p.stdout.read()))
Example #40
def write_contents_to_file(path,
                           contents=None,
                           link_to=None,
                           content_mode='text',
                           root=None,
                           conflicts='fail'):
    """
    Uses provided filename patterns to write contents to a new path, given
    a corresponding entity map.

    Parameters
    ----------
    path : str
        Destination path of the desired contents.
    contents : str
        Raw text or binary encoded string of contents to write
        to the new path.
    link_to : str
        Optional path to create a symbolic link to. Used as an
        alternative to, and takes priority over, the contents argument.
    content_mode : {'text', 'binary'}
        Either 'text' or 'binary' to indicate the writing
        mode for the new file. Only relevant if contents is provided.
    root : str
        Optional root directory that all patterns are relative
        to. Defaults to current working directory.
    conflicts : {'fail', 'skip', 'overwrite', 'append'}
        One of 'fail', 'skip', 'overwrite', or 'append'
        that defines the desired action when the output path already
        exists. 'fail' raises an exception; 'skip' does nothing;
        'overwrite' overwrites the existing file; 'append' adds  a suffix
        to each file copy, starting with 1. Default is 'fail'.
    """

    if root is None and not isabs(path):
        root = os.getcwd()

    if root:
        path = join(root, path)

    if exists(path) or islink(path):
        if conflicts == 'fail':
            msg = 'A file at path {} already exists.'
            raise ValueError(msg.format(path))
        elif conflicts == 'skip':
            msg = 'A file at path {} already exists, skipping writing file.'
            warnings.warn(msg.format(path))
            return
        elif conflicts == 'overwrite':
            if isdir(path):
                warnings.warn('New path is a directory, not going to '
                              'overwrite it, skipping instead.')
                return
            os.remove(path)
        elif conflicts == 'append':
            i = 1
            while i < sys.maxsize:
                # splitext returns a tuple (immutable); build the new name
                # directly and keep the original extension intact.
                root_part, ext_part = splitext(path)
                appended_filename = '%s_%d%s' % (root_part, i, ext_part)
                if not exists(appended_filename) and \
                        not islink(appended_filename):
                    path = appended_filename
                    break
                i += 1
        else:
            raise ValueError('Did not provide a valid conflicts parameter')

    if not exists(dirname(path)):
        os.makedirs(dirname(path))

    if link_to:
        os.symlink(link_to, path)
    elif contents:
        mode = 'wb' if content_mode == 'binary' else 'w'
        with open(path, mode) as f:
            f.write(contents)
    else:
        raise ValueError('One of contents or link_to must be provided.')
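A minimal usage sketch, assuming the function above is importable (or defined in the same module) and writing into a temporary directory:

import tempfile

root = tempfile.mkdtemp()
# The first call creates sub-01/anat/T1w.json under root; the second keeps the
# existing file and writes T1w_1.json instead because conflicts='append'.
write_contents_to_file('sub-01/anat/T1w.json', contents='{}', root=root)
write_contents_to_file('sub-01/anat/T1w.json', contents='{"a": 1}',
                       root=root, conflicts='append')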
Example #41
 def test_get_filepath_or_buffer_with_path(self):
     filename = '~/sometest'
     filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
     self.assertNotEqual(filepath_or_buffer, filename)
     self.assertTrue(isabs(filepath_or_buffer))
     self.assertEqual(os.path.expanduser(filename), filepath_or_buffer)
Example #42
def main(outdir=None, *inputs):
    if not outdir or not inputs:
        print "Usage: %s OUTDIR INPUTS" % sys.argv[0]
        print "  OUTDIR is the war directory to copy to"
        print "  INPUTS is a list of files or patterns used to specify the input"
        print "   .dart files"
        print "This script should be run from the client root directory."
        print "Files will be merged and copied to: OUTDIR/relative-path-of-file,"
        print "except for dart files with absolute paths, which will be copied to"
        print " OUTDIR/absolute-path-as-directories"
        return 1

    entry_libraries = []
    for i in inputs:
        entry_libraries.extend(glob(i))

    for entrypoint in entry_libraries:
        # Get the transitive set of dart files this entrypoint depends on, merging
        # each library along the way.
        worklist = [os.path.normpath(entrypoint)]
        seen = set()
        while len(worklist) > 0:
            lib = worklist.pop()
            if lib in seen:
                continue

            seen.add(lib)

            if (dirname(dirname(lib)).endswith('dom/generated/src') or
                    dirname(lib).endswith('dom/src')):
                continue

            library = parseLibrary(lib)

            # Ensure output directory exists
            outpath = join(outdir, lib[1:] if isabs(lib) else lib)
            dstpath = dirname(outpath)
            if not exists(dstpath):
                os.makedirs(dstpath)

            # Create file containing all imports, and inlining all sources
            with open(outpath, 'w') as f:
                prefix = os.environ.get('DART_HTML_PREFIX')
                if prefix:
                    f.write(prefix + '\n')
                if library.name:
                    if library.comment:
                        f.write('%s' % (''.join(library.comment)))
                    f.write("library %s;\n\n" % library.name)
                else:
                    f.write("library %s;\n\n" % basename(lib))
                for importfile in library.imports:
                    f.write("import %s;\n" % importfile)
                f.write('%s' % (''.join(library.code)))
                mergefiles([normjoin(dirname(lib), s) for s in library.sources],
                           f)

            for suffix in library.imports:
                m = re.match(r'[\'"]([^\'"]+)[\'"](\s+as\s+\w+)?.*$', suffix)
                uri = m.group(1)
                if not uri.startswith('dart:'):
                    worklist.append(normjoin(dirname(lib), uri))

    return 0
Example #43
def kim_property_dump(property_instances,
                      fp,
                      *,
                      fp_path=None,
                      cls=None,
                      indent=4,
                      default=None,
                      sort_keys=False):
    """Serialize ``property_instances`` object.

    Arguments:
        property_instances {string} -- A string containing the serialized
        KIM-EDN formatted property instances.

        fp {a ``.write()``-supporting file-like object or a name string to
        open a file} -- Serialize ``property_instances`` as a KIM-EDN
        formatted stream to ``fp``

    Keyword Arguments:
        fp_path should be an absolute path (or a valid relative path)
        to the KIM property definition folder. (default: None)

        To use a custom ``KIMEDNEncoder`` subclass (e.g. one that overrides
        the ``.default()`` method to serialize additional types), specify it
        with the ``cls`` kwarg; otherwise ``KIMEDNEncoder`` is used.

        If ``indent`` is a non-negative integer, then EDN array elements and
        object members will be pretty-printed with that indent level. An
        indent level of 0 will only insert newlines. (default 4)

        ``default(obj)`` is a function that should return a serializable
        version of obj or raise TypeError. The default simply raises
        TypeError.

        If *sort_keys* is true (default: ``False``), then the output of
        dictionaries will be sorted by key.

    """
    if property_instances is None or \
            property_instances in ('None', '', '[]'):
        msg = 'There is no property instance to dump.'
        raise KIMPropertyError(msg)

    # Deserialize the KIM property instances.
    kim_property_instances = kim_edn.loads(property_instances)

    if fp_path is not None and isabs(fp_path):
        # Check the property instances
        check_property_instances(kim_property_instances, fp_path=fp_path)
    else:
        kim_properties = get_properties()
        check_property_instances(kim_property_instances,
                                 fp_path=kim_properties)

    if len(kim_property_instances) == 1:
        kim_edn.dump(kim_property_instances[0],
                     fp,
                     cls=cls,
                     indent=indent,
                     default=default,
                     sort_keys=sort_keys)
    else:
        kim_edn.dump(kim_property_instances,
                     fp,
                     cls=cls,
                     indent=indent,
                     default=default,
                     sort_keys=sort_keys)
Example #44
    def _list_outputs(self):
        """Find the files and expose them as interface outputs."""
        outputs = {}
        info = dict([(k, v) for k, v in list(self.inputs.__dict__.items())
                     if k in self._infields])

        # check if the crumb is not absolute or if in info we have the parameter for the base directory
        if not self._crumb.isabs():
            first_arg_name, _ = self._crumb._first_open_arg()
            if first_arg_name not in info:
                raise KeyError(
                    'Crumb path is not absolute and could not find input for {}.'
                    .format(first_arg_name))
            elif not op.isabs(info[first_arg_name]):
                raise IOError(
                    'Expected an absolute path for {} argument in {} but got {}.'
                    .format(
                        first_arg_name,
                        self._crumb,
                        info[first_arg_name],
                    ))
        force_lists = self.inputs.force_lists
        if isinstance(force_lists, bool):
            force_lists = self._outfields if force_lists else []
        bad_fields = set(force_lists) - set(self._outfields)
        if bad_fields:
            bad_fields = ", ".join(list(bad_fields))
            plural = "s" if len(bad_fields) > 1 else ""
            verb = "were" if len(bad_fields) > 1 else "was"
            msg = ("The field%s '%s' %s set in 'force_lists' and not in "
                   "'templates'.") % (plural, bad_fields, verb)
            raise ValueError(msg)

        # loop over the crumb arguments to fill self_crumb
        crumb_info = {
            k: v
            for k, v in info.items() if k in self._crumb.open_args()
        }
        ocrumb = self._crumb.replace(**crumb_info)

        # check again if crumb path is absolute
        if not ocrumb.isabs():
            raise ValueError(
                'Expected a Crumb with an absolute path, got {}.'.format(
                    ocrumb))

        if not ocrumb.exists():
            raise IOError(
                'Expected an existing Crumb path, got {}.'.format(ocrumb))

        # loop over all the output items and fill them with the info in templates
        for field, template in self._templates.items():

            # Fill in the template and glob for files
            focrumb = ocrumb.replace(**dict(template))

            if list(focrumb.open_args()):
                raise ValueError(
                    'Expected a full specification of the Crumb path by now, got {}.'
                    .format(focrumb))

            filelist = [cr.path for cr in focrumb.unfold()]
            # Handle the case where nothing matched
            if not filelist:
                msg = "No files were found unfolding %s crumb path: %s" % (
                    field, focrumb)
                if self.inputs.raise_on_empty:
                    raise IOError(msg)
                else:
                    warn(msg)

            # Possibly sort the list
            if self.inputs.sort_filelist:
                filelist = human_order_sorted(filelist)

            # Handle whether this must be a list or not
            if field not in force_lists:
                filelist = list_to_filename(filelist)

            outputs[field] = filelist

            # add the crumb argument values for output
            for arg_name in focrumb.all_args():
                outputs[arg_name] = focrumb[arg_name][0]

        return outputs
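The crumb operations used above (`isabs`, `open_args`, `replace`, `unfold`) come from the hansel library. A small self-contained sketch, assuming hansel is installed and using an illustrative directory layout built in a temp folder:

import os
import tempfile
from hansel import Crumb

# Build a tiny tree: <tmp>/sub-01/ses-01/T1.nii
base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, 'sub-01', 'ses-01'))
open(os.path.join(base, 'sub-01', 'ses-01', 'T1.nii'), 'w').close()

crumb = Crumb(os.path.join(base, '{subject_id}', '{session_id}', '{image}'))
print(crumb.isabs())            # True: the base is an absolute temp dir
print(list(crumb.open_args()))  # ['subject_id', 'session_id', 'image']

filled = crumb.replace(subject_id='sub-01')
for cr in filled.unfold():      # expands the remaining arguments against the disk
    print(cr.path)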
Example #45
File: main_window.py Project: OSUser/pydm
 def join_to_current_file_path(self, ui_file):
     filename = None
     if path.isabs(ui_file) or len(self.back_stack) == 0:
         return str(ui_file)
     else:
         return path.join(path.dirname(self.current_file()), ui_file)
Example #46
 def __init__(self, rootdir):
     super(FilesystemBlobDB, self).__init__()
     assert isabs(rootdir), rootdir
     self.rootdir = rootdir
Example #47
def test_randfile():
    fpath = randfile(gettempdir(), '.txt')
    assert len(fpath) == 16
    fpath = randfile(gettempdir(), '.txt', fullpath=True)
    assert len(fpath) > 16
    assert path.isabs(fpath)
Example #48
    def __init__(self, properties, directory=None):
        for prop in self.value_unit_props:
            if prop in properties:
                quant = self.process_quantity(properties[prop])
                setattr(self, prop.replace('-', '_'), quant)
            else:
                setattr(self, prop.replace('-', '_'), None)

        if 'rcm-data' in properties:
            orig_rcm_data = properties['rcm-data']
            rcm_props = {}
            for prop in self.rcm_data_props:
                if prop in orig_rcm_data:
                    quant = self.process_quantity(orig_rcm_data[prop])
                    rcm_props[prop.replace('-', '_')] = quant
                else:
                    rcm_props[prop.replace('-', '_')] = None
            self.rcm_data = RCMData(**rcm_props)
        else:
            self.rcm_data = None

        self.composition_type = properties['composition']['kind']
        composition = {}
        for species in properties['composition']['species']:
            species_name = species['species-name']
            amount = self.process_quantity(species['amount'])
            InChI = species.get('InChI')
            SMILES = species.get('SMILES')
            atomic_composition = species.get('atomic-composition')
            composition[species_name] = Composition(
                species_name=species_name,
                InChI=InChI,
                SMILES=SMILES,
                atomic_composition=atomic_composition,
                amount=amount)

        setattr(self, 'composition', composition)

        self.equivalence_ratio = properties.get('equivalence-ratio')
        self.ignition_type = deepcopy(properties.get('ignition-type'))

        if 'time-histories' in properties and 'volume-history' in properties:
            raise TypeError(
                'time-histories and volume-history are mutually exclusive')

        if 'time-histories' in properties:
            for hist in properties['time-histories']:
                if hasattr(self,
                           '{}_history'.format(hist['type'].replace(' ',
                                                                    '_'))):
                    raise ValueError(
                        'Each history type may only be specified once. {} was '
                        'specified multiple times'.format(hist['type']))
                time_col = hist['time']['column']
                time_units = hist['time']['units']
                quant_col = hist['quantity']['column']
                quant_units = hist['quantity']['units']
                if isinstance(hist['values'], list):
                    values = np.array(hist['values'])
                else:
                    # Load the values from a file
                    filename = hist['values']['filename']
                    if not isabs(filename):
                        filename = join(directory, filename)
                    values = np.genfromtxt(filename, delimiter=',')
                time_history = TimeHistory(
                    time=Q_(values[:, time_col], time_units),
                    quantity=Q_(values[:, quant_col], quant_units),
                    type=hist['type'],
                )

                setattr(self,
                        '{}_history'.format(hist['type'].replace(' ', '_')),
                        time_history)

        if 'volume-history' in properties:
            warn(
                'The volume-history field should be replaced by time-histories. '
                'volume-history will be removed after PyKED 0.4',
                DeprecationWarning)
            time_col = properties['volume-history']['time']['column']
            time_units = properties['volume-history']['time']['units']
            volume_col = properties['volume-history']['volume']['column']
            volume_units = properties['volume-history']['volume']['units']
            values = np.array(properties['volume-history']['values'])
            self.volume_history = VolumeHistory(
                time=Q_(values[:, time_col], time_units),
                volume=Q_(values[:, volume_col], volume_units),
            )

        history_types = [
            'volume', 'temperature', 'pressure', 'piston_position',
            'light_emission', 'OH_emission', 'absorption'
        ]
        for h in history_types:
            if not hasattr(self, '{}_history'.format(h)):
                setattr(self, '{}_history'.format(h), None)
Example #49
def download_data(pkg_name,
                  path,
                  url,
                  md5,
                  download_client=None,
                  extract=False,
                  compressed_bags=None,
                  quiet=True):
    """Install test data checking md5 and rosbag decompress if needed."""
    if download_client is None:
        if is_google_drive_url(url):
            download_client = 'gdown'
        else:
            download_client = 'wget'
    if compressed_bags is None:
        compressed_bags = []
    if not osp.isabs(path):
        # get package path
        rp = rospkg.RosPack()
        try:
            pkg_path = rp.get_path(pkg_name)
        except rospkg.ResourceNotFound:
            print('\033[31m{name} is not found in {path}\033[0m'.format(
                name=pkg_name, path=rp.list()))
            return
        pkg_path = rp.get_path(pkg_name)
        path = osp.join(pkg_path, path)
        if not osp.exists(osp.dirname(path)):
            try:
                os.makedirs(osp.dirname(path))
            except OSError as e:
                print('\033[31mCould not make directory {dir}: {err}\033[0m'.
                      format(dir=osp.dirname(path), err=e))
                return
    # prepare cache dir
    ros_home = os.getenv('ROS_HOME', osp.expanduser('~/.ros'))
    cache_dir = osp.join(ros_home, 'data', pkg_name)
    if not osp.exists(cache_dir):
        os.makedirs(cache_dir)
    cache_file = osp.join(cache_dir, osp.basename(path))
    # check if cache exists, and update if necessary
    if not (osp.exists(cache_file) and check_md5sum(cache_file, md5)):
        if osp.exists(cache_file):
            os.remove(cache_file)
        download(download_client, url, cache_file, quiet=quiet)
    if osp.islink(path):
        # overwrite the link
        os.remove(path)
        os.symlink(cache_file, path)
    elif not osp.exists(path):
        os.symlink(cache_file, path)  # create link
    else:
        # not link and exists so skipping
        sys.stderr.write("WARNING: '{0}' exists\n".format(path))
        return
    if extract:
        # extract files in cache dir and create symlink for them
        extracted_files = extract_file(cache_file, to_directory=cache_dir)
        for file_ in extracted_files:
            file_ = osp.join(cache_dir, file_)
            dst_path = osp.join(osp.split(path)[0], osp.basename(file_))
            if osp.islink(dst_path):
                os.remove(dst_path)
            elif osp.exists(dst_path) and not osp.isdir(dst_path):
                os.remove(dst_path)
            elif osp.exists(dst_path) and osp.isdir(dst_path):
                shutil.rmtree(dst_path)
            os.symlink(file_, dst_path)
    for compressed_bag in compressed_bags:
        if not osp.isabs(compressed_bag):
            rp = rospkg.RosPack()
            pkg_path = rp.get_path(pkg_name)
            compressed_bag = osp.join(pkg_path, compressed_bag)
        decompress_rosbag(compressed_bag, quiet=quiet)
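A hedged usage sketch, assuming the function above is importable, a ROS environment where rospkg can resolve the package, and placeholder values for the package name, URL and md5:

download_data(
    pkg_name='my_test_pkg',                  # hypothetical package
    path='test_data/sample.bag',             # relative to the package root
    url='https://example.com/sample.bag',    # placeholder URL
    md5='d41d8cd98f00b204e9800998ecf8427e',  # placeholder checksum
    extract=False,
    quiet=True,
)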
Example #50
def TestIsAbs():
    assert path.isabs('/abc')
    assert not path.isabs('abc/123')
Example #51
def remove_file(path):
    """remove does not work with directories"""
    from ml_dash.config import Args
    assert isabs(path), "the path has to be absolute path."
    _path = join(Args.logdir, path[1:])
    os.remove(_path)
Example #52
    # join(PROJECT_DIR, 'reports/static'),
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

_template_dirs = [join(PROJECT_DIR, 'templates')]

SHORTCUTS_TEMPLATE = config.get('web', 'shortcuts_template')
if SHORTCUTS_TEMPLATE:
    if not (isabs(SHORTCUTS_TEMPLATE) and isfile(SHORTCUTS_TEMPLATE)):
        raise ImproperlyConfigured(
            "SHORTCUTS_TEMPLATE '%s' is not absolute path to existing file" %
            SHORTCUTS_TEMPLATE)
    _shortcuts_template_dir, SHORTCUTS_TEMPLATE = split(SHORTCUTS_TEMPLATE)
    _template_dirs.append(_shortcuts_template_dir)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'reports.urls'
Example #53
    def upload_create_or_update(filename_to_update=None, key="file"):
        """
        :return: json object. contains relative filename on success, and error message on failure.
        """
        # redirect_url = request.values.get('redirect', default=request.url, type=str)

        # 1. request -> files(data) -> local uploads folder + json response(error+filename)
        # Accept multiple files
        # file = request.files[key]
        files = request.files.getlist(key)
        if files is None or len(files) == 0:
            ret = {'error': 'no file part found in multipart/form-data'}
            return str(json.dumps(ret)), 400, RESPONSE_JSON_ACCESSCONTROL
        # NOTE: use [] * len(..) carefully; it only makes shallow copies. Use a `for` loop or comprehension instead.
        ret = [{} for _ in range(len(files))
               ]  # [{filename: str, error: optional(str)}]
        dispatch_arg = []

        error_count = 0
        for idx, file in enumerate(files):
            if file.filename == "":
                ret[idx].update({
                    'error':
                    "no file name is given or no file selected for uploading"
                })
                error_count += 1
                continue  # bypass to the next one

            if file and osp.splitext(
                    file.filename)[1].lower() in ALLOWED_EXTENSIONS:
                if filename_to_update is None:
                    # TODO: handle Chinese filenames. str.encode('utf-8')?
                    filepath = secure_filename(file.filename)
                    filepath = get_new_name_if_exists(
                        osp.join(app.config['UPLOAD_FOLDER'], filepath))
                else:
                    filepath = osp.join(app.config['UPLOAD_FOLDER'],
                                        filename_to_update)
                if not osp.isabs(filepath):
                    filepath = osp.join(app.root_path, filepath)
                try:
                    file.save(filepath)  # NOTE: overwrite existed one
                except Exception as e:
                    ret[idx].update(
                        {'error': f"Failed to upload file to {filepath}"})
                    error_count += 1
                    continue  # bypass to the next one
                INFO('file uploaded to: ' + filepath)
                dispatch_arg.append(filepath)
                ret[idx].update({'filename': osp.basename(filepath)})
            else:
                ret[idx].update({
                    'error':
                    f"only accept these image types: {ALLOWED_EXTENSIONS}"
                })
                error_count += 1
                continue  # bypass to the next one
        ret = {'uploaded': ret}

        # 2. dispatch to subscribers of `on_uploads` event
        if error_count < len(files):  # error_count == 0:
            dispatch_results = app.dispatch_handlers(
                app.__class__.EventUploads,
                dispatch_arg if len(dispatch_arg) > 1 else dispatch_arg[0])
            # NOTE: multiple inputs can be consumed at once, so the number of results can be smaller than the number of inputs.
            ret.update({'dispatched:': dispatch_results})

        return str(json.dumps(ret)), 200 if error_count < len(
            files) else 400, RESPONSE_JSON_ACCESSCONTROL
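From the client side this handler expects a multipart/form-data request with one or more parts named `file`. A sketch with `requests`, assuming the Flask app above is served locally and the route is mounted at `/upload` (the route decorator is not shown in this snippet, and the filenames are placeholders):

import requests

files = [
    ('file', ('photo1.jpg', open('photo1.jpg', 'rb'), 'image/jpeg')),
    ('file', ('photo2.png', open('photo2.png', 'rb'), 'image/png')),
]
resp = requests.post('http://localhost:5000/upload', files=files)
print(resp.status_code, resp.json())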
Example #54
    def __call__(archive,
                 annex=None,
                 add_archive_leading_dir=False,
                 strip_leading_dirs=False,
                 leading_dirs_depth=None,
                 leading_dirs_consider=None,
                 use_current_dir=False,
                 delete=False,
                 key=False,
                 exclude=None,
                 rename=None,
                 existing='fail',
                 annex_options=None,
                 copy=False,
                 commit=True,
                 allow_dirty=False,
                 stats=None,
                 drop_after=False,
                 delete_after=False):
        """
        Returns
        -------
        annex
        """
        if exclude:
            exclude = assure_tuple_or_list(exclude)
        if rename:
            rename = assure_tuple_or_list(rename)

        # TODO: we could possibly ask the user whether he wants to convert
        # his git repo into an annex
        archive_path = archive
        pwd = getpwd()
        if annex is None:
            annex = get_repo_instance(pwd, class_=AnnexRepo)
            if not isabs(archive):
                # if not absolute -- relative to wd and thus
                archive_path = normpath(opj(realpath(pwd), archive))
                # abspath(archive) is not "good" since dereferences links in the path
                # archive_path = abspath(archive)
        elif not isabs(archive):
            # if we are given an annex, then assume that given path is within annex, not
            # relative to PWD
            archive_path = opj(annex.path, archive)
        annex_path = annex.path

        # _rpath below should depict paths relative to the top of the annex
        archive_rpath = relpath(
            archive_path,
            # Use `get_dataset_root` to avoid resolving the leading path. If no
            # repo is found, downstream code will raise FileNotInRepositoryError.
            get_dataset_root(archive_path) or ".")

        if archive in annex.untracked_files:
            raise RuntimeError(
                "The archive is not under annex yet. You should run 'datalad "
                "add {}' first".format(archive))

        if not allow_dirty and annex.dirty:
            # already saved me once ;)
            raise RuntimeError(
                "You better commit all the changes and untracked files first")

        if not key:
            # we were given a file which must exist
            if not exists(archive_path):
                raise ValueError("Archive {} does not exist".format(archive))
            # TODO: support adding archives content from outside the annex/repo
            origin = 'archive'
            key = annex.get_file_key(archive_rpath)
            archive_dir = dirname(archive_path)
        else:
            origin = 'key'
            key = archive
            archive_dir = None  # We must not have anything to do with the location under .git/annex

        archive_basename = file_basename(archive)

        if not key:
            # TODO: allow for it to be under git???  how to reference then?
            raise NotImplementedError(
                "Provided file %s is not under annex.  We don't support yet adding everything "
                "straight to git" % archive)

        # are we in a subdirectory of the repository?
        pwd_under_annex = commonprefix([pwd, annex_path]) == annex_path
        #  then we should add content under that
        # subdirectory,
        # get the path relative to the repo top
        if use_current_dir:
            # if outside -- extract to the top of repo
            extract_rpath = relpath(pwd, annex_path) \
                if pwd_under_annex \
                else None
        else:
            extract_rpath = relpath(archive_dir, annex_path)

        # relpath might return '.' as the relative path to curdir, which then normalize_paths
        # would take as instructions to really go from cwd, so we need to sanitize
        if extract_rpath == curdir:
            extract_rpath = None  # no special relpath from top of the repo

        # and from now on operate on the key or wherever the content is available "canonically"
        try:
            key_rpath = annex.get_contentlocation(
                key)  # , relative_to_top=True)
        except:
            raise RuntimeError(
                "Content of %s seems to be N/A.  Fetch it first" % key)

        # now we simply need to go through every file in that archive and
        lgr.info("Adding content of the archive %s into annex %s", archive,
                 annex)

        from datalad.customremotes.archives import ArchiveAnnexCustomRemote
        # TODO: shouldn't we be able just to pass existing AnnexRepo instance?
        # TODO: we will use persistent cache so we could just (ab)use possibly extracted archive
        annexarchive = ArchiveAnnexCustomRemote(path=annex_path,
                                                persistent_cache=True)
        # We will move extracted content so it must not exist prior running
        annexarchive.cache.allow_existing = True
        earchive = annexarchive.cache[key_rpath]

        # TODO: check if may be it was already added
        if ARCHIVES_SPECIAL_REMOTE not in annex.get_remotes():
            init_datalad_remote(annex,
                                ARCHIVES_SPECIAL_REMOTE,
                                autoenable=True)
        else:
            lgr.debug("Special remote {} already exists".format(
                ARCHIVES_SPECIAL_REMOTE))

        precommitted = False
        delete_after_rpath = None
        try:
            old_always_commit = annex.always_commit
            # When faking dates, batch mode is disabled, so we want to always
            # commit.
            annex.always_commit = annex.fake_dates_enabled

            if annex_options:
                if isinstance(annex_options, string_types):
                    annex_options = shlex.split(annex_options)

            leading_dir = earchive.get_leading_directory(
                depth=leading_dirs_depth, exclude=exclude, consider=leading_dirs_consider) \
                if strip_leading_dirs else None
            leading_dir_len = len(leading_dir) + len(
                opsep) if leading_dir else 0

            # we need to create a temporary directory at the top level which would later be
            # removed
            prefix_dir = basename(tempfile.mktemp(prefix=".datalad", dir=annex_path)) \
                if delete_after \
                else None

            # dedicated stats which would be added to passed in (if any)
            outside_stats = stats
            stats = ActivityStats()

            for extracted_file in earchive.get_extracted_files():
                stats.files += 1
                extracted_path = opj(earchive.path, extracted_file)

                if islink(extracted_path):
                    link_path = realpath(extracted_path)
                    if not exists(
                            link_path
                    ):  # TODO: config  addarchive.symlink-broken='skip'
                        lgr.warning("Path %s points to non-existing file %s" %
                                    (extracted_path, link_path))
                        stats.skipped += 1
                        continue
                        # TODO: check if points outside of the archive -- warning and skip

                # preliminary target name which might get modified by renames
                target_file_orig = target_file = extracted_file

                # strip leading dirs
                target_file = target_file[leading_dir_len:]

                if add_archive_leading_dir:
                    target_file = opj(archive_basename, target_file)

                if rename:
                    target_file = apply_replacement_rules(rename, target_file)

                # continue to next iteration if extracted_file in excluded
                if exclude:
                    try:  # since we need to skip outside loop from inside loop
                        for regexp in exclude:
                            if re.search(regexp, extracted_file):
                                lgr.debug(
                                    "Skipping {extracted_file} since contains {regexp} pattern"
                                    .format(**locals()))
                                stats.skipped += 1
                                raise StopIteration
                    except StopIteration:
                        continue

                if prefix_dir:
                    target_file = opj(prefix_dir, target_file)
                    # but also allow for it in the orig
                    target_file_orig = opj(prefix_dir, target_file_orig)

                target_file_path_orig = opj(annex.path, target_file_orig)

                url = annexarchive.get_file_url(
                    archive_key=key,
                    file=extracted_file,
                    size=os.stat(extracted_path).st_size)

                # lgr.debug("mv {extracted_path} {target_file}. URL: {url}".format(**locals()))

                target_file_path = opj(extract_rpath, target_file) \
                    if extract_rpath else target_file

                target_file_path = opj(annex.path, target_file_path)

                if lexists(target_file_path):
                    handle_existing = True
                    if md5sum(target_file_path) == md5sum(extracted_path):
                        if not annex.is_under_annex(extracted_path):
                            # if under annex -- must be having the same content,
                            # we should just add possibly a new extra URL
                            # but if under git -- we cannot/should not do
                            # anything about it ATM
                            if existing != 'overwrite':
                                continue
                        else:
                            handle_existing = False
                    if not handle_existing:
                        pass  # nothing... just to avoid additional indentation
                    elif existing == 'fail':
                        raise RuntimeError(
                            "File {} already exists, but new (?) file {} was instructed "
                            "to be placed there while overwrite=False".format(
                                target_file_path, extracted_file))
                    elif existing == 'overwrite':
                        stats.overwritten += 1
                        # to make sure it doesn't conflict -- might have been a tree
                        rmtree(target_file_path)
                    else:
                        target_file_path_orig_ = target_file_path

                        # To keep extension intact -- operate on the base of the filename
                        p, fn = os.path.split(target_file_path)
                        ends_with_dot = fn.endswith('.')
                        fn_base, fn_ext = file_basename(fn, return_ext=True)

                        if existing == 'archive-suffix':
                            fn_base += '-%s' % archive_basename
                        elif existing == 'numeric-suffix':
                            pass  # archive-suffix will have the same logic
                        else:
                            raise ValueError(existing)
                        # keep incrementing index in the suffix until file doesn't collide
                        suf, i = '', 0
                        while True:
                            target_file_path_new = opj(
                                p, fn_base + suf +
                                ('.' if
                                 (fn_ext or ends_with_dot) else '') + fn_ext)
                            if not lexists(target_file_path_new):
                                break
                            lgr.debug("File %s already exists" %
                                      target_file_path_new)
                            i += 1
                            suf = '.%d' % i
                        target_file_path = target_file_path_new
                        lgr.debug("Original file %s will be saved into %s" %
                                  (target_file_path_orig_, target_file_path))
                        # TODO: should we reserve smth like
                        # stats.clobbed += 1

                if target_file_path != target_file_path_orig:
                    stats.renamed += 1

                #target_path = opj(getpwd(), target_file)
                if copy:
                    raise NotImplementedError(
                        "Not yet copying from 'persistent' cache")
                else:
                    # os.renames(extracted_path, target_path)
                    # addurl implementation relying on annex's addurl below would actually copy
                    pass

                lgr.debug(
                    "Adding %s to annex pointing to %s and with options %r",
                    target_file_path, url, annex_options)

                out_json = annex.add_url_to_file(target_file_path,
                                                 url,
                                                 options=annex_options,
                                                 batch=True)

                if out_json.get('key') is not None:  # annex.is_under_annex(target_file, batch=True)
                    # due to http://git-annex.branchable.com/bugs/annex_drop_is_not___34__in_effect__34___for_load_which_was___34__addurl_--batch__34__ed_but_not_yet_committed/?updated
                    # we need to maintain a list of those to be dropped files
                    if drop_after:
                        annex.drop_key(out_json['key'], batch=True)
                        stats.dropped += 1
                    stats.add_annex += 1
                else:
                    lgr.debug(
                        "File {} was added to git, not adding url".format(
                            target_file_path))
                    stats.add_git += 1

                if delete_after:
                    # delayed removal so it doesn't interfere with batched processes, since any pure
                    # git action invokes precommit which closes batched processes. But we still want to count it
                    stats.removed += 1

                # # chaining 3 annex commands, 2 of which not batched -- less efficient but more bullet proof etc
                # annex.add(target_path, options=annex_options)
                # # above action might add to git or to annex
                # if annex.file_has_content(target_path):
                #     # if not --  it was added to git, if in annex, it is present and output is True
                #     annex.add_url_to_file(target_file, url, options=['--relaxed'], batch=True)
                #     stats.add_annex += 1
                # else:
                #     lgr.debug("File {} was added to git, not adding url".format(target_file))
                #     stats.add_git += 1
                # # TODO: actually check if it is anyhow different from a previous version. If not
                # # then it wasn't really added

                del target_file  # Done with target_file -- just to have clear end of the loop

            if delete and archive and origin != 'key':
                lgr.debug("Removing the original archive {}".format(archive))
                # force=True since it might sometimes still be staged and fail
                annex.remove(archive_rpath, force=True)

            lgr.info("Finished adding %s: %s" %
                     (archive, stats.as_str(mode='line')))

            if outside_stats:
                outside_stats += stats
            if delete_after:
                # force since not committed. r=True for -r (passed into git call
                # to recurse)
                delete_after_rpath = opj(
                    extract_rpath, prefix_dir) if extract_rpath else prefix_dir
                lgr.debug("Removing extracted and annexed files under %s",
                          delete_after_rpath)
                annex.remove(delete_after_rpath, r=True, force=True)
            if commit:
                commit_stats = outside_stats if outside_stats else stats
                annex.precommit()  # so batched ones close and files become annex symlinks etc
                precommitted = True
                if annex.is_dirty(untracked_files=False):
                    annex.commit("Added content extracted from %s %s\n\n%s" %
                                 (origin, archive_rpath,
                                  commit_stats.as_str(mode='full')),
                                 _datalad_msg=True)
                    commit_stats.reset()
        finally:
            # since we batched addurl, we should close those batched processes
            # if haven't done yet.  explicitly checked to avoid any possible
            # "double-action"
            if not precommitted:
                annex.precommit()

            if delete_after_rpath:
                delete_after_path = opj(annex_path, delete_after_rpath)
                if exists(delete_after_path):  # should not be there
                    # but for paranoid yoh
                    lgr.warning(
                        "Removing temporary directory under which extracted "
                        "files were annexed and should have been removed: %s",
                        delete_after_path)
                    rmtree(delete_after_path)

            annex.always_commit = old_always_commit
            # remove what is left and/or everything upon failure
            earchive.clean(force=True)

        return annex
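
As an aside, the collision handling above (the archive-suffix/numeric-suffix branch) can be read as a small standalone routine. A minimal sketch follows, assuming only the standard library; the helper name deduplicate_path is hypothetical, and the ends_with_dot corner case handled above is omitted for brevity.

from os.path import join as opj, lexists


def deduplicate_path(directory, fn_base, fn_ext):
    # Keep incrementing a numeric suffix until the candidate path does not exist yet.
    suf, i = '', 0
    while True:
        candidate = opj(directory, fn_base + suf + ('.' if fn_ext else '') + fn_ext)
        if not lexists(candidate):
            return candidate
        i += 1
        suf = '.%d' % i

# e.g. with 'report.txt' already present, deduplicate_path('.', 'report', 'txt')
# would return './report.1.txt' (or the first free '.N' suffix).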
Example #55
0
def safe_relpath(path):
    """Return True if `path` is relative and does not escape into a parent directory."""
    return not (isabs(path) or normpath(path).startswith(pardir))
Example #56
0
def configure_logging(logger_name, filename=None):
    """ Configure logging and return the named logger and the location of the logging configuration file loaded.

    This function expects a Splunk app directory structure::

        <app-root>
            bin
                ...
            default
                ...
            local
                ...

    This function looks for a logging configuration file at each of these locations, loading the first, if any,
    logging configuration file that it finds::

        local/{name}.logging.conf
        default/{name}.logging.conf
        local/logging.conf
        default/logging.conf

    The current working directory is set to *<app-root>* before the logging configuration file is loaded. Hence, paths
    in the logging configuration file are relative to *<app-root>*. The current directory is reset before return.

    You may short-circuit the search for a logging configuration file by providing an alternative file location in
    `filename`. Logging configuration files must be in `ConfigParser format`_.

    Arguments:

    :param logger_name: Logger name
    :type logger_name: bytes, unicode

    :param filename: Location of an alternative logging configuration file or `None`.
    :type filename: bytes, unicode or NoneType

    :returns: The named logger and the location of the logging configuration file loaded.
    :rtype: tuple

    .. _ConfigParser format: https://docs.python.org/2/library/logging.config.html#configuration-file-format

    """
    if filename is None:
        if logger_name is None:
            probing_paths = [
                path.join('local', 'logging.conf'),
                path.join('default', 'logging.conf')
            ]
        else:
            probing_paths = [
                path.join('local', logger_name + '.logging.conf'),
                path.join('default', logger_name + '.logging.conf'),
                path.join('local', 'logging.conf'),
                path.join('default', 'logging.conf')
            ]
        for relative_path in probing_paths:
            configuration_file = path.join(app_root, relative_path)
            if path.exists(configuration_file):
                filename = configuration_file
                break
    elif not path.isabs(filename):
        found = False
        for conf in 'local', 'default':
            configuration_file = path.join(app_root, conf, filename)
            if path.exists(configuration_file):
                filename = configuration_file
                found = True
                break
        if not found:
            raise ValueError(
                'Logging configuration file "{}" not found in local or default directory'
                .format(filename))
    elif not path.exists(filename):
        raise ValueError(
            'Logging configuration file "{}" not found'.format(filename))

    if filename is not None:
        global _current_logging_configuration_file
        filename = path.realpath(filename)

        if filename != _current_logging_configuration_file:
            working_directory = getcwd()
            chdir(app_root)
            try:
                fileConfig(filename, {'SPLUNK_HOME': splunk_home})
            finally:
                chdir(working_directory)
            _current_logging_configuration_file = filename

    if len(root.handlers) == 0:
        root.addHandler(StreamHandler())

    return None if logger_name is None else getLogger(logger_name), filename
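
A minimal usage sketch for configure_logging, assuming the surrounding module defines app_root, splunk_home, root and _current_logging_configuration_file as used above; the logger name 'myapp' and the file names are purely illustrative:

# Load local/myapp.logging.conf, default/myapp.logging.conf, local/logging.conf or
# default/logging.conf -- whichever is found first -- and return the named logger.
logger, conf_file = configure_logging('myapp')
logger.info('logging configured from %s', conf_file)

# A relative filename short-circuits the search and is looked up under local/ then default/.
logger, conf_file = configure_logging('myapp', filename='logging.conf')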
Example #57
0
    def read(self):
        """Reads the data stored in the files we have been initialized with. It will
        ignore files that cannot be read, possibly leaving an empty configuration

        :return: Nothing
        :raise IOError: if a file cannot be handled"""
        if self._is_initialized:
            return
        self._is_initialized = True

        if not isinstance(self._file_or_files, (tuple, list)):
            files_to_read = [self._file_or_files]
        else:
            files_to_read = list(self._file_or_files)
        # end assure we have a copy of the paths to handle

        seen = set(files_to_read)
        num_read_include_files = 0
        while files_to_read:
            file_path = files_to_read.pop(0)
            fp = file_path
            file_ok = False

            if hasattr(fp, "seek"):
                self._read(fp, fp.name)
            else:
                # assume a path if it is not a file-object
                try:
                    with open(file_path, 'rb') as fp:
                        file_ok = True
                        self._read(fp, fp.name)
                except IOError:
                    continue

            # Read includes and append those that we didn't handle yet
            # We expect all paths to be normalized and absolute (and will assure that is the case)
            if self._has_includes():
                for _, include_path in self._included_paths():
                    if include_path.startswith('~'):
                        include_path = osp.expanduser(include_path)
                    if not osp.isabs(include_path):
                        if not file_ok:
                            continue
                        # end ignore relative paths if we don't know the configuration file path
                        assert osp.isabs(
                            file_path
                        ), "Need absolute paths to be sure our cycle checks will work"
                        include_path = osp.join(osp.dirname(file_path),
                                                include_path)
                    # end make include path absolute
                    include_path = osp.normpath(include_path)
                    if include_path in seen or not os.access(
                            include_path, os.R_OK):
                        continue
                    seen.add(include_path)
                    # insert included file to the top to be considered first
                    files_to_read.insert(0, include_path)
                    num_read_include_files += 1
                # each include path in configuration file
            # end handle includes
        # END for each file object to read

        # If there was no file included, we can safely write back (potentially) the configuration file
        # without altering its meaning
        if num_read_include_files == 0:
            self._merge_includes = False
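
The include handling in read() boils down to a small resolution rule, shown here as a standalone sketch (the helper name resolve_include is hypothetical, not part of the class): '~' is expanded, relative include paths are anchored at the directory of the including file, and the result is normalized before the cycle and readability checks.

import os.path as osp


def resolve_include(include_path, including_file):
    # Expand '~' first, then anchor relative includes at the including file's directory.
    if include_path.startswith('~'):
        include_path = osp.expanduser(include_path)
    if not osp.isabs(include_path):
        include_path = osp.join(osp.dirname(including_file), include_path)
    # Normalize so that cycle detection via a 'seen' set works on canonical paths.
    return osp.normpath(include_path)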
Example #58
0
File: base.py Project: glalteva/datalad
    def _get_filepath(self, fpath):
        if isabs(fpath):
            return normpath(fpath)
        else:
            return normpath(opj(self.annex.path, fpath))
Example #59
0
    def __init__(self,
                 annotations,
                 pipeline,
                 data_root=None,
                 img_prefix='',
                 seg_prefix=None,
                 proposal_file=None,
                 filter_empty=True,
                 positives_only=False,
                 test_mode=False):

        # MMDetection takes the filepath as input, then loads the
        # annotations. Here, annotations are already loaded, then
        # fed as input. Makes it easier to deal with CV splits.

        self.img_infos = annotations
        if not test_mode:
            self.neg_infos = [
                _ for _ in annotations if _['ann']['bboxes'].shape[0] == 0
            ]
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.proposal_file = proposal_file
        self.filter_empty = filter_empty
        self.positives_only = positives_only
        self.test_mode = test_mode
        self.logger = logging.getLogger('root')

        # join paths if data_root is specified
        if self.data_root is not None:
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None
                    or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root,
                                              self.proposal_file)
        # load proposals
        if self.proposal_file is not None:
            self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None

        # filter images with no annotation during training
        if not test_mode and filter_empty:
            valid_inds = self._filter_imgs()
            self.img_infos = [self.img_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]

        if not test_mode and filter_empty:
            self.logger.info(
                'Found {:,} annotations with bounding boxes ...'.format(
                    len(self.img_infos)))
            if not self.positives_only:
                self.logger.info(
                    'Found {:,} annotations without bounding boxes ...'.format(
                        len(self.neg_infos)))

        # set group flag for the sampler
        if not self.test_mode:
            self._set_group_flag()

        # processing pipeline
        self.pipeline = Compose(pipeline)
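
The path handling in this constructor follows one simple rule, extracted here as a hedged standalone sketch (join_prefix is a hypothetical helper and the example paths are illustrative): prefixes that are None or already absolute are left untouched, anything else is joined onto data_root.

import os.path as osp


def join_prefix(data_root, prefix):
    # Keep None and absolute prefixes as-is; anchor relative prefixes at data_root.
    if prefix is None or osp.isabs(prefix):
        return prefix
    return osp.join(data_root, prefix)

# e.g. join_prefix('/data/coco', 'train2017')      -> '/data/coco/train2017'
#      join_prefix('/data/coco', '/mnt/proposals') -> '/mnt/proposals'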
Example #60
0
def startstop(action=None,
              stdout=os.devnull,
              stderr=None,
              stdin=os.devnull,
              pidfile='pid.txt',
              startmsg='started with pid %s'):
    '''
        This is the "front-end"method for starting the daemon, stopping
        and restarting it.
    '''
    if len(action) > 1:
        setup()
        from os import path
        if not path.isabs(stdout):
            stdout = path.join(getAngelVarPath(), stdout)
        if not path.isabs(stderr):
            stderr = path.join(getAngelVarPath(), stderr)
        if not path.isabs(stdin):
            stdin = path.join(getAngelVarPath(), stdin)
        if not path.isabs(pidfile):
            pidfile = path.join(getAngelVarPath(), pidfile)
        try:
            with open(pidfile, 'r') as pf:
                pid = int(pf.read().strip())
        except IOError:
            pid = None
        if 'stop' == action or 'restart' == action:
            if not pid:
                mess = "Could not stop, pid file '%s' missing.%s"
                sys.stderr.write(mess % (pidfile, os.linesep))
                sys.exit(1)
            try:
                countSIGTERM = 0
                sleepTime = 1
                while 1:
                    if countSIGTERM > 3:
                        sys.stderr.write(
                            "Process not responding, sending SIGKILL to process with PID %i.%s"
                            % (pid, os.linesep))
                        os.kill(pid, SIGKILL)
                    else:
                        os.kill(pid, SIGTERM)
                    countSIGTERM += 1
                    time.sleep(sleepTime)
                    # send signal 0 straight away, no need to re-enter the loop
                    os.kill(pid, 0)
                    sleepTime = sleepTime + 1
            except OSError as err:
                err = str(err)
                if err.find("No such process") > 0:
                    os.remove(pidfile)
                    if 'stop' == action:
                        sys.exit(0)
                    action = 'start'
                    pid = None
                else:
                    print(str(err))
                    sys.exit(1)
        if 'start' == action:
            if pid:
                mess = "Start aborded since pid file '%s' exists.%s"
                sys.stderr.write(mess % (pidfile, os.linesep))
                sys.exit(1)
            daemonize(stdout, stderr, stdin, pidfile, startmsg)
            return
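
A hedged usage sketch of startstop, assuming the surrounding module provides setup(), getAngelVarPath() and daemonize() as referenced above; the launcher script and the relative file names are illustrative only:

import sys

if __name__ == '__main__':
    # First CLI argument selects 'start', 'stop' or 'restart'; relative stdout/stderr/pid
    # paths are placed under the directory returned by getAngelVarPath().
    startstop(action=sys.argv[1],
              stdout='daemon.log',
              stderr='daemon.err',
              pidfile='daemon.pid')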