Example #1
	def __init__(self, windows_path, config):
		"""Create a song with the given windows path
		
		windows_path must be whatever was originally in the fpl file, normalized with abspath
		ex: ntpath.abspath("F:\Music\Trucker's Atlas.mp3")
		"""
		self.windows_path = windows_path
		self.config = config
		self.cached_size = None
		
		# the path to the song in the source directory
		self.source_path = self.windows_path
		if config.fb2k_source_mapping is not None:
			# need to transform e.g. F:\Music\artist\song.mp3 to /media/A/Music/artist/song.mp3
			if not self.source_path.startswith(config.fb2k_source_mapping):
				raise Exception("Song " + self.source_path + " does not use source mapping")
			relpath = ntpath.relpath(self.source_path, start=config.fb2k_source_mapping)
			relpath = relpath.replace(ntpath.sep, os.path.sep)
			self.source_path = os.path.join(self.config.source, relpath)
		elif not self.source_path.startswith(self.config.source):
			raise Exception("Song " + self.source_path + " is not within source")
		
		# the path of the song relative to the source directory
		self.relative_path = os.path.relpath(self.source_path, start=self.config.source)
		dest_path = os.path.join(self.config.dest, self.relative_path)
		# the path of the song after copied to dest, relative to playlist_dest
		self.playlist_path = os.path.relpath(dest_path, start=self.config.playlist_dest)
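The mapping step above works on any host because ntpath is importable everywhere, not just on Windows. A minimal standalone sketch with made-up prefixes standing in for config.fb2k_source_mapping and config.source:

import ntpath
import os.path

fb2k_source_mapping = "F:\\Music"      # hypothetical Windows-side prefix
source = "/media/A/Music"              # hypothetical local source directory

windows_path = "F:\\Music\\artist\\song.mp3"
relpath = ntpath.relpath(windows_path, start=fb2k_source_mapping)
relpath = relpath.replace(ntpath.sep, os.path.sep)
print(os.path.join(source, relpath))   # /media/A/Music/artist/song.mp3 (on POSIX)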
Example #2
def process_summary(section_sink):
    fatal_errors = dict()
    errors = dict()
    warnings = dict()
    warn_stats = dict()
    suppressed_warn_stats = dict()

    while True:
        try:
            target_chain, messages = yield
        except GeneratorExit:
            break

        for message in messages:
            if message.kind == "warning":
                if message_filter(message):
                    warn_stats[message.code] = warn_stats.get(message.code,
                                                              0) + 1
                else:
                    suppressed_warn_stats[message.code] = \
                            suppressed_warn_stats.get(message.code, 0) + 1

        # Group messages by kind and project

        assert target_chain[0].filename
        assert target_chain[0].ordinal == 1

        solution_path = target_chain[0].filename
        for chain_item in reversed(target_chain):
            if chain_item.filename:
                project_path = chain_item.filename
                break

        project_relpath = ntpath.relpath(project_path,
                                         ntpath.dirname(solution_path))
        for message in filter(message_filter, messages):
            if message.kind == "fatal error":
                fatal_errors.setdefault(project_relpath,
                                        list()).append(message)
            elif message.kind == "error":
                errors.setdefault(project_relpath, list()).append(message)
            elif message.kind == "warning":
                warnings.setdefault(project_relpath, list()).append(message)
            else:
                assert False, "Don't know what a {} is".format(message.kind)

    if fatal_errors:
        section_sink.send(
            (True, "Fatal Errors", True, summarize_messages(fatal_errors)))
    if errors:
        section_sink.send((True, "Errors", True, summarize_messages(errors)))
    if warnings:
        section_sink.send(
            (False, "Warnings", True, summarize_messages(warnings)))
    if warn_stats:
        section_sink.send(
            (False, "Warning Statistics", False, summarize_stats(warn_stats)))
    if suppressed_warn_stats:
        section_sink.send((False, "Suppressed Warnings", False,
                           summarize_stats(suppressed_warn_stats)))
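process_summary is a generator-based coroutine: it must be primed before the first send(), and close() ends the loop by raising GeneratorExit at the yield. A minimal driver sketch showing the same send/close protocol with a hypothetical print-based sink and payload:

def print_sink():
    # Stand-in for section_sink: prints each section it is sent.
    while True:
        try:
            section = yield
        except GeneratorExit:
            break
        print(section)

sink = print_sink()
next(sink)  # prime the coroutine so it reaches its first yield
sink.send((True, "Errors", True, "summary text"))
sink.close()  # raises GeneratorExit inside print_sink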
Example #3
def check_target_exe(config: TaskConfig, definition: TaskDefinition) -> None:
    if config.task.target_exe is None:
        if TaskFeature.target_exe in definition.features:
            raise TaskConfigError("missing target_exe")

        # target_exe is optional (or unused) for this task, so nothing to check
        return

    # Azure Blob Store uses virtualized directory structures.  As such, we need
    # the paths to already be canonicalized.  As an example, accessing the blob
    # store path "./foo" generates an exception, but "foo" and "foo/bar" do
    # not.
    if (posixpath.relpath(config.task.target_exe) != config.task.target_exe or
            ntpath.relpath(config.task.target_exe) != config.task.target_exe):
        raise TaskConfigError(
            "target_exe must be a canonicalized relative path")

    container = [
        x for x in config.containers if x.type == ContainerType.setup
    ][0]
    if not blob_exists(container.name, config.task.target_exe,
                       StorageType.corpus):
        err = "target_exe `%s` does not exist in the setup container `%s`" % (
            config.task.target_exe,
            container.name,
        )
        LOGGER.warning(err)
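The double relpath() check is easy to probe on its own. A sketch of the invariant the code enforces (is_canonical_relative is a name introduced here, not part of the original):

import ntpath
import posixpath

def is_canonical_relative(p):
    # Mirrors the check above: p must survive relpath() unchanged
    # under both path flavours.
    return posixpath.relpath(p) == p and ntpath.relpath(p) == p

print(is_canonical_relative("foo"))      # True
print(is_canonical_relative("./foo"))    # False: relpath() strips "./"
print(is_canonical_relative("foo/bar"))  # False: ntpath rewrites it as foo\bar
print(is_canonical_relative("../foo"))   # False: ntpath yields ..\foo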
Example #4
def find_mapping(annotations, original):

    root_folder = detect_root_folder(original)

    mappings = []
    for annotation in annotations:
        try:
            text = subprocess.check_output([
                'synctex', 'edit', '-o',
                str(annotation.page) + ':' + str(annotation.x) + ':' +
                str(annotation.y) + ':' + original
            ])
        except subprocess.CalledProcessError:
            sys.exit('No SyncTeX file found?')

        (file, line, column) = parse_synctex_output(text)
        # take the path relative to the project root, then re-root it
        # relative to the directory containing the original file
        file = ntpath.join(ntpath.dirname(original),
                           ntpath.relpath(file, root_folder))

        # Use posix path going forward
        file = file.replace(ntpath.sep, ntpath.altsep)

        mappings.append(common.Mapping(file, line, column))
    return mappings
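The sep-to-altsep swap at the end relies on ntpath.altsep being "/". A small standalone sketch of the conversion:

import ntpath

rel = ntpath.relpath(r"C:\project\chapters\intro.tex", r"C:\project")
print(rel)                                     # chapters\intro.tex
print(rel.replace(ntpath.sep, ntpath.altsep))  # chapters/intro.tex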
Example #5
    def _oid2filename(self, oid):
        """Convert an oid to a filename.

        Args:
            oid (unicode):
                The given oid.

        Returns:
            unicode:
            The filename of the element relative to the repopath.

        Raises:
            reviewboard.scmtools.errors.SCMError:
                An error occurred while finding the filename.
        """
        result = ClearCaseTool.run_cleartool(
            ['describe', '-fmt', '%En@@%Vn',
             'oid:%s' % oid],
            cwd=self.repopath)

        drive = os.path.splitdrive(self.repopath)[0]

        if drive:
            result = os.path.join(drive, result)

        return cpath.relpath(result, self.repopath)
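One subtlety in the drive handling above: joining a bare drive with a relative path yields a drive-relative path, with no separator inserted. A sketch using ntpath directly so it behaves the same on any host (paths are hypothetical):

import ntpath

print(ntpath.splitdrive(r"C:\views\vob"))     # ('C:', '\\views\\vob')
print(ntpath.join("C:", r"elem@@\main\3"))    # C:elem@@\main\3 (drive-relative)
print(ntpath.join("C:\\", r"elem@@\main\3"))  # C:\elem@@\main\3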
Example #6
    def test_realpath_relative(self):
        ABSTFN = ntpath.abspath(support.TESTFN)
        open(ABSTFN, "wb").close()
        self.addCleanup(support.unlink, ABSTFN)
        self.addCleanup(support.unlink, ABSTFN + "1")

        os.symlink(ABSTFN, ntpath.relpath(ABSTFN + "1"))
        self.assertPathEqual(ntpath.realpath(ABSTFN + "1"), ABSTFN)
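The relpath() call in this test leans on the default start=os.curdir, so an absolute path directly inside the current directory collapses to its basename. A tiny sketch:

import ntpath
import os

target = ntpath.join(ntpath.abspath(os.curdir), "data.bin")
print(ntpath.relpath(target))  # data.bin (relative to the current directory)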
Example #7
    def normalize_path_for_display(self, filename):
        """Return display friendly path without revision informations.

        In path construct for only display purpuse we don't need
        information about branch, version or even repository path
        so we return unextended path relative to repopath (view)
        """
        return cpath.relpath(
            self.unextend_path(filename)[1], self.repopath
        )
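What this returns is just the element's path inside the view. A sketch with hypothetical ClearCase-style values, where unextended stands in for the result of unextend_path():

import posixpath

repopath = "/view/myview/vobs/proj"             # hypothetical view root
unextended = repopath + "/src/main.c"
print(posixpath.relpath(unextended, repopath))  # src/main.c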
Example #9
    def relpath(cls, path, start):
        """Wrapper for os.path.relpath for Python 2.4.

        Python 2.4 doesn't have the os.path.relpath function, so this
        approximates it well enough for our needs.
        """
        if not hasattr(cpath, 'relpath'):
            if start[-1] != os.sep:
                start += os.sep

            return path[len(start):]

        return cpath.relpath(path, start)
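The manual slice used on pre-2.5 Pythons only matches relpath() when path actually lives under start. A sketch of both the match and the divergence, with made-up paths:

import posixpath

start = "/view/repo"
inside = "/view/repo/dir/file.c"
print(inside[len(start + "/"):])                    # dir/file.c
print(posixpath.relpath(inside, start))             # dir/file.c
print(posixpath.relpath("/view/other/f.c", start))  # ../other/f.c
# The slice would return garbage for that last path; the wrapper
# accepts this as "good enough" for its callers.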
Example #11
def clean_path(a_path, force_os=None, force_start=None):
    """
    This function is used to normalize the path (of an output or
    dependency) and also provide the path in relative form. It is
    relative to the current working directory
    """
    if not force_start:
        force_start = os.curdir
    if force_os == "windows":
        import ntpath
        return ntpath.relpath(ntpath.normpath(a_path), start=force_start)
    if force_os == "posix":
        import posixpath
        return posixpath.relpath(posixpath.normpath(a_path), start=force_start)
    return os.path.relpath(os.path.normpath(a_path), start=force_start)
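A couple of hypothetical calls, assuming clean_path() as defined above is in scope:

print(clean_path("out/../build/lib.o", force_os="posix"))    # build/lib.o
print(clean_path("out/../build/lib.o", force_os="windows"))  # build\lib.o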
Example #12
    def relpath(cls, path, start):
        """Wrapper for os.path.relpath for Python 2.4.

        Python 2.4 doesn't have the os.path.relpath function, so this
        approximates it well enough for our needs.

        ntpath.relpath() overflows and throws TypeError for paths
        containing at least 520 characters (not that hard to encounter
        in a UCM repository).
        """
        try:
            return cpath.relpath(path, start)
        except (AttributeError, TypeError):
            if start[-1] != os.sep:
                start += os.sep

            return path[len(start):]
Example #15
    def normalize_path_for_display(self, filename):
        """Return display friendly path without revision informations.

        In path construct for only display purpuse we don't need
        information about branch, version or even repository path
        so we return unextended path relative to repopath (view)
        """

        # There is no relpath function in Python 2.4
        # lets count relative path using manualy
        if not hasattr(cpath, 'relpath'):
            repo = self.repopath
            if repo[-1] != os.sep:
                repo += os.sep
            path = self.unextend_path(filename)[1]
            return path[path.find(repo) + len(repo):]

        return cpath.relpath(
            self.unextend_path(filename)[1], self.repopath
        )
Example #16
    def normalize_path_for_display(self, filename, extra_data=None, **kwargs):
        """Normalize a path from a diff for display to the user.

        This will strip away information about the branch, version, and
        repository path, returning an unextended path relative to the view.

        Args:
            filename (unicode):
                The filename/path to normalize.

            extra_data (dict, optional):
                Extra data stored for the diff this file corresponds to.
                This may be empty or ``None``. Subclasses should not assume the
                presence of anything here.

            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            unicode:
            The resulting filename/path.

        """
        return cpath.relpath(self.unextend_path(filename)[1], self.repopath)
Example #17
def gsimcli(stations_file, stations_header, no_data, stations_order,
            correct_method, detect_prob, detect_flag, detect_save, exe_path,
            par_file, outfolder, purge_sims, rad=0, correct_skew=None,
            correct_percentile=None, optional_stats=None, cores=None, dbgfile=None,
            print_status=False, skip_dss=False):
    """Main routine to run GSIMCLI homogenisation procedure in a set of
    stations.

    Parameters
    ----------
    stations_file : string or PointSet object
        Stations file path or PointSet instance.
    stations_header : boolean
        Stations file has the GSLIB standard header lines.
    no_data : number
        Missing data value.
    stations_order : array_like
        Stations' ID's in the order that they will be homogenised.
    correct_method : {'mean', 'median', 'skewness', 'percentile'} string,
        default 'mean'
        Method for the inhomogeneities correction:
            - mean: replace detected irregularities with the mean of simulated
                values;
            - median: replace detected irregularities with the median of
                simulated values;
            - skewness: use the sample skewness to decide whether detected
                irregularities will be replaced by the mean or by the median of
                simulated values.
            - percentile : replace detected irregularities with the percentile
                `100 * (1 - p)`, which is the same value used in the detection.
    detect_prob : float
        Probability value to build the detection interval centred in the local
        PDF.
    detect_flag : boolean
        DEPRECATED
    detect_save : boolean
        Save files generated during the procedure: intermediary PointSet
        files containing candidate and reference stations, homogenised and
        simulated values, and DSS parameters files.
    exe_path : string
        DSS binary file path.
    par_file : string or DssParam object
        DSS parameters file path or DssParam instance.
    outfolder : string
        Directory to save the results.
    purge_sims : boolean
        Remove all simulated maps in the end.
    rad : number, default 0
        Tolerance radius used to search for neighbour nodes, used to calculate
        the local pdf's.
    correct_skew : float, optional
        Samples skewness threshold, used if `correct_method == 'skewness'`.
    correct_percentile : float, optional
        p value used if correct_method == 'percentile'.
    optional_stats : 

    cores : int, optional
        Maximum number of cores to be used. If None, it will use all available
        cores.
    dbgfile : string, optional
        Debug output file path. Write DSS console output to a file.
    print_status : boolean, default False
        Print some messages with the procedure status while it is running.
    skip_dss : boolean, default False
        Do not run DSS. Choose if the simulated maps are already in place and
        only the homogenisation process is needed.

    Returns
    -------
    homogenised_file : string
        Homogenised data file path. The generated file name ends with
        *_homogenised_data.csv*.
    dnumber_list : list of int
        Number of detected breakpoints in each candidate station.
    fnumber_list : list of int
        Number of missing data that were interpolated in each candidate
        station.

    """
    global is_alive

    if not cores or cores > mp.cpu_count():
        cores = mp.cpu_count()
    if print_status:
        print 'GSIMCLI using {0} cores'.format(cores)

    # load data and prepare the iterative process
    if isinstance(stations_file, gr.PointSet):
        stations_pset = stations_file
    else:
        stations_pset = gr.PointSet()
        stations_pset.load(stations_file, nd=no_data, header=stations_header)

    if isinstance(par_file, pdss.DssParam):
        dsspar = par_file
    else:
        dsspar = pdss.DssParam()
        dsspar.load_old(par_file)  # TODO: old
    dnumber_list = list()
    fnumber_list = list()

    # workaround for Qt forcing backslash
    if os.name == "nt":
        exe_path = ntpath.abspath(exe_path)

    commonpath = os.path.commonprefix((outfolder, exe_path))
    # start iterative process
    for i in xrange(len(stations_order)):
        if not is_alive:
            raise SystemError("process aborted")
        if print_status:
            print ('Processing candidate {0} out of {1} with ID {2}.'.
                   format(i + 1, len(stations_order), stations_order[i]))
        print "STATUS: candidate {0}".format(stations_order[i])
        # manage stations
        candidate, references = hmg.take_candidate(stations_pset,
                                                   stations_order[i])
        # prepare and launch DSS
        basename = os.path.basename(outfolder)
        refname = basename + '_references_' + str(i) + '.prn'
        outname = basename + '_dss_map_st' + str(i) + '_sim.out'  # TODO: +1
        parname = basename + '_dss_par_st' + str(i) + '.par'
        candname = basename + '_candidate_' + str(i) + '.prn'
        reffile = os.path.join(outfolder, refname)
        outfile = os.path.join(outfolder, outname)
        reffile_nt = ntpath.relpath(os.path.join(outfolder, refname),
                                    commonpath)
        outfile_nt = ntpath.relpath(os.path.join(outfolder, outname),
                                    commonpath)
        # workaround for mp_exec, it needs one less directory in the tree
        reffile_nt = reffile_nt[reffile_nt.index('\\') + 1:]
        outfile_nt = outfile_nt[outfile_nt.index('\\') + 1:]

        parfile = os.path.join(outfolder, parname)
        references.save(psetfile=reffile, header=False)
        if detect_save:
            candfile = os.path.join(outfolder, candname)
            candidate.save(psetfile=candfile, header=True)
        if not skip_dss:
            dsspar.update(['datapath', 'output'], [reffile_nt, outfile_nt])
            dsspar.save_old(parfile)  # TODO: old
            oldpar = pdss.DssParam()
            oldpar.load_old(parfile)
            oldpar.nsim = 1
            purge_temp = False
            for sim in xrange(1, dsspar.nsim + 1, cores):
                if not is_alive:
                    raise SystemError("process aborted")
                if print_status:
                    print ('[{0}/{1}] Working on realization {2}'.
                           format(i + 1, len(stations_order), sim))
                print "STATUS: realization {0}".format(sim)
                if sim >= dsspar.nsim + 1 - cores:
                    purge_temp = True
                dss.mp_exec(dss_path=exe_path, par_path=oldpar, dbg=dbgfile,
                            output=outfile_nt, simnum=sim, cores=cores,
                            purge=purge_temp, totalsim=dsspar.nsim)

        # prepare detection
        intermediary_files = os.path.join(outfolder, basename + '_homogenised_'
                                          + str(i) + '.prn')
        dims = [dsspar.xx[0], dsspar.yy[0], dsspar.zz[0]]
        first_coord = [dsspar.xx[1], dsspar.yy[1], dsspar.zz[1]]
        cells_size = [dsspar.xx[2], dsspar.yy[2], dsspar.zz[2]]
        sim_maps = gr.GridFiles()
        sim_maps.load(outfile, dsspar.nsim, dims, first_coord, cells_size,
                      no_data, headerin=0)

        # detect and fix inhomogeneities
        if print_status:
            print 'Detecting inhomogeneities...'
        homogenisation = hmg.detect(grids=sim_maps, obs_file=candidate,
                                    method=correct_method, prob=detect_prob,
                                    flag=detect_flag, save=detect_save,
                                    outfile=intermediary_files, header=True,
                                    skewness=correct_skew, rad=rad,
                                    percentile=correct_percentile,
                                    optional_stats=optional_stats)
        homogenised, detected_number, filled_number = homogenisation
        if print_status:
            print 'Inhomogeneities detected: {0}'.format(detected_number)
        dnumber_list.append(detected_number)
        fnumber_list.append(filled_number)
        # prepare next iteration
        stations_pset = hmg.update_station(stations_pset, homogenised)
        if not detect_save:
            for fpath in [reffile, parfile]:  # , dsspar.transfile
                os.remove(fpath)
        if purge_sims:
            sim_maps.purge()
        else:
            sim_maps.dump()

    # save results
    if print_status:
        print 'Process completed.'
        print 'Detections: ', ', '.join(map(str, dnumber_list))
        print 'Missing data filled: ', ', '.join(map(str, fnumber_list))
        print 'Saving results...'
    homogenised_file = os.path.join(outfolder, basename +
                                    '_homogenised_data.csv')
    hmg.save_output(pset_file=stations_pset, outfile=homogenised_file,
                    fformat='gsimcli', header=True, save_stations=True)

    return homogenised_file, dnumber_list, fnumber_list
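The path gymnastics around commonprefix and ntpath.relpath (used to hand DSS paths that are one directory shorter) can be traced in isolation. A sketch with made-up folders:

import ntpath
import os.path

outfolder = "C:\\gsimcli\\run1"          # hypothetical
exe_path = "C:\\gsimcli\\bin\\dss.exe"   # hypothetical

# commonprefix() is character-wise, so it can end mid-component;
# here it happens to land on a separator:
commonpath = os.path.commonprefix((outfolder, exe_path))
print(commonpath)                         # C:\gsimcli\

reffile_nt = ntpath.relpath(ntpath.join(outfolder, "refs.prn"), commonpath)
print(reffile_nt)                         # run1\refs.prn
# drop the first directory, as done for mp_exec:
print(reffile_nt[reffile_nt.index("\\") + 1:])  # refs.prn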
Example #18
def relpath_for_windows_display(path, base):
    return ntpath.relpath(
        ntpath.join(*path.split(os.path.sep)),
        ntpath.join(*base.split(os.path.sep)),
    )
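A hypothetical call: the helper converts a native relative path into a backslash-separated one for display, and because ntpath.relpath normalizes separators itself, this prints the same thing on a POSIX or Windows host:

print(relpath_for_windows_display("src/pkg/mod.py", "src"))  # pkg\mod.py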
Example #19
    .format(humanfriendly.format_timespan(elapsed), len(duplicateRows)))

# I didn't expect this to be true a priori, but it appears to be true, and
# it saves us the trouble of checking consistency across multiple occurrences
# of an image.
assert len(duplicateRows) == 0

#%% Check for images that aren't included in the metadata file

# Enumerate all images
imageFullPaths = glob.glob(os.path.join(image_directory, '**/*.JPG'),
                           recursive=True)

for iImage, imagePath in enumerate(imageFullPaths):
    imageRelPath = ntpath.relpath(imagePath, image_directory)
    assert imageRelPath in filenamesToRows

print(
    'Finished checking {} images to make sure they\'re in the metadata'.format(
        len(imageFullPaths)))

#%% Create CCT dictionaries

# Also gets image sizes, so this takes ~6 minutes
#
# Implicitly checks images for overt corruptness, i.e. by not crashing.

images = []
annotations = []
categories = []
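Presumably ntpath.relpath is used here because the metadata stores Windows-style relative filenames; normalizing the enumerated paths the same way makes the membership check host-independent. A sketch with hypothetical values:

import ntpath

image_directory = "d:\\data\\images"                 # hypothetical
imagePath = "d:\\data\\images\\site1\\IMG_0001.JPG"  # hypothetical
print(ntpath.relpath(imagePath, image_directory))    # site1\IMG_0001.JPG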
Example #20
    def update_event(self, inp=-1):
        self.set_output_val(0, ntpath.relpath(self.input(0), self.input(1)))