Example #1
    def _write_quick_partition(
        self,
        start: int,
        end: int,
        template: str,
        name: str,
        indices: Sequence[IndexRow],
        partition: int,
    ) -> None:
        size = (end - start)

        ffindex_name = template.format(name=name,
                                       index=partition,
                                       ext="ffindex")

        ffdata_name = template.format(name=name, index=partition, ext="ffdata")

        index_dirname = psplit(ffindex_name)[0]
        makedirs(index_dirname, exist_ok=True)

        data_dirname = psplit(ffdata_name)[0]
        makedirs(data_dirname, exist_ok=True)

        partition_index = FFIndex(indices).bump_starts(by=(-1 * start))

        with open(ffindex_name, "wb") as handle:
            partition_index.write_to(handle)

        with open(ffdata_name, "wb") as handle:
            self.data.write_sized(start, size, handle)

        return
Example #2
def get_rundef(func_path):
    """ Create dictionary defining single functional run at path `func_path`

    Parameters
    ----------
    func_path : str
        path containing functional data
    """
    pth, fname = psplit(func_path)
    pth, task_run = psplit(pth)
    task_match = TASK_RE.match(task_run)
    if not task_match:
        raise ValueError('Did not expect this task_run value: ' + task_run)
    task, run = task_match.groups()
    return dict(filename=func_path, task_no=int(task), run_no=int(run))
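For illustration, a minimal standalone sketch of the same two-step psplit walk, using a hypothetical TASK_RE of the usual 'taskNNN_runNNN' form (the real pattern is defined elsewhere in the project):

import re
from os.path import split as psplit

TASK_RE = re.compile(r'task(\d+)_run(\d+)')  # hypothetical stand-in for the project's pattern

func_path = 'sub001/BOLD/task001_run002/bold.nii.gz'
pth, fname = psplit(func_path)    # ('sub001/BOLD/task001_run002', 'bold.nii.gz')
pth, task_run = psplit(pth)       # ('sub001/BOLD', 'task001_run002')
task, run = TASK_RE.match(task_run).groups()
print(dict(filename=func_path, task_no=int(task), run_no=int(run)))
# {'filename': 'sub001/BOLD/task001_run002/bold.nii.gz', 'task_no': 1, 'run_no': 2}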
Example #3
def test_space_time_realign():
    path, fname = psplit(funcfile)
    original_affine = load_image(funcfile).affine
    path, fname = psplit(funcfile)
    froot, _ = fname.split('.', 1)
    with InTemporaryDirectory():
        # Make another image with .nii extension and extra dot in filename
        save_image(load_image(funcfile), 'my.test.nii')
        for in_fname, out_fname in ((funcfile, froot + '_mc.nii.gz'),
                                    ('my.test.nii', 'my.test_mc.nii.gz')):
            xforms = reg.space_time_realign(in_fname, 2.0, out_name='.')
            assert_true(np.allclose(xforms[0].as_affine(), np.eye(4), atol=1e-7))
            assert_false(np.allclose(xforms[-1].as_affine(), np.eye(4), atol=1e-3))
            img = load_image(out_fname)
            npt.assert_almost_equal(original_affine, img.affine)
Example #4
    def __init__(self, filename: str):
        """ Initialize basic fileparser parameters

        Parameters
        ----------
        filename: str
            The filename
        """

        self.path = str(filename)
        if not isfile(self.path):
            for spath in search_path:
                path = ossep(pjoin(spath, str(filename)))
                if isfile(path):
                    self.path = path
                    break
                elif str(filename)[0] != '*':
                    s = ossep(pjoin(spath, "*" + str(filename)))
                    gl = glob(s)
                    if len(gl) == 1:
                        self.path = gl[0]
                        break

        self.filename = psplit(self.path)[-1]
        self.is_ok = False

        if not isfile(self.path):
            log.err("Can not open file %s", self.path)
        else:
            self.header = self.get_header()
Example #5
def write_warped(fname,
                 mapping,
                 interpolation='nearest',
                 template_header=None):
    """ Warp an image in individual space to template space

    Parameters
    ----------
    fname : str
        Filename of image to warp into template space
    mapping : mapping instance
        object containing mapping from individual space to template space
    interpolation : str, optional
        interpolation to use when resampling data from `fname`
    template_header : None or header instance
        template header with which to save image.  If None, use default header.
    """
    img = nib.load(fname)
    mapping = as_mapping(mapping)
    template_affine = mapping.codomain_grid2world
    data = img.get_data().astype(float)
    warped = mapping.transform(data, interpolation=interpolation)
    warped_img = nib.Nifti1Image(warped, template_affine, template_header)
    path, basename = psplit(fname)
    out_fname = pjoin(path, 'w_' + basename)
    nib.save(warped_img, out_fname)
Example #6
def diagnose(args):
    """ Calculate, write results from diagnostic screen

    Parameters
    ----------
    args : object
        object with attributes:

        * filename : str - 4D image filename
        * time_axis : str - name or number of time axis in `filename`
        * slice_axis : str - name or number of slice axis in `filename`
        * out_path : None or str - path to which to write results
        * out_fname_label : None or filename - suffix of output results files
        * ncomponents : int - number of PCA components to write images for

    Returns
    -------
    res : dict
        Results of running :func:`screen` on `filename`
    """
    img, time_axis, slice_axis = parse_fname_axes(args.filename,
                                                  args.time_axis,
                                                  args.slice_axis)
    res = screen(img, args.ncomponents, time_axis, slice_axis)
    froot, ext, addext = splitext_addext(args.filename)
    fpath, fbase = psplit(froot)
    fpath = fpath if args.out_path is None else args.out_path
    fbase = fbase if args.out_fname_label is None else args.out_fname_label
    write_screen_res(res, fpath, fbase, ext + addext)
    return res
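The output-path handling above reduces to stripping any compression suffix and then the directory; a small sketch, assuming splitext_addext comes from nibabel.filename_parser (the usual home of that helper):

from os.path import split as psplit
from nibabel.filename_parser import splitext_addext  # assumed source of the helper used above

fname = 'study/run1/func.nii.gz'
froot, ext, addext = splitext_addext(fname)   # ('study/run1/func', '.nii', '.gz')
fpath, fbase = psplit(froot)                  # ('study/run1', 'func')
# Results are then written under fpath (or --out-path) using names built from fbase + ext + addext.
print(fpath, fbase, ext + addext)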
Example #7
def generate_msg(pkg, files, out_dir, search_path):
    """
    Generate dart code for all messages in a package
    """
    # print('Generated packages {}'.format(generated_packages))

    msg_context = MsgContext.create_default()

    for f in files:
        f = os.path.abspath(f)
        infile = os.path.basename(f)
        full_type = genmsg.gentools.compute_full_type_name(pkg, infile)
        spec = genmsg.msg_loader.load_msg_from_file(msg_context, f, full_type)
        if spec.short_name == 'String':
            spec.short_name = 'StringMessage'

        generate_msg_from_spec(msg_context, spec, search_path, out_dir, pkg, f)
    indir = os.path.dirname(files[0])

    ########################################
    # 3. Write the package pubspec.yaml.dart file
    ########################################

    io = StringIO()
    s = IndentedWriter(io)
    write_pubspec(s, pkg, search_path, msg_context, indir)
    package_update = True
    pubspec = '{}/pubspec.yaml'.format(out_dir)
    mode = 'w+'
    if os.path.isfile(pubspec):
        mode = 'r+'
    with open(pubspec, mode) as f:
        if (f.read() == io.getvalue()
                and time.time() - os.path.getmtime(pubspec) < 5):
            # print('Pubspec identical')
            package_update = False

    if package_update:
        with open(pubspec, 'w+') as f:
            f.write(io.getvalue())
        import subprocess
        try:
            # print('running pub upgrade in {}'.format(out_dir))
            subprocess.check_output('which pub', shell=True)
            p = subprocess.Popen(['pub', 'upgrade'],
                                 cwd=out_dir,
                                 stdout=subprocess.PIPE)
            p.wait()
        except subprocess.CalledProcessError as e:
            pass

    io.close()
    (directory, pack) = psplit(out_dir)
    if len(search_path.keys()) == 0:
        return
    for package in search_path.keys():
        if package != pkg and package is not None:
            # new_search = deepcopy(search_path)
            # new_search.pop(package)
            generate_all_msgs_for_package(package, directory, search_path)
Example #8
def test_uninstall_multiple_paths(path):
    ds = Dataset(path).create(force=True)
    subds = ds.create('deep', force=True)
    subds.save(recursive=True)
    ok_clean_git(subds.path)
    # needs to be able to add a combination of staged files, modified submodule,
    # and untracked files
    ds.save(recursive=True)
    ok_clean_git(ds.path)
    # drop content of all 'kill' files
    topfile = 'kill'
    deepfile = opj('deep', 'dir', 'kill')
    # use a tuple not a list! should also work
    ds.drop((topfile, deepfile), check=False)
    ok_clean_git(ds.path)
    files_left = glob(opj(ds.path, '*', '*', '*')) + glob(opj(ds.path, '*'))
    ok_(all([f.endswith('keep') for f in files_left if exists(f) and not isdir(f)]))
    ok_(not ds.repo.file_has_content(topfile))
    ok_(not subds.repo.file_has_content(opj(*psplit(deepfile)[1:])))
    # remove handles for all 'kill' files
    ds.remove([topfile, deepfile], check=False)
    ok_clean_git(ds.path)
    files_left = glob(opj(ds.path, '*', '*', '*')) + glob(opj(ds.path, '*'))
    ok_(all([f.endswith('keep') for f in files_left if exists(f) and not isdir(f)]))
    ok_(not any([f.endswith(topfile) for f in files_left]))
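Note that psplit only peels off the final path component, so the `opj(*psplit(deepfile)[1:])` idiom above drops exactly one leading directory level; a quick illustration:

from os.path import join as opj, split as psplit

deepfile = opj('deep', 'dir', 'kill')
print(psplit(deepfile))             # ('deep/dir', 'kill') on POSIX -- only one split
print(opj(*psplit(deepfile)[1:]))   # 'kill'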
Example #9
def get_subject(subj_path):
    """ Create `subject` dictionary for subject at path `subj_path`

    Parameters
    ----------
    subj_path : str
        path containing subject data

    Returns
    -------
    subj_dict : dict
        dictionary containing information for this subject
    """
    subj_path = abspath(subj_path)
    subject = dict(anatomicals=[], functionals=[])
    # Get anatomicals
    anat_search = pjoin(subj_path, 'anatomy', 'highres*.nii.gz')
    for anat_path in sorted(glob(anat_search)):
        pth, fname = psplit(anat_path)
        if ANAT_RE.match(fname):
            subject['anatomicals'].append(anat_path)
    # Get functionals (as dicts)
    func_search = pjoin(subj_path, 'BOLD', '*', 'bold.nii.gz')
    for func_path in sorted(glob(func_search)):
        rundef = get_rundef(func_path)
        subject['functionals'].append(rundef)
    # Sort functionals by task_no, run_no
    subject['functionals'].sort(key=_run_key)
    # Compile list of tasks for convenience
    all_tasks = [f['task_no'] for f in subject['functionals']]
    unique_tasks = set(all_tasks)  # Retain only unique values
    subject['tasks'] = sorted(unique_tasks)
    return subject
Example #10
def path_is_under(values, path=None):
    """Whether a given path is a subdirectory of any of the given test values

    Parameters
    ----------
    values : sequence or dict
      Paths to be tested against. This can be a dictionary in which case
      all values from all keys will be tested against.
    path : path or None
      Test path. If None is given, the process' working directory is
      used.

    Returns
    -------
    bool
    """
    if path is None:
        from datalad.utils import getpwd
        path = getpwd()
    if isinstance(values, dict):
        values = chain(*values.values())
    for p in values:
        rpath = relpath(p, start=path)
        if rpath == curdir \
                or rpath == pardir \
                or set(psplit(rpath)) == {pardir}:
            # first match is enough
            return True
    return False
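For intuition, the core test above asks whether relpath(value, start=path) walks only upward; a standalone sketch with a hypothetical helper name:

from os import curdir, pardir
from os.path import relpath, split as psplit

def _under(value, path):
    # Same check as in path_is_under above: True when `path` lies at or below `value`.
    rpath = relpath(value, start=path)
    return rpath in (curdir, pardir) or set(psplit(rpath)) == {pardir}

print(_under('/data', '/data/project/sub1'))   # True: relpath is '../..'
print(_under('/data/other', '/data/project'))  # False: relpath is '../other'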
Example #11
def get_subject(subj_path):
    """ Create `subject` dictionary for subject at path `subj_path`

    Parameters
    ----------
    subj_path : str
        path containing subject data

    Returns
    -------
    subj_dict : dict
        dictionary containing information for this subject
    """
    subj_path = abspath(subj_path)
    subject = dict(anatomicals=[], functionals=[])
    # Get anatomicals
    anat_search = pjoin(subj_path, 'anatomy', 'highres*.nii.gz')
    for anat_path in sorted(glob(anat_search)):
        pth, fname = psplit(anat_path)
        if ANAT_RE.match(fname):
            subject['anatomicals'].append(anat_path)
    # Get functionals (as dicts)
    func_search = pjoin(subj_path, 'BOLD', '*', 'bold.nii.gz')
    for func_path in sorted(glob(func_search)):
        rundef = get_rundef(func_path)
        subject['functionals'].append(rundef)
    # Sort functionals by task_no, run_no
    subject['functionals'].sort(key=_run_key)
    # Compile list of tasks for convenience
    all_tasks = [f['task_no'] for f in subject['functionals']]
    unique_tasks = set(all_tasks) # Retain only unique values
    subject['tasks'] = sorted(unique_tasks)
    return subject
Example #12
def main():
    parser = OptionParser(usage=
                          'usage: %prog [options] <package_name> <mpkg_root>')
    parser.add_option("--component-directory",
                      action = 'store',
                      default = COMPONENT_DIRECTORY,
                      dest="comp_dir",
                      help="Subdirectory containing package directories; "
                      "defaults to " + COMPONENT_DIRECTORY)
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        sys.exit(1)
    pkg_name, wd = args
    wd = abspath(wd)
    package_names = glob(pjoin(wd, COMPONENT_DIRECTORY, '*.pkg'))
    package_names = [psplit(pn)[1] for pn in package_names]
    n_pkgs = len(package_names)
    extra_plist = dict(
            IFRequirementDicts=[python_requirement(pkg_name)],
            IFPkgFlagComponentDirectory=tools.unicode_path(
                './' + COMPONENT_DIRECTORY))
    plist = mpkg_info(pkg_name, '1.7',
                      zip(package_names, ('selected',) * n_pkgs))
    plist.update(extra_plist)
    write(plist, pjoin(wd, 'Contents', 'Info.plist'))
Example #13
def diagnose(args):
    """ Calculate, write results from diagnostic screen

    Parameters
    ----------
    args : object
        object with attributes:

        * filename : str - 4D image filename
        * time_axis : str - name or number of time axis in `filename`
        * slice_axis : str - name or number of slice axis in `filename`
        * out_path : None or str - path to which to write results
        * out_fname_label : None or filename - suffix of output results files
        * ncomponents : int - number of PCA components to write images for

    Returns
    -------
    res : dict
        Results of running :func:`screen` on `filename`
    """
    img, time_axis, slice_axis = parse_fname_axes(args.filename,
                                                  args.time_axis,
                                                  args.slice_axis)
    res = screen(img, args.ncomponents, time_axis, slice_axis)
    froot, ext, addext = splitext_addext(args.filename)
    fpath, fbase = psplit(froot)
    fpath = fpath if args.out_path is None else args.out_path
    fbase = fbase if args.out_fname_label is None else args.out_fname_label
    write_screen_res(res, fpath, fbase, ext + addext)
    return res
Example #14
def save_result(input_path, output_path, annotations, file):
    # Paths
    images_path = "{}_images.csv".format(output_path)
    ann_paths = "{}_ann.csv".format(output_path)

    # Get image HxW
    img = cv2.imread(input_path)
    dimensions = img.shape

    # Get an ID
    a_id = int(str(time.time()).replace(".", ""))

    ref, _ = psplit(file)

    with open(images_path, 'a') as f:
        wr = csv.writer(f)

        wr.writerow((dimensions[1], dimensions[0], "{}.jpg".format(
            ref), "http://{}.jpg".format(ref), ref))

    with open(ann_paths, 'a') as f:
        wr = csv.writer(f)

        # Add annotation
        for annotation in annotations:
            wr.writerow((0, ref, annotation[:4].tolist(), int(
                annotation[4:-1].argmax()), a_id, int(annotation[4:-1].max())))
Example #15
def _describe_file(fpath, bids_directory):
    fname = psplit(fpath)[-1]
    fname_components = fname.split(".")[0].split('_')
    info = {
        'Sample Name': fname_components[0][4:],
        # assay name is the entire filename except for the modality suffix
        # so that, e.g. simultaneous recordings match wrt to the assay name
        # across assay tables
        'Assay Name': '_'.join(fname_components[:-1]),
        'Raw Data File': fpath[len(bids_directory):],
        'Parameter Value[modality]': fname_components[-1]
    }
    comp_dict = dict([c.split('-') for c in fname_components[:-1]])
    for l in ('rec', 'recording'):
        if l in comp_dict:
            info['Parameter Value[recording label]'] = comp_dict[l]
    for l in ('acq', 'acquisition'):
        if l in comp_dict:
            info['Parameter Value[acquisition label]'] = comp_dict[l]
    if 'task' in comp_dict:
        info['Factor Value[task]'] = comp_dict['task']
    info['other_fields'] = get_bids_metadata(
        bids_directory,
        '_'.join(fname_components)
    )
    return info
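A hedged illustration of the filename decomposition above, using a made-up BIDS-style name (get_bids_metadata is omitted here since it needs a dataset on disk):

from os.path import split as psplit

fpath = '/data/bids/sub-01/func/sub-01_task-rest_acq-fast_bold.nii.gz'
fname = psplit(fpath)[-1]                  # 'sub-01_task-rest_acq-fast_bold.nii.gz'
fname_components = fname.split(".")[0].split('_')
print(fname_components)                    # ['sub-01', 'task-rest', 'acq-fast', 'bold']
print(fname_components[0][4:])             # '01'                          -> Sample Name
print('_'.join(fname_components[:-1]))     # 'sub-01_task-rest_acq-fast'   -> Assay Name
print(fname_components[-1])                # 'bold'                        -> modality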
Example #16
def path_is_under(values, path=None):
    """Whether a given path is a subdirectory of any of the given test values

    Parameters
    ----------
    values : sequence or dict
      Paths to be tested against. This can be a dictionary in which case
      all values from all keys will be tested against.
    path : path or None
      Test path. If None is given, the process' working directory is
      used.

    Returns
    -------
    bool
    """
    if path is None:
        from datalad.utils import getpwd
        path = getpwd()
    if isinstance(values, dict):
        values = chain(*values.values())
    for p in values:
        rpath = relpath(p, start=path)
        if rpath == curdir \
                or rpath == pardir \
                or set(psplit(rpath)) == {pardir}:
            # first match is enough
            return True
    return False
Example #17
def make_prundle(uri, pkg_name=None, meta=None):
    """ Create prundle from address `uri`

    Parameters
    ----------
    uri : str
        address of data from which to make prundle.
    pkg_name : None or str, optional
        name of package within prundle.  We'll read the prundle to get a name;
        if the read name conflicts with `pkg_name` we raise an error.
    meta : None or dict, optional
        metadata for package instantiation.  We read the prundle for metadata
        too.  `meta` should match the read metadata.

    Returns
    -------
    prd : prundle instance

    Examples
    --------
    >>> import os
    >>> from dang.testing import DANG_DATA_PATH
    >>> fsp_path = os.path.join(DANG_DATA_PATH, 'eg-pkg')
    >>> fsprd = make_prundle(fsp_path)

    The metadata comes from the read package:

    >>> fsprd.pinstant.pkg_name
    'example-package'
    >>> fsprd.base_path == fsp_path
    True

    If the read metadata conflicts with the passed metadata, generate an error:

    >>> fsprd = make_prundle(fsp_path, pkg_name='another_name')
    Traceback (most recent call last):
        ...
    PrundleError: Read package name "example-package" differs from passed package name "another_name"

    A read or passed package name is necessary:

    >>> fsp_path = os.path.join(DANG_DATA_PATH, 'no-meta')
    >>> fsprd = make_prundle(fsp_path)
    Traceback (most recent call last):
        ...
    PrundleError: No read or passed package name
    """
    uri_match = URI_REG.match(uri)
    if uri_match is not None:
        return UrlPathPrundle.from_path(uri, pkg_name, meta)
    # Assume it's a filename
    pth, ext = psplit(uri)
    if ext in ('tar', '.tar', '.tgz', '.bz2'):
        raise PrundleError("Can't deal with this right now")
    if uri.endswith('.zip'):
        return ZipPrundle.from_path(uri, pkg_name, meta)
    if not isdir(uri):
        raise PrundleError('I thought you were going to give me a directory')
    return PathPrundle.from_path(uri, pkg_name, meta)
Example #18
    def __init__(self, name, filename, build_cmd,
                 depends=(),
                 after=(),
                 patcher=None,
                 unpacked_sdir=None,
                 build_src_sdir='src',
                ):
        """ Initialize object for creating unpack, patch, build tasks

        Unpacking assumed to have no dependencies.

        Patching assumed to depend only on the unpacking.

        Build depends on packing / patching and on given dependencies

        Parameters
        ----------
        name : str
            package name
        filename : str
            filename containing source archive to unpack
        build_cmd : str or callable
            command to build after extracting
        depends : str or sequence, optional
            depends for build
        after : str or sequence, optional
            names to set build to follow after (task name depends)
        patcher : None or str or callable, optional
            If str, a file containing a ``-p1`` patch for the sources.  If
            callable, then a rule to apply for patching. If None, don't patch
        unpacked_sdir : str or None, optional
            directory created by unpacking `filename`.  If None we guess from
            `filename`
        build_src_sdir : str, optional
            subdirectory in build directory into which to unpack
        """
        self.name = name
        self.filename = filename
        self.build_cmd = build_cmd
        _, fname = psplit(filename)
        if fname.endswith('.tar.gz'):
            self.unpack_cmd = 'tar zxf'
            fname = fname[:-7]
        elif fname.endswith('.tar.bz2'):
            self.unpack_cmd = 'tar jxf'
            fname = fname[:-8]
        elif fname.endswith('.zip'):
            self.unpack_cmd = 'unzip'
            fname = fname[:-4]
        else:
            raise ValueError("Can't work out type of archive " + fname)
        self.patcher = patcher
        if unpacked_sdir is None: # Guess at output subdirectory
            unpacked_sdir = fname
        self.unpacked_sdir = unpacked_sdir
        self.depends = seq_to_list(depends)
        self.after = seq_to_list(after)
        self.build_src_sdir = build_src_sdir
        self._register_instance()
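A small standalone sketch of the archive-suffix handling above, with hypothetical filenames:

from os.path import split as psplit

for filename in ('downloads/foo-1.2.tar.gz', 'downloads/bar-0.9.zip'):
    _, fname = psplit(filename)
    if fname.endswith('.tar.gz'):
        unpack_cmd, sdir = 'tar zxf', fname[:-7]
    elif fname.endswith('.tar.bz2'):
        unpack_cmd, sdir = 'tar jxf', fname[:-8]
    elif fname.endswith('.zip'):
        unpack_cmd, sdir = 'unzip', fname[:-4]
    print(unpack_cmd, fname, '->', sdir)
# tar zxf foo-1.2.tar.gz -> foo-1.2
# unzip bar-0.9.zip -> bar-0.9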
Example #19
def runner(args: argparse.Namespace) -> None:
    checksums: Dict[str, str] = dict()
    id_conv = IdConverter(prefix=args.prefix, length=args.length)

    i = 0
    j = 1
    seq_chunk = list()
    tab_chunk = list()
    for infile in args.infiles:

        seqs = SeqIO.parse(infile, "fasta")
        for seq in seqs:
            fixed_seq = (
                str(seq.seq)
                .replace("-", "")
                .rstrip("*")
                .upper()
                .replace("*", "X")
                .replace("J", "X")
                .replace("B", "X")
                .replace("Z", "X")
                .replace("U", "X")
                .replace("O", "X"))

            if INVALID_CHARS.match(fixed_seq) is not None:
                raise ValueError(
                    f"The sequence {seq.id} contains invalid characters.")

            seq.seq = Seq(fixed_seq)

            id_, checksum = get_checksum(seq)
            if checksum in checksums:
                encoded = checksums[checksum]
                new_seq = False
            else:
                encoded = id_conv.encode(i)
                checksums[checksum] = encoded
                i += 1
                new_seq = True

            line = TableLine(encoded, psplit(infile)[1], id_, checksum)
            tab_chunk.append(format_table_line(line))

            if new_seq:
                seq.id = encoded
                seq.name = encoded
                seq.description = encoded

                seq_chunk.append(seq.format("fasta").strip())

            if j % 10000 == 0:
                args.outfasta.write('\n'.join(seq_chunk) + '\n')
                args.outmap.write('\n'.join(tab_chunk) + '\n')
                seq_chunk = list()
                tab_chunk = list()

            j += 1

    if len(seq_chunk) > 0:
        args.outfasta.write('\n'.join(seq_chunk) + '\n')

    if len(tab_chunk) > 0:
        args.outmap.write('\n'.join(tab_chunk) + '\n')

    print(len(checksums))
    return
Example #20
def make_prundle(uri, pkg_name=None, meta=None):
    """ Create prundle from address `uri`

    Parameters
    ----------
    uri : str
        address of data from which to make prundle.
    pkg_name : None or str, optional
        name of package within prundle.  We'll read the prundle to get a name;
        if the read name conflicts with `pkg_name` we raise an error.
    meta : None or dict, optional
        metadata for package instantiation.  We read the prundle for metadata
        too.  `meta` should match the read metadata.

    Returns
    -------
    prd : prundle instance

    Examples
    --------
    >>> import os
    >>> from dang.testing import DANG_DATA_PATH
    >>> fsp_path = os.path.join(DANG_DATA_PATH, 'eg-pkg')
    >>> fsprd = make_prundle(fsp_path)

    The metadata comes from the read package:

    >>> fsprd.pinstant.pkg_name
    'example-package'
    >>> fsprd.base_path == fsp_path
    True

    If the read metadata conflicts with the passed metadata, generate an error:

    >>> fsprd = make_prundle(fsp_path, pkg_name='another_name')
    Traceback (most recent call last):
        ...
    PrundleError: Read package name "example-package" differs from passed package name "another_name"

    A read or passed package name is necessary:

    >>> fsp_path = os.path.join(DANG_DATA_PATH, 'no-meta')
    >>> fsprd = make_prundle(fsp_path)
    Traceback (most recent call last):
        ...
    PrundleError: No read or passed package name
    """
    uri_match = URI_REG.match(uri)
    if uri_match is not None:
        return UrlPathPrundle.from_path(uri, pkg_name, meta)
    # Assume it's a filename
    pth, ext = psplit(uri)
    if ext in ("tar", ".tar", ".tgz", ".bz2"):
        raise PrundleError("Can't deal with this right now")
    if uri.endswith(".zip"):
        return ZipPrundle.from_path(uri, pkg_name, meta)
    if not isdir(uri):
        raise PrundleError("I thought you were going to give me a directory")
    return PathPrundle.from_path(uri, pkg_name, meta)
Example #21
def get_bids_metadata(bids_root, basepath):
    """Query the BIDS meta data JSON file hierarchy

    Parameters
    ----------
    bids_root : path
      Path to the root of the BIDS dataset
    basepath : path
      Relative path to the file (filename without extension, e.g. no '.nii.gz')
      for which meta data shall be queried.
    """
    sidecar_json = '{}.json'.format(basepath)

    path_components = psplit(sidecar_json)
    filename_components = path_components[-1].split("_")
    session_level_componentList = []
    subject_level_componentList = []
    top_level_componentList = []
    ses = None
    sub = None

    for filename_component in filename_components:
        if filename_component[:3] != "run":
            session_level_componentList.append(filename_component)
            if filename_component[:3] == "ses":
                ses = filename_component
            else:
                subject_level_componentList.append(filename_component)
                if filename_component[:3] == "sub":
                    sub = filename_component
                else:
                    top_level_componentList.append(filename_component)

    # the top-level should have at least two components, e.g. task and modality
    # but could also have more, e.g. task, recording and modality
    # query sidecars for each single-component plus modality
    potential_jsons = []
    for comp in top_level_componentList[:-1]:
        potential_jsons.append(
            opj(bids_root, "_".join([comp, top_level_componentList[-1]])))
    # and one for all components combined
    potential_jsons.append(opj(bids_root, "_".join(top_level_componentList)))

    subject_level_json = opj(bids_root, sub, "_".join(subject_level_componentList))
    potential_jsons.append(subject_level_json)

    if ses:
        session_level_json = opj(bids_root, sub, ses, "_".join(session_level_componentList))
        potential_jsons.append(session_level_json)

    potential_jsons.append(sidecar_json)

    merged_param_dict = {}
    for json_file_path in potential_jsons:
        if exists(json_file_path):
            param_dict = json.load(open(json_file_path, "r"))
            merged_param_dict.update(param_dict)

    return merged_param_dict
Example #22
    def __init__(self, fn, tpl):
        """Split file contents into sections."""
        self.commands = []
        self.name     = first( splitext( psplit(fn)[1] ) )
        self.tpl      = tpl

        with open(fn, encoding="utf-8") as fp:
            self.sections = re.split(splitpat, fp.read(), flags=re.VERBOSE)
Example #23
def get_rundef(func_path):
    """ Create dictionary defining single functional run at path `func_path`

    Parameters
    ----------
    func_path : str
        path containing functional data
    """
    pth, fname = psplit(func_path)
    pth, task_run = psplit(pth)
    task_match = TASK_RE.match(task_run)
    if not task_match:
        raise ValueError('Did not expect this task_run value: ' + task_run)
    task, run = task_match.groups()
    return dict(filename=func_path,
                task_no=int(task),
                run_no=int(run))
Example #24
def test_space_time_realign():
    path, fname = psplit(funcfile)
    original_affine = load_image(funcfile).affine
    path, fname = psplit(funcfile)
    froot, _ = fname.split('.', 1)
    with InTemporaryDirectory():
        # Make another image with .nii extension and extra dot in filename
        save_image(load_image(funcfile), 'my.test.nii')
        for in_fname, out_fname in ((funcfile, froot + '_mc.nii.gz'),
                                    ('my.test.nii', 'my.test_mc.nii.gz')):
            xforms = reg.space_time_realign(in_fname, 2.0, out_name='.')
            assert_true(
                np.allclose(xforms[0].as_affine(), np.eye(4), atol=1e-7))
            assert_false(
                np.allclose(xforms[-1].as_affine(), np.eye(4), atol=1e-3))
            img = load_image(out_fname)
            npt.assert_almost_equal(original_affine, img.affine)
Example #25
def posix_relpath(path, start=None):
    """Behave like os.path.relpath, but always return POSIX paths...

    on any platform."""
    # join POSIX style
    return posixpath.join(
        # split and relpath native style
        # python2.7 ntpath implementation of relpath cannot handle start=None
        *psplit(relpath(path, start=start if start is not None else '')))
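A quick POSIX illustration of the relpath/psplit/posixpath.join pipeline used above:

import posixpath
from os.path import relpath, split as psplit

parts = psplit(relpath('/data/project/file.txt', start='/data'))  # ('project', 'file.txt')
print(posixpath.join(*parts))                                     # 'project/file.txt'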
Example #26
def imsave(filename, img):
    """Save a |SpatialImage| to filename.

    .. note: `img` **must** be a |SpatialImage|.

    The file writer is chosen according to the file extension. However, not all
    file extensions will match the data held by img, in dimensionality or
    encoding, and an `IOError` may be raised.

    For real volume data, Inrimage and NPY are currently supported.
    For |SpatialImage|s that are actually 2D, PNG, BMP, JPG among others are supported if PIL is installed.

    :Parameters:
     - `filename` (str)
     - `img` (|SpatialImage|)
    """

    assert isinstance(img, SpatialImage)
    # -- images are always at least 3D! If the size of dimension 3 (indexed 2) is 1, then it is actually
    # a 2D image. If it is 4D it has vectorial or RGB[A] data. --
    filename = expusr(filename)
    head, tail = psplit(filename)
    head = head or "."
    if not exists(head):
        raise IOError("The directory do not exist: %s" % head)

    root, ext = splitext(filename)

    is2D = img.shape[2] == 1
    ext = ext.lower()
    if ext == ".gz":
        root, ext = splitext(root)
        ext = ext.lower()
    if ext == ".inr":
        write_inrimage(filename, img)
    elif ext in [".npz", ".npy"]:
        save(filename, img)
    elif ext in [".tiff", ".tif"]:
        write_tif(filename, img)
    else:
        if not is2D:
            raise IOError("No writer found for format of 3D image %s"%filename)
        else:
            # -- fallback on Pylab.
            # WARNING: Careful, this can fail in many ways still!
            # For example, many formats wont support writing scalar floats, or
            # vector floats, or encodings different from uchar8 --

            #WARNING 2: Still this damn transposition thing that may appear.
            #the problem is that what we write doesn't look like what is shown
            #with "display()". display() is broken, not the write functions.
            if len(img.shape) == 4: # RGB[A] images
                _imsave(filename,img[:,:,0,:])
            elif len(img.shape) == 3: #scalar images
                _imsave(filename, img[:,:,0])
            else:
                raise IOError("Unhandled image shape %s"%str(img.shape))
Example #27
def kerrfilt(dirpath):
    ignoredirs = ['Analysis']
    if psplit(dirpath)[-1] in ignoredirs:
        return False
    if fnmatch.fnmatch(dirpath, r'*\Analysis\*'):
        return False
    if skipdone and os.path.isfile(pjoin(dirpath, 'Analysis', 'ROI.txt')):
        return False
    if len(kerrims(dirpath)) > 0:
        return True
Example #28
def tsdiffana(args):
    """ Generate tsdiffana plots from command line params `args`

    Parameters
    ----------
    args : object
        object with attributes

        * filename : str - 4D image filename
        * out_file : str - graphics file to write to instead of leaving
          graphics on screen
        * time_axis : str - name or number of time axis in `filename`
        * slice_axis : str - name or number of slice axis in `filename`
        * write_results : bool - if True, write images and plots to files
        * out_path : None or str - path to which to write results
        * out_fname_label : None or filename - suffix of output results files

    Returns
    -------
    axes : Matplotlib axes
       Axes on which we have done the plots.
    """
    if args.out_file is not None and args.write_results:
        raise ValueError("Cannot have OUT_FILE and WRITE_RESULTS options "
                         "together")
    img, time_axis, slice_axis = parse_fname_axes(args.filename,
                                                  args.time_axis,
                                                  args.slice_axis)
    results = time_slice_diffs_image(img, time_axis, slice_axis)
    axes = plot_tsdiffs(results)
    if args.out_file is None and not args.write_results:
        # interactive mode
        return axes
    if args.out_file is not None:
        # plot only mode
        axes[0].figure.savefig(args.out_file)
        return axes
    # plot and images mode
    froot, ext, addext = splitext_addext(args.filename)
    fpath, fbase = psplit(froot)
    fpath = fpath if args.out_path is None else args.out_path
    fbase = fbase if args.out_fname_label is None else args.out_fname_label
    axes[0].figure.savefig(pjoin(fpath, 'tsdiff_' + fbase + '.png'))
    # Save image volumes
    for key, prefix in (('slice_diff2_max_vol', 'dv2_max_'),
                        ('diff2_mean_vol', 'dv2_mean_')):
        fname = pjoin(fpath, prefix + fbase + ext + addext)
        nipy.save_image(results[key], fname)
    # Save time courses into npz
    np.savez(
        pjoin(fpath, 'tsdiff_' + fbase + '.npz'),
        volume_means=results['volume_means'],
        slice_mean_diff2=results['slice_mean_diff2'],
    )
    return axes
Example #29
def test_bibs():
    for bibfile in BIBS:
        _, name = psplit(bibfile)
        name, _ = splitext(name)
        txt = open(bibfile, 'rt').read()
        res = parser.parse(txt)
        if name in _exp_res:
            assert_equal(res, _exp_res[name])
        elif DEBUG:
            print(name)
            print(res.entries)
Example #30
def tsdiffana(args):
    """ Generate tsdiffana plots from command line params `args`

    Parameters
    ----------
    args : object
        object with attributes

        * filename : str - 4D image filename
        * out_file : str - graphics file to write to instead of leaving
          graphics on screen
        * time_axis : str - name or number of time axis in `filename`
        * slice_axis : str - name or number of slice axis in `filename`
        * write_results : bool - if True, write images and plots to files
        * out_path : None or str - path to which to write results
        * out_fname_label : None or filename - suffix of output results files

    Returns
    -------
    axes : Matplotlib axes
       Axes on which we have done the plots.
    """
    if args.out_file is not None and args.write_results:
        raise ValueError("Cannot have OUT_FILE and WRITE_RESULTS options "
                         "together")
    img, time_axis, slice_axis = parse_fname_axes(args.filename,
                                                  args.time_axis,
                                                  args.slice_axis)
    results = time_slice_diffs_image(img, time_axis, slice_axis)
    axes = plot_tsdiffs(results)
    if args.out_file is None and not args.write_results:
        # interactive mode
        return axes
    if args.out_file is not None:
        # plot only mode
        axes[0].figure.savefig(args.out_file)
        return axes
    # plot and images mode
    froot, ext, addext = splitext_addext(args.filename)
    fpath, fbase = psplit(froot)
    fpath = fpath if args.out_path is None else args.out_path
    fbase = fbase if args.out_fname_label is None else args.out_fname_label
    axes[0].figure.savefig(pjoin(fpath, 'tsdiff_' + fbase + '.png'))
    # Save image volumes
    for key, prefix in (('slice_diff2_max_vol', 'dv2_max_'),
                        ('diff2_mean_vol', 'dv2_mean_')):
        fname = pjoin(fpath, prefix + fbase + ext + addext)
        nipy.save_image(results[key], fname)
    # Save time courses into npz
    np.savez(pjoin(fpath, 'tsdiff_' + fbase + '.npz'),
             volume_means=results['volume_means'],
             slice_mean_diff2=results['slice_mean_diff2'],
            )
    return axes
Example #31
def package_config():
    """Use pkg-config to get library build parameters and tesseract version."""
    p = subprocess.Popen(
        [
            'pkg-config',
            '--exists',
            '--atleast-version={}'.format(_TESSERACT_MIN_VERSION),
            '--print-errors',
            'tesseract',
        ],
        stderr=subprocess.PIPE,
    )
    _, error = p.communicate()
    if p.returncode != 0:
        if isinstance(error, bytes):
            error = error.decode()

        raise Exception(error)

    p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'tesseract'],
                         stdout=subprocess.PIPE)
    output, _ = p.communicate()
    flags = _read_string(output).strip().split()
    p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'lept'],
                         stdout=subprocess.PIPE)
    output, _ = p.communicate()
    flags2 = _read_string(output).strip().split()
    options = {'-L': 'library_dirs', '-I': 'include_dirs', '-l': 'libraries'}
    config = {'library_dirs': [], 'include_dirs': [], 'libraries': []}

    for f in itertools.chain(flags, flags2):
        try:
            opt = options[f[:2]]
        except KeyError:
            continue
        val = f[2:]
        if opt == 'include_dirs' and psplit(val)[1].strip(os.sep) in (
                'leptonica',
                'tesseract',
        ):
            val = dirname(val)
        config[opt] += [val]

    p = subprocess.Popen(['pkg-config', '--modversion', 'tesseract'],
                         stdout=subprocess.PIPE)
    version, _ = p.communicate()
    version = _read_string(version).strip()
    _LOGGER.info('Supporting tesseract v%s', version)
    config['compile_time_env'] = {
        'TESSERACT_MAJOR_VERSION': major_version(version),
        'TESSERACT_VERSION': version_to_int(version)
    }
    _LOGGER.info('Configs from pkg-config: %s', config)
    return config
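The include-dir trimming above relies on psplit to look only at the last path component; a quick illustration with a hypothetical -I value:

import os
from os.path import dirname, split as psplit

val = '/usr/include/tesseract'                 # hypothetical -I value from pkg-config
print(psplit(val)[1].strip(os.sep))            # 'tesseract'
if psplit(val)[1].strip(os.sep) in ('leptonica', 'tesseract'):
    val = dirname(val)
print(val)                                     # '/usr/include'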
Example #32
    def partition(self,
                  name: str,
                  order: Optional[Sequence[IndexRow]] = None,
                  template: str = "{name}_{index}.{ext}",
                  n: int = 10000) -> int:
        """ Chunk a database into partitions of size n """
        from math import ceil

        if order is None:
            indices: List[IndexRow] = sorted(self.index.index,
                                             key=lambda i: i.size,
                                             reverse=True)
        else:
            assert len(order) == len(self.index)
            indices = list(order)

        nchunks = ceil(len(self.index) / n)
        for i in range(nchunks):
            chunk = indices[i::nchunks]

            ffindex_name = template.format(name=name,
                                           index=i + 1,
                                           ext="ffindex")

            ffdata_name = template.format(name=name, index=i + 1, ext="ffdata")

            index_dirname = psplit(ffindex_name)[0]
            makedirs(index_dirname, exist_ok=True)

            data_dirname = psplit(ffdata_name)[0]
            makedirs(data_dirname, exist_ok=True)

            with open(ffindex_name, "wb") as index_handle, \
                    open(ffdata_name, "wb") as data_handle:

                chunkdb = FFDB.reorder_from(self,
                                            data_handle=data_handle,
                                            order=chunk)
                chunkdb.index.write_to(index_handle)

        return nchunks
Example #33
def main():
    build_tag = sys.argv[1]
    for wheel_fname in sys.argv[2:]:
        path, fname = psplit(wheel_fname)
        wf = WheelFile(fname)
        parsed = wf.parsed_filename.groupdict()
        parsed['build'] = build_tag
        out_fname = '{name}-{ver}-{build}-{pyver}-{abi}-{plat}.whl'.format(
            **parsed)
        out_path = pjoin(path, out_fname)
        print('Copying {} to {}'.format(wheel_fname, out_path))
        copyfile(wheel_fname, out_path)
Example #34
def newest_installed_binary():
    """
    Returns the version number of the newest installed server.
    """
    glob_result = glob(join(FOLDERS[0], "*"))
    if glob_result:
        # Find newest
        glob_result = [psplit(i)[1] for i in glob_result]
        glob_result = [[int(j) for j in i.split(".")] for i in glob_result]
        glob_result.sort(reverse=True)
        return ".".join(str(i) for i in glob_result[0])
    return None
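The version comparison above works because the dotted strings are converted to lists of ints before sorting; a quick check of that ordering (with the descending sort, index 0 is the newest):

versions = ['1.9.4', '1.10.0', '1.2.1']
as_tuples = [[int(j) for j in v.split('.')] for v in versions]
as_tuples.sort(reverse=True)
print(".".join(str(i) for i in as_tuples[0]))   # '1.10.0', not '1.9.4'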
Example #35
def _get_investigation_template(bids_directory, mri_par_names):
    this_path = os.path.realpath(
        __file__[:-1] if __file__.endswith('.pyc') else __file__)
    template_path = opj(*(psplit(this_path)[:-1] +
                          ("i_investigation_template.txt", )))
    investigation_template = open(template_path).read()

    title = psplit(bids_directory)[-1]

    if exists(opj(bids_directory, "dataset_description.json")):
        with open(opj(bids_directory, "dataset_description.json"), "r") \
                as description_dict_fp:
            description_dict = json.load(description_dict_fp)
            if "Name" in description_dict:
                title = description_dict["Name"]

    investigation_template = investigation_template.replace(
        "[TODO: TITLE]", title)
    investigation_template = investigation_template.replace(
        "[TODO: MRI_PAR_NAMES]", ";".join(mri_par_names))
    return investigation_template
Example #36
def _get_investigation_template(bids_directory, mri_par_names):
    this_path = os.path.realpath(
        __file__[:-1] if __file__.endswith('.pyc') else __file__)
    template_path = opj(
        *(psplit(this_path)[:-1] + ("i_investigation_template.txt", )))
    investigation_template = open(template_path).read()

    title = psplit(bids_directory)[-1]

    if exists(opj(bids_directory, "dataset_description.json")):
        with open(opj(bids_directory, "dataset_description.json"), "r") \
                as description_dict_fp:
            description_dict = json.load(description_dict_fp)
            if "Name" in description_dict:
                title = description_dict["Name"]

    investigation_template = investigation_template.replace(
        "[TODO: TITLE]", title)
    investigation_template = investigation_template.replace(
        "[TODO: MRI_PAR_NAMES]", ";".join(mri_par_names))
    return investigation_template
Example #37
def main(infile, prefix, num_records, no_write=False, verbose=False):
    num_records = int(num_records)
    d = os.listdir(psplit(prefix)[0])
    d = [f for f in d if f.startswith(psplit(prefix)[1])]
    d = [int(splitext(f)[0].split("-")[-1]) for f in d if f.endswith(".fasta")]
    if len(d) > 0:
        chunk_num = max(d) + 1
    else:
        chunk_num = 1

    filepaths = list()
    with inhandler(infile, mode="rU") as inhandle:
        record_num = 0
        lines = list()
        for line in inhandle:
            line = line.strip()
            if line == "":
                continue
            if start_regex.match(line) is not None:
                if record_num >= num_records:
                    fp = prefix + "-" + str(chunk_num) + ".fasta"
                    if not no_write:
                        with open(fp, "w") as handle:
                            handle.write("\n".join(lines))
                    filepaths.append(fp)
                    chunk_num += 1
                    record_num = 0
                    lines = list()
                record_num += 1
            lines.append(line)
        fp = prefix + "-" + str(chunk_num) + ".fasta"
        if not no_write:
            with open(fp, "w") as handle:
                handle.write("\n".join(lines))
        filepaths.append(fp)

    if verbose:
        print(" ".join(filepaths))

    return
Example #38
def fullsplit(path, result=None):
    """
    Split a pathname into components (the opposite of os.path.join) in a
    platform-neutral way.
    """
    if result is None:
        result = []
    head, tail = psplit(path)
    if head == '':
        return [tail] + result
    if head == path:
        return result
    return fullsplit(head, [tail] + result)
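fullsplit recurses on psplit until the head stops changing; for example:

from os.path import split as psplit

def fullsplit(path, result=None):
    # identical to the function above, repeated here so the example is self-contained
    if result is None:
        result = []
    head, tail = psplit(path)
    if head == '':
        return [tail] + result
    if head == path:
        return result
    return fullsplit(head, [tail] + result)

print(fullsplit('django/contrib/admin'))   # ['django', 'contrib', 'admin']
print(fullsplit('/usr/lib/python3'))       # ['usr', 'lib', 'python3']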
Example #39
def main():
    argc = len(sys.argv)
    pkg_path = abspath(sys.argv[1] if argc > 1 else os.getcwd())
    n_bits = sys.argv[2] if argc > 2 else get_bitness()
    openblas_root = abspath(sys.argv[3] if argc > 3 else DEFAULT_OPENBLAS_ROOT)
    if n_bits not in ('32', '64'):
        raise RuntimeError("Number of bits should be 32 or 64")
    os.chdir(pkg_path)
    check_call(['git', 'clean', '-fxd'])
    check_call(['git', 'reset', '--hard'])
    blas_dir = pjoin(openblas_root, str(n_bits))
    with open(pjoin(blas_dir, 'site.cfg.template'), 'rt') as fobj:
        cfg_template = fobj.read()
    lib_basename = OPENBLAS_LIBNAME_RE.search(cfg_template).groups()[0]
    with open('site.cfg', 'wt') as fobj:
        fobj.write(cfg_template.format(openblas_root=openblas_root))
    # Copy guard against importing without SSE2
    if psplit(pkg_path)[-1] == 'numpy':
        shutil.copy2(pjoin(BUILD_STUFF, '_distributor_init.py'), pkg_path)
    check_call(['python', 'setup.py', 'bdist_wheel'])
    if psplit(pkg_path)[-1] == 'numpy':
        add_library(pkg_path, pjoin(blas_dir, 'bin', lib_basename + '.dll'))
Example #40
def write_requires(s,
                   spec,
                   search_path,
                   output_dir,
                   previous_packages=None,
                   prev_deps=None,
                   isSrv=False):
    "Writes out the require fields"
    if previous_packages is None:
        s.write('import \'dart:convert\';')
        s.write('import \'package:buffer/buffer.dart\';')
        s.write('import \'package:dartros/msg_utils.dart\';')

        previous_packages = {}
    if prev_deps is None:
        prev_deps = []
    # find other message packages and other messages in this packages
    # that this message depends on
    found_packages, local_deps, external_deps = find_requires(spec)
    # print('External Dependencies: {}'.format(external_deps))
    # filter out previously found local deps
    local_deps = [dep for dep in local_deps if dep not in prev_deps]
    # filter out previously found packages
    found_packages = {
        package
        for package in found_packages if package not in previous_packages
    }
    for package in found_packages:
        # print('External Package: {}, messages: {}'.format(
        # package, external_deps[package]))
        # TODO: finder is only relevant to node - we should support an option to
        #   create a flat message package directory. The downside is that it requires
        #   copying files between workspaces.
        s.write('import \'package:{}/msgs.dart\';'.format(package))
        (directory, pack) = psplit(output_dir)
        generate_all_msgs_for_package(package, directory, search_path)

    # require messages from this package
    # messages from this package need to be required separately
    # so that we don't create a circular requires dependency
    for dep in local_deps:
        if isSrv:
            s.write('import \'../msgs/{}.dart\';'.format(dep))
        else:
            s.write('import \'{}.dart\';'.format(dep))

    s.newline()
    s.write('//-----------------------------------------------------------')
    s.newline()
    return found_packages, local_deps
Example #41
    def authenticate(self, handler, data):
        username = data['username']

        zf = zipfile.ZipFile(BytesIO(handler.request.files['zipfile'][0]['body']))

        cluster_name = psplit(zf.namelist()[0])[0]
        self.docker_env_dir = mkdtemp(suffix='-carinaauth')

        for name in ('docker.env', 'cert.pem', 'ca.pem', 'ca-key.pem', 'key.pem'):
            zf.extract(pjoin(cluster_name, name), path=self.docker_env_dir)

        self.docker_env_dir = pjoin(self.docker_env_dir, cluster_name)

        return username
Example #42
def write_ipynb(nb_path, out_dir, template_name=DEFAULT_TEMPLATE):
    fpath, fname = psplit(nb_path)
    froot, ext = splitext(fname)
    with io.open(nb_path, 'rt') as f:
        nb = nb_read(f, DEFAULT_READ_FORMAT)
    nb.metadata['name'] = froot
    nb_evaluated = evaluate_notebook(nb, working_dir=fpath)
    with io.open(pjoin(out_dir, fname), 'wt') as f:
        nb_write(nb, f, DEFAULT_WRITE_FORMAT)
    nb_html = nb_to_html(nb_convert(nb_evaluated, HTML_FORMAT),
                         template_name=template_name,
                         resources=dict(nb_fname=fname))
    with io.open(pjoin(out_dir, froot + '.html'), 'wb') as f:
        f.write(nb_html.encode('utf-8'))
Example #43
    def page_list(self, fn='Pages.lst'):
        fn = pjoin(self.path, fn)
        self.pagelist = OrderedDict()
        for line in open(fn):
            line = re.split('[#\r\n]', line)[0]
            if line == "":
                continue
            if '|' not in line:
                raise Exception('%s contains a line without "|": %s' % (fn, line))
            p, content = line.split('|')
            if p in self.pagelist:
                raise Exception("%s contains same page twice" % fn)
            self.pagelist[p] = content
        self.pagefiles = [psplit(fn)[1] for fn in glob(pjoin(self.path, 'Pages/*.txt'))]
        self.pagefiles.sort()
Example #44
def main():
    print('ImageFap Gallery Downloader ' + version)
    config = Config('IFLoad.yaml')

    basedir = dirname(__file__)

    #    for name in glob(pjoin(basedir, 'plugins', '*.py')):
    for name in config.get():
        if name == 'common':
            continue

        module = splitext(psplit(name)[-1])[0]
        if (not module.startswith('_') and module.isidentifier()
                and not iskeyword(module)):
            try:
                __import__('plugins.' + module)
            except:
                print("Failed to import ", 'plugins.' + module)
            else:
                print("Imported " + name)

    print('Ready\n')

    from sys import argv
    from json import dumps
    if len(argv) > 1:
        from shelve import open as dbmopen
        from sys import exit
        with dbmopen(config.get()['common']['clipboard'], 'c') as clipboard:
            stamp = str(int(time()))
            entry = {'type': 'picture', 'url': argv[1]}
            clipboard[stamp] = dumps(entry)
        exit(0)

    urlqueue = UrlQueue(config)

    while True:
        """fetch clipboard content and check for new url; add to queue if valid url is detected"""
        try:
            urlqueue.CheckClipboard()

            ### check if queue contains a url
            urlqueue.Dispatch()

            sleep(0.5)
        except KeyboardInterrupt:
            print("Exiting...")
            break
Example #45
def main():
    args = make_parser().parse_args()
    for wheel_fname in args.files:
        path, fname = psplit(wheel_fname)
        wf = WheelFile(fname)
        parsed = wf.parsed_filename.groupdict()
        parsed['build'] = args.build_tag
        parsed['build_suffix'] = args.build_suffix
        out_fname = ('{name}-{ver}{build_suffix}{build}-{pyver}-{abi}-{plat}'
                     '.whl'.format(**parsed))
        out_path = pjoin(path, out_fname)
        print('{} {} to {}'.format(
            'Renaming' if args.rename else 'Copying', wheel_fname, out_path))
        copyfile(wheel_fname, out_path)
        if args.rename:
            os.unlink(wheel_fname)
Example #46
def package_config():
    """Use pkg-config to get library build parameters and tesseract version."""
    p = subprocess.Popen([
        'pkg-config', '--exists',
        '--atleast-version={}'.format(_TESSERACT_MIN_VERSION),
        '--print-errors', 'tesseract'
    ],
                         stderr=subprocess.PIPE)
    output, error = p.communicate()
    _LOGGER.info(output)
    if p.returncode != 0:
        raise Exception(error)
    p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'tesseract'],
                         stdout=subprocess.PIPE)
    output, _ = p.communicate()
    _LOGGER.info(output)
    flags = _read_string(output).strip().split()
    p = subprocess.Popen(['pkg-config', '--libs', '--cflags', 'lept'],
                         stdout=subprocess.PIPE)
    output, _ = p.communicate()
    _LOGGER.info(output)
    flags2 = _read_string(output).strip().split()
    options = {'-L': 'library_dirs', '-I': 'include_dirs', '-l': 'libraries'}
    config = {}
    import itertools
    for f in itertools.chain(flags, flags2):
        try:
            opt = options[f[:2]]
        except KeyError:
            continue
        val = f[2:]
        if opt == 'include_dirs' and psplit(val)[1].strip(
                os.sep) in ('leptonica', 'tesseract'):
            val = dirname(val)
        config.setdefault(opt, set()).add(val)
    config = {k: list(v) for k, v in config.items()}
    p = subprocess.Popen(['pkg-config', '--modversion', 'tesseract'],
                         stdout=subprocess.PIPE)
    version, _ = p.communicate()
    _LOGGER.info(version)
    version = _read_string(version).strip()
    _LOGGER.info("Supporting tesseract v{}".format(version))
    config['cython_compile_time_env'] = {
        'TESSERACT_VERSION': version_to_int(version)
    }
    _LOGGER.info("Configs from pkg-config: {}".format(config))
    return config
Example #47
File: env.py Project: torotil/tagfs
def setupenv():
    from os.path import dirname, abspath, exists, join as pjoin, split as psplit

    global eventsdir
    global projectdir
    testdir = dirname(abspath(__file__))
    projectdir = pjoin(psplit(testdir)[0])
    srcdir = pjoin(projectdir, 'src')
    moddir = pjoin(srcdir, 'modules')
    eventsdir = pjoin(projectdir, 'etc', 'test', 'events')

    for x in (testdir, srcdir, moddir, eventsdir):
        assert exists(x), "Directory not found: %s" % x

    sys.path.insert(0, testdir)
    sys.path.insert(0, moddir)
    sys.path.insert(0, srcdir)
Example #48
def main():
    try:
        wd = sys.argv[1]
    except IndexError:
        wd = os.getcwd()
    wd = abspath(wd)
    package_names = glob(pjoin(wd, COMPONENT_DIRECTORY, '*.pkg'))
    package_names = [psplit(pn)[1] for pn in package_names]
    n_pkgs = len(package_names)
    extra_plist = dict(
            IFRequirementDicts=[python_requirement(PKG_NAME)],
            IFPkgFlagComponentDirectory=tools.unicode_path(
                './' + COMPONENT_DIRECTORY))
    plist = mpkg_info(PKG_NAME, '1.7',
                      zip(package_names, ('selected',) * n_pkgs))
    plist.update(extra_plist)
    write(plist, pjoin(wd, 'Contents', 'Info.plist'))
Example #49
    def putfo(self, path, fo=None, chmod=None):
        """
        Upload file like object to the remote server.

        Unlike put(), this method operates on file objects and not directly on
        file content which makes it much more efficient for large files since
        it utilizes pipelining.
        """
        extra = {"_path": path, "_chmod": chmod}
        self.logger.debug("Uploading file", extra=extra)

        sftp = self._get_sftp_client()

        # less than ideal, but we need to mkdir stuff otherwise file() fails
        head, tail = psplit(path)

        if path[0] == "/":
            sftp.chdir("/")
        else:
            # Relative path - start from a home directory (~)
            sftp.chdir(".")

        for part in head.split("/"):
            if part != "":
                try:
                    sftp.mkdir(part)
                except IOError:
                    # so, there doesn't seem to be a way to
                    # catch EEXIST consistently *sigh*
                    pass
                sftp.chdir(part)

        cwd = sftp.getcwd()
        cwd = self._sanitize_cwd(cwd=cwd)

        sftp.putfo(fo, path)
        if chmod is not None:
            ak = sftp.file(tail)
            ak.chmod(chmod)
            ak.close()

        file_path = self._sanitize_file_path(cwd=cwd, file_path=path)
        return file_path
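A hedged usage sketch for putfo(); the client construction, host details and paths are assumptions and not part of the example above.

# Hedged usage sketch; the client class and paths are placeholders.
client = SSHClient(hostname='example.com', username='deploy')   # assumed constructor
with open('/tmp/release.tar.gz', 'rb') as fo:
    remote_path = client.putfo('/opt/releases/release.tar.gz', fo=fo, chmod=0o644)
print(remote_path)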
Example #50
0
File: IO.py Project: leoguignard/IO
def imsave(filename, img):
    """Save a |SpatialImage| to filename.

    .. note: `img` **must** be a |SpatialImage|.

    The filewriter is choosen according to the file extension. However all file extensions
    will not match the data held by img, in dimensionnality or encoding, and might raise `IOError`s.

    For real volume data, Inrimage and NPY are currently supported.
    For |SpatialImage|s that are actually 2D, PNG, BMP, JPG among others are supported if PIL is installed.

    :Parameters:
     - `filename` (str)
     - `img` (|SpatialImage|)
    """

    filename = expusr(filename)
    root, ext = splitext(filename)

    # assert isinstance(img, SpatialImage) or ext == '.klb'
    # -- images are always at least 3D! If the size of dimension 3 (indexed 2) is 1, then it is actually
    # a 2D image. If it is 4D it has vectorial or RGB[A] data. --
    head, tail = psplit(filename)
    head = head or "."
    if not exists(head):
        raise IOError("The directory do not exist: %s" % head)

    # is2D = img.shape[2] == 1
    ext = ext.lower()
    if ext == ".gz":
        root, ext = splitext(root)
        ext = ext.lower()
    if ext == ".inr":
        write_inrimage(filename, img)
    elif ext in [".npz", ".npy"]:
        save(filename, img)
    elif ext in [".tiff", ".tif"]:
        write_tif(filename, img)
    elif ext == '.klb':
        write_klb(filename, img)
    elif ext in ['.h5', '.hdf5']:
        write_h5(filename, img)
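A hedged usage sketch; it assumes SpatialImage can wrap a plain numpy array (its constructor is not shown here), and the target paths are illustrative.

# Hedged usage sketch; SpatialImage construction and paths are assumptions.
import numpy as np
vol = SpatialImage(np.zeros((64, 64, 32), dtype=np.uint8))
imsave('/tmp/volume.npy', vol)      # handled by the ".npy" branch (numpy save)
imsave('/tmp/volume.inr.gz', vol)   # ".gz" is stripped first, then the ".inr" writer is used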
Example #51
0
def get_dataset_root(path):
    """Return the root of an existent dataset containing a given path

    The root path is returned in the same absolute or relative form
    as the input argument. If no associated dataset exists, or the
    input path doesn't exist, None is returned.
    """
    suffix = '.git'
    if not isdir(path):
        path = dirname(path)
    apath = abspath(path)
    # while we can still go up
    while psplit(apath)[1]:
        if exists(opj(path, suffix)):
            return path
        # new test path in the format we got it
        path = normpath(opj(path, os.pardir))
        # no luck, next round
        apath = abspath(path)
    return None
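A hedged usage sketch; the paths are illustrative and the function simply walks upwards until a '.git' entry is found.

# Hedged usage sketch; the path below is illustrative only.
root = get_dataset_root('/data/study/sub-01/anat/T1w.nii.gz')
if root is None:
    print('not inside a dataset')
else:
    print('dataset root:', root)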
Example #52
0
    def put(self, path, contents=None, chmod=None, mode='w'):
        sftp = self.client.open_sftp()
        # less than ideal, but we need to mkdir stuff otherwise file() fails
        head, tail = psplit(path)
        if path[0] == "/":
            sftp.chdir("/")
        for part in head.split("/"):
            if part != "":
                try:
                    sftp.mkdir(part)
                except IOError:
                    # so, there doesn't seem to be a way to
                    # catch EEXIST consistently *sigh*
                    pass
                sftp.chdir(part)
        ak = sftp.file(tail, mode=mode)
        ak.write(contents)
        if chmod is not None:
            ak.chmod(chmod)
        ak.close()
        sftp.close()
Example #53
0
    def run(self):
        install_scripts.run(self)
        if os.name != "nt":
            return
        for filepath in self.get_outputs():
            # If we can find an executable name in the #! top line of the script
            # file, make .bat wrapper for script.
            with open(filepath, "rt") as fobj:
                first_line = fobj.readline()
            if not (first_line.startswith("#!") and "python" in first_line.lower()):
                log.info("No #!python executable found, skipping .bat wrapper")
                continue
            pth, fname = psplit(filepath)
            froot, ext = splitext(fname)
            bat_file = pjoin(pth, froot + ".bat")
            bat_contents = BAT_TEMPLATE.replace("{FNAME}", fname)
            log.info("Making %s wrapper for %s" % (bat_file, filepath))
            if self.dry_run:
                continue
            with open(bat_file, "wt") as fobj:
                fobj.write(bat_contents)
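BAT_TEMPLATE is defined elsewhere in the original module; one plausible shape, offered purely as an assumption, is a wrapper that re-invokes the adjacent script with the same interpreter and forwards all arguments.

# Assumed shape of BAT_TEMPLATE; not taken from the original project.
BAT_TEMPLATE = r"""@echo off
REM wrapper for {FNAME}
python "%~dp0{FNAME}" %*
"""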
Example #54
0
def _get_study_df(bids_directory):
    subject_ids = []
    study_dict = OrderedDict()
    for file in glob(opj(bids_directory, "sub-*")):
        if os.path.isdir(file):
            subject_ids.append(psplit(file)[-1][4:])
    subject_ids.sort()
    study_dict["Source Name"] = subject_ids
    study_dict["Characteristics[organism]"] = "h**o sapiens"
    study_dict["Characteristics[organism part]"] = "brain"
    study_dict["Protocol REF"] = "Participant recruitment"
    study_dict["Sample Name"] = subject_ids
    df = pd.DataFrame(study_dict)

    participants_file = opj(bids_directory, "participants.tsv")
    if not exists(participants_file):
        return df

    participants_df = pd.read_csv(participants_file, sep="\t")
    rename_rule = sample_property_name_map.copy()
    # remove all mappings that do not match the columns at hand; iterate over a
    # list copy so entries can be deleted while looping (Python 3 raises a
    # RuntimeError if the dict changes size during iteration)
    for r in list(rename_rule.keys()):
        if r not in participants_df.keys():
            del rename_rule[r]
    # turn all unknown properties into comment columns
    for c in participants_df.keys():
        if c not in rename_rule:
            rename_rule[c] = "Comment[{}]".format(c.lower())

    participants_df.rename(columns=rename_rule, inplace=True)
    # simplify sample names by stripping the common prefix
    participants_df["Sample Name"] = \
        [s[4:] for s in list(participants_df["Sample Name"])]
    # merge participant info with study info
    df = pd.merge(
        df,
        participants_df,
        left_on="Sample Name",
        right_on="Sample Name")
    return df
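sample_property_name_map is defined elsewhere in the original module; a toy, hedged illustration of how the rename rule is pruned and extended, with an invented mapping and invented column names, might look like this.

import pandas as pd

# Toy illustration only; the mapping and column names are invented.
sample_property_name_map = {'participant_id': 'Sample Name',
                            'age': 'Characteristics[age]'}
participants_df = pd.DataFrame({'participant_id': ['sub-01', 'sub-02'],
                                'age': [31, 28],
                                'favorite_color': ['blue', 'green']})
rename_rule = {k: v for k, v in sample_property_name_map.items()
               if k in participants_df.columns}
for c in participants_df.columns:
    if c not in rename_rule:
        rename_rule[c] = 'Comment[{}]'.format(c.lower())
print(participants_df.rename(columns=rename_rule).columns.tolist())
# ['Sample Name', 'Characteristics[age]', 'Comment[favorite_color]']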
Example #55
0
    def put(self, path, contents=None, chmod=None, mode='w'):
        extra = {'_path': path, '_mode': mode, '_chmod': chmod}
        self.logger.debug('Uploading file', extra=extra)

        sftp = self.client.open_sftp()
        # less than ideal, but we need to mkdir stuff otherwise file() fails
        head, tail = psplit(path)

        if path[0] == "/":
            sftp.chdir("/")
        else:
            # Relative path - start from a home directory (~)
            sftp.chdir('.')

        for part in head.split("/"):
            if part != "":
                try:
                    sftp.mkdir(part)
                except IOError:
                    # so, there doesn't seem to be a way to
                    # catch EEXIST consistently *sigh*
                    pass
                sftp.chdir(part)

        cwd = sftp.getcwd()

        ak = sftp.file(tail, mode=mode)
        ak.write(contents)
        if chmod is not None:
            ak.chmod(chmod)
        ak.close()
        sftp.close()

        if path[0] == '/':
            file_path = path
        else:
            file_path = pjoin(cwd, path)

        return file_path
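A hedged usage sketch for put(); the client instance, the uploaded contents and the remote path are placeholders, not part of the example above.

# Hedged usage sketch; client construction and paths are placeholders.
deployed = client.put('/opt/app/run.sh',
                      contents='#!/bin/sh\necho hello\n',
                      chmod=0o755)
print(deployed)   # absolute remote path of the uploaded file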
Example #56
0
    def file_matches(self, text, mark):
        if '~' in text:
            if '/' in text:
                text = '%s%s%s' % (mark, expanduser(
                    text[text.find('~'):text.find('/')]),
                                   text[text.find('/'):])
            else:
                self.user_matches(text, mark)
                return

        text1 = text[1:]
        delim = '/'

        if not text1:
            directory = ''
        elif text1 == '.':
            directory = '.'
        elif text1 == '..':
            directory = '..'
        elif text1 == '/':
            directory = '/'
            delim = ''
        elif text1[-1] == '/':
            directory = text1[:-1]
            delim = text1[len(directory):]
        else:
            directory, partial = psplit(text1)
            delim = text1[len(directory):][:-len(partial)]

        if directory:
            listing = map(lambda x: '%s%s%s%s' % (mark, directory, delim, x),
                          listdir(directory))
        else:
            listing = map(lambda x: '%s%s' % (mark, x),
                          listdir('.'))

        n = len(text)
        self.matches = filter(lambda x: x[:n] == text, listing)
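The completion logic hinges on psplit() separating the typed text into an existing directory and a partial name; a small hedged illustration:

# Hedged illustration of the split used above; the path is an example only.
from os.path import split as psplit
directory, partial = psplit('usr/lo')
print(directory, partial)   # usr lo
# the completer then lists "usr" and keeps entries whose names start with "lo"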