Example No. 1
def scale_range(in_file, value=1.0, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_range.nii.gz' % fname)

    im = nb.load(in_file)
    idata = im.get_data()

    mask = np.ones_like(idata)
    if in_mask is not None:
        mask = nb.load(in_mask).get_data()
        mask[mask > 0.0] = 1
        mask[mask < 0.0] = 0

    rp0 = -1.0 * value
    rp1 = value
    ip0 = np.percentile(idata[mask > 0.0], 1.5)
    ip1 = np.percentile(idata[mask > 0.0], 98.5)

    factor = (rp1 - rp0) / (ip1 - ip0)
    idata *= factor
    idata -= np.median(idata[mask > 0.0])

    nb.Nifti1Image(
        idata, im.get_affine(), im.get_header()).to_filename(out_file)

    return out_file
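A standalone sketch of the robust scaling step above, using a synthetic array instead of NIfTI I/O; the 1.5/98.5 percentiles make the scaling insensitive to outliers:

import numpy as np

idata = (np.random.randn(10000) * 50.0).astype(np.float32)  # synthetic intensities
value = 1.0
ip0, ip1 = np.percentile(idata, 1.5), np.percentile(idata, 98.5)
factor = (value - (-value)) / (ip1 - ip0)  # map the robust intensity range onto [-value, value]
idata *= factor
idata -= np.median(idata)
print(idata.min(), idata.max())  # roughly -1/+1, apart from the outlier tails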
Example No. 2
def _get_matfile_data():

    # compare to Arielle's in the .mat file
    mat_file = "sub01_session_1_raw_ROI_timeseries.mat"
    mat_file = osp.join(osp.dirname(osp.realpath(__file__)), mat_file)
    # to_check.keys() == ['all_rois', 'time_series', 
    #                     '__globals__', 'Nvox', '__header__', '__version__']
    to_check = sio.loadmat(mat_file)
    nvox = to_check['Nvox'][0]
    nb_runs = to_check['time_series'].shape[2] # has shape (time, rois, nb_runs)
    assert nb_runs == 4

    # make a dict for nvox
    check_nvox = {}
    for idx, roi in enumerate(to_check['all_rois']):
        k, _ = osp.splitext(osp.basename(roi[0][0]))
        check_nvox[k] = nvox[idx]

    # make a dict for signals
    arielle_runs = []
    for run in range(nb_runs):
        check_signals = {}
        for idx, roi in enumerate(to_check['all_rois']):
            k  = osp.splitext(osp.basename(roi[0][0]))[0]
            check_signals[k] = to_check['time_series'][:,idx,run]
        arielle_runs.append(check_signals)

    return check_nvox, arielle_runs
Example No. 3
def main_loop(argv=None):
    if argv is None:
        argv = sys.argv

    args = docopt.docopt(get_updated_docstring(), argv=argv[1:],
            version=".".join(map(str, __version__)))

    if not args["--silent"]:
        logcfg.set_loglevel(log, "INFO")
        for h in log.handlers:
            logcfg.set_loglevel(h, "INFO")
    elif args["--verbose"] > 0:
        logcfg.make_verbose()
        log.debug(pf(args))

    ext = args["--extension"]
    recursive = args["--recursive"]

    files_and_folders = []
    files_and_folders.extend(args["<file_or_folder>"])

    for faf in files_and_folders:
        if osp.isfile(faf):
            parse_file(faf, args)
        elif osp.isdir(faf):
            for entry in os.listdir(faf):
                path = osp.join(faf, entry)

                valid_file = osp.isfile(path)\
                    and osp.splitext(path)[-1] == ext\
                    and osp.basename(osp.splitext(path)[0]) != "cfg"
                valid_folder = recursive and osp.isdir(path)

                if valid_file or valid_folder:
                    files_and_folders.append(path)
Example No. 4
def filter_fmap(in_file, in_mask=None, out_file=None):
    from pyacwereg.filters import wavelets_denoise, laplacian_filter
    import numpy as np
    import nibabel as nb
    import os.path as op
    from math import pi
    from scipy.ndimage import median_filter

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_filtered.nii.gz' % fname)

    filtered = wavelets_denoise(laplacian_filter(in_file, in_mask))

    im = nb.load(filtered)
    result = im.get_data()
    result = median_filter(result, 10)
    result -= np.median(result)
    result *= (pi / np.percentile(result, 99.97))
    nb.Nifti1Image(result, im.get_affine(),
                   im.get_header()).to_filename(out_file)

    return out_file
Example No. 5
 def hasChildren(self, index):
     QApplication.setOverrideCursor(Qt.WaitCursor)
     if index.isValid():
         item = self.itemFromIndex(index)
         path = item.path
         if item.hasChildren():
             children = True
         elif item.isClass or item.isFunction:
             children = False
         elif isdir(path):
             paths = [join(path, p) for p in listdir(path)]
             paths = [p for p in paths if isPackage(p) or isSource(p)]
             paths.sort()
             for key, subiter in groupby(paths, lambda x:splitext(x)[0]):
                 pth = sorted(subiter)[0]
                 item.appendRow(SysPathItem(pth, split(pth)[1]))
             children = bool(paths)
         elif splitext(path)[1] in pyexts:
             contents = readModule(self.dottedName(item), split(path)[0])
             for name, obj in sorted(contents.items()):
                 item.appendRow(SysPathItem(path, name, obj))
             children = bool(contents)
         else:
             children = False
     else:
         children = True
     QApplication.restoreOverrideCursor()
     return children
Example No. 6
    def generate(self):
        # "make" wants Unix paths
        self.resources.win_to_unix()

        to_be_compiled = []
        for r_type in ['s_sources', 'c_sources', 'cpp_sources']:
            r = getattr(self.resources, r_type)
            if r:
                for source in r:
                    base, ext = splitext(source)
                    to_be_compiled.append(base + '.o')

        libraries = []
        for lib in self.resources.libraries:
            l, _ = splitext(basename(lib))
            libraries.append(l[3:])

        ctx = {
            'name': self.program_name,
            'to_be_compiled': to_be_compiled,
            'object_files': self.resources.objects,
            'include_paths': self.resources.inc_dirs,
            'library_paths': self.resources.lib_dirs,
            'linker_script': self.resources.linker_script,
            'libraries': libraries,
            'symbols': self.toolchain.get_symbols()
        }
        self.gen_file('codesourcery_%s.tmpl' % self.target.lower(), ctx, 'Makefile')
Example No. 7
def run_bootstrap(task_fname, action_fname, bootstrap_fname, burn_in = 40, tree_sizes = None, animate=False, no_cmat=False):
    """
    generates a bootstrapping tree
    task_fname has the training examples to use
    bootstrap_fname will be used as the file to create all of the bootstrapping trees
    tree_sizes controls the number of trees we want to build
    results for tree size i will be in bootstrap_fname_i.h5
    """
    if not tree_sizes:
        tree_sizes = DEFAULT_TREE_SIZES[:]
    taskf = h5py.File(task_fname, 'r')    
    assert len(taskf) >= burn_in + max(tree_sizes)
    taskf.close()
    task_ctr = 0
    setup_bootstrap_file(action_fname, bootstrap_fname)
    bootstrap_orig = osp.splitext(bootstrap_fname)[0] + '_orig.h5'
    shutil.copyfile(bootstrap_fname, bootstrap_orig)
    results = []
    for i in range(burn_in):
        print 'doing burn in {}/{}'.format(i, burn_in)
        res = run_example((task_fname, str(task_ctr), bootstrap_orig, bootstrap_fname, animate, no_cmat))
        results.append(res)
        task_ctr += 1                        
    for i in range(max(tree_sizes)):
        print 'doing bootstrapping {}/{}'.format(i, max(tree_sizes))
        if i in tree_sizes:
            bootstrap_i_fname = osp.splitext(bootstrap_fname)[0] + '_{}.h5'.format(i)
            shutil.copyfile(bootstrap_fname, bootstrap_i_fname)
        res = run_example((task_fname, str(task_ctr), bootstrap_fname, bootstrap_fname, animate, no_cmat))
        results.append(res)
        task_ctr += 1
    print 'success rate', sum(results)/float(len(results))
    return sum(results)/float(len(results))
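A minimal sketch of the output-file naming used above (the input path is hypothetical): each checkpointed tree size i is written next to the original bootstrap file.

import os.path as osp

bootstrap_fname = 'trees/bootstrap.h5'  # hypothetical path
bootstrap_orig = osp.splitext(bootstrap_fname)[0] + '_orig.h5'
bootstrap_10 = osp.splitext(bootstrap_fname)[0] + '_{}.h5'.format(10)
print(bootstrap_orig)  # trees/bootstrap_orig.h5
print(bootstrap_10)    # trees/bootstrap_10.h5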
Example No. 8
def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from skimage import exposure, img_as_int

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_enh.nii.gz' % fname)

    im = nb.load(in_file)
    imdata = im.get_data()
    imshape = im.get_shape()

    if in_mask is not None:
        msk = nb.load(in_mask).get_data()
        msk[msk > 0] = 1
        msk[msk < 1] = 0
        imdata = imdata * msk

    immin = imdata.min()
    imdata = (imdata - immin).astype(np.uint16)

    adapted = exposure.equalize_adapthist(imdata.reshape(imshape[0], -1),
                                          clip_limit=clip_limit)

    nb.Nifti1Image(adapted.reshape(imshape), im.get_affine(),
                   im.get_header()).to_filename(out_file)

    return out_file
Example No. 9
  def read_data(self):
      print("\nRead Data")
      # List input directory containing the .csv files
      for filename in listdir(self.input_dir):
          fn = path.splitext(filename)[0]
          if path.splitext(filename)[1] == ".csv":
              file_path = path.join(self.input_dir, filename)
              print("File path: " + file_path)
              # Pandas.read_csv method returns DataFrame object
              try:
                  if "campus" in fn or 'cfy' == fn[0:3]:
                      df = read_csv(file_path,
                                    delimiter=",",
                                    header=0,
                                    low_memory=False)
                      df = df[df['Category'].isin(values)]
                  elif ('district' in fn or 'dfy' == fn[0:3] or
                        'state' in fn or 'sfy' == fn[0:3]):
                      df = read_csv(file_path,
                                    delimiter=",",
                                    header=0,
                                    low_memory=False)
                      df = df[df['Category'].isin(values)]
                      if 'state' in fn or 'sfy' == fn[0:3]:
                          print('\t State file modification')
                          df.insert(0, 'DISTRICT', "1")
                  else:
                      print("\t Skipping file: %s" % file_path)
                      continue
 
                  self.write_data(df, filename)
              except:
                  print("Error while reading %s" % filename)
Example No. 10
def siemens2rads(in_file, out_file=None):
    """
    Converts input phase difference map to rads
    """
    import numpy as np
    import nibabel as nb
    import os.path as op
    import math

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_rads.nii.gz' % fname)

    in_file = np.atleast_1d(in_file).tolist()
    im = nb.load(in_file[0])
    data = im.get_data().astype(np.float32)
    hdr = im.get_header().copy()

    if len(in_file) == 2:
        data = nb.load(in_file[1]).get_data().astype(np.float32) - data
    elif (data.ndim == 4) and (data.shape[-1] == 2):
        data = np.squeeze(data[..., 1] - data[..., 0])
        hdr.set_data_shape(data.shape[:3])

    imin = data.min()
    imax = data.max()
    data = (2.0 * math.pi * (data - imin)/(imax-imin)) - math.pi
    hdr.set_data_dtype(np.float32)
    hdr.set_xyzt_units('mm')
    hdr['datatype'] = 16
    nb.Nifti1Image(data, im.get_affine(), hdr).to_filename(out_file)
    return out_file
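The core of the conversion is a linear rescale of the phase values onto [-pi, pi). A self-contained sketch with synthetic data:

import numpy as np
import math

data = np.array([0., 1024., 2048., 4095.], dtype=np.float32)  # synthetic Siemens phase values
imin, imax = data.min(), data.max()
rads = (2.0 * math.pi * (data - imin) / (imax - imin)) - math.pi
print(rads)  # values now span [-pi, pi]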
Example No. 11
def demean_image(in_file, in_mask=None, out_file=None):
    """
    Demean image data inside mask
    """
    import numpy as np
    import nibabel as nb
    import os.path as op
    import math

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_demean.nii.gz' % fname)

    im = nb.load(in_file)
    data = im.get_data().astype(np.float32)
    msk = np.ones_like(data)

    if in_mask is not None:
        msk = nb.load(in_mask).get_data().astype(np.float32)
        msk[msk > 0] = 1.0
        msk[msk < 1] = 0.0

    mean = np.median(data[msk == 1].reshape(-1))
    data[msk == 1] = data[msk == 1] - mean
    nb.Nifti1Image(data, im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
Example No. 12
def recompose_dwi(in_dwi, in_bval, in_corrected, out_file=None):
    """
    Recompose the dMRI data according to the b-values table after
    eddy-current (EC) correction
    """
    import numpy as np
    import nibabel as nb
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_dwi))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_eccorrect%s" % (fname, ext))

    im = nb.load(in_dwi)
    dwidata = im.get_data()
    bvals = np.loadtxt(in_bval)
    dwis = np.where(bvals != 0)[0].tolist()

    if len(dwis) != len(in_corrected):
        raise RuntimeError('Length of DWIs in b-values table and after '
                           'correction should match')

    for bindex, dwi in zip(dwis, in_corrected):
        dwidata[..., bindex] = nb.load(dwi).get_data()

    nb.Nifti1Image(dwidata, im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
Example No. 13
def b0_average(in_dwi, in_bval, out_file=None):
    """
    A function that averages the *b0* volumes from a DWI dataset.

    .. warning:: *b0* should be already registered (head motion artifact should
      be corrected).

    """
    import numpy as np
    import nibabel as nb
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_dwi))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_avg_b0%s" % (fname, ext))

    imgs = np.array(nb.four_to_three(nb.load(in_dwi)))
    bval = np.loadtxt(in_bval)
    b0s = [im.get_data().astype(np.float32)
           for im in imgs[np.where(bval == 0)]]
    b0 = np.average(np.array(b0s), axis=0)

    hdr = imgs[0].get_header().copy()
    hdr.set_data_shape(b0.shape)
    hdr.set_xyzt_units('mm')
    hdr.set_data_dtype(np.float32)
    nb.Nifti1Image(b0, imgs[0].get_affine(), hdr).to_filename(out_file)
    return out_file
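The selection and averaging boils down to indexing the 4D stack where the b-value is zero. A sketch with synthetic arrays in place of the NIfTI volumes:

import numpy as np

bval = np.array([0., 1000., 0., 1000.])                # synthetic b-value table
vols = np.random.rand(2, 2, 2, 4).astype(np.float32)  # synthetic 4D DWI stack
b0s = [vols[..., i] for i in np.where(bval == 0)[0]]
b0 = np.average(np.array(b0s), axis=0)
print(b0.shape)  # (2, 2, 2)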
Example No. 14
 def _load_rbo(self):
     """Load APC2015rbo dataset"""
     dataset_dir = osp.join(this_dir, 'dataset/APC2015rbo/berlin_samples')
     img_glob = osp.join(dataset_dir, '*_bin_[A-L].jpg')
     desc = 'rbo'
     for img_file in tqdm.tqdm(glob.glob(img_glob), ncols=80, desc=desc):
         basename = osp.splitext(osp.basename(img_file))[0]
         # apply mask, crop and save
         bin_mask_file = re.sub('.jpg$', '.pbm', img_file)
         bin_mask = imread(bin_mask_file, mode='L')
         where = np.argwhere(bin_mask)
         roi = where.min(0), where.max(0) + 1
         id_ = osp.join('rbo', basename)
         dataset_index = len(self.ids) - 1
         self.datasets['rbo'].append(dataset_index)
         mask_glob = re.sub('.jpg$', '_*.pbm', img_file)
         mask_files = [None] * self.n_class
         for mask_file in glob.glob(mask_glob):
             mask_basename = osp.splitext(osp.basename(mask_file))[0]
             label_name = re.sub(basename + '_', '', mask_basename)
             if label_name == 'shelf':
                 continue
             mask_files[self.target_names.index(label_name)] = mask_file
         self.ids.append(id_)
         self.rois.append(roi)
         self.img_files.append(img_file)
         self.mask_files.append(mask_files)
Example No. 15
def extract_bval(in_dwi, in_bval, b=0, out_file=None):
    """
    Writes an image containing only the volumes with the b-value specified
    at the input
    """
    import numpy as np
    import nibabel as nb
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_dwi))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_tsoi%s" % (fname, ext))

    im = nb.load(in_dwi)
    dwidata = im.get_data()
    bvals = np.loadtxt(in_bval)

    if b == 'diff':
        selection = np.where(bvals != 0)
    elif b == 'nodiff':
        selection = np.where(bvals == 0)
    else:
        selection = np.where(bvals == b)

    extdata = np.squeeze(dwidata.take(selection, axis=3))
    hdr = im.get_header().copy()
    hdr.set_data_shape(extdata.shape)
    nb.Nifti1Image(extdata, im.get_affine(),
                   hdr).to_filename(out_file)
    return out_file
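How the take/squeeze selection behaves, shown on a synthetic stack; note that b can also be the strings 'diff' or 'nodiff':

import numpy as np

bvals = np.array([0, 1000, 2000, 1000])
dwidata = np.zeros((2, 2, 2, 4))
selection = np.where(bvals == 1000)
extdata = np.squeeze(dwidata.take(selection, axis=3))
print(extdata.shape)  # (2, 2, 2, 2) -- only the b=1000 volumes remain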
Example No. 16
def _parse_version_file(version_file):
    """Get version info from the given file. It can be any of:
    
    - package.json with "version" field
    - VERSION.txt or VERSION file with just the version string
    - python file with __version_info__
    - .js file with `var VERSION = "1.2.3";`
    """
    f = codecs.open(version_file, 'r', 'utf-8')
    content = f.read()
    f.close()
    
    if basename(version_file) == "package.json":
        version_file_type = "package.json"
        obj = json.loads(content)
        version_info = _version_info_from_version(obj["version"])
    elif splitext(version_file)[1] == ".py":
        version_file_type = "python"
        m = re.search(r'^__version_info__ = (.*?)$', content, re.M)
        version_info = eval(m.group(1))
    elif splitext(version_file)[1] == ".js":
        version_file_type = "javascript"
        m = re.search(r'^var VERSION = "(.*?)";$', content, re.M)
        version_info = _version_info_from_version(m.group(1))
    else:
        # Presume a text file with just the version.
        version_file_type = "version"
        version_info = _version_info_from_version(content.strip())
    return version_file_type, version_info
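A sketch of the python-file branch above, run directly on hypothetical file contents rather than a file on disk:

import re

content = '__version_info__ = (1, 2, 3)\n'  # hypothetical contents of a .py version file
m = re.search(r'^__version_info__ = (.*?)$', content, re.M)
version_info = eval(m.group(1))
print(version_info)  # (1, 2, 3)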
Example No. 17
def bg_mask(in_file, in_mask, out_file=None):
    """
    Rough mask of background from brain masks
    """
    import nibabel as nb
    import numpy as np
    from scipy.ndimage import binary_dilation
    import scipy.ndimage as nd
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_bgmask%s" % (fname, ext))

    im = nb.load(in_file)
    hdr = im.get_header().copy()
    hdr.set_data_dtype(np.uint8)
    hdr.set_xyzt_units('mm')
    imdata = im.get_data()
    msk = nb.load(in_mask).get_data()
    msk = 1 - binary_dilation(msk,
                              structure=np.ones((20, 20, 20)))
    nb.Nifti1Image(msk.astype(np.uint8),
                   im.get_affine(), hdr).to_filename(out_file)
    return out_file
Example No. 18
 def processes_file(self, filename, outfile=None, errorfile=None, directory=''):
     """Take a filename of a file containing plantuml text and processes
     it into a .png image.
     
     :param str filename: Text file containing plantuml markup
     :param str outfile: Filename to write the output image to. If not
                 supplied, then it will be the input filename with the
                 file extension replaced with '.png'.
     :param str errorfile: Filename to write server html error page
                 to. If this is not supplied, then it will be the
                 input ``filename`` with the extension replaced with
                 '_error.html'.
     :returns: ``True`` if the image write succeeded, ``False`` if there was
                 an error written to ``errorfile``.
     """
     if outfile is None:
         outfile = path.splitext(filename)[0] + '.png'
     if errorfile is None:
         errorfile = path.splitext(filename)[0] + '_error.html'
     if directory and not path.exists(directory):
         makedirs(directory)
     data = open(filename, 'U').read()
     try:
         content = self.processes(data)
     except PlantUMLHTTPError, e:
         err = open(path.join(directory, errorfile), 'w')
         err.write(e.content)
         err.close()
         return False
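The default output naming simply swaps the extension via splitext (file names hypothetical):

from os import path

filename = 'diagram.txt'  # hypothetical plantuml source
outfile = path.splitext(filename)[0] + '.png'
errorfile = path.splitext(filename)[0] + '_error.html'
print(outfile, errorfile)  # diagram.png diagram_error.html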
Example No. 19
 def setUp(self):
     
     # create a list of files to cleanup
     self._paths_to_clean_up = []
     self._dirs_to_clean_up = []
     
     # load query seqs
     self.seqs = Alignment(MinimalFastaParser(QUERY_SEQS.split()))
     
     # generate temp filename
     tmp_dir='/tmp'
     self.outfile = get_tmp_filename(tmp_dir)
     
     # create and write out reference sequence file
     self.outfasta=splitext(self.outfile)[0]+'.fasta'
     fastaout=open(self.outfasta,'w')
     fastaout.write(REF_SEQS)
     fastaout.close()
     self._paths_to_clean_up.append(self.outfasta)
     
     # create and write out starting tree file
     self.outtree=splitext(self.outfile)[0]+'.tree'
     treeout=open(self.outtree,'w')
     treeout.write(REF_TREE)
     treeout.close()
     self._paths_to_clean_up.append(self.outtree)
Example No. 20
def read_mesh(fname):
    """Read mesh data from file.

    Parameters
    ----------
    fname : str
        File name to read. Format will be inferred from the filename.
        Currently only '.obj' and '.obj.gz' are supported.

    Returns
    -------
    vertices : array
        Vertices.
    faces : array | None
        Triangle face definitions.
    normals : array
        Normals for the mesh.
    texcoords : array | None
        Texture coordinates.
    """
    # Check format
    fmt = op.splitext(fname)[1].lower()
    if fmt == '.gz':
        fmt = op.splitext(op.splitext(fname)[0])[1].lower()

    if fmt in ('.obj',):
        return WavefrontReader.read(fname)
    elif not fmt:
        raise ValueError('read_mesh could not determine format.')
    else:
        raise ValueError('read_mesh does not understand format %s.' % fmt)
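The double splitext is what strips a compound suffix such as '.obj.gz'; a quick sketch:

import os.path as op

fname = 'mesh.obj.gz'                # hypothetical file name
fmt = op.splitext(fname)[1].lower()  # '.gz'
if fmt == '.gz':
    fmt = op.splitext(op.splitext(fname)[0])[1].lower()
print(fmt)  # '.obj'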
Example No. 21
    def run(self):
        dest = self.parser.getArgs()[-1]
        if path.exists(dest) and not self.opts.force:
            self.error("CSV-file", dest, "exists already. Use --force to overwrite")
        sources = self.parser.getArgs()[0:-1]

        data = SpreadsheetData(csvName=sources[0], title=path.splitext(path.basename(sources[0]))[0])

        if self.opts.time == None:
            self.opts.time = data.names()[0]

        for s in sources[1:]:
            addition = path.splitext(path.basename(s))[0]
            sData = SpreadsheetData(csvName=s)
            if self.opts.addTimes:
                data.addTimes(
                    time=self.opts.time, times=sData.data[self.opts.time], interpolate=self.opts.interpolateNewTime
                )
            for n in sData.names():
                if n != self.opts.time:
                    d = data.resample(
                        sData,
                        n,
                        time=self.opts.time,
                        extendData=self.opts.extendData,
                        noInterpolation=not self.opts.newDataInterpolate,
                    )
                    data.append(addition + " " + n, d, allowDuplicates=True)

        data.writeCSV(dest, delimiter=self.opts.delimiter)
Example No. 22
def time_avg(in_file, index=[0], out_file=None):
    """
    Average the input time-series, selecting the indices given in index

    .. warning:: time steps should be already registered (corrected for
      head motion artifacts).

    """
    import numpy as np
    import nibabel as nb
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_baseline%s" % (fname, ext))

    index = np.atleast_1d(index).tolist()

    imgs = np.array(nb.four_to_three(nb.load(in_file)))[index]
    if len(index) == 1:
        data = imgs[0].get_data().astype(np.float32)
    else:
        data = np.average(np.array([im.get_data().astype(np.float32)
                                    for im in imgs]), axis=0)

    hdr = imgs[0].get_header().copy()
    hdr.set_data_shape(data.shape)
    hdr.set_xyzt_units('mm')
    hdr.set_data_dtype(np.float32)
    nb.Nifti1Image(data, imgs[0].get_affine(), hdr).to_filename(out_file)
    return out_file
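np.atleast_1d lets the index argument be either a scalar or a list; a quick sketch of that normalization:

import numpy as np

print(np.atleast_1d(0).tolist())          # [0]  -- a single baseline volume
print(np.atleast_1d([0, 5, 9]).tolist())  # [0, 5, 9]  -- several volumes to average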
Example No. 23
 def get_real_fn(self, path):
     filename = basename(path)
     fn, ext = splitext(filename)
     fn2, ext2 = splitext(fn)
     if ext2:
         return fn
     return filename
Example No. 24
def main():
    inPath = path.abspath(argv[1])
    inDir = path.dirname(inPath)
    inName = path.splitext(path.basename(inPath))[0]
    source = '%s/%s' % (inDir, inName)

    tar = tarfile.open(inPath, 'r')
    tar.extractall(path=inDir)
    tar.close()

    if path.exists('%s/normalize.lock' % source):
        print 'error[44] already normalized!'
        rmtree(source)
        return -1

    if path.exists('%s/dummy.lock' % source):
        print 'error[65] already dummy!'
        rmtree(source)
        return -1

    outPath = path.abspath(argv[2])
    outDir = path.dirname(outPath)
    outName = path.splitext(path.basename(outPath))[0]
    output = '%s/%s' % (outDir, outName)

    try:
        mkdir(output)
    except WindowsError, err:
        print err
Example No. 25
def pack_dir(in_dir, out_file, file_list, labels_dict=None, seed=None):
    if seed is not None:
        random.seed(seed)
    files = get_files_paths(in_dir, ext='.png')
    random.shuffle(files)

    with open(file_list, 'w') as fo:
        for f in files:
            fo.write(splitext(basename(f))[0]+'\n')
    
    with h5py.File(out_file, "w") as f5:
        X_dataset = f5.create_dataset("X",
                                     (len(files), constants.nChannels*np.square(constants.imSize)),
                                     dtype=np.uint8)
        if labels_dict is not None:
            y_dataset = f5.create_dataset("y", (len(files),), dtype=int)

        for i,f in enumerate(files):
            im=Image.open(f)
            # N x D (N, n_channels*HSize*WSize)
            d = np.asarray(im.getdata()).T.reshape(1,-1)
            X_dataset[i] = np.uint8(d)
            if labels_dict is not None:
                y_dataset[i] = labels_dict[splitext(splitext(basename(f))[0])[0]]
#                print y_dataset[i]
                
            if (i+1) % 50 == 0:
                print('%d/%d' % (i+1, len(files)))
Example No. 26
    def run(self):
        '''
        Finds all the tests files in tests/, and run doctest on them.
        '''
        pymods = []
        pyxmods = []
        for root, dirs, files in os.walk('arboris'):
            for file in files:
                package = '.'.join(root.split(sep))
                if file.endswith('.py') and file != '__init__.py':
                    pymods.append('.'.join([package, splitext(file)[0]]))
                elif file.endswith('.so'):
                    pyxmods.append('.'.join([package, splitext(file)[0]]))

        for mod in pymods:
            exec('import {0} as module'.format(mod))
            doctest.testmod(module)

        for mod in pyxmods:
            exec('import {0} as mod'.format(mod))
            fix_module_doctests(mod)
            doctest.testmod(mod)

        for rst in glob(pjoin('tests', '*.rst')):
            doctest.testfile(rst)
Example No. 27
def load_images(img_mat_fname, corpus_df, normalize=True, force_rebuild=False):
    _memo_fname = path.join(CACHE_DIR, 'IMAGES_' + path.splitext(path.basename(img_mat_fname))[0]) + '.pkl'
    if not path.exists(_memo_fname) or force_rebuild:
        used_pics = set(corpus_df['picture'])
        m = loadmat(img_mat_fname)
        fnames = []
        X = np.empty((m['Img'].shape[0] * 20, 4096), dtype=np.float32)
        idx_x = 0
        for idx_m in xrange(m['Img'].shape[0]):
            c = m['Img'][idx_m][0]
            fname = path.splitext(path.basename(c['fname'][0][0][0]))[0]
            if not fname in used_pics:
                continue
            codes = c['codes'][0][0][:, :-1]
            for img_segment in codes:
                fnames.append(fname)
                X[idx_x] = img_segment
                idx_x += 1
        X = X[:idx_x, :]
        assert(len(fnames) == X.shape[0])
        with open(_memo_fname, 'wb') as fid:
            pickle.dump((fnames, X), fid, -1)
    else:
        with open(_memo_fname, 'rb') as fid:
            fnames, X = pickle.load(fid)
    if normalize:
        X = StandardScaler().fit_transform(X)
    return fnames, X
Example No. 28
def createDataTxt(imagePath, annotationPath, imagesInDir, split=False):
    JPG = '.jpg'
    TRAINING = 'training/'
    VALIDATION = 'validation/'

    if split:
        annotatedImages = os.listdir(annotationPath)
        # np.random.shuffle(annotatedImages)
        splitSize = ceil(len(annotatedImages) * 0.85)

        annotatedImagesTrain = annotatedImages[:splitSize]
        annotatedImagesValidation = annotatedImages[splitSize:]
    else:
        annotatedImagesTrain = os.listdir(join(annotationPath, TRAINING))
        annotatedImagesValidation = os.listdir(join(annotationPath, VALIDATION))

    with open(imagesInDir + 'train.txt', 'w') as file:
        for ann in tqdm(annotatedImagesTrain, desc='Writing train.txt for input dataset'):
            if isfile(join(imagePath, TRAINING, splitext(ann)[0]) + JPG):
                file.write(' '.join(
                    [join(imagePath, TRAINING, splitext(ann)[0]) + JPG,
                     join(annotationPath, TRAINING, ann)]) + '\n')

    with open(imagesInDir + 'val.txt', 'w') as file:
        for annv in tqdm(annotatedImagesValidation, desc='Writing valid.txt for input dataset'):
            if isfile(join(imagePath, VALIDATION, splitext(annv)[0]) + JPG):
                file.write(' '.join(
                    [join(imagePath, VALIDATION, splitext(annv)[0]) + JPG,
                     join(annotationPath, VALIDATION, annv)]) + '\n')

    return
Example No. 29
    def mem_stats(self, map):
        """! Creates parser object
        @param map Path to linker map file to parse and decode
        @return Memory summary structure with memory usage statistics
                None if map file can't be opened and processed
        """
        toolchain = self.__class__.__name__

        # Create memap object
        memap = MemapParser()

        # Parse and decode a map file
        if memap.parse(abspath(map), toolchain) is False:
            self.info("Unknown toolchain for memory statistics %s" % toolchain)
            return None

        # Write output to stdout in text (pretty table) format
        memap.generate_output('table')

        # Write output to file in JSON format
        map_out = splitext(map)[0] + "_map.json"
        memap.generate_output('json', map_out)

        # Write output to file in CSV format for the CI
        map_csv = splitext(map)[0] + "_map.csv"
        memap.generate_output('csv-ci', map_csv)

        # Here we return memory statistics structure (constructed after
        # call to generate_output) which contains raw data in bytes
        # about sections + summary
        return memap.mem_summary
Example No. 30
def b0_average(in_dwi, in_bval, max_b=10.0, out_file=None):
    """
    A function that averages the *b0* volumes from a DWI dataset.
    As current dMRI data are being acquired with all b-values > 0.0,
    the *lowb* volumes are selected by specifying the parameter max_b.

    .. warning:: *b0* should be already registered (head motion artifact should
      be corrected).

    """
    import numpy as np
    import nibabel as nb
    import os.path as op

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_dwi))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath("%s_avg_b0%s" % (fname, ext))

    imgs = np.array(nb.four_to_three(nb.load(in_dwi)))
    bval = np.loadtxt(in_bval)
    index = np.argwhere(bval <= max_b).flatten().tolist()

    b0s = [im.get_data().astype(np.float32)
           for im in imgs[index]]
    b0 = np.average(np.array(b0s), axis=0)

    hdr = imgs[0].get_header().copy()
    hdr.set_data_shape(b0.shape)
    hdr.set_xyzt_units('mm')
    hdr.set_data_dtype(np.float32)
    nb.Nifti1Image(b0, imgs[0].get_affine(), hdr).to_filename(out_file)
    return out_file
Example No. 31
def main():
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('kicad_pcb', help='the `.kicad_pcb` file to DIFF')
    parser.add_argument(
        '-c',
        '--commit',
        default='HEAD',
        help='git commit-id to compare current version against. Default: HEAD')
    parser.add_argument(
        '-l',
        '--layers',
        default=0,
        type=int,
        help='Number of inner layers (InX.Cu) to plot. Default: 0')
    parser.add_argument('-r',
                        '--resolution',
                        default=400,
                        type=int,
                        help='Plotting resolution in [dpi]. Default: 400')
    args = parser.parse_args()

    layers = ['F.Cu', 'B.Cu', 'F.SilkS', 'B.SilkS']
    layers += ['In{}.Cu'.format(i + 1) for i in range(args.layers)]
    print('layers: ' + ' '.join(layers))

    # Check for local (un-commited) changes
    do_stash = call(['git', 'diff-index', '--quiet', 'HEAD', '--']) > 0
    if not do_stash and args.commit == 'HEAD':
        print('No local changes, nothing to compare. Try -c <commit-id>')
        return -1

    # output directory name is derived from `git describe`
    try:
        git1_name = desc()
    except Exception:
        # this will happen if user isn't in a git repo
        print("No git description, can't continue")
        exit(1)

    # Do a .pdf plot of the current version
    dir1 = 'plot_' + git1_name
    print('> ' + dir1)
    bounds1 = plot_layers(args.kicad_pcb, dir1, layers)
    if bounds1 is None:
        exit(1)

    # Stash local changes if needed
    if do_stash:
        co(['git', 'stash'])

    # checkout specified git version (default: HEAD) ...
    if args.commit != 'HEAD':
        co(['git', 'checkout', args.commit])

    # ... and do a .pdf plot of it
    dir2 = 'plot_' + desc()
    print('> ' + dir2)
    bounds2 = plot_layers(args.kicad_pcb, dir2, layers)
    if bounds2 is None:
        exit(1)

    # Switch back to current version
    if args.commit != 'HEAD':
        co(['git', 'checkout', '-'])

    # Restore local changes
    if do_stash:
        co(['git', 'stash', 'pop'])

    # Generate plots into `diffs` directory
    try:
        mkdir('diffs')
    except OSError:
        print('diffs directory already exists')

    # Create a .png diff for each layer
    for ll in layers:
        pdf_name = splitext(args.kicad_pcb)[0]
        pdf_name += '-' + ll.replace('.', '_') + '.pdf'

        out_file = 'diffs/' + ll + '.png'
        print('> ' + out_file)

        i1 = load_pdf(join(dir1, pdf_name), r=args.resolution, **bounds1)
        i2 = load_pdf(join(dir2, pdf_name), r=args.resolution, **bounds1)
        i_out = img_diff(i1, i2)
        i_out.save(out_file)

    print('Removing temporary directories')
    rmtree(dir1)
    rmtree(dir2)
Example No. 32
def csort(
    pairs_path,
    chromosomes_path,
    index,
    chrom1,
    chrom2,
    pos1,
    pos2,
    flip_only,
    nproc,
    zero_based,
    sep,
    comment_char,
    sort_options,
    out,
    **kwargs
):
    """
    Sort and index a contact list.

    Order the mates of each pair record so that all contacts are upper
    triangular with respect to the chromosome ordering given by the chromosomes
    file, sort contacts by genomic location, and index the resulting file.

    PAIRS_PATH : Contacts (i.e. read pairs) text file, optionally compressed.

    CHROMOSOMES_PATH : File listing desired chromosomes in the desired order.
    May be tab-delimited, e.g. a UCSC-style chromsizes file. Contacts mapping to
    other chromosomes will be discarded.

    **Notes**

    \b
    - csort can also be used to sort and index a text representation of
      a contact *matrix* in bedGraph-like format. In this case, substitute
      `pos1` and `pos2` with `start1` and `start2`, respectively.
    - Requires Unix tools: sort, bgzip + tabix or pairix.

    If indexing with Tabix, the output file will have the following properties:

    \b
    - Upper triangular: the read pairs on each row are assigned to side 1 or 2
      in such a way that (chrom1, pos1) is always "less than" (chrom2, pos2)
    - Rows are lexicographically sorted by chrom1, pos1, chrom2, pos2;
      i.e. "positionally sorted"
    - Compressed with bgzip [*]
    - Indexed using Tabix [*] on chrom1 and pos1.

    If indexing with Pairix, the output file will have the following properties:

    \b
    - Upper triangular: the read pairs on each row are assigned to side 1 or 2
      in such a way that (chrom1, pos1) is always "less than" (chrom2, pos2)
    - Rows are lexicographically sorted by chrom1, chrom2, pos1, pos2; i.e.
      "block sorted"
    - Compressed with bgzip [*]
    - Indexed using Pairix [+] on chrom1, chrom2 and pos1.

    \b
    [*] Tabix manpage: <http://www.htslib.org/doc/tabix.html>.
    [+] Pairix on Github: <https://github.com/4dn-dcic/pairix>

    """
    if os.name == "nt":
        raise click.Abort(
            '"cooler csort" does not work on Windows. To ingest unsorted pairs '
            'data, see the "cooler cload pairs" command.'
        )

    from signal import signal, SIGPIPE, SIG_DFL

    signal(SIGPIPE, SIG_DFL)

    # Check for required Unix tools
    for tool in ["sort", "bgzip"] + [index]:
        if not cmd_exists(tool):
            print("Command {} not found".format(tool), file=sys.stderr)
            sys.exit(1)

    # If output path is not given, produce output path by stripping any .txt,
    # .gz or .txt.gz extension from the input path and appending .sorted[.txt].gz
    infile = pairs_path
    if out is None:
        if infile == "-" and not flip_only:
            logger.error("Output name required when input is stdin")
            raise click.Abort
        prefix = infile
        ext = ".gz"
        if prefix.endswith(".gz"):
            prefix = op.splitext(prefix)[0]
        if prefix.endswith(".txt"):
            prefix = op.splitext(prefix)[0]
            ext = ".txt.gz"
        if index == "pairix":
            sort_style = ".blksrt"
        else:
            sort_style = ".possrt"
        outfile = prefix + sort_style + ext
    else:
        outfile = out

    # Parse extra sort options and determine if sort supports --parallel option
    if sort_options is not None:
        sort_options = shlex.split(sort_options)
    elif _has_parallel_sort():
        sort_options = ["--parallel={}".format(nproc), "--buffer-size=50%"]
    else:
        sort_options = []

    # 1-based column numbers
    fields = {"C1": chrom1, "P1": pos1, "C2": chrom2, "P2": pos2}

    # build commands
    read_cmd = make_read_command(infile)
    flip_cmd = make_flip_command(chromosomes_path, sep, comment_char, fields)

    if flip_only:
        # run pipeline
        logger.info("Reordering pair mates...")
        pipeline = []

        logger.debug(" ".join(read_cmd))
        pipeline.append(
            subprocess.Popen(
                read_cmd,
                stdin=sys.stdin if infile == "-" else None,
                stdout=subprocess.PIPE,
            )
        )

        logger.debug(" ".join(flip_cmd))
        pipeline.append(
            subprocess.Popen(flip_cmd, stdin=pipeline[-1].stdout, stdout=sys.stdout)
        )
        for p in pipeline[::-1]:
            p.communicate()
            if p.returncode != 0:
                sys.exit(1)
    else:

        sort_cmd = make_sort_command(index, fields, sort_options)
        write_cmd = ["bgzip", "-c"]
        index_cmd = make_index_command(index, fields, zero_based, outfile)

        # run pipeline
        logger.info("Input: '{}'".format(infile))
        logger.info("Output: '{}'".format(outfile))
        assert infile != outfile

        with open(outfile, "wb") as fout:

            pipeline = []

            logger.debug(" ".join(read_cmd))
            pipeline.append(
                subprocess.Popen(
                    read_cmd,
                    stdin=sys.stdin if infile == "-" else None,
                    stdout=subprocess.PIPE,
                )
            )

            logger.info("Reordering pair mates and sorting pair records...")
            logger.debug(" ".join(flip_cmd))
            pipeline.append(
                subprocess.Popen(
                    flip_cmd, stdin=pipeline[-1].stdout, stdout=subprocess.PIPE
                )
            )

            if index == "pairix":
                logger.info("Sort order: block (chrom1, chrom2, pos1, pos2)")
            else:
                logger.info("Sort order: positional (chrom1, pos1, chrom2, pos2)")
            logger.info(" ".join(sort_cmd))
            pipeline.append(
                subprocess.Popen(
                    sort_cmd, stdin=pipeline[-1].stdout, stdout=subprocess.PIPE
                )
            )

            logger.debug(" ".join(write_cmd))
            pipeline.append(
                subprocess.Popen(write_cmd, stdin=pipeline[-1].stdout, stdout=fout)
            )

            for p in pipeline[::-1]:
                p.communicate()

                if p.returncode != 0:
                    logger.error(" ".join(p.args))
                    sys.exit(1)

        # Create index file
        logger.info("Indexing...")
        logger.info("Indexer: {}".format(index))
        logger.info(" ".join(index_cmd))
        p = subprocess.Popen(index_cmd)
        p.communicate()
        if p.returncode != 0:
            sys.exit(1)
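A sketch of just the default output naming above: strip a trailing .txt/.gz from the input and append the sort-style suffix (input name hypothetical):

import os.path as op

prefix, ext = 'mylib.pairs.txt.gz', '.gz'  # hypothetical input pairs file
if prefix.endswith('.gz'):
    prefix = op.splitext(prefix)[0]
if prefix.endswith('.txt'):
    prefix = op.splitext(prefix)[0]
    ext = '.txt.gz'
print(prefix + '.blksrt' + ext)  # mylib.pairs.blksrt.txt.gz (pairix / block-sorted case)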
Example No. 33
    return np.array([
        x * y * (1 - y**2) * (1 - z**2) + 2 * x * y * (1 - z**2),
        y**2 * (1 - x**2) * (1 - z**2) + (1 - y**2) * (2 - x**2 - z**2),
        y * z * (1 - x**2) * (1 - y**2) + 2 * y * z * (1 - x**2),
    ])


@LinearForm
def fv(v, w):
    from skfem.helpers import dot
    return dot(f(*w.x), v)


A = asm(dudv, basis)
f = asm(fv, basis)

D = basis.find_dofs()

x = solve(*condense(A, f, D=D))

y_basis = InteriorBasis(m, ElementVectorH1(ElementTetP1()))
y = project(x, basis, y_basis)

if __name__ == '__main__':

    from os.path import splitext
    from sys import argv
    name = splitext(argv[0])[0]

    m.save('{}_solution.vtk'.format(name), {'field': y[y_basis.nodal_dofs].T})
Example No. 34
# Always prefer setuptools over distutils
from setuptools import setup, find_packages

from glob import glob
from os.path import basename
from os.path import splitext

setup(
    name='babyzwip',
    version='0.1',
    description='Z-Wave In Python',
    url='https://github.com/magicus/babyzwip',
    author='Magnus Ihse Bursie',
    author_email='*****@*****.**',
    license='GPL3',
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    install_requires=['pyserial'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
Example No. 35
    def __init__(self,
                 input_dim=(3, 32, 32),
                 num_filters=None,
                 filter_size=5,
                 hidden_dims=None,
                 num_classes=10,
                 weight_scale=1e-3,
                 reg=0.1,
                 dtype=np.float32,
                 use_batchnorm=False,
                 dropout=0,
                 seed=None,
                 loadData=None,
                 predict_fn=None,
                 augment_fn=None):
        """
    predic_fn, augment_fn: for data augmentation
    """
        self.params = {}
        self.reg = reg
        self.dtype = dtype

        self.num_filters = len(num_filters)
        self.filter_size = filter_size
        self.bn_batchnorm = use_batchnorm
        self.use_dropout = (dropout > 0)

        ############# the total number of layers including conv layer and affine layer#######
        self.num_layers = self.num_filters + len(hidden_dims) + 1
        #####################################################################################
        #    print "how many layers ? ",self.num_layers

        self.bn_params = []
        self.dropout_param = {}

        self.predict_fn = predict_fn
        self.augment_fn = augment_fn
        if augment_fn is not None:
            input_dim = (3, 28, 28)

        self.input_dim = input_dim
        if loadData is not None:
            print "Load Data is ", loadData
            for f in glob.glob("%s/convNet_params_*.npy" % loadData):
                name_lst = op.splitext(op.basename(f))[0].split("_")
                if len(name_lst) == 3:
                    param = name_lst[2]
                    if param == "dropout":
                        self.dropout_param = self.load_param(f)
                    elif param == "bn":
                        self.bn_params = self.load_param(f)
                    else:
                        self.params[param] = self.load_param(
                            f)  # W_i,b_i,beta_i, gamma_i
                        print self.params[param].shape,
                    print "load parameter %s successfully" % param
            return

        C, H, W = input_dim
        assert filter_size % 2 == 1, 'Filter size must be odd: got %d' % filter_size
        all_filters = np.array([C])
        all_filters = np.concatenate((all_filters, np.array(num_filters)),
                                     axis=0)
        for i in range(self.num_filters):
            t = i + 1
            self.params['W%d' % t] = weight_scale * np.random.randn(
                all_filters[t], all_filters[t - 1], filter_size, filter_size)
            self.params['b%d' % t] = np.zeros(all_filters[t])
            if self.bn_batchnorm is True:
                self.params['gamma%d' % t] = np.random.randn(all_filters[t])
                self.params['beta%d' % t] = np.random.randn(all_filters[t])

        all_hidden_layers = np.array(
            [H * W * all_filters[-1] / np.power(4, self.num_filters)])
        a = np.array(hidden_dims)
        all_hidden_layers = np.concatenate((all_hidden_layers, a), axis=0)
        b = np.array([num_classes])
        all_hidden_layers = np.concatenate((all_hidden_layers, b))
        length = len(all_hidden_layers) - 1
        for i in range(length):
            t = i + self.num_filters + 1
            self.params['W%d' % t] = weight_scale * np.random.randn(
                all_hidden_layers[i], all_hidden_layers[i + 1])
            self.params['b%d' % t] = np.zeros(all_hidden_layers[i + 1])
            if self.bn_batchnorm is True and i < length - 1:
                self.params['gamma%d' % t] = np.random.randn(
                    all_hidden_layers[i + 1])
                self.params['beta%d' % t] = np.random.randn(
                    all_hidden_layers[i + 1])

        if self.bn_batchnorm:
            self.bn_params = [{
                'mode': 'train'
            } for i in xrange(self.num_layers)]

        if self.use_dropout is True:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

        for k, v in self.params.iteritems():
            self.params[k] = v.astype(dtype)
Example No. 36
def __run(name, **kwargs):
    """
    Run an experiment.

    :param name: experiment name
    :param path: expanded path of the experiment (dynamically filled in through 'command' decorator with 'expand')
    :param kwargs: simulation keyword arguments (see the documentation for more information)
    """
    set_logging(kwargs.get('loglevel'))
    path = kwargs['path']
    check_structure(path, remove=True)
    with settings(hide(*HIDDEN_ALL), warn_only=True):
        for sim in ["without", "with"]:
            sim_path = join(path, "{}-malicious".format(sim))
            data, results = join(sim_path, 'data'), join(sim_path, 'results')
            # the Makefile is at experiment's root ('path')
            logger.debug(
                " > Running simulation {} the malicious mote...".format(sim))
            task = kwargs.get('task', "run")
            with lcd(sim_path):
                output = local("make run TASK={}".format(task), capture=True)
            remove_files(sim_path, '.{}'.format(task))
            error, interrupt, error_buffer = False, False, []
            for line in output.split('\n'):
                if line.strip().startswith("FATAL") or line.strip().startswith(
                        "ERROR"):
                    error, interrupt = True, True
                elif line.strip().startswith("INFO"):
                    error = False
                    if len(error_buffer) > 0:
                        logger.error('Cooja error:\n' +
                                     '\n'.join(error_buffer))
                        error_buffer = []
                if error:
                    error_buffer.append(line)
            if interrupt:
                logger.warn(
                    "Cooja failed to execute ; 'run' interrupted (no parsing done)"
                )
                raise Exception("Cooja failed to execute")
            # once the execution is over, gather the screenshots into a single GIF and keep the first and
            #  the last screenshots ; move these to the results folder
            logger.debug(" > Gathering screenshots in an animated GIF...")
            with lcd(data):
                local(
                    'convert -delay 10 -loop 0 network*.png wsn-{}-malicious.gif'
                    .format(sim),
                    capture=True)
            network_images = {
                int(fn.split('.')[0].split('_')[-1]): fn
                for fn in listdir(data) if fn.startswith('network_')
            }
            move_files(data, results, 'wsn-{}-malicious.gif'.format(sim))
            if len(network_images) > 0:
                net_start_old = network_images[min(network_images.keys())]
                net_start, ext = splitext(net_start_old)
                net_start_new = 'wsn-{}-malicious_start{}'.format(sim, ext)
                net_end_old = network_images[max(network_images.keys())]
                net_end, ext = splitext(net_end_old)
                net_end_new = 'wsn-{}-malicious_end{}'.format(sim, ext)
                move_files(data, results, (net_start_old, net_start_new),
                           (net_end_old, net_end_new))
                remove_files(data, *network_images.values())
            # then start the parsing functions to derive more results
            logger.debug(" > Parsing simulation results...")
            parsing_chain(sim_path)
            move_files(sim_path, results, 'COOJA.log')
        # finally, generate the PDF report
        generate_report(path, REPORT_THEME)
    return "Both Cooja executions succeeded"
Example No. 37
def is_python_source(filename):
    """
    rtype: bool
    return: True if the filename is a python source file
    """
    return splitext(filename)[1][1:] in PY_SOURCE_EXTS
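splitext keeps the leading dot on the extension, hence the [1:] slice; for example:

from os.path import splitext

print(splitext('module.py')[1])      # '.py'
print(splitext('module.py')[1][1:])  # 'py' -- the form checked against PY_SOURCE_EXTS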
Example No. 38
 def get_logfile(out_filename):
     "Get logfile name"
     return "%s.log" % splitext(out_filename)[0]
Example No. 39
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('input_dir', help='input annotated directory')
    parser.add_argument('output_dir', help='output dataset directory')
    parser.add_argument('--labels', help='labels file', required=True)
    args = parser.parse_args()

    if osp.exists(args.output_dir):
        print('Output directory already exists:', args.output_dir)
        sys.exit(1)
    os.makedirs(args.output_dir)
    os.makedirs(osp.join(args.output_dir, 'JPEGImages'))
    print('Creating dataset:', args.output_dir)

    now = datetime.datetime.now()

    data = dict(
        info=dict(
            description=None,
            url=None,
            version=None,
            year=now.year,
            contributor=None,
            date_created=now.strftime('%Y-%m-%d %H:%M:%S.%f'),
        ),
        licenses=[dict(
            url=None,
            id=0,
            name=None,
        )],
        images=[
            # license, url, file_name, height, width, date_captured, id
        ],
        type='instances',
        annotations=[
            # segmentation, area, iscrowd, image_id, bbox, category_id, id
        ],
        categories=[
            # supercategory, id, name
        ],
    )

    class_name_to_id = {}
    for i, line in enumerate(open(args.labels).readlines()):
        class_id = i - 1  # starts with -1
        class_name = line.strip()
        if class_id == -1:
            assert class_name == '__ignore__'
            continue
        class_name_to_id[class_name] = class_id
        data['categories'].append(dict(
            supercategory=None,
            id=class_id,
            name=class_name,
        ))

    out_ann_file = osp.join(args.output_dir, 'annotations.json')
    label_files = glob.glob(osp.join(args.input_dir, '*.json'))
    for image_id, label_file in enumerate(label_files):
        print('Generating dataset from:', label_file)
        with open(label_file) as f:
            label_data = json.load(f)

        base = osp.splitext(osp.basename(label_file))[0]
        out_img_file = osp.join(
            args.output_dir, 'JPEGImages', base + '.jpg'
        )

        img_file = osp.join(
            osp.dirname(label_file), label_data['imagePath']
        )
        img = np.asarray(PIL.Image.open(img_file).convert('RGB'))
        PIL.Image.fromarray(img).save(out_img_file)
        data['images'].append(dict(
            license=0,
            url=None,
            file_name=osp.relpath(out_img_file, osp.dirname(out_ann_file)),
            height=img.shape[0],
            width=img.shape[1],
            date_captured=None,
            id=image_id,
        ))

        masks = {}                                     # for area
        segmentations = collections.defaultdict(list)  # for segmentation
        for shape in label_data['shapes']:
            points = shape['points']
            label = shape['label']
            group_id = shape.get('group_id')
            shape_type = shape.get('shape_type')
            mask = labelme.utils.shape_to_mask(
                img.shape[:2], points, shape_type
            )

            if group_id is None:
                group_id = uuid.uuid1()

            instance = (label, group_id)

            if instance in masks:
                masks[instance] = masks[instance] | mask
            else:
                masks[instance] = mask

            points = np.asarray(points).flatten().tolist()
            segmentations[instance].append(points)
        segmentations = dict(segmentations)

        for instance, mask in masks.items():
            cls_name, group_id = instance
            if cls_name not in class_name_to_id:
                continue
            cls_id = class_name_to_id[cls_name]

            mask = np.asfortranarray(mask.astype(np.uint8))
            mask = pycocotools.mask.encode(mask)
            area = float(pycocotools.mask.area(mask))
            bbox = pycocotools.mask.toBbox(mask).flatten().tolist()

            data['annotations'].append(dict(
                id=len(data['annotations']),
                image_id=image_id,
                category_id=cls_id,
                segmentation=segmentations[instance],
                area=area,
                bbox=bbox,
                iscrowd=0,
            ))

    with open(out_ann_file, 'w') as f:
        json.dump(data, f)
Example No. 40
        
    def parts(self):
        return self.parser.sections()

# Read and load all the component libraries

listdir(lenslib)

dirs=[]
#Get the directories. Each directory is a library.
for  i in listdir(lenslib):
    di=join(lenslib,i)
    if isdir(di):
        dirs.append(di)

for di in dirs:
    fnames=listdir(di)
    # Retain only the .cmp files
    libfiles=[]
    for f in fnames:
        n,ext=splitext(f)
        if ext==".cmp":
            libfiles.append(f)
        
    if len(libfiles)>0:
        libname=basename(di)
        filename=[ join(di,fn) for fn in libfiles] 
        print "Loading component library",libname," from files ",filename
        Library(filename,libname=libname)
             
Example No. 41
        if (debug_path != ''):
            debug_subdir_path = join(debug_path, subfolder, dirname)
            if (not isdir(debug_subdir_path)):
                os.makedirs(debug_subdir_path)
        mask_np = scipy.misc.imread(join(dirpath, 'mask.png')) > 127
        no_mask_np = np.logical_not(mask_np)
        dates = []
        occupancies = []
        orbit_ids = []
        orbit_directions = []
        ious = []
        last_occupancy_np = None
        for filename in sorted(os.listdir(im_path)):
            filename_dt = datetime.strptime('_'.join(filename.split('_')[:2]),
                                            "%Y%m%d_%H%M%S")
            fileid = splitext(filename)[0].split('_')

            orbit_direction, orbit_id = common.get_direction_orbit(fileid)
            if (orbit_id is None):
                orbit_id = '?'
            if (orbit_direction is None):
                orbit_direction = '?'

            filepath = join(im_path, filename)
            im_np = tifffile.imread(filepath)
            threshold_np = im_np[:, :, 0] > args.threshold
            threshold_np[no_mask_np] = 0

            if (debug_subdir_path != ''):
                tifffile.imsave(join(debug_subdir_path, filename),
                                threshold_np.astype(float))
Exemplo n.º 42
0
 def get_task_lock_file(out_filename):
     "Get task-lock filename."
     return "%s.X" % splitext(out_filename)[0]
Exemplo n.º 43
0
    def _file2dict(filename, use_predefined_variables=True):
        filename = osp.abspath(osp.expanduser(filename))
        check_file_exist(filename)
        fileExtname = osp.splitext(filename)[1]
        if fileExtname not in ['.py', '.json', '.yaml', '.yml']:
            raise IOError('Only py/yml/yaml/json type are supported now!')

        with tempfile.TemporaryDirectory() as temp_config_dir:
            temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir,
                                                           suffix=fileExtname)
            if platform.system() == 'Windows':
                temp_config_file.close()
            temp_config_name = osp.basename(temp_config_file.name)
            # Substitute predefined variables
            if use_predefined_variables:
                Config._substitute_predefined_vars(filename,
                                                   temp_config_file.name)
            else:
                shutil.copyfile(filename, temp_config_file.name)

            if filename.endswith('.py'):
                temp_module_name = osp.splitext(temp_config_name)[0]
                sys.path.insert(0, temp_config_dir)
                Config._validate_py_syntax(filename)
                mod = import_module(temp_module_name)
                sys.path.pop(0)
                cfg_dict = {
                    name: value
                    for name, value in mod.__dict__.items()
                    if not name.startswith('__')
                }
                # delete imported module
                del sys.modules[temp_module_name]
            elif filename.endswith(('.yml', '.yaml', '.json')):
                import mmcv
                cfg_dict = mmcv.load(temp_config_file.name)
            # close temp file
            temp_config_file.close()

        cfg_text = filename + '\n'
        with open(filename, 'r') as f:
            cfg_text += f.read()

        if BASE_KEY in cfg_dict:
            cfg_dir = osp.dirname(filename)
            base_filename = cfg_dict.pop(BASE_KEY)
            base_filename = base_filename if isinstance(
                base_filename, list) else [base_filename]

            cfg_dict_list = list()
            cfg_text_list = list()
            for f in base_filename:
                _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f))
                cfg_dict_list.append(_cfg_dict)
                cfg_text_list.append(_cfg_text)

            base_cfg_dict = dict()
            for c in cfg_dict_list:
                if len(base_cfg_dict.keys() & c.keys()) > 0:
                    raise KeyError('Duplicate key is not allowed among bases')
                base_cfg_dict.update(c)

            base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
            cfg_dict = base_cfg_dict

            # merge cfg_text
            cfg_text_list.append(cfg_text)
            cfg_text = '\n'.join(cfg_text_list)

        return cfg_dict, cfg_text
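# Hedged illustration (hypothetical file names): with BASE_KEY typically set to '_base_',
# loading the derived config first parses the base file, then merges the derived dict on
# top of it via _merge_a_into_b, so overridden keys win and the rest are inherited.
#
# base_config.py:
#     optimizer = dict(type='SGD', lr=0.02)
#     total_epochs = 12
#
# derived_config.py:
#     _base_ = './base_config.py'
#     total_epochs = 24   # overrides the base value; 'optimizer' is inherited unchanged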
Exemplo n.º 44
0
    def create_resource(self, package_id, **kwargs):
        """Creates a single resource on filestore. You must supply either
        `url`, `filepath`, or `fileobj`.

        Args:
            package_id (str): The filestore package id.
            **kwargs: Keyword arguments that are passed to resource_create.

        Kwargs:
            url (str): New file url (for file link).
            filepath (str): New file path (for file upload).
            fileobj (obj): New file like object (for file upload).
            post (bool): Post data using requests instead of ckanapi.
            name (str): The resource name (defaults to the filename).
            description (str): The resource description.
            hash (str): The resource hash.

        Returns:
            obj: requests.Response object if `post` option is specified,
                ckan resource object otherwise.

        Raises:
            TypeError: If neither `url`, `filepath`, nor `fileobj` are supplied.

        Examples:
            >>> ckan = CKAN(quiet=True)
            >>> ckan.create_resource('pid')
            Traceback (most recent call last):
            TypeError: You must specify either a `url`, `filepath`, or `fileobj`
            >>> ckan.create_resource('pid', url='http://example.com/file')
            Package `pid` was not found.
        """
        if not any(map(kwargs.get, ['url', 'filepath', 'fileobj'])):
            raise TypeError(
                'You must specify either a `url`, `filepath`, or `fileobj`')

        path = filter(None, map(kwargs.get, ['url', 'filepath', 'fileobj']))[0]

        try:
            if 'docs.google.com' in path:
                def_name = path.split('gid=')[1].split('&')[0]
            else:
                def_name = p.basename(path)
        except AttributeError:
            def_name = None
            file_format = 'csv'
        else:
            # copy/pasted from utils... fix later
            if 'format=' in path:
                file_format = path.split('format=')[1]
            else:
                file_format = p.splitext(path)[1].lstrip('.')

        kwargs.setdefault('name', def_name)

        # Will get `ckan.logic.ValidationError` if url isn't set
        kwargs.setdefault('url', 'http://example.com')
        kwargs['format'] = file_format
        resource = {'package_id': package_id}
        message = 'Creating new resource in package %s...' % package_id
        return self._update_filestore(resource, message, **kwargs)
Exemplo n.º 45
0
def convert_timestamp(folder: str, ext: str = ".log") -> None:
    for file in listdir(folder):
        if path.splitext(file)[1] != ext:
            continue
        date_time = datetime.strptime(file, "%Y%m%d-%H%M%S{0}".format(ext))
        rename(path.join(folder, file), path.join(folder, str(date_time.timestamp()) + ext))
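# Hedged usage sketch (folder name hypothetical). A file matching the expected pattern,
# e.g. "20200131-235959.log", is renamed to its POSIX epoch, e.g. "1580511599.0.log";
# the exact value depends on the local timezone because the parsed datetime is naive.
convert_timestamp("/var/log/myapp", ext=".log")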
Exemplo n.º 46
0
     'rider'     : 25,
     'car'       : 26,
     'truck'     : 27,
     'train'     : 31,
     'motorcycle': 32,
     'bicycle'   : 33,
     'bus'       : 28
}
"""
instance_classes = [24, 25, 26, 27, 28, 31, 32, 33]

anns = json.load(open('/home/wangyu/env/mmdetection_train/mmdetection/data/foggy_cityscapes/annotations/instancesonly_filtered_gtFine_val.json'))
image = anns['images']
result = pickle.load(open('gnwm_result.pkl', 'rb'))
convert_result = []
for idx, img in enumerate(result):
    img_name = image[idx]
    name = osp.splitext(osp.split(img_name['file_name'])[-1])[0]
    for cat_idx, cat in enumerate(img) :
        for obj in cat:
            obj = [i.astype(float) for i in obj]
            det = {}
            det['image_id'] = name
            det['category_id'] = instance_classes[cat_idx]
            det['score'] = obj[-1]
            det['bbox'] = obj[0:4]
            convert_result.append(det)

with open('convert_gnwm_result.json', 'w') as f:
    json.dump(convert_result, f)
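# Illustrative (made-up) record produced by the loop above: the image stem is kept as
# image_id and the box is copied verbatim from obj[0:4], i.e. [x1, y1, x2, y2] corners.
# {'image_id': 'frankfurt_000001_005898_leftImg8bit_foggy_beta_0.02',
#  'category_id': 26,   # 'car' in the Cityscapes instance classes
#  'score': 0.87,
#  'bbox': [101.3, 220.7, 340.9, 395.2]}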
Exemplo n.º 47
0
def data_path(url, path=None, force_update=False, update_path=None):
    """Get path to local copy of MEGSIM dataset URL

    This is a low-level function useful for getting a local copy of a
    remote MEGSIM dataset.

    Parameters
    ----------
    url : str
        The dataset to use.
    path : None | str
        Location of where to look for the MEGSIM data storing location.
        If None, the environment variable or config parameter
        MNE_DATASETS_MEGSIM_PATH is used. If it doesn't exist, the
        "mne-python/examples" directory is used. If the MEGSIM dataset
        is not found under the given path (e.g., as
        "mne-python/examples/MEGSIM"), the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_MEGSIM_PATH in mne-python
        config to the given path. If None, the user is prompted.

    Returns
    -------
    path : list of str
        Local paths to the given data files. If URL was a .fif file, this
        will be a list of length 1. If it was a .zip file, it may potentially
        be many files.

    Notes
    -----
    For example, one could do:

        >>> from mne.datasets import megsim
        >>> url = 'http://cobre.mrn.org/megsim/simdata/neuromag/visual/M87174545_vis_sim1A_4mm_30na_neuro_rn.fif'
        >>> megsim.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP

    And this would download the given MEGSIM data file to the 'datasets'
    folder, and prompt the user to save the 'datasets' path to the mne-python
    config, if it isn't there already.

    The MEGSIM dataset is documented in the following publication:
        Aine CJ, Sanfratello L, Ranken D, Best E, MacArthur JA, Wallace T,
        Gilliam K, Donahue CH, Montano R, Bryant JE, Scott A, Stephen JM
        (2012) MEG-SIM: A Web Portal for Testing MEG Analysis Methods using
        Realistic Simulated and Empirical Data. Neuroinform 10:141-158
    """

    if path is None:
        # use an intelligent guess if it's not defined
        def_path = op.realpath(
            op.join(op.dirname(__file__), '..', '..', '..', 'examples'))
        key = 'MNE_DATASETS_MEGSIM_PATH'
        if get_config(key) is None:
            key = 'MNE_DATA'
        path = get_config(key, def_path)

        # use the same for all datasets
        if not op.exists(path) or not os.access(path, os.W_OK):
            try:
                os.mkdir(path)
            except OSError:
                try:
                    logger.info("Checking for megsim data in '~/mne_data'...")
                    path = op.join(op.expanduser("~"), "mne_data")
                    if not op.exists(path):
                        logger.info("Trying to create "
                                    "'~/mne_data' in home directory")
                        os.mkdir(path)
                except OSError:
                    raise OSError(
                        "User does not have write permissions "
                        "at '%s', try giving the path as an argument "
                        "to data_path() where user has write "
                        "permissions, for ex:data_path"
                        "('/home/xyz/me2/')" % (path))

    if not isinstance(path, string_types):
        raise ValueError('path must be a string or None')

    destination = _url_to_local_path(url, op.join(path, 'MEGSIM'))
    destinations = [destination]

    split = op.splitext(destination)
    is_zip = True if split[1].lower() == '.zip' else False
    # Fetch the file
    do_unzip = False
    if not op.isfile(destination) or force_update:
        if op.isfile(destination):
            os.remove(destination)
        if not op.isdir(op.dirname(destination)):
            os.makedirs(op.dirname(destination))
        _fetch_file(url, destination, print_destination=False)
        do_unzip = True

    if is_zip:
        z = zipfile.ZipFile(destination)
        decomp_dir, name = op.split(destination)
        files = z.namelist()
        # decompress if necessary (if download was re-done)
        if do_unzip:
            stdout.write('Decompressing %g files from\n'
                         '"%s" ...' % (len(files), name))
            z.extractall(decomp_dir)
            stdout.write(' [done]\n')
        z.close()
        destinations = [op.join(decomp_dir, f) for f in files]

    # Offer to update the path
    path = op.abspath(path)
    if update_path is None:
        if get_config(key, '') != path:
            update_path = True
            msg = ('Do you want to set the path:\n    %s\nas the default '
                   'MEGSIM dataset path in the mne-python config ([y]/n)? ' %
                   path)
            answer = raw_input(msg)
            if answer.lower() == 'n':
                update_path = False
        else:
            update_path = False
    if update_path is True:
        set_config(key, path)

    return destinations
Exemplo n.º 48
0
def removeFileExtention(str_name):
    return path.splitext(str_name)[0]
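# Note that only the final extension is stripped; a quick sketch with hypothetical names:
# >>> removeFileExtention("scan.nii.gz")
# 'scan.nii'
# >>> removeFileExtention("report.pdf")
# 'report'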
Exemplo n.º 49
0
def plot_spikes(
    in_file,
    in_fft,
    spikes_list,
    cols=3,
    labelfmt="t={0:.3f}s (z={1:d})",
    out_file=None,
):
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    nii = nb.as_closest_canonical(nb.load(in_file))
    fft = nb.load(in_fft).get_fdata()

    data = nii.get_fdata()
    zooms = nii.header.get_zooms()[:2]
    tstep = nii.header.get_zooms()[-1]
    ntpoints = data.shape[-1]

    if len(spikes_list) > cols * 7:
        cols += 1

    nspikes = len(spikes_list)
    rows = 1
    if nspikes > cols:
        rows = math.ceil(nspikes / cols)

    fig = plt.figure(figsize=(7 * cols, 5 * rows))

    for i, (t, z) in enumerate(spikes_list):
        prev = None
        pvft = None
        if t > 0:
            prev = data[..., z, t - 1]
            pvft = fft[..., z, t - 1]

        post = None
        psft = None
        if t < (ntpoints - 1):
            post = data[..., z, t + 1]
            psft = fft[..., z, t + 1]

        ax1 = fig.add_subplot(rows, cols, i + 1)
        divider = make_axes_locatable(ax1)
        ax2 = divider.new_vertical(size="100%", pad=0.1)
        fig.add_axes(ax2)

        plot_slice_tern(
            data[..., z, t],
            prev=prev,
            post=post,
            spacing=zooms,
            ax=ax2,
            label=labelfmt.format(t * tstep, z),
        )

        plot_slice_tern(
            fft[..., z, t],
            prev=pvft,
            post=psft,
            vmin=-5,
            vmax=5,
            cmap=get_parula(),
            ax=ax1,
        )

    plt.tight_layout()
    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == ".gz":
            fname, _ = op.splitext(fname)
        out_file = op.abspath("%s.svg" % fname)

    fig.savefig(out_file, format="svg", dpi=300, bbox_inches="tight")
    return out_file
Exemplo n.º 50
0
 def generate_filename(self, filename):
     # name like: "[uid].[ext]" -> example: "bea536a0-089c-a45b.pdf"
     name = self.generate_uid(
         text_to_append=splitext(basename(filename))[1])
     # final return result like: "profiles/bea536a0/089c/a45b.pdf":
     return os.path.join(self.upload_directory, name.replace('-', '/'))
Exemplo n.º 51
0
def get_options():
    from optparse import OptionParser
    from datetime import datetime

    parser = OptionParser(usage="Usage: %prog [options] <station pickle file>", \
        description="Program to create a KML file for plotting based on the contents of a station pickle file")
    parser.add_option("--keys", action="store", type=str, dest="keys", default="", \
        help="Specify a comma separated list of keys to return. These can be fragments " \
        "of a key to include all keys matching any fragment.")
    parser.add_option("-V","-v","--verb-level", action="store", type="int", dest="verb", default=0, \
        help="Specify verbosity. Default 0: no output. 1: network list. 2: include station list.")
    parser.add_option("-o","--outfile", action="store", type="string", dest="outfile", default="", \
        help="Output kml file")
    parser.add_option("-s","--icon-scale", action="store", type="string", dest="scale", default="1.8", \
        help="Icon Size Scale (default 1.8)")
    parser.add_option("-r","--no-random-colours", action="store_true", dest="randoff", default=False, \
        help="Turn off random colours (default random on)")
    parser.add_option("-T","--Document-title", action="store", type="string", dest="doctitle", default="", \
        help="KML Document Title")
    parser.add_option("-a","--ascii", action="store_false", dest="use_binary", default=True, \
        help="Specify to write ascii Pickle files instead of binary. Ascii are larger file size, " \
        "but more likely to be system independent.")

    # Parse Arguments
    (opts, args) = parser.parse_args()

    # Parse Input Filename
    if len(args) == 0:
        parser.error("Must provide at least 1 input database!")
    for iin in range(0, len(args)):
        if args[iin].find(".pkl") > 0:
            args[iin] = osp.splitext(args[iin])[0]
        elif args[iin][-1] == ".":
            args[iin] = args[iin][0:-1]

    # create output filename
    if len(opts.outfile) == 0:
        outfile = args[0].replace('.pkl', '') + ".kml"
    else:
        outfile = opts.outfile
        if outfile.find('.kml') < 0:
            outfile = outfile + ".kml"
    opts.outfile = outfile

    # set document title
    if len(opts.doctitle) == 0:
        opts.doctitle = "; ".join(args) + ";; Created: " + str(datetime.now())

    # Check input
    errfl = []
    for iin in range(0, len(args)):
        if not osp.exists(args[iin] + ".pkl"):
            errfl.append(args[iin] + ".pkl")
    #
    if len(errfl) > 0:
        parser.error("Input database(s) " + ", ".join(errfl) + " do not exist")

    # Construct keys
    if len(opts.keys) > 0:
        opts.keys = opts.keys.split(',')

    # Return extension
    for iin in range(0, len(args)):
        args[iin] = args[iin] + ".pkl"

    # return options
    return opts, args
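# Hedged command-line sketch (script and pickle names are hypothetical):
#     python stations_to_kml.py -T "Regional stations" -o regional.kml stations_db.pkl
# which yields opts.outfile == "regional.kml" and args == ["stations_db.pkl"].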
Exemplo n.º 52
0
def plot_mosaic(
    img,
    out_file=None,
    ncols=8,
    title=None,
    overlay_mask=None,
    bbox_mask_file=None,
    only_plot_noise=False,
    annotate=True,
    vmin=None,
    vmax=None,
    cmap="Greys_r",
    plot_sagittal=True,
    fig=None,
    zmax=128,
):

    if isinstance(img, (str, bytes)):
        nii = nb.as_closest_canonical(nb.load(img))
        img_data = nii.get_fdata()
        zooms = nii.header.get_zooms()
    else:
        img_data = img
        zooms = [1.0, 1.0, 1.0]
        out_file = "mosaic.svg"

    # Remove extra dimensions
    img_data = np.squeeze(img_data)

    if img_data.shape[2] > zmax and bbox_mask_file is None:
        lowthres = np.percentile(img_data, 5)
        mask_file = np.ones_like(img_data)
        mask_file[img_data <= lowthres] = 0
        img_data = _bbox(img_data, mask_file)

    if bbox_mask_file is not None:
        bbox_data = nb.as_closest_canonical(
            nb.load(bbox_mask_file)).get_fdata()
        img_data = _bbox(img_data, bbox_data)

    z_vals = np.array(list(range(0, img_data.shape[2])))

    # Reduce the number of slices shown
    if len(z_vals) > zmax:
        rem = 15
        # Crop inferior and posterior
        if not bbox_mask_file:
            # img_data = img_data[..., rem:-rem]
            z_vals = z_vals[rem:-rem]
        else:
            # img_data = img_data[..., 2 * rem:]
            start_index = 2 * rem
            z_vals = z_vals[start_index:]

    while len(z_vals) > zmax:
        # Discard one every two slices
        # img_data = img_data[..., ::2]
        z_vals = z_vals[::2]

    n_images = len(z_vals)
    nrows = math.ceil(n_images / ncols)
    if plot_sagittal:
        nrows += 1

    if overlay_mask:
        overlay_data = nb.as_closest_canonical(
            nb.load(overlay_mask)).get_fdata()

    # create figures
    if fig is None:
        fig = plt.figure(figsize=(22, nrows * 3))

    est_vmin, est_vmax = _get_limits(img_data, only_plot_noise=only_plot_noise)
    if not vmin:
        vmin = est_vmin
    if not vmax:
        vmax = est_vmax

    naxis = 1
    for z_val in z_vals:
        ax = fig.add_subplot(nrows, ncols, naxis)

        if overlay_mask:
            ax.set_rasterized(True)
        plot_slice(
            img_data[:, :, z_val],
            vmin=vmin,
            vmax=vmax,
            cmap=cmap,
            ax=ax,
            spacing=zooms[:2],
            label="%d" % z_val,
            annotate=annotate,
        )

        if overlay_mask:
            from matplotlib import cm

            msk_cmap = cm.Reds  # @UndefinedVariable
            msk_cmap._init()
            alphas = np.linspace(0, 0.75, msk_cmap.N + 3)
            msk_cmap._lut[:, -1] = alphas
            plot_slice(
                overlay_data[:, :, z_val],
                vmin=0,
                vmax=1,
                cmap=msk_cmap,
                ax=ax,
                spacing=zooms[:2],
            )
        naxis += 1

    if plot_sagittal:
        naxis = ncols * (nrows - 1) + 1

        step = int(img_data.shape[0] / (ncols + 1))
        start = step
        stop = img_data.shape[0] - step

        if step == 0:
            step = 1

        for x_val in list(range(start, stop, step))[:ncols]:
            ax = fig.add_subplot(nrows, ncols, naxis)

            plot_slice(
                img_data[x_val, ...],
                vmin=vmin,
                vmax=vmax,
                cmap=cmap,
                ax=ax,
                label="%d" % x_val,
                spacing=[zooms[0], zooms[2]],
            )
            naxis += 1

    fig.subplots_adjust(left=0.05,
                        right=0.95,
                        bottom=0.05,
                        top=0.95,
                        wspace=0.05,
                        hspace=0.05)

    if title:
        fig.suptitle(title, fontsize="10")
    fig.subplots_adjust(wspace=0.002, hspace=0.002)

    if out_file is None:
        fname, ext = op.splitext(op.basename(img))
        if ext == ".gz":
            fname, _ = op.splitext(fname)
        out_file = op.abspath(fname + "_mosaic.svg")

    fig.savefig(out_file, format="svg", dpi=300, bbox_inches="tight")
    return out_file
Exemplo n.º 53
0
def is_supported_files(filename):
    supported = ".png .jpg".split()

    name, ext = splitext(filename)
    return ext in supported
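# As written, the check is case-sensitive (".PNG" would be rejected). A hypothetical
# case-insensitive variant of the same idea:
from os.path import splitext

def is_supported_file_ci(filename):
    supported = {".png", ".jpg"}
    return splitext(filename)[1].lower() in supported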
Exemplo n.º 54
0
 def _save_thumbnails(self, thumbnail_objects):
     raw_path, raw_filename = self._get_image_path_and_name()
     path = join(raw_path, settings.THUMBNAILS_NAME)
     filename, _ = splitext(raw_filename)
     save_thumbnail_to_path = self._init_save_thumbnail(filename, path)
     return filter(lambda thumbnail_object: save_thumbnail_to_path(thumbnail_object), thumbnail_objects)
Exemplo n.º 55
0
                    meta['fel_shutter'],  # uint8
                    meta['laser_shutter'],  # uint8
                    0,  # uint8
                    meta['fel_intensity'],  # float64
                    meta['delay_motor'],  # float64
                    0,  # float64
                    0,  # float64
                    hits.get('nhits', 0)))  # uint32
            for hit in hits.get('hits', ()):
                write(pack2(hit['t'], hit['x'], hit['y']))
    print("Done!")


while True:  # run conversion in infinite loop
    hits = {
        splitext(basename(fn))[0]: getmtime(fn)
        for fn in iglob(hit_filename("*"))
    }
    bins = {
        splitext(basename(fn))[0]: getctime(fn)
        for fn in iglob(bin_filename("*"))
    }
    jobs = sorted(fn for fn, t in hits.items()
                  if fn not in bins or bins[fn] < t)
    print("Jobs: {}".format(', '.join(jobs)))
    if len(jobs) == 0:
        print("Nothing to do!")
    else:
        for fn in jobs:
            print('Converting file {}...'.format(fn))
            try:
Exemplo n.º 56
0
domainName = getenv("domainEnv")
username = getenv("uname")
password = getenv("pword")
wlUrl = getenv("url")
appLocation = getenv("appLoc")

filenames = []
lstWOExt = []

for files in listdir(appLocation):
    if files.endswith('.war'):
        filenames.append(files)
print(filenames)

for filext in filenames:
    lstWOExt.append(splitext(filext)[0])
print(lstWOExt)

if not listdir(appLocation):
    print('War file directory is empty. Nothing to deploy...')
else:
    print('There are war files in this directory')
    for file in lstWOExt:
        deploymentFile = '' + appLocation + '\\' + file + '.war'
        try:
            print('Trying to connect with ' + domainName + '')
            connect(username, password, wlUrl)
            print('Successfully connected with ' + domainName + '')
        except:
            print('Couldn\'t successfully connect with ' + domainName + '...')
Exemplo n.º 57
0
def from_reads(reads_folders_list_path,
               output_path,
               kmer_size,
               abundance_min,
               filter_singleton,
               phenotype_name,
               phenotype_metadata_path,
               gzip,
               temp_dir,
               nb_cores,
               verbose,
               progress,
               warning_callback=None,
               error_callback=None):
    supported_extensions = ['.fastq', '.fastq.gz']
    compression = "gzip" if gzip > 0 else None
    compression_opts = gzip if gzip > 0 else None

    # Execution callback functions
    if warning_callback is None:
        warning_callback = lambda w: logging.warning(w)
    if error_callback is None:

        def normal_raise(exception):
            raise exception

        error_callback = normal_raise

    # Make sure that the tmp data is unique to the current process
    temp_dir = join(temp_dir, str(getpid()))
    if not exists(temp_dir):
        mkdir(temp_dir)

    if (phenotype_name is None and phenotype_metadata_path is not None) or (
            phenotype_name is not None and phenotype_metadata_path is None):
        error_callback(
            ValueError(
                "If a phenotype is specified, it must have a name and a metadata file."
            ))

    # Find the read folder for each genome and verify that it exists
    reads_folder_by_genome_id = dict(
        l.split() for l in open(reads_folders_list_path, "r"))
    for g_id, read_dir in reads_folder_by_genome_id.iteritems():
        if not exists(read_dir):
            error_callback(
                IOError(
                    "The read directory for genome %s cannot be found: %s" %
                    (str(g_id), read_dir)))

    logging.debug("The k-mer matrix contains %d genomes." %
                  len(reads_folder_by_genome_id))
    if len(set(reads_folder_by_genome_id.keys())) < len(
            reads_folder_by_genome_id.keys()):
        error_callback(
            Exception(
                "The genomic data contains genomes with the same identifier."))

    h5py_file = _create_hdf5_file_no_chunk_caching(output_path)
    h5py_file.attrs["created"] = time()
    h5py_file.attrs["uuid"] = str(uuid1())
    h5py_file.attrs["genome_source_type"] = "reads"
    h5py_file.attrs["genomic_data"] = reads_folders_list_path
    h5py_file.attrs[
        "phenotype_name"] = phenotype_name if phenotype_name is not None else "NA"
    h5py_file.attrs[
        "phenotype_metadata_source"] = phenotype_metadata_path if phenotype_metadata_path is not None else "NA"
    h5py_file.attrs["filter"] = filter_singleton
    h5py_file.attrs["compression"] = "gzip (level %d)" % gzip

    # Extract/write the metadata
    if phenotype_name is not None:
        genome_ids, labels = _parse_metadata(phenotype_metadata_path,
                                             reads_folder_by_genome_id.keys(),
                                             warning_callback, error_callback)
        # Sort the genomes by label for better performance
        logging.debug(
            "Sorting genomes by metadata label for optimal performance.")
        sorter = np.argsort(labels)
        genome_ids = genome_ids[sorter]
        labels = labels[sorter]
        logging.debug("Creating the phenotype metadata dataset.")
        phenotype = h5py_file.create_dataset("phenotype",
                                             data=labels,
                                             dtype=PHENOTYPE_LABEL_DTYPE)
        phenotype.attrs["name"] = phenotype_name
        del phenotype, labels

    # Write genome ids
    logging.debug("Creating the genome identifier dataset.")
    h5py_file.create_dataset("genome_identifiers",
                             data=genome_ids,
                             compression=compression,
                             compression_opts=compression_opts)
    h5py_file.close()

    logging.debug("Initializing DSK.")

    # Preparing input file for multidsk
    files_sorted = []
    list_reads_dsk_output = []
    for id in genome_ids:
        files = [
            join(reads_folder_by_genome_id[id], file)
            for file in listdir(reads_folder_by_genome_id[id])
            if file.endswith(tuple(supported_extensions))
        ]  # Supported extensions
        files_sorted.append(",".join(files) + "\n")
        list_reads_dsk_output.append(
            join(temp_dir,
                 basename(splitext(files[-1])[0]) + ".h5"))
    open(join(temp_dir, "list_reads_files"), "w").writelines(files_sorted)

    # Calling multidsk
    reads_count_kmers(file_path=join(temp_dir, "list_reads_files"),
                      out_dir=temp_dir,
                      kmer_size=kmer_size,
                      abundance_min=abundance_min,
                      out_compress=gzip,
                      nb_cores=nb_cores,
                      verbose=int(verbose),
                      progress=progress)
    logging.debug("K-mers counting completed.")

    # Preparing input file for dsk2kover
    file_dsk_output = open(join(temp_dir, "list_h5"), "w")
    for line in list_reads_dsk_output:
        file_dsk_output.write(line + "\n")
    file_dsk_output.close()

    # Calling dsk2kover
    logging.debug("Initializing DSK2Kover.")
    reads_pack_kmers(file_path=join(temp_dir, "list_h5"),
                     out_path=output_path,
                     filter_singleton=filter_singleton,
                     kmer_length=kmer_size,
                     compression=gzip,
                     chunk_size=BLOCK_SIZE,
                     nb_genomes=len(genome_ids),
                     progress=progress)

    logging.debug("Removing temporary files.")
    rmtree(temp_dir)

    # progress_callback("dsk2kover", 1)
    logging.debug("Dataset creation completed.")
Exemplo n.º 58
0
def iter_files_distros(path=None, repeated_distro='first'):
    if path is None:
        path = sys.path

    # Distributions found earlier in path will shadow those with the same name
    # found later. If these distributions used different module names, it may
    # actually be possible to import both, but in most cases this shadowing
    # will be correct.
    distro_names_seen = set()

    for folder in path:
        if folder.rstrip('/\\').endswith('.egg'):
            # Gah, eggs
            egg_name = osp.basename(folder)
            distro = Distribution.from_name_version(egg_name.split(".egg")[0])

            if (repeated_distro == 'first') \
                    and (distro.name in distro_names_seen):
                continue
            distro_names_seen.add(distro.name)

            if osp.isdir(folder):
                ep_path = osp.join(folder, 'EGG-INFO', 'entry_points.txt')
                if osp.isfile(ep_path):
                    cp = CaseSensitiveConfigParser(delimiters=('=', ))
                    cp.read([ep_path])
                    yield cp, distro

            elif zipfile.is_zipfile(folder):
                z = zipfile.ZipFile(folder)
                try:
                    info = z.getinfo('EGG-INFO/entry_points.txt')
                except KeyError:
                    continue
                cp = CaseSensitiveConfigParser(delimiters=('=', ))
                with z.open(info) as f:
                    fu = io.TextIOWrapper(f)
                    cp.read_file(fu,
                                 source=osp.join(folder, 'EGG-INFO',
                                                 'entry_points.txt'))
                yield cp, distro

        # zip imports, not egg
        elif zipfile.is_zipfile(folder):
            with zipfile.ZipFile(folder) as zf:
                for info in zf.infolist():
                    m = file_in_zip_pattern.match(info.filename)
                    if not m:
                        continue

                    distro_name_version = m.group('dist_version')
                    distro = Distribution.from_name_version(
                        distro_name_version)

                    if (repeated_distro == 'first') \
                            and (distro.name in distro_names_seen):
                        continue
                    distro_names_seen.add(distro.name)

                    cp = CaseSensitiveConfigParser(delimiters=('=', ))
                    with zf.open(info) as f:
                        fu = io.TextIOWrapper(f)
                        cp.read_file(fu,
                                     source=osp.join(folder, info.filename))
                    yield cp, distro

        # Regular file imports (not egg, not zip file)
        for path in itertools.chain(
                glob.iglob(
                    osp.join(glob.escape(folder), '*.dist-info',
                             'entry_points.txt')),
                glob.iglob(
                    osp.join(glob.escape(folder), '*.egg-info',
                             'entry_points.txt'))):
            distro_name_version = osp.splitext(osp.basename(
                osp.dirname(path)))[0]
            distro = Distribution.from_name_version(distro_name_version)

            if (repeated_distro == 'first') \
                    and (distro.name in distro_names_seen):
                continue
            distro_names_seen.add(distro.name)

            cp = CaseSensitiveConfigParser(delimiters=('=', ))
            cp.read([path])
            yield cp, distro
Exemplo n.º 59
0
def read_montage(kind, ch_names=None, path=None, unit='m', transform=False):
    """Read a generic (built-in) montage from a file

    This function can be used to read electrode positions from a user specified
    file using the `kind` and `path` parameters. Alternatively, use only the
    `kind` parameter to load one of the built-in montages:

    ===================   =====================================================
    Kind                  description
    ===================   =====================================================
    standard_1005         Electrodes are named and positioned according to the
                          international 10-05 system.
    standard_1020         Electrodes are named and positioned according to the
                          international 10-20 system.
    standard_alphabetic   Electrodes are named with LETTER-NUMBER combinations
                          (A1, B2, F4, etc.)
    standard_postfixed    Electrodes are named according to the international
                          10-20 system using postfixes for intermediate
                          positions.
    standard_prefixed     Electrodes are named according to the international
                          10-20 system using prefixes for intermediate
                          positions.
    standard_primed       Electrodes are named according to the international
                          10-20 system using prime marks (' and '') for
                          intermediate positions.

    biosemi16             BioSemi cap with 16 electrodes
    biosemi32             BioSemi cap with 32 electrodes
    biosemi64             BioSemi cap with 64 electrodes
    biosemi128            BioSemi cap with 128 electrodes
    biosemi160            BioSemi cap with 160 electrodes
    biosemi256            BioSemi cap with 256 electrodes

    easycap-M10           Brainproducts EasyCap with electrodes named
                          according to the 10-05 system
    easycap-M1            Brainproduct EasyCap with numbered electrodes

    EGI_256               Geodesic Sensor Net with 256 channels

    GSN-HydroCel-32       HydroCel Geodesic Sensor Net with 32 electrodes
    GSN-HydroCel-64_1.0   HydroCel Geodesic Sensor Net with 64 electrodes
    GSN-HydroCel-65_1.0   HydroCel Geodesic Sensor Net with 64 electrodes + Cz
    GSN-HydroCel-128      HydroCel Geodesic Sensor Net with 128 electrodes
    GSN-HydroCel-129      HydroCel Geodesic Sensor Net with 128 electrodes + Cz
    GSN-HydroCel-256      HydroCel Geodesic Sensor Net with 256 electrodes
    GSN-HydroCel-257      HydroCel Geodesic Sensor Net with 256 electrodes + Cz
    ===================   =====================================================

    Parameters
    ----------
    kind : str
        The name of the montage file (e.g. kind='easycap-M10' for
        'easycap-M10.txt'). Files with extensions '.elc', '.txt', '.csd',
        '.elp', '.hpts', '.sfp' or '.loc' ('.locs' and '.eloc') are supported.
    ch_names : list of str | None
        If not all electrodes defined in the montage are present in the EEG
        data, use this parameter to select subset of electrode positions to
        load. If None (default), all defined electrode positions are returned.
    path : str | None
        The path of the folder containing the montage file. Defaults to the
        mne/channels/data/montages folder in your mne-python installation.
    unit : 'm' | 'cm' | 'mm'
        Unit of the input file. If not 'm' (default), coordinates will be
        rescaled to 'm'.
    transform : bool
        If True, points will be transformed to Neuromag space.
        The fiducials, 'nasion', 'lpa', 'rpa' must be specified in
        the montage file. Useful for points captured using Polhemus FastSCAN.
        Default is False.

    Returns
    -------
    montage : instance of Montage
        The montage.

    See Also
    --------
    read_dig_montage : To read subject-specific digitization information.

    Notes
    -----
    Built-in montages are not scaled or transformed by default.

    Montages can contain fiducial points in addition to electrode
    locations, e.g. ``biosemi-64`` contains 67 total channels.

    .. versionadded:: 0.9.0
    """

    if path is None:
        path = op.join(op.dirname(__file__), 'data', 'montages')
    if not op.isabs(kind):
        supported = ('.elc', '.txt', '.csd', '.sfp', '.elp', '.hpts', '.loc',
                     '.locs', '.eloc')
        montages = [op.splitext(f) for f in os.listdir(path)]
        montages = [m for m in montages if m[1] in supported and kind == m[0]]
        if len(montages) != 1:
            raise ValueError('Could not find the montage. Please provide the '
                             'full path.')
        kind, ext = montages[0]
        fname = op.join(path, kind + ext)
    else:
        kind, ext = op.splitext(kind)
        fname = op.join(path, kind + ext)

    if ext == '.sfp':
        # EGI geodesic
        with open(fname, 'r') as f:
            lines = f.read().replace('\t', ' ').split("\n")

        ch_names_, pos = [], []
        for ii, line in enumerate(lines):
            line = line.strip().split()
            if len(line) > 0:  # skip empty lines
                if len(line) != 4:  # name, x, y, z
                    raise ValueError("Malformed .sfp file in line " + str(ii))
                this_name, x, y, z = line
                ch_names_.append(this_name)
                pos.append([float(cord) for cord in (x, y, z)])
        pos = np.asarray(pos)
    elif ext == '.elc':
        # 10-5 system
        ch_names_ = []
        pos = []
        with open(fname) as fid:
            for line in fid:
                if 'Positions\n' in line:
                    break
            pos = []
            for line in fid:
                if 'Labels\n' in line:
                    break
                pos.append(list(map(float, line.split())))
            for line in fid:
                if not line or not set(line) - set([' ']):
                    break
                ch_names_.append(line.strip(' ').strip('\n'))
        pos = np.array(pos)
    elif ext == '.txt':
        # easycap
        try:  # newer version
            data = np.genfromtxt(fname, dtype='str', skip_header=1)
        except TypeError:
            data = np.genfromtxt(fname, dtype='str', skiprows=1)
        ch_names_ = list(data[:, 0])
        theta, phi = data[:, 1].astype(float), data[:, 2].astype(float)
        x = 85. * np.cos(np.deg2rad(phi)) * np.sin(np.deg2rad(theta))
        y = 85. * np.sin(np.deg2rad(theta)) * np.sin(np.deg2rad(phi))
        z = 85. * np.cos(np.deg2rad(theta))
        pos = np.c_[x, y, z]
    elif ext == '.csd':
        # CSD toolbox
        dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                 ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                 ('off_sph', 'f8')]
        try:  # newer version
            table = np.loadtxt(fname, skip_header=2, dtype=dtype)
        except TypeError:
            table = np.loadtxt(fname, skiprows=2, dtype=dtype)
        ch_names_ = table['label']
        theta = (2 * np.pi * table['theta']) / 360.
        phi = (2 * np.pi * table['phi']) / 360.
        pos = _sphere_to_cartesian(theta, phi, r=1.0)
        pos = np.asarray(pos).T
    elif ext == '.elp':
        # standard BESA spherical
        dtype = np.dtype('S8, S8, f8, f8, f8')
        try:
            data = np.loadtxt(fname, dtype=dtype, skip_header=1)
        except TypeError:
            data = np.loadtxt(fname, dtype=dtype, skiprows=1)

        az = data['f2']
        horiz = data['f3']

        radius = np.abs(az / 180.)
        angles = np.array(
            [90. - h if a >= 0. else -90. - h for h, a in zip(horiz, az)])

        sph_phi = (0.5 - radius) * 180.
        sph_theta = angles

        azimuth = sph_theta / 180.0 * np.pi
        elevation = sph_phi / 180.0 * np.pi
        r = 85.

        y, x, z = _sphere_to_cartesian(azimuth, elevation, r)

        pos = np.c_[x, y, z]
        ch_names_ = data['f1'].astype(np.str)
    elif ext == '.hpts':
        # MNE-C specified format for generic digitizer data
        dtype = [('type', 'S8'), ('name', 'S8'), ('x', 'f8'), ('y', 'f8'),
                 ('z', 'f8')]
        data = np.loadtxt(fname, dtype=dtype)
        pos = np.vstack((data['x'], data['y'], data['z'])).T
        ch_names_ = data['name'].astype(np.str)
    elif ext in ('.loc', '.locs', '.eloc'):
        ch_names_ = np.loadtxt(fname, dtype='S4',
                               usecols=[3]).astype(np.str).tolist()
        dtype = {'names': ('angle', 'radius'), 'formats': ('f4', 'f4')}
        angle, radius = np.loadtxt(fname,
                                   dtype=dtype,
                                   usecols=[1, 2],
                                   unpack=True)

        sph_phi, sph_theta = _topo_to_sphere(angle, radius)

        azimuth = sph_theta / 180.0 * np.pi
        elevation = sph_phi / 180.0 * np.pi
        r = np.ones((len(ch_names_), ))

        x, y, z = _sphere_to_cartesian(azimuth, elevation, r)
        pos = np.c_[-y, x, z]
    else:
        raise ValueError('Currently the "%s" template is not supported.' %
                         kind)
    selection = np.arange(len(pos))

    if unit == 'mm':
        pos /= 1e3
    elif unit == 'cm':
        pos /= 1e2
    elif unit != 'm':
        raise ValueError("'unit' should be either 'm', 'cm', or 'mm'.")
    if transform:
        names_lower = [name.lower() for name in list(ch_names_)]
        if ext == '.hpts':
            fids = ('2', '1', '3')  # Alternate cardinal point names
        else:
            fids = ('nz', 'lpa', 'rpa')

        missing = [name for name in fids if name not in names_lower]
        if missing:
            raise ValueError("The points %s are missing, but are needed "
                             "to transform the points to the MNE coordinate "
                             "system. Either add the points, or read the "
                             "montage with transform=False. " % missing)
        nasion = pos[names_lower.index(fids[0])]
        lpa = pos[names_lower.index(fids[1])]
        rpa = pos[names_lower.index(fids[2])]

        neuromag_trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
        pos = apply_trans(neuromag_trans, pos)

    if ch_names is not None:
        sel, ch_names_ = zip(*[(i, e) for i, e in enumerate(ch_names_)
                               if e in ch_names])
        sel = list(sel)
        pos = pos[sel]
        selection = selection[sel]
    else:
        ch_names_ = list(ch_names_)
    kind = op.split(kind)[-1]
    return Montage(pos=pos, ch_names=ch_names_, kind=kind, selection=selection)
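# Minimal usage sketch for the older MNE API shown above (the montage name must be one of
# the built-ins listed in the docstring):
montage = read_montage('standard_1020')
print(len(montage.ch_names), montage.pos.shape)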
Exemplo n.º 60
0
def post_process_txt_labels(txtdir,
                            wavdir=None,
                            output_folder=None,
                            output_tsv=None,
                            min_dur_event=0.250,
                            min_dur_inter=0.150,
                            background_label=False,
                            rm_nOn_nOff=True):
    """ clean the .txt files of each file. It is the same processing as the real data
    - overlapping events of the same class are mixed
    - if silence < 150ms between two conscutive events of the same class, they are mixed
    - if event < 250ms, the event lasts 250ms

    Args:
        txtdir: str, directory path where the XXX.txt files are.
        wavdir: str, directory path where the associated XXX.wav audio files are (associated with .txt files)
        output_folder: str, optional, folder in which to put the checked files
        output_tsv: str, optional, tsv with all the annotations concatenated
        min_dur_event: float, optional in sec, minimum duration of an event
        min_dur_inter: float, optional in sec, minimum duration between 2 events
        background_label: bool, whether to include the background label in the annotations.
        rm_nOn_nOff: bool, whether to delete the additional _nOn _nOff at the end of labels.

    Returns:
        None
    """
    logger = create_logger(__name__ + "/" +
                           inspect.currentframe().f_code.co_name)
    if wavdir is None:
        wavdir = txtdir
    fix_count = 0
    logger.info(
        "Correcting annotations ...\n"
        "* annotations with negative duration will be removed\n"
        "* annotations with duration <250ms will be extended on the offset side"
    )

    if output_folder is not None:
        create_folder(output_folder)

    df_single = pd.DataFrame()  # only useful if output_tsv is defined

    if background_label:
        list_files = glob.glob(osp.join(txtdir, "*.jams"))
    else:
        list_files = glob.glob(osp.join(txtdir, "*.txt"))
        if len(list_files) == 0:
            list_files = glob.glob(osp.join(txtdir, '*.jams'))

    out_extension = '.txt'
    for fn in list_files:
        logger.debug(fn)
        df, length_sec = get_data(
            fn,
            osp.join(wavdir,
                     osp.splitext(osp.basename(fn))[0] + '.wav'),
            background_label=background_label)

        df, fc = _post_process_labels_file(df, length_sec, min_dur_event,
                                           min_dur_inter, rm_nOn_nOff)
        fix_count += fc

        if output_folder is not None:
            filepath = os.path.splitext(
                os.path.basename(fn))[0] + out_extension
            df[['onset', 'offset',
                'event_label']].to_csv(osp.join(output_folder, filepath),
                                       header=False,
                                       index=False,
                                       sep="\t")
        if output_tsv is not None:
            df['filename'] = osp.join(
                osp.splitext(osp.basename(fn))[0] + '.wav')
            df_single = df_single.append(
                df[['filename', 'onset', 'offset', 'event_label']],
                ignore_index=True)

    if output_tsv:
        save_tsv(df_single, output_tsv)

    logger.info(f"{fix_count} problems Fixed")