Example #1
def grays_to_rand_rgb(source, output):

    ftype = io.dataFileNameToType(source)

    if ftype == 'TIF':

        log.info(f'Generating random RGB image for {ftype}')
        data = io.readData(source).astype(int)
        max_label = int(np.max(data))
        # create lut
        lut_r = np.array(random.sample(list(range(0, max_label)), max_label))
        lut_g = np.array(random.sample(list(range(0, max_label)), max_label))
        lut_b = np.array(random.sample(list(range(0, max_label)), max_label))
        # downsample to 8bit
        lut_r = ((255 / max_label) * lut_r).astype(int)
        lut_g = ((255 / max_label) * lut_g).astype(int)
        lut_b = ((255 / max_label) * lut_b).astype(int)
        # add 0 value
        lut_r = np.insert(lut_r, 0, 0)
        lut_g = np.insert(lut_g, 0, 0)
        lut_b = np.insert(lut_b, 0, 0)

        rgb_output = np.zeros(data.shape + (3, ), dtype='uint8')
        rgb_output[..., 0] = lut_r[data]
        rgb_output[..., 1] = lut_g[data]
        rgb_output[..., 2] = lut_b[data]

        io.writeData(output, rgb_output, rgb=True)

    else:
        raise RuntimeError(
            f'Conversion to random RGB not supported for {ftype}')

    return output
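
The LUT trick above generalizes: build a per-label random color table and index it with the label image itself. A self-contained toy sketch of the same idea in plain numpy (illustrative only, not part of the source):

import numpy as np

labels = np.array([[0, 1], [2, 1]])          # toy label image; 0 = background
max_label = labels.max()
rng = np.random.default_rng(0)
lut = rng.integers(0, 256, size=(max_label + 1, 3), dtype=np.uint8)
lut[0] = 0                                   # background stays black
rgb = lut[labels]                            # shape (2, 2, 3)
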
Example #2
    def _generate_output(self):

        mask_f = self.temp_dir / 'mask.tif'
        erode_f = self.temp_dir / 'erode.tif'

        self.log.info('preparing temp files')
        tif.tifffile.memmap(mask_f,
                            dtype=np.uint8,
                            shape=self.input.T.shape,
                            bigtiff=True)
        tif.tifffile.memmap(erode_f,
                            dtype=np.uint8,
                            shape=self.input.T.shape,
                            bigtiff=True)

        mask = io.readData(mask_f)
        erode_im = io.readData(erode_f)

        # extract brain mask
        z_idxs = list(range(mask.shape[-1]))
        z_chunks = [z_idxs[i::self.processes] for i in range(self.processes)]
        args = [(self.input.filename, mask.filename, chunk, self.threshold,
                 self.min_size) for chunk in z_chunks]
        if self.processes == 1:
            _parallel_mask(*args)
        else:
            pool = Pool(self.processes)
            pool.map(_parallel_mask, args)
            pool.close()

        erode(mask.filename,
              erode_im.filename,
              self.offset_z,
              self.offset_x,
              processes=self.processes)

        # merge erosions and mask
        args = [(mask.filename, erode_im.filename, z_idxs)
                for z_idxs in z_chunks]
        if self.processes == 1:
            _parallel_merge_mask(*args)
        else:
            pool = Pool(self.processes)
            pool.map(_parallel_merge_mask, args)
            pool.close()

        if self.save_mask:
            self.log.info(f'saving mask to {self.save_mask}')
            io.writeData(self.save_mask, mask)

        # mask input
        # not working
        self.log.info('masking image')
        for z in range(self.input.shape[2]):
            im = self.input[:, :, z]
            im[mask[:, :, z] == 0] = 0
            self.input[:, :, z] = im

        return self.input
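
The z_idxs[i::self.processes] expression above deals z-slices round-robin across workers so each chunk comes out a similar size. A standalone sketch of that chunking pattern (toy values, not from the source):

processes = 3
z_idxs = list(range(10))
z_chunks = [z_idxs[i::processes] for i in range(processes)]
print(z_chunks)  # [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
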
Example #3
def _parallelCopyToFileList(args):
    """copies FileList to FileList in parallel"""
    sources, source_idxs, sink, Xrng, Yrng = args

    for i, file in enumerate(sources):
        log.debug(f'copyData: copying {file} to {sink}')
        im = tif.imread(file)
        if Xrng is not None or Yrng is not None:
            im = io.dataToRange(im, x=Xrng, y=Yrng)

        io.writeData(sink, im, startIndex=source_idxs[i])
Example #4
def alignData(fixedImage,
              movingImage,
              resultDirectory=None,
              type_of_transform='SyNRA',
              **kwargs):
    """Align images using elastix, estimates a transformation :math:`T:` fixed image :math:`\\rightarrow` moving image.

    Arguments:
        fixedImage (str): image source of the fixed image (typically the reference image)
        movingImage (str): image source of the moving image (typically the image to be registered)
        resultDirectory (str or None): result directory for transform parameters. None saves to bq3ddefault temp file
        transform: (str): type of transform to apply as defined in 'ants.registration' type_of_transform
        **kwargs: additional arguments to pass to 'ants.registration'

    Returns:
        str: path to elastix result directory
    """

    log_parameters(fixedImage=fixedImage,
                   movingImage=movingImage,
                   resultDirectory=resultDirectory,
                   type_of_transform=type_of_transform)

    # setup input
    mi = ants.from_numpy(io.readData(movingImage).astype('float32'))
    fi = ants.from_numpy(io.readData(fixedImage).astype('float32'))

    # setup output directory
    if not resultDirectory:
        tmp_folder = os.path.join(config.temp_dir, 'ANTs')
        resultDirectory = tmp_folder
    resultDirectory = os.path.join(resultDirectory, '')  # ensure trailing '/'
    os.makedirs(resultDirectory, exist_ok=True)

    # run
    result = ants.registration(fi,
                               mi,
                               type_of_transform=type_of_transform,
                               outprefix=resultDirectory,
                               verbose=True,
                               **kwargs)

    # save output
    io.writeData(os.path.join(resultDirectory, 'result.tif'),
                 result['warpedmovout'].numpy())

    # no cleanup here: resultDirectory (the temp folder when none was given)
    # holds the saved results and is returned below

    return resultDirectory
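
A minimal usage sketch (illustrative; the file names are hypothetical):

result_dir = alignData('atlas_25um.tif',          # fixed/reference image
                       'autofluo_25um.tif',       # moving image to register
                       resultDirectory='/tmp/ants_run/',
                       type_of_transform='SyNRA')
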
Example #5
def transformImage(image,
                   reference,
                   transformDirectory,
                   sink=None,
                   invert=False,
                   interpolation='bspline'):
    """Transform a raw data set to reference using the ANTs alignment results

    Arguments:
        source (str or array): image source to be transformed
        reference (str or array): fixed image from transform
        transformDirectory (str): Directory containing ANTS transform parameters
        sink (str or None): image sink to save transformed image to.
        interpolation (str): ANTS interpolator to use for generating image.

    Returns:
        array or str: file name of the transformed data. If sink is None, return array
    """

    log.info(f'transforming image with {transformDirectory}')
    log.info(f'invert: {invert}')

    # get image and transform
    im = ants.from_numpy(io.readData(image).astype('float32'))
    ref = ants.from_numpy(io.readData(reference).astype('float32'))
    composite_trans = _compose_transforms(transformDirectory, invert=invert)
    # apply transforms
    res = composite_trans.apply_to_image(im, ref, interpolation=interpolation)
    # output
    if isinstance(sink, str):
        return io.writeData(sink, res.numpy())
    else:
        return res.numpy()
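
A usage sketch pairing this with the alignData result from Example #4 (hypothetical paths):

aligned = transformImage('raw_channel.tif',       # image to transform
                         'atlas_25um.tif',        # fixed/reference image
                         '/tmp/ants_run/',        # transformDirectory
                         sink='raw_in_atlas.tif',
                         interpolation='bspline')
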
Example #6
def transformPoints(points_source,
                    transformDirectory,
                    sink=None,
                    invert=False):
    """Transform coordinates math:`x` via elastix estimated transformation to :math:`T(x)`
    Note the transformation is from the fixed image coorindates to the moving image coordiantes.

    Arguments:
        points_source (str or numpy.array): source of the points.

    Returns:
        array or str: array or file name of transformed points
    """

    log.info(f'transforming points with {transformDirectory}')
    log.info(f'invert: {invert}')

    pts = io.readPoints(points_source).tolist()
    composite_trans = _compose_transforms(transformDirectory, invert=invert)

    trans_pts = [composite_trans.apply_to_point(p) for p in pts]

    res = np.array(trans_pts)

    if isinstance(sink, str):
        return io.writeData(sink, res)
    else:
        return res
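
A usage sketch (hypothetical paths; points are row-wise coordinates, and per the note above the forward transform maps fixed-image coordinates to moving-image coordinates):

pts_moving = transformPoints('cells.npy',
                             '/tmp/ants_run/',
                             sink='cells_transformed.npy',
                             invert=False)
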
Example #7
def copyData(source, sink, x=None, y=None, z=None, returnMemmap=False):
    """Copy a data file from source to sink
    
    Arguments:
        source (str): file name pattern of source
        sink (str): file name pattern of sink
        returnMemmap (bool): returns the result as an array
    Returns:
        str: file name of the copy
    """
    out_type = io.dataFileNameToType(sink)
    if out_type == 'TIF':
        if isinstance(source, np.memmap) and x is None and y is None and z is None:
            shutil.copyfile(source.filename, sink)
        else:
            im = io.readData(source, x=x, y=y, z=z)
            io.writeData(sink, im, returnMemmap=returnMemmap)

        if returnMemmap:
            return io.readData(sink)
        else:
            return sink
    else:
        raise RuntimeError(
            'copying from TIF to {} not yet supported.'.format(out_type))
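
A usage sketch cropping a sub-volume during the copy (hypothetical paths; the (start, stop) range convention is assumed from io.readData):

sub = copyData('stack.tif', 'stack_crop.tif',
               x=(100, 400), y=(100, 400), z=(0, 50),
               returnMemmap=True)
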
Example #8
def overlay_label(dataSource,
                  labelSource,
                  output=None,
                  alpha=False,
                  labelColorMap='jet',
                  x=all,
                  y=all,
                  z=all):
    """Overlay a gray scale image with colored labeled image
    
    Arguments:
        dataSouce (str or array): volumetric image data
        labelSource (str or array): labeled image to be overlayed on the image data
        output (str or None): destination for the overlayed image
        alpha (float or False): transparency
        labelColorMap (str or object): color map for the labels
        x, y, z (all or tuple): sub-range specification
    
    Returns:
        (array or str): figure handle
        
    See Also:
        :func:`overlayPoints`
    """

    label = io.readData(labelSource, x=x, y=y, z=z)
    image = io.readData(dataSource, x=x, y=y, z=z)

    lmax = label.max()
    if lmax <= 1:
        carray = numpy.array([[1, 0, 0, 1]])
    else:
        cm = mpl.cm.get_cmap(labelColorMap)
        cNorm = mpl.colors.Normalize(vmin=1, vmax=int(lmax))
        carray = mpl.cm.ScalarMappable(norm=cNorm, cmap=cm)
        carray = carray.to_rgba(numpy.arange(1, int(lmax + 1)))

    if not alpha:
        carray = numpy.concatenate(([[0, 0, 0, 1]], carray), axis=0)
    else:
        carray = numpy.concatenate(([[1, 1, 1, 1]], carray), axis=0)

    cm = mpl.colors.ListedColormap(carray)
    carray = cm(label)
    carray = carray.take([0, 1, 2], axis=-1)

    if not alpha:
        cimage = (label == 0) * image
        cimage = numpy.repeat(cimage, 3)
        cimage = cimage.reshape(image.shape + (3, ))
        cimage = cimage.astype(carray.dtype)
        cimage += carray
    else:
        cimage = numpy.repeat(image, 3)
        cimage = cimage.reshape(image.shape + (3, ))
        cimage = cimage.astype(carray.dtype)
        cimage *= carray

    return io.writeData(output, cimage)
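
A usage sketch (hypothetical paths; x, y, z default to the full range):

overlay = overlay_label('raw.tif', 'segmentation.tif',
                        output='overlay.tif',
                        labelColorMap='jet')
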
Example #9
    def get_region_info_dataframe(self, index, columns, sink=None, iterator=PreOrderIter):
        """ format region attributes into a dataframe

        If an attribute in column is type list or tuple each member will get its own column in the dataframe

        Arguments:
            index (str): attribute used for row labels.
            columns (str or list): attributes to be included in columns.
            sink (str): file to save dataframe too.
            iterator (Object): `anynode` iterator to use when populating dataframe. Will determine order of entries.
        Returns:
            pandas.DataFrame: if sink, will return filename of sink.
        """
        timer = Timer()
        if not isinstance(columns, list):
            columns = [columns]

        row_labels = []
        data = []
        # get column and row attributes by region and merge into a list of lists
        for region in iterator(self.tree_root):
            row_labels.append(getattr(region, index, None))
            region_attrs = []
            for att in columns:
                value = getattr(region, att, None)

                if isinstance(value, (list, tuple)):
                    region_attrs.extend(value)
                else:
                    region_attrs.append(value)
            data.append(region_attrs)

        # create column labels. If an attribute has a length, duplicate column headers will be added.
        col_labels = []
        for col in columns:
            att = getattr(self.tree_root, col, None)
            if isinstance(att, (list, tuple)):
                col_labels.extend([f'{col}.{i}' for i in range(len(att))])
            else:
                col_labels.append(col)

        data_df = pd.DataFrame(data=data, index=row_labels, columns=col_labels)
        data_df.index.name = index
        timer.log_elapsed(prefix='Generated dataframe for regions')

        if sink:
            return io.writeData(sink, data_df)
        else:
            return data_df
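
A usage sketch (hypothetical instance and attribute names; any list-valued attribute is expanded into `col.0`, `col.1`, ... columns as described above):

df = region_tree.get_region_info_dataframe('id', ['name', 'volume'])
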
Example #10
    def _generate_output(self):

        self._initialize_Ilastik()

        # create temp npy
        input_fn = str((self.temp_dir /
                        Path(self.input.filename).stem).with_suffix('.npy'))
        io.writeData(input_fn, self.input)
        output_fn = str(self.temp_dir / 'out_prelim.npy')

        ilinp = self._filename_to_input_arg(input_fn)
        ilout = self._filename_to_output_arg(output_fn)
        cmd_args = f'--project="{self.project}" {ilout} {ilinp}'

        self.run_headless(cmd_args)

        output = io.readData(output_fn)
        output_chan = self.temp_dir / 'out.npy'
        # select the output channel to restore the input dimensionality
        output_chan = io.writeData(output_chan,
                                   output[..., self.output_channel],
                                   returnMemmap=True)

        return output_chan
Example #11
def writeData(filename,
              data,
              startIndex=0,
              rgb=False,
              substack=None,
              **kwargs):
    """Write image stack to single or multiple image files
    
    Arguments:
        filename (str): file name as regular expression
        data (array): image data
        startIndex (int): index of first z-slice
    
    Returns:
        str: file name as regular expression
    """

    # create directory if not exists
    io.createDirectory(filename)

    # check for the \d{xx} part of the regular expression -> if not assume file header
    fileheader, fileext, digitfrmt = splitFileExpression(filename)
    d = data.ndim

    if d == 2:
        fname = fileheader + (digitfrmt % startIndex) + fileext
        io.writeData(fname, data, substack=substack)
        return fname
    else:

        if substack:
            startIndex = substack[0][0]
            nz = substack[0][1] - startIndex
            substack = substack[1:]
        else:
            nz = data.shape[0]

        if rgb:
            if nz == 3:
                fname = fileheader + (digitfrmt % startIndex) + fileext
                io.writeData(fname, data, rgb=True, substack=substack)
                return fname
            else:
                raise RuntimeError(
                    'Image does not have the correct dimensionality for RGB; format should be XYS'
                )
        else:
            for i in range(nz):
                fname = fileheader + (digitfrmt % (i + startIndex)) + fileext
                io.writeData(fname, data[i], substack=substack)
            return filename
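
A usage sketch writing a stack as numbered z-slices (toy data; the \d{4} group in the file expression becomes a 4-digit slice index):

import numpy as np

stack = np.zeros((10, 512, 512), dtype='uint16')
writeData(r'slices/img_\d{4}.tif', stack, startIndex=0)
# writes slices/img_0000.tif ... slices/img_0009.tif
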
Example #12
def deformationDistance(deformationField, sink=None, scale=None):
    """Compute the distance field from a deformation vector field

    Arguments:
        deformationField (str or array): source of the deformation field determined by :func:`deformationField`
        sink (str or None): image sink to save the deformation field to
        scale (tuple or None): scale factor for each dimension, if None = (1,1,1)

    Returns:
        array or str: array or file name of the transformed data
    """

    deformationField = io.readData(deformationField)

    df = np.square(deformationField)
    if scale is not None:
        for i in range(3):
            df[:, :, :, i] = df[:, :, :, i] * (scale[i] * scale[i])

    return io.writeData(sink, np.sqrt(np.sum(df, axis=3)))
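
Per voxel this computes the scaled Euclidean norm of the deformation vector, sqrt(sum_i (scale_i * v_i)^2). A toy numpy check of that identity (not from the source):

import numpy as np

v = np.array([3.0, 4.0, 0.0])            # one deformation vector
scale = (2.0, 1.0, 1.0)
dist = np.sqrt(np.sum(np.square(v) * np.square(scale)))
assert np.isclose(dist, np.linalg.norm(v * scale))
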
Example #13
def resampleDataInverse(sink,
                        source=None,
                        dataSizeSource=None,
                        orientation=None,
                        resolutionSource=(4.0625, 4.0625, 3),
                        resolutionSink=(25, 25, 25),
                        processingDirectory=None,
                        processes=bq3d.config.processes,
                        cleanup=True,
                        interpolation='linear',
                        **args):
    """Resample data inversely to :func:`resampleData` routine

    Arguments:
        sink (str or None): image to be inversly resampled (=sink in :func:`resampleData`)
        source (str or array): destination for inversly resmapled image (=source in :func:`resampleData`)
        dataSizeSource (tuple or None): target size of the resampled image
        orientation (tuple): orientation specified by permuation and change in sign of (1,2,3)
        resolutionSource (tuple): resolution of the source image (in length per pixel)
        resolutionSink (tuple): resolution of the resampled image (in length per pixel)
        processingDirectory (str or None): directory in which to perform resmapling in parallel, None a temporary directry will be created
        processes (int): number of processes to use for parallel resampling
        cleanup (bool): remove temporary files
        interpolation (str): method to use for interpolating to the resmapled image

    Returns:
        (array or str): data or file name of resampled image
    Notes:
        * resolutions are assumed to be given for the axes of the intrinsic
          orientation of the data and reference as when viewed by matplotlib or ImageJ
        * orientation: permuation of 1,2,3 with potential sign, indicating which
          axes map onto the reference axes, a negative sign indicates reversal
          of that particular axes
        * only a minimal set of information to detremine the resampling parameter
          has to be given, e.g. dataSizeSource and dataSizeSink
    """

    # assume we can read data fully into memory
    resampledData = io.readData(sink)

    dataSizeSink = resampledData.shape

    if isinstance(dataSizeSource, str):
        dataSizeSource = io.dataSize(dataSizeSource)

    dataSizeSource, dataSizeSink, resolutionSource, resolutionSink = resampleDataSize(
        dataSizeSource=dataSizeSource,
        dataSizeSink=dataSizeSink,
        resolutionSource=resolutionSource,
        resolutionSink=resolutionSink,
        orientation=orientation)

    dataSizeSinkI = orientDataSizeInverse(dataSizeSink, orientation)

    # flip axes back and permute inversely
    if not orientation is None:
        if orientation[0] < 0:
            resampledData = resampledData[::-1, :, :]
        if orientation[1] < 0:
            resampledData = resampledData[:, ::-1, :]
        if orientation[2] < 0:
            resampledData = resampledData[:, :, ::-1]

        # reorient
        peri = invert_orientation(orientation)
        peri = orientationToPermuation(peri)
        resampledData = resampledData.transpose(peri)

    # upscale in z
    interpolation = parse_interpolation(interpolation)

    resampledDataXY = numpy.zeros(
        (dataSizeSinkI[0], dataSizeSinkI[1], dataSizeSource[2]),
        dtype=resampledData.dtype)

    for i in range(dataSizeSinkI[0]):
        if i % 25 == 0:
            log.vebose("resampleDataInverse: processing %d/%d" %
                       (i, dataSizeSinkI[0]))

        # cv2.resize takes reverse order of sizes !
        resampledDataXY[i] = cv2.resize(resampledData[i],
                                        (dataSizeSource[2], dataSizeSinkI[1]),
                                        interpolation=interpolation)

    # upscale x, y in parallel
    if io.isFileExpression(source):
        files = source
    else:
        if processingDirectory is None:
            processingDirectory = tempfile.mkdtemp()
        files = os.path.join(processingDirectory, r'resample_\d{4}.tif')

    io.writeData(files, resampledDataXY)

    nZ = dataSizeSource[0]
    pool = multiprocessing.Pool(processes=processes)
    argdata = []
    for i in range(nZ):
        argdata.append((source, fl.fileExpressionToFileName(files, i),
                        dataSizeSource, interpolation, i, nZ))
    pool.map(_resampleXYParallel, argdata)
    pool.close()

    if io.isFileExpression(source):
        return source
    else:
        data = io.convertData(files, source)

        if cleanup:
            shutil.rmtree(processingDirectory)

        return data
Example #14
    def get_voxel_info_dataframe(self, columns, ignore_empty=False, sink=None, iterator=PreOrderIter):
        """ format voxel info with corresponding region attributes into a dataframe

        If an attribute in column is type list or tuple each member will get its own column in the dataframe
        Rows will always be by voxel.

        Arguments:
            columns (str or list): attributes to be included in columns. 'nPoints' will return point counts by voxel.
            ignore_empty (bool): only return voxels containing points
            sink (str): file to save dataframe too.
            iterator (Object): `anynode` iterator to use when populating dataframe. Will determine order of entries.
        Returns:
            pandasDataFrame: if sink, will return filename of sink.
        """
        timer = Timer()
        if not isinstance(columns, list):
            columns = [columns]

        row_labels = []
        data = []
        # get column and row attributes by region and merge into a list of lists
        for region in iterator(self.tree_root):
            print(f'Adding region: {region.id}')
            for vox, points in region.voxels.items():

                if ignore_empty and sum(points) == 0:
                    continue

                row_labels.append(vox)
                region_attrs = [vox[0], vox[1], vox[2]]
                for att in columns:
                    if att == 'nPoints':
                        value = points
                    else:
                        value = getattr(region, att, None)

                    if isinstance(value, (list, tuple)):
                        region_attrs.extend(value)
                    else:
                        region_attrs.append(value)
                data.append(region_attrs)

        # create column labels. If an attribute has a length, duplicate column headers will be added.
        col_labels = ['x', 'y', 'z']
        for col in columns:
            att = getattr(self.tree_root, col, None)  # voxel points list will be the same length as nPoints
            if isinstance(att, (list, tuple)):
                col = [f'{col}{i}' for i in range(len(att))]
                col_labels.extend(col)
            else:
                col_labels.extend([col])

        data_df = pd.DataFrame(data=data, index=row_labels, columns=col_labels)
        data_df.index.name = 'voxel'
        timer.log_elapsed(prefix='Generated dataframe for voxels')

        if sink:
            return io.writeData(sink, data_df)
        else:
            return data_df
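
A usage sketch (hypothetical attribute names; 'nPoints' expands to one column per point channel as noted in the comments above):

vox_df = region_tree.get_voxel_info_dataframe(['name', 'nPoints'],
                                              ignore_empty=True,
                                              sink='voxel_counts.csv')
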
Example #15
0
def copyData(source, sink, **kwargs):
    """Copy a raw/mhd file pair from source to sink
    
    Arguments:
        source (str): file name of source
        sink (str): file name of sink
    
    Returns:
        str: file name of the copy
    """     
    
    sourceExt = io.fileExtension(source)
    sinkExt   = io.fileExtension(sink)

    source_base = os.path.splitext(source)[0]
    sink_base = os.path.splitext(sink)[0]

    if sinkExt in ('raw', 'zraw', 'mhd'):
        # resolve the raw/zraw + mhd file pair belonging to the source
        if sourceExt in ('raw', 'zraw'):
            source_raw, source_mhd = source, source_base + '.mhd'
        elif sourceExt == 'mhd':
            source_mhd = source
            if os.path.exists(source_base + '.raw'):
                source_raw = source_base + '.raw'
            elif os.path.exists(source_base + '.zraw'):
                source_raw = source_base + '.zraw'
            else:
                raise RuntimeError('copyData: raw or zraw matching mhd not found')
        else:
            raise RuntimeError('copyData: {} to {} not supported'.format(sourceExt, sinkExt))

        # pair each source file with the matching sink file
        if sinkExt == 'mhd':
            sink_mhd = sink
            sink_raw = sink_base + os.path.splitext(source_raw)[1]
        else:
            sink_raw, sink_mhd = sink, sink_base + '.mhd'

        io.copyFile(source_raw, sink_raw)
        io.copyFile(source_mhd, sink_mhd)

        return sink

    elif sinkExt == 'tif':
        data = readData(source, **kwargs)
        return io.writeData(sink, data)

    else:
        raise RuntimeError('copyData: {} to {} not supported'.format(sourceExt, sinkExt))
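
A usage sketch (hypothetical paths; the matching .raw/.zraw companion of an .mhd header is resolved automatically):

copyData('atlas.mhd', 'atlas_copy.mhd')   # copies the mhd/raw pair
copyData('atlas.mhd', 'atlas.tif')        # converts via readData/writeData
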
Example #16
def voxelize(points,
             dataSize=None,
             sink=None,
             method='Spherical',
             size=(5, 5, 5),
             weights=None):
    """Converts a list of points into an volumetric image array

    Arguments:
        points (array): point data array
        dataSize (tuple or str): size of final image in xyz. If str, will use the size of the passed image.
        sink (str, array or None): the location to write or return the resulting voxelization image, if None return array
        method (str or None): method for voxelization: 'Spherical', 'Rectangular' or 'Pixel'
        size (tuple): size parameter for the voxelization
        weights (array or None): weights for each point, None is uniform weights
    Returns:
        (array): volumetric data of smeared out points
    """

    log.verbose('voxelizing points')
    points = io.readPoints(points)

    if dataSize is None:
        dataSize = tuple(
            int(math.ceil(points[:, i].max())) for i in range(points.shape[1]))
    elif isinstance(dataSize, str):
        dataSize = io.dataSize(dataSize)

    if method.lower() == 'spherical':
        if weights is None:
            data = vox.voxelizeSphere(points.astype('float'), dataSize[0],
                                      dataSize[1], dataSize[2], size[0],
                                      size[1], size[2])
        else:
            data = vox.voxelizeSphereWithWeights(points.astype('float'),
                                                 dataSize[0], dataSize[1],
                                                 dataSize[2], size[0], size[1],
                                                 size[2], weights)

    elif method.lower() == 'rectangular':
        if weights is None:
            data = vox.voxelizeRectangle(points.astype('float'), dataSize[0],
                                         dataSize[1], dataSize[2], size[0],
                                         size[1], size[2])
        else:
            data = vox.voxelizeRectangleWithWeights(points.astype('float'),
                                                    dataSize[0], dataSize[1],
                                                    dataSize[2], size[0],
                                                    size[1], size[2], weights)

    elif method.lower() == 'pixel':
        data = voxelizePixel(points, dataSize, weights)
    else:
        raise RuntimeError('voxelize: mode: %s not supported!' % method)

    if data.dtype == np.float64:
        log.warning(
            'Converting dtype float64 to int32 for output. This may result in loss of info.'
        )
        data = data.astype('int32')

    if sink:
        return io.writeData(sink, data, returnMemmap=True)
    else:
        return data
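
A usage sketch building a point-density volume (hypothetical paths; passing a file name as dataSize uses that image's dimensions):

heatmap = voxelize('cells_transformed.npy',
                   dataSize='atlas_25um.tif',
                   sink='density.tif',
                   method='Spherical',
                   size=(5, 5, 5))
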
Example #17
def transformImage(source,
                   sink=[],
                   transformParameterFile=None,
                   transformDirectory=None,
                   resultDirectory=None):
    """Transform a raw data set to reference using the elastix alignment results

    If the map determined by elastix is
    :math:`T \\mathrm{fixed} \\rightarrow \\mathrm{moving}`,
    transformix on data works as :math:`T^{-1}(\\mathrm{data})`.

    Arguments:
        source (str or array): image source to be transformed
        sink (str, [] or None): image sink to save transformed image to. if [] return the default name of the data file generated by transformix.
        transformParameterFile (str or None): parameter file for the primary transformation, if None, the file is determined from the transformDirectory.
        transformDirectory (str or None): result directory of elastix alignment, if None the transformParameterFile has to be given.
        resultDirectory (str or None): the directory for the transformix results

    Returns:
        array or str: array or file name of the transformed data
    """

    if isinstance(source, np.ndarray):
        imgname = os.path.join(tempfile.gettempdir(), 'elastix_input.tif')
        io.writeData(imgname, source)
    elif isinstance(source, str):
        if io.dataFileNameToType(source) == "TIF":
            imgname = source
        else:
            imgname = os.path.join(tempfile.gettempdir(), 'elastix_input.tif')
            io.convertData(source, imgname)
    else:
        raise RuntimeError('transformImage: source not a string or array')

    if resultDirectory is None:
        resultdirname = os.path.join(tempfile.gettempdir(), 'elastix_output')
    else:
        resultdirname = resultDirectory

    if not os.path.exists(resultdirname):
        os.makedirs(resultdirname)

    if transformParameterFile is None:
        if transformDirectory is None:
            raise RuntimeError(
                'neither an alignment directory nor a transform parameter file was specified!'
            )
        transformparameterdir = transformDirectory
        transformParameterFile = getTransformParameterFiles(
            transformparameterdir)[-1]
    else:
        transformparameterdir = os.path.split(transformParameterFile)[0]

    # transform
    # make paths in the parameter files absolute
    setPathTransformParameterFiles(transformparameterdir)

    #transformix -in inputImage.ext -out outputDirectory -tp TransformParameters.txt
    cmd = config.tranformix_binary + ' -in ' + imgname + ' -out ' + resultdirname + ' -tp ' + transformParameterFile

    res = os.system(cmd)

    if res != 0:
        raise RuntimeError('transformImage: failed executing: ' + cmd)

    if not isinstance(source, str):
        os.remove(imgname)

    if isinstance(sink, str):
        resultfile = getResultDataFile(resultdirname)
        return io.convertData(resultfile, sink)
    elif sink is None:
        resultfile = getResultDataFile(resultdirname)
        return io.readData(resultfile)
    elif isinstance(sink, list) and len(sink) == 0:
        return getResultDataFile(resultdirname)
    else:
        raise RuntimeError('transformImage: sink not valid!')
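
A usage sketch for this elastix variant (hypothetical paths; transformDirectory is expected to contain the TransformParameters files from an elastix alignment):

warped = transformImage('raw_25um.tif',
                        sink='raw_in_atlas.tif',
                        transformDirectory='/tmp/elastix_out/')
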
Example #18
def processSubStack(flow, output_properties, source, overlap_indices, unique_indices,
                    temp_dir_root):
    """ Helper to process stack in parallel

    Args:
        flow (tuple): images filters to run in sequential order.
            Entries should be a dict and will be passed to *bq3d.image_filters.filter_image*.
            The input image to each filter will the be output of the pevious filter.
        output_properties: (list): properties to include in output. See
        label_properties.region_props for more info
        source (str): path to image file to analyse.
        overlap_indices (tuple or list): list of indices as [start,stop] along each axis to analyse.
        unique_indices (tuple or list): list of indices as [start,stop] along each axis
        corresponding
            to the non-overlapping portion of the image being analyzed.
        temp_dir (str): temp dir to be used for processing.

    Returns:

    """
    timer = Timer()

    zRng, yRng, xRng = overlap_indices
    log.info(f'chunk ranges: z= {zRng}, y= {yRng}, x = {xRng}')

    # memmap routine
    temp_dir = unique_temp_dir('run', path=temp_dir_root)
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)

    mmapFile = os.path.join(temp_dir, str(uuid.uuid4())) + '.tif'
    log.info('Creating memory mapped substack at: {}'.format(mmapFile))

    img = io.copyData(source, mmapFile, x=xRng, y=yRng, z=zRng, returnMemmap=True)

    rawFile = os.path.join(temp_dir, str(uuid.uuid4())) + '.tif'
    log.info('Creating raw substack at: {}'.format(rawFile))
    raw = io.copyData(img.filename, rawFile, returnMemmap=True)

    # run each filter in the flow sequentially
    filtered_im = img
    for p in flow:
        params = dict(p)
        filter_name = params.pop('filter')
        save = params.pop('save', False)
        filtered_im = filter_image(filter_name, filtered_im, temp_dir_root=temp_dir, **params)

        # save intermediate output
        if save:
            log.info(f'Saving output to {save}')
            h, ext, dfmt = splitFileExpression(save)

            for z in range(*zRng):
                fname = h + (dfmt % z) + ext
                if not os.path.isfile(fname):
                    io.empty(fname, io.dataSize(source)[1:], filtered_im.dtype)

            unique = filtered_im[unique_slice(overlap_indices, unique_indices)]
            io.writeData(save, unique, substack=unique_indices)

    # get label properties and return
    if output_properties:
        props = label_props(raw, filtered_im, output_properties)
    else:
        props = []

    shutil.rmtree(temp_dir, ignore_errors=True)
    timer.log_elapsed(prefix='Processed chunk')
    return props
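
A sketch of what the `flow` argument might look like (the filter names and parameters here are hypothetical; each dict is handed to bq3d.image_filters.filter_image, and an optional 'save' entry writes that filter's intermediate output):

flow = (
    {'filter': 'BackgroundSubtraction', 'size': (7, 7)},
    {'filter': 'Label', 'min_size': 20, 'save': r'labels/lab_\d{4}.tif'},
)
props = processSubStack(flow, ['centroid', 'area'], 'stack.tif',
                        ((0, 60), (0, 512), (0, 512)),    # overlap_indices (z, y, x)
                        ((5, 55), (0, 512), (0, 512)),    # unique_indices
                        '/tmp/chunks')
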
Example #19
def resampleData(source,
                 sink=None,
                 orientation=None,
                 dataSizeSink=None,
                 resolutionSource=(.91, .91, 8.3),
                 resolutionSink=(25, 25, 25),
                 processingDirectory=bq3d.config.temp_dir,
                 processes=bq3d.config.processes,
                 cleanup=True,
                 interpolation='linear',
                 **kwargs):
    """Resample data of source in resolution and orientation

    Arguments:
        source (str or array): image to be resampled
        sink (str or None): destination of resampled image
        orientation (tuple): orientation specified by permuation and change in sign of (1,2,3)
        dataSizeSink (tuple or None): target size of the resampled image
        resolutionSource (tuple): resolution of the source image (in length per pixel)
        resolutionSink (tuple): resolution of the resampled image (in length per pixel)
        processingDirectory (str or None): directory in which to perform resmapling in parallel, None a temporary directry will be created
        processes (int): number of processes to use for parallel resampling
        cleanup (bool): remove temporary files
        interpolation (str): method to use for interpolating to the resmapled image

    Returns:
        (array or str): data or file name of resampled image
    Notes:
        * resolutions are assumed to be given for the axes of the intrinsic
          orientation of the data and reference as when viewed by matplotlib or ImageJ
        * orientation: permuation of 1,2,3 with potential sign, indicating which
          axes map onto the reference axes, a negative sign indicates reversal
          of that particular axes
        * only a minimal set of information to detremine the resampling parameter
          has to be given, e.g. dataSizeSource and dataSizeSink
    """

    log.info(f'interpolation method: {interpolation}')
    log.info(f'Number of processes: {processes}')

    interpolation = parse_interpolation(interpolation)

    dataSizeSource = io.dataSize(source)
    if isinstance(dataSizeSink, str):
        dataSizeSink = io.dataSize(dataSizeSink)

    # orient actual resolutions onto reference resolution
    dataSizeSource, dataSizeSink, resolutionSource, resolutionSink = resampleDataSize(
        dataSizeSource=dataSizeSource,
        dataSizeSink=dataSizeSink,
        resolutionSource=resolutionSource,
        resolutionSink=resolutionSink,
        orientation=orientation)
    dataSizeSinkI = orientDataSizeInverse(dataSizeSink, orientation)

    # setup intermediate output
    if processingDirectory is None:
        processingDirectory = tempfile.mkdtemp()
    else:
        io.createDirectory(processingDirectory)

    resampledXYFile = os.path.join(processingDirectory, 'resampleXY.tif')
    data_type = io.getDataType(source)
    resampledXY = io.empty(resampledXYFile,
                           dtype=data_type,
                           shape=(dataSizeSource[0], dataSizeSinkI[1],
                                  dataSizeSinkI[2]),
                           imagej=True)

    nZ = dataSizeSource[0]

    # resample in XY
    # chunk for each process
    Zlist = list(range(nZ))
    chunks = [Zlist[i::processes] for i in range(processes)]

    argdata = [(source, resampledXYFile, dataSizeSinkI, interpolation, chunk)
               for chunk in chunks]
    if processes == 1:
        _resampleXYParallel(argdata[0])
    else:
        pool = multiprocessing.Pool(processes=processes)
        pool.map(_resampleXYParallel, argdata)
        pool.close()

    # rescale in z
    resampledXY = io.readData(resampledXYFile)
    resampledData = numpy.zeros(
        (dataSizeSinkI[0], dataSizeSinkI[1], dataSizeSinkI[2]),
        dtype=data_type)

    for i in range(dataSizeSinkI[1]):  # faster if iterate over y
        if i % 50 == 0:
            log.verbose(
                "resampleData: Z: resampling %d/%d" % (i, dataSizeSinkI[1]))
        resampledData[:, i] = cv2.resize(resampledXY[:, i],
                                         (dataSizeSinkI[2], dataSizeSinkI[0]),
                                         interpolation=interpolation)

    if cleanup:
        shutil.rmtree(processingDirectory)

    if orientation is not None:

        # reorient
        per = orientationToPermuation(orientation)
        resampledData = resampledData.transpose(per)

        # reverse orientation after permuting e.g. (-2,1) brings axis 2 to first axis and we can reorder there
        if orientation[0] < 0:
            resampledData = resampledData[::-1, :, :]
        if orientation[1] < 0:
            resampledData = resampledData[:, ::-1, :]
        if orientation[2] < 0:
            resampledData = resampledData[:, :, ::-1]

    log.verbose("resampleData: resampled data size: " +
                str(resampledData.shape))

    return io.writeData(sink, resampledData)
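
A usage sketch downsampling a light-sheet stack to atlas resolution (hypothetical paths and resolutions):

resampled = resampleData('stack.tif',
                         sink='stack_25um.tif',
                         orientation=(1, 2, 3),
                         resolutionSource=(.91, .91, 8.3),
                         resolutionSink=(25, 25, 25))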