Example #1
def spatial_smoothing(imgseq, macro_pixel_dim):
    images_reduced = measure.block_reduce(imgseq.as_array(),
                                          block_size=(1, macro_pixel_dim, macro_pixel_dim),
                                          func=np.nanmean,
                                          cval=np.nan)  # alternatively: cval=np.nanmedian(imgseq.as_array())

    dim_t, dim_x, dim_y = images_reduced.shape
    imgseq_reduced = neo.ImageSequence(images_reduced,
                                       units=imgseq.units,
                                       spatial_scale=imgseq.spatial_scale * macro_pixel_dim,
                                       macro_pixel_dim=macro_pixel_dim,
                                       sampling_rate=imgseq.sampling_rate,
                                       file_origin=imgseq.file_origin,
                                       t_start=imgseq.t_start)

    if 'array_annotations' in imgseq.annotations:
        del imgseq.annotations['array_annotations']

    imgseq_reduced.annotations.update(imgseq.annotations)

    imgseq_reduced.name = imgseq.name + " "
    imgseq_reduced.annotations.update(macro_pixel_dim=macro_pixel_dim)
    imgseq_reduced.description = imgseq.description +  \
                "spatially downsampled ({}).".format(os.path.basename(__file__))

    return imgseq_reduced
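A minimal usage sketch (not part of the source project) for the spatial_smoothing() above. It assumes numpy, quantities, neo, skimage.measure, and os are importable as in the source file, and a neo version in which ImageSequence accepts t_start (which Example #1 itself requires); all data, rates, and scales are made-up values.

import os
import numpy as np
import quantities as pq
import neo
from skimage import measure

# Toy 100-frame, 20x20-pixel sequence with hypothetical metadata.
imgseq = neo.ImageSequence(np.random.rand(100, 20, 20),
                           units='dimensionless',
                           sampling_rate=25 * pq.Hz,
                           spatial_scale=0.05 * pq.mm,
                           name='toy recording',
                           description='synthetic data; ',
                           file_origin='none')

# Average 2x2 pixel blocks into macro-pixels.
imgseq_reduced = spatial_smoothing(imgseq, macro_pixel_dim=2)
print(imgseq_reduced.as_array().shape)  # (100, 10, 10)
print(imgseq_reduced.spatial_scale)     # 0.1 mm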
Example #2
def spatial_smoothing(images, macro_pixel_dim):

    # Now we need to reduce the noise from the images by performing a spatial smoothing
    images_reduced = measure.block_reduce(
        images, (1, macro_pixel_dim, macro_pixel_dim),
        np.nanmean,
        cval=np.nanmedian(images))

    dim_t, dim_x, dim_y = images_reduced.shape
    new_annotations = images.annotations.copy()

    new_annotations['array_annotations'] = {
        'x_coords': [i % dim_x for i in range(dim_x * dim_y)],
        'y_coords': [i // dim_y for i in range(dim_x * dim_y)]
    }

    imgseq_reduced = neo.ImageSequence(images_reduced,
                                       units=images.units,
                                       spatial_scale=images.spatial_scale *
                                       macro_pixel_dim,
                                       sampling_rate=images.sampling_rate,
                                       file_origin=images.file_origin,
                                       annotations=new_annotations)

    imgseq_reduced.name = images.name + " "
    imgseq_reduced.annotations.update(macro_pixel_dim=macro_pixel_dim)
    imgseq_reduced.description = images.description + "spatially downsampled ({}).".format(
        os.path.basename(__file__))

    return imgseq_reduced
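The x_coords / y_coords lists rebuilt in Example #2 enumerate the reduced grid in flat order. A small worked check of that index arithmetic for a hypothetical 3x3 macro-pixel grid:

dim_x, dim_y = 3, 3  # hypothetical reduced grid size
x_coords = [i % dim_x for i in range(dim_x * dim_y)]
y_coords = [i // dim_y for i in range(dim_x * dim_y)]
print(x_coords)  # [0, 1, 2, 0, 1, 2, 0, 1, 2]
print(y_coords)  # [0, 0, 0, 1, 1, 1, 2, 2, 2]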
Example #3
def AnalogSignal2ImageSequence(block):
    for seg_count, segment in enumerate(block.segments):
        block.segments[seg_count].imagesequences = []
        for asig_count, asig in enumerate(segment.analogsignals):
            asig_array = asig.as_array()
            dim_t, dim_channels = asig_array.shape

            if 'x_coords' not in asig.array_annotations\
                or 'y_coords' not in asig.array_annotations:
                print('AnalogSignal {} in Segment {} has no spatial Information '\
                      .format(asig_count, seg_count)\
                    + ' as array_annotations "x_coords" "y_coords", skip.')
                continue

            coords = np.array(
                [(x, y) for x, y in zip(asig.array_annotations['x_coords'],
                                        asig.array_annotations['y_coords'])],
                dtype=float)

            if len(coords) != dim_channels:
                raise IndexError("Number of channels doesn't agree with "\
                               + "number of coordinates!")

            dim_x = np.max(asig.array_annotations['x_coords']) + 1
            dim_y = np.max(asig.array_annotations['y_coords']) + 1

            image_data = np.empty((dim_t, dim_x, dim_y), dtype=asig.dtype)
            image_data[:] = np.nan

            for channel in range(dim_channels):
                x, y = coords[channel]
                x, y = int(x), int(y)
                image_data[:, x, y] = asig_array[:, channel]

            spatial_scale = asig.annotations['spatial_scale']

            # array_annotations = {}
            # for k, v in asig.array_annotations.items():
            #     array_annotations[k] = v.reshape((dim_x, dim_y))

            imgseq = neo.ImageSequence(
                image_data=image_data,
                units=asig.units,
                dtype=asig.dtype,
                # t_start=asig.t_start, # NotImplementedError
                sampling_rate=asig.sampling_rate,
                name=asig.name,
                description=asig.description,
                file_origin=asig.file_origin,
                # array_annotations=array_annotations,
                **asig.annotations)

            block.segments[seg_count].imagesequences.append(imgseq)
    return block
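A minimal usage sketch (not from the source page) for Example #3's AnalogSignal2ImageSequence(): a made-up 4-channel signal is placed on a 2x2 grid via the array annotations the function expects, with a hypothetical spatial_scale stored as a regular annotation.

import numpy as np
import quantities as pq
import neo

# Toy 4-channel signal; 'x_coords'/'y_coords' locate each channel on a 2x2 grid.
asig = neo.AnalogSignal(np.random.rand(100, 4), units='mV',
                        sampling_rate=10 * pq.Hz,
                        array_annotations={'x_coords': [0, 0, 1, 1],
                                           'y_coords': [0, 1, 0, 1]},
                        spatial_scale=0.05 * pq.mm)

block = neo.Block()
segment = neo.Segment()
segment.analogsignals.append(asig)
block.segments.append(segment)

block = AnalogSignal2ImageSequence(block)
print(block.segments[0].imagesequences[0].shape)  # (100, 2, 2)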
Example #4
def AnalogSignal2ImageSequence(block):
    # ToDo: map 1D array annotations to 2D and update
    for seg_count, segment in enumerate(block.segments):
        for asig in segment.analogsignals:
            asig_array = asig.as_array()
            dim_t, dim_channels = asig_array.shape

            # coords = asig.channel_index.coordinates
            # temporary replacement
            coords = np.array(
                [(x, y) for x, y in zip(asig.array_annotations['x_coords'],
                                        asig.array_annotations['y_coords'])],
                dtype=float)
            #
            # spatial_scale = asig.annotations['spatial_scale']
            # int_coords = np.round(np.array(coords)/spatial_scale).astype(int)
            # print(int_coords)

            if len(coords) != dim_channels:
                raise IndexError("Number of channels doesn't agree with "\
                               + "number of coordinates!")

            dim_x, dim_y = determine_dims(coords)

            image_data = np.empty((dim_t, dim_x, dim_y))
            image_data[:] = np.nan

            for channel in range(dim_channels):
                x, y = coords[channel]
                x, y = int(x), int(y)
                image_data[:, x, y] = asig_array[:, channel]

            # spatial_scale = determine_spatial_scale(coords)*coords.units
            spatial_scale = asig.annotations['spatial_scale']

            array_annotations = {}
            for k, v in asig.array_annotations.items():
                array_annotations[k] = v.reshape((dim_x, dim_y))

            imgseq = neo.ImageSequence(
                image_data=image_data,
                units=asig.units,
                sampling_rate=asig.sampling_rate,
                name=asig.name,
                description=asig.description,
                file_origin=asig.file_origin,
                # array_annotations=array_annotations,
                **asig.annotations)

            block.segments[seg_count].imagesequences.append(imgseq)
    return block
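Examples #4 and #9 call a project-specific determine_dims() helper that is not shown on this page. Below is a minimal sketch of what its call site suggests it does (infer the grid size from integer pixel coordinates); the actual implementation in the source project may differ.

import numpy as np

def determine_dims(coords):
    # coords: one (x, y) pixel index pair per channel, as built above
    dim_x = int(round(np.max(coords[:, 0]))) + 1
    dim_y = int(round(np.max(coords[:, 1]))) + 1
    return dim_x, dim_y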
Example #5
def spatial_smoothing(images, macro_pixel_dim):

    # Now we need to reduce the noise from the images by performing a spatial smoothing
    #images_reduced = measure.block_reduce(images, (1, macro_pixel_dim, macro_pixel_dim), np.nanmean, cval = np.nanmedian(images))
    images_reduced = scipy.signal.decimate(images,
                                           macro_pixel_dim,
                                           n=2,
                                           ftype='fir',
                                           axis=1,
                                           zero_phase=True)
    images_reduced = scipy.signal.decimate(images_reduced,
                                           macro_pixel_dim,
                                           n=2,
                                           ftype='fir',
                                           axis=2,
                                           zero_phase=True)

    dim_t, dim_x, dim_y = images_reduced.shape

    imgseq_reduced = neo.ImageSequence(
        images_reduced,
        units=images.units,
        spatial_scale=images.spatial_scale * macro_pixel_dim,
        sampling_rate=images.sampling_rate,
        file_origin=images.file_origin,
        t_start=images.t_start)

    if 'array_annotations' in images.annotations:
        del images.annotations['array_annotations']

    imgseq_reduced.annotations.update(images.annotations)

    imgseq_reduced.name = images.name + " "
    imgseq_reduced.annotations.update(macro_pixel_dim=macro_pixel_dim)
    imgseq_reduced.description = images.description +  \
                "spatially downsampled ({}).".format(os.path.basename(__file__))

    return imgseq_reduced
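Example #5 replaces block averaging with FIR decimation along each spatial axis. A quick shape check on made-up data, using the same decimate arguments as above:

import numpy as np
import scipy.signal

frames = np.random.rand(100, 20, 20)   # (time, x, y), synthetic data
reduced = scipy.signal.decimate(frames, 2, n=2, ftype='fir', axis=1, zero_phase=True)
reduced = scipy.signal.decimate(reduced, 2, n=2, ftype='fir', axis=2, zero_phase=True)
print(reduced.shape)                   # (100, 10, 10)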
Example #6
    """
    # Old method: re-converting into an analogsignal
    signal = activity.reshape((dim_t, dim_x * dim_y))
    asig = block.segments[0].analogsignals[0].duplicate_with_new_data(signal)
    asig.array_annotate(**block.segments[0].analogsignals[0].array_annotations)
    
    asig.name += ""
    asig.description += "Deconvoluted activity using the given {} kernel"\
                        .format(args.kernel)
    block.segments[0].analogsignals[0] = asig
    """
    # New method, creating a new block
    # create an ImageSequence with the deconvoluted matrix
    imgseq_deconv = neo.ImageSequence(
        activities,
        units=block.segments[0].analogsignals[0].units,
        sampling_rate=block.segments[0].analogsignals[0].sampling_rate,
        spatial_scale=block.segments[0].imagesequences[0].spatial_scale,
        name=block.segments[0].analogsignals[0].name,
        description=block.segments[0].analogsignals[0].description)

    # create a new Block & Segment and append the ImageSequence
    segm_deconv = neo.Segment()
    segm_deconv.annotations = block.segments[0].annotations
    segm_deconv.annotate(kernel=args.kernel)  #ToDo: parameters annotations
    segm_deconv.imagesequences.append(imgseq_deconv)
    block_deconv = neo.Block()
    block_deconv.segments.append(segm_deconv)
    block_deconv.name = block.name
    block_deconv.description = block.description
    block_deconv.annotations = block.annotations
Example #7
                                 max_Niter=args.max_Niter,
                                 convergence_limit=args.convergence_limit,
                                 kernelX=kernelX,
                                 kernelY=kernelY,
                                 kernelT=kernelT,
                                 kernelHS=kernelHS)

    if np.sum(args.gaussian_sigma):
        vector_frames = smooth_frames(vector_frames, sigma=args.gaussian_sigma)

    vec_imgseq = neo.ImageSequence(
        vector_frames,
        units='dimensionless',
        dtype=complex,
        # t_start=imgseq.t_start,
        spatial_scale=imgseq.spatial_scale,
        sampling_rate=imgseq.sampling_rate,
        name='Optical Flow',
        description='Horn-Schunck estimation of optical flow',
        file_origin=imgseq.file_origin,
        **imgseq.annotations)

    if args.output_img is not None:
        ax = plot_opticalflow(frames[0], vector_frames[0], skip_step=3)
        ax.set_ylabel(f'pixel size: {imgseq.spatial_scale} '\
                    + imgseq.spatial_scale.units.dimensionality.string)
        # ax.set_xlabel('{:.3f} s'.format(imgseq.times[0].rescale('s')))
        save_plot(args.output_img)

    block.segments[0].imagesequences = [vec_imgseq]
    block = ImageSequence2AnalogSignal(block)
Example #8
    y_pos_sel = np.array(sio.loadmat(mat_fname)
                         ['y_pos_sel']).T  #array of 1000x 2500 (50x50) images
    x_pos_sel = np.array(sio.loadmat(mat_fname)
                         ['x_pos_sel']).T  #array of 1000x 2500 (50x50) images

    Piexel = x_pos_sel + y_pos_sel * 50
    image_seq = np.empty([len(Signal[0]), 50, 50])

    for t in range(len(Signal[0])):
        # for each time step
        for px in range(len(y_pos_sel)):
            # for each pixel
            image_seq[t][x_pos_sel[px], y_pos_sel[px]] = Signal[px][t]

    imageSequences = neo.ImageSequence(
        image_seq,
        sampling_rate=args.sampling_rate * pq.Hz,
        spatial_scale=args.spatial_scale * pq.mm,
        units='dimensionless')

    # loading the data flips the images vertically!

    #block = io.read_block()
    block = neo.Block()
    seg = neo.Segment(name='segment 0', index=0)
    block.segments.append(seg)
    print('block', block)
    print('seg', block.segments[0])

    block.segments[0].imagesequences.append(imageSequences)

    # change data orientation to be top=ventral, right=lateral
Example #9
def analogsignals_to_imagesequences(block):
    # ToDo: map 1D array annotations to 2D and update
    for seg_count, segment in enumerate(block.segments):
        for asig_count, asig in enumerate(segment.analogsignals):
            asig_array = asig.as_array()
            dim_t, dim_channels = asig_array.shape
            # coords = asig.channel_index.coordinates
            # temporary replacement
            if 'x_coords' not in asig.array_annotations\
                or 'y_coords' not in asig.array_annotations:
                print('AnalogSignal {} in Segment {} has no spatial Information '\
                      .format(asig_count, seg_count)\
                    + ' as array_annotations "x_coords" "y_coords", skip.')
                continue

            coords = np.array(
                [(x, y) for x, y in zip(asig.array_annotations['x_coords'],
                                        asig.array_annotations['y_coords'])],
                dtype=float)
            #
            # spatial_scale = asig.annotations['spatial_scale']
            # int_coords = np.round(np.array(coords)/spatial_scale).astype(int)
            # print(int_coords)

            if len(coords) != dim_channels:
                raise IndexError("Number of channels doesn't agree with "\
                               + "number of coordinates!")

            dim_x, dim_y = determine_dims(coords)

            image_data = np.empty((dim_t, dim_x, dim_y), dtype=asig.dtype)
            image_data[:] = np.nan

            for channel in range(dim_channels):
                x, y = coords[channel]
                x, y = int(x), int(y)
                image_data[:, x, y] = asig_array[:, channel]

            # spatial_scale = determine_spatial_scale(coords)*coords.units
            spatial_scale = asig.annotations['spatial_scale']

            # array_annotations = {}
            # for k, v in asig.array_annotations.items():
            #     array_annotations[k] = v.reshape((dim_x, dim_y))
            imgseq = neo.ImageSequence(
                image_data=image_data,
                units=asig.units,
                dtype=asig.dtype,
                t_start=asig.t_start,
                sampling_rate=asig.sampling_rate,
                name=asig.name,
                description=asig.description,
                file_origin=asig.file_origin,
                # array_annotations=array_annotations,
                **asig.annotations)

            imgseq.annotate(array_annotations=asig.array_annotations)

            remove_annotations(imgseq, del_keys=['nix_name', 'neo_name'])
            block.segments[seg_count].imagesequences.append(imgseq)
    return block
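remove_annotations() in Example #9 is another project-specific helper not reproduced on this page. A minimal sketch consistent with how it is called here (dropping selected annotation keys); the real helper may behave differently.

def remove_annotations(neo_obj, del_keys=('nix_name', 'neo_name')):
    # Drop bookkeeping annotations that should not be carried over.
    for key in del_keys:
        neo_obj.annotations.pop(key, None)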