Example #1
    def get_recorded_data(self, vec):
        '''Extract recorded voltages and timestamps given the recorded Vector instance.  
        If self.stimulus_sampling_rate is smaller than self.simulation_sampling_rate, 
        resample to self.stimulus_sampling_rate.

        Parameters
        ----------
        vec : neuron.Vector 
           constructed by self.record_values

        Returns
        -------
        dict with two keys: 'v' = numpy.ndarray with voltages, 't' = numpy.ndarray with timestamps

        '''
        junction_potential = self.description.data['fitting'][0]['junction_potential']
        
        v = np.array(vec["v"])
        t = np.array(vec["t"])

        if self.stimulus_sampling_rate < self.simulation_sampling_rate:
            factor = int(self.simulation_sampling_rate / self.stimulus_sampling_rate)
                
            Utils._log.debug("subsampling recorded traces by %dX", factor)
            v = block_reduce(v, (factor,), np.mean)[:len(self.stim_curr)]
            t = block_reduce(t, (factor,), np.min)[:len(self.stim_curr)]

        mV = 1.0e-3
        v = (v - junction_potential) * mV
        
        return { "v": v, "t": t }
Example #2
def test_invalid_block_size():
    image = np.arange(4 * 6).reshape(4, 6)

    with testing.raises(ValueError):
        block_reduce(image, [1, 2, 3])
    with testing.raises(ValueError):
        block_reduce(image, [1, 0.5])
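For contrast, a call that satisfies both constraints exercised above (one block-size entry per image dimension, all of them integers) could look like this:

import numpy as np
from skimage.measure import block_reduce

image = np.arange(4 * 6).reshape(4, 6)
out = block_reduce(image, (2, 2))  # two integer entries for a 2-D image
print(out.shape)  # (2, 3)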
Example #3
def downsample(data):
    data["_tr_X"] = np.zeros((len(data["tr_X"]), 14 * 14), dtype="float32")
    data["_va_X"] = np.zeros((len(data["va_X"]), 14 * 14), dtype="float32")
    data["_te_X"] = np.zeros((len(data["te_X"]), 14 * 14), dtype="float32")

    for i in xrange(0, len(data["tr_X"])):
        data["_tr_X"][i] = block_reduce(
            data["tr_X"][i].reshape(data["shape_x"]), block_size=(2, 2), func=np.mean
        ).flatten()

    for i in xrange(0, len(data["va_X"])):
        data["_va_X"][i] = block_reduce(
            data["va_X"][i].reshape(data["shape_x"]), block_size=(2, 2), func=np.mean
        ).flatten()

    for i in xrange(0, len(data["te_X"])):
        data["_te_X"][i] = block_reduce(
            data["te_X"][i].reshape(data["shape_x"]), block_size=(2, 2), func=np.mean
        ).flatten()

    data["tr_X"] = data["_tr_X"]
    data["va_X"] = data["_va_X"]
    data["te_X"] = data["_te_X"]

    data["shape_x"] = (14, 14)
    data["n_x"] = 14 * 14
    return data
Example #4
def k8_down_hints(x):
    RGB = x[:, :, 0:3].astype(np.float64)
    A = x[:, :, 3:4].astype(np.float64)
    RGB = RGB * A / 255.0
    RGB = block_reduce(RGB, (8, 8, 1), np.max)
    A = block_reduce(A, (8, 8, 1), np.max)
    y = np.concatenate([RGB, A], axis=2)
    return y
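A hypothetical smoke test for k8_down_hints with a dummy RGBA array; height and width shrink by 8 while the channel axis is preserved:

import numpy as np

x = np.random.randint(0, 256, size=(64, 64, 4), dtype=np.uint8)
y = k8_down_hints(x)
print(y.shape)  # (8, 8, 4): alpha-premultiplied RGB plus alpha, 8x8 max-pooled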
Example #5
def mycorrelate2d(fixed,moved,skip=1):
    """a 2d correlation function for numpy 2d matrices
    
    arguments
    fixed) is the larger matrix, which should stay still
    moved) is the smaller matrix, which moves left/right and up/down to sample the correlation
    skip) is the number of positions to skip over when sampling,
    so if skip=3 it samples at shifts (0,0), (skip,0), (2*skip,0), ..., (0,skip), (skip,skip), ...

    returns
    corrmat) the 2d matrix with the corresponding correlation coefficients of the data at that offset
    note the 0,0 entry of corrmat corresponds to moved(0,0) aligned with fixed(0,0)
    and the 1,1 entry of corrmat corresponds to moved(0,0) aligned with fixed(skip,skip)
    NOTE) the height of corrmat is given by corrmat.height=ceil((fixed.height-moved.height)/skip)
    and the width in a corresponding manner.
    NOTE) the standard deviation is measured over the entire dataset, so particular c values can be above 1.0
    if the variance in the subsampled region of fixed is lower than the variance of the entire matrix
    
    """
    if skip>1:
        fixed = block_reduce(fixed,block_size = (int(skip),int(skip)),func = np.mean,cval=np.mean(fixed))
        moved = block_reduce(moved,block_size = (int(skip),int(skip)),func = np.mean,cval=np.mean(moved))

    (fh,fw)=fixed.shape
    (mh,mw)=moved.shape
    deltah=(fh-mh)
    deltaw=(fw-mw)
    #if (deltah<1 or deltaw<1):
    #    return
    #fixed=fixed-fixed.mean()
    #fixed=fixed/fixed.std()
    #moved=moved-moved.mean()
    #moved=moved/moved.std()
    # ch=np.ceil(deltah*1.0/skip)
    # cw=np.ceil(deltaw*1.0/skip)
    
    # corrmat=np.zeros((ch,cw))
    
    # #print (fh,fw,mh,mw,ch,cw,skip,deltah,deltaw)
    # for shiftx in range(0,deltaw,skip):
    #     for shifty in range(0,deltah,skip):
    #         fixcut=fixed[shifty:shifty+mh,shiftx:shiftx+mw]
    #         corrmat[shifty/skip,shiftx/skip]=(fixcut*moved).sum()
           
    # corrmat=corrmat/(mh*mw)
    

    corrmatt = norm_xcorr.norm_xcorr(moved,fixed, trim=True, method='fourier')

    print('corrmatt', corrmatt.shape)
    print('moved', moved.shape)
    print('fixed', fixed.shape)

    #image_product = np.fft.fft2(fixed) * np.fft.fft2(moved).conj()
    #corrmat = np.fft.fftshift(np.fft.ifft2(image_product))

    return corrmatt
Example #6
def align_georasters(raster,alignraster,how=np.mean,cxsize=None,cysize=None):
    '''
    Align two rasters so that data overlaps by geographical location
    Usage: (alignedraster_o, alignedraster_a) = AlignRasters(raster, alignraster, how=np.mean)
    where 
        raster: string with location of raster to be aligned
        alignraster: string with location of raster to which raster will be aligned
        how: function used to aggregate cells (if the rasters have different resolutions)
    It is assumed that both rasters are in the same projection
    '''
    (NDV1, xsize1, ysize1, GeoT1, Projection1, DataType1)=(raster.nodata_value, raster.shape[1], raster.shape[0], raster.geot, raster.projection, raster.datatype)
    (NDV2, xsize2, ysize2, GeoT2, Projection2, DataType2)=(alignraster.nodata_value, alignraster.shape[1], alignraster.shape[0], alignraster.geot, alignraster.projection, alignraster.datatype)
    if Projection1.ExportToMICoordSys()==Projection2.ExportToMICoordSys():
        blocksize=(int(np.round(max(GeoT2[1]/GeoT1[1],1))),int(np.round(max(GeoT2[-1]/GeoT1[-1],1))))
        mraster=raster.raster
        mmin=mraster.min()
        if blocksize!=(1,1):
            mraster=block_reduce(mraster,blocksize,func=how)
        blocksize=(int(np.round(max(GeoT1[1]/GeoT2[1],1))),int(np.round(max(GeoT1[-1]/GeoT2[-1],1))))
        araster=alignraster.raster
        amin=araster.min()
        if blocksize!=(1,1):
            araster=block_reduce(araster,blocksize,func=how)
        if GeoT1[0]<=GeoT2[0]:
            row3,mcol=map_pixel(GeoT2[0], GeoT2[3], GeoT1[1] *blocksize[0],GeoT1[-1]*blocksize[1], GeoT1[0], GeoT1[3])
            acol=0
        else:
            row3,acol=map_pixel(GeoT1[0], GeoT1[3], GeoT2[1],GeoT2[-1], GeoT2[0], GeoT2[3])
            mcol=0
        if GeoT1[3]<=GeoT2[3]:
            arow,col3=map_pixel(GeoT1[0], GeoT1[3], GeoT2[1],GeoT2[-1], GeoT2[0], GeoT2[3])
            mrow=0
        else:
            mrow,col3=map_pixel(GeoT2[0], GeoT2[3], GeoT1[1] *blocksize[0],GeoT1[-1]*blocksize[1], GeoT1[0], GeoT1[3])
            arow=0
        mraster=mraster[mrow:,mcol:]
        araster=araster[arow:,acol:]
        if cxsize and cysize:
            araster=araster[:cysize,:cxsize]
            mraster=mraster[:cysize,:cxsize]
        else:
            rows = min(araster.shape[0],mraster.shape[0])
            cols = min(araster.shape[1],mraster.shape[1])
            araster=araster[:rows,:cols]
            mraster=mraster[:rows,:cols]
        mraster=np.ma.masked_array(mraster,mask=mraster<mmin, fill_value=NDV1)
        araster=np.ma.masked_array(araster,mask=araster<amin, fill_value=NDV2)
        GeoT=(max(GeoT1[0],GeoT2[0]), GeoT1[1]*blocksize[0], GeoT1[2], min(GeoT1[3],GeoT2[3]), GeoT1[4] ,GeoT1[-1]*blocksize[1])
        mraster=GeoRaster(mraster, GeoT, projection=Projection1, nodata_value=NDV1, datatype=DataType1)
        araster=GeoRaster(araster, GeoT, projection=Projection2, nodata_value=NDV2, datatype=DataType2)
        return (mraster,araster)
    else:
        print("Rasters need to be in same projection")
        return (-1,-1)
Example #7
def test_block_reduce_min():
    image1 = np.arange(4 * 6).reshape(4, 6)
    out1 = block_reduce(image1, (2, 3), func=np.min)
    expected1 = np.array([[ 0, 3],
                          [12, 15]])
    assert_equal(expected1, out1)

    image2 = np.arange(5 * 8).reshape(5, 8)
    out2 = block_reduce(image2, (4, 5), func=np.min)
    expected2 = np.array([[0, 0],
                          [0, 0]])
    assert_equal(expected2, out2)
Example #8
def test_block_reduce_max():
    image1 = np.arange(4 * 6).reshape(4, 6)
    out1 = block_reduce(image1, (2, 3), func=np.max)
    expected1 = np.array([[ 8, 11],
                          [20, 23]])
    assert_equal(expected1, out1)

    image2 = np.arange(5 * 8).reshape(5, 8)
    out2 = block_reduce(image2, (4, 5), func=np.max)
    expected2 = np.array([[28, 31],
                          [36, 39]])
    assert_equal(expected2, out2)
Example #9
def test_block_reduce_sum():
    image1 = np.arange(4 * 6).reshape(4, 6)
    out1 = block_reduce(image1, (2, 3))
    expected1 = np.array([[ 24,  42],
                          [ 96, 114]])
    assert_equal(expected1, out1)

    image2 = np.arange(5 * 8).reshape(5, 8)
    out2 = block_reduce(image2, (3, 3))
    expected2 = np.array([[ 81, 108,  87],
                          [174, 192, 138]])
    assert_equal(expected2, out2)
Example #10
def test_block_reduce_mean():
    image1 = np.arange(4 * 6).reshape(4, 6)
    out1 = block_reduce(image1, (2, 3), func=np.mean)
    expected1 = np.array([[  4.,   7.],
                          [ 16.,  19.]])
    assert_equal(expected1, out1)

    image2 = np.arange(5 * 8).reshape(5, 8)
    out2 = block_reduce(image2, (4, 5), func=np.mean)
    expected2 = np.array([[14. , 10.8],
                          [ 8.5,  5.7]])
    assert_equal(expected2, out2)
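The second expectation above only makes sense once padding is accounted for: block_reduce pads partial blocks with cval=0 by default, so the 5x8 image becomes 8x10 and each 4x5 block is averaged over all 20 cells, zeros included. A sketch reproducing expected2 by hand:

import numpy as np

image2 = np.arange(5 * 8).reshape(5, 8)

# Zero-pad to the next multiple of the block size, then average 4x5 blocks.
padded = np.zeros((8, 10))
padded[:5, :8] = image2
means = padded.reshape(2, 4, 2, 5).mean(axis=(1, 3))
print(means)  # [[14.  10.8]
              #  [ 8.5  5.7]]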
Example #11
def Location_Shape(img,segments,segments_label):
    # 72-D Feature
    row,col = segments.shape
    location_block_row = int(math.ceil(row/6.))
    location_block_col = int(math.ceil(col/6.))

    Location_Shape_Features = []
    for label in range(len(segments_label)):
        # Make mask for each segment
        seg_mask = Segment_Mask(segments, label)

        ### Get Location Features
        # Downsample to 6*6
        try:
            downsample = block_reduce(seg_mask, block_size=(location_block_row, location_block_col), cval = 0, func=np.max)
            # Convert to 36-D Location Features
            Location_Features = downsample.flatten().tolist()
        except:
            Location_Features = [0 for x in range(36)]

        ### Get Shape Features
        # Bounding Box
        left,up,right,down = Image.fromarray(np.uint8(seg_mask)).getbbox()

        # Cropped the mask
        cropped_mask =  seg_mask[up:down,left:right]

        # Downsample to 6*6
        cropped_row,cropped_col = cropped_mask.shape

        ### When the cropped mask is very small, the 6x6 downsample can
        ### produce the wrong output size, so trim to a multiple of 6 first
        if cropped_row < 26:
            cropped_mask = cropped_mask[:(cropped_row-cropped_row%6),:]
        if cropped_col < 26:
            cropped_mask = cropped_mask[:,:(cropped_col-cropped_col%6)]

        cropped_row,cropped_col = cropped_mask.shape
        cropped_block_row = int(math.ceil(cropped_row/6.))
        cropped_block_col = int(math.ceil(cropped_col/6.))

        try:
            downsample = block_reduce(cropped_mask, block_size=(cropped_block_row, cropped_block_col), cval = 0, func=np.max)
            # Convert to 36-D Shape Features
            Shape_Features = downsample.flatten().tolist()
        except:
            Shape_Features = [0 for x in range(36)]

        Location_Shape_Features.append(Location_Features+Shape_Features)

    return Location_Shape_Features
Example #12
def test_block_reduce_median():
    image1 = np.arange(4 * 6).reshape(4, 6)
    out1 = block_reduce(image1, (2, 3), func=np.median)
    expected1 = np.array([[  4.,   7.],
                          [ 16.,  19.]])
    assert_equal(expected1, out1)

    image2 = np.arange(5 * 8).reshape(5, 8)
    out2 = block_reduce(image2, (4, 5), func=np.median)
    expected2 = np.array([[ 14.,  6.5],
                          [  0.,  0. ]])
    assert_equal(expected2, out2)

    image3 = np.array([[1, 5, 5, 5], [5, 5, 5, 1000]])
    out3 = block_reduce(image3, (2, 4), func=np.median)
    assert_equal(5, out3)
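The third case is the interesting one: the 1000 outlier does not move the block median, while a mean reduction would be dominated by it:

import numpy as np
from skimage.measure import block_reduce

image = np.array([[1, 5, 5, 5], [5, 5, 5, 1000]])
print(block_reduce(image, (2, 4), func=np.median))  # [[5.]]
print(block_reduce(image, (2, 4), func=np.mean))    # [[128.875]]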
Example #13
    def downsample(self, factor, method=np.nansum):
        """
        Down sample image by a given factor.

        The image is down sampled using `skimage.measure.block_reduce`. If the
        shape of the data is not divisible by the down sampling factor, the image
        must be padded beforehand to the correct shape.

        Parameters
        ----------
        factor : int
            Down sampling factor.
        method : np.ufunc (np.nansum), optional
            Function used to combine the image blocks.

        Returns
        -------
        image : `SkyImage`
            Down sampled image.
        """
        from skimage.measure import block_reduce

        shape = self.data.shape

        if not (np.mod(shape, factor) == 0).all():
            raise ValueError('Data shape {0} is not divisible by {1} in all axes.'
                             ' Pad image prior to downsampling to the correct'
                             ' shape.'.format(shape, factor))

        data = block_reduce(self.data, (factor, factor), method)

        # Adjust WCS
        wcs = get_resampled_wcs(self.wcs, factor, downsampled=True)
        return SkyImage(data=data, wcs=wcs)
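A sketch of the pad-then-downsample workflow the docstring asks for, using plain numpy in place of the SkyImage machinery:

import numpy as np
from skimage.measure import block_reduce

data = np.ones((5, 6))               # 5 is not divisible by the factor
factor = 2

pad_y = (-data.shape[0]) % factor    # pad up to the next multiple of factor
pad_x = (-data.shape[1]) % factor
data = np.pad(data, ((0, pad_y), (0, pad_x)), mode='edge')

out = block_reduce(data, (factor, factor), np.nansum)
print(out.shape)  # (3, 3)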
Example #14
    def test_block_reduce_mask_array(self):
        test_array = np.arange(16).reshape((4, 4))
        assert_array_equal(test_array[0], [0, 1, 2, 3], verbose=True)
        assert_array_equal(test_array[1], [4, 5, 6, 7], verbose=True)
        assert_array_equal(test_array[2], [8, 9, 10, 11], verbose=True)
        assert_array_equal(test_array[3], [12, 13, 14, 15], verbose=True)

        mask_array = np.full((4, 4), False, dtype=bool)
        mask_array[1:3, 1:3] = True
        assert_array_equal(mask_array[0], [False, False, False, False], verbose=True)
        assert_array_equal(mask_array[1], [False,  True,  True, False], verbose=True)
        assert_array_equal(mask_array[2], [False,  True,  True, False], verbose=True)
        assert_array_equal(mask_array[3], [False, False, False, False], verbose=True)

        masked_array = np.ma.array(test_array, mask=mask_array)
        self.assertTrue(np.ma.is_masked(masked_array))
        assert_array_equal(masked_array[0], [0, 1, 2, 3], verbose=True)
        assert_array_equal(masked_array[1], [4, np.nan, np.nan, 7], verbose=True)
        assert_array_equal(masked_array[2], [8, np.nan, np.nan, 11], verbose=True)
        assert_array_equal(masked_array[3], [12, 13, 14, 15], verbose=True)

        mean_aggregated_array = block_reduce(masked_array, (2, 2), func=np.mean)

        self.assertEqual((2, 2), mean_aggregated_array.shape)
        # The mask is ignored in the block_reduce function
        assert_array_equal(mean_aggregated_array[0], [(0 + 1 + 4 + 5) / 4, (2 + 3 + 6 + 7) / 4], verbose=True)
        assert_array_equal(mean_aggregated_array[1], [(8 + 9 + 12 + 13) / 4, (10 + 11 + 14 + 15) / 4], verbose=True)
Example #15
def block_reduce(data, block_size, func=np.sum):
    # Backported from Astropy 1.1 for compatibility.
    # Unlike skimage's block_reduce, this trims `data` to a multiple of
    # `block_size` instead of padding partial blocks with cval.
    from skimage.measure import block_reduce

    data = np.asanyarray(data)

    block_size = np.atleast_1d(block_size)
    if data.ndim > 1 and len(block_size) == 1:
        block_size = np.repeat(block_size, data.ndim)

    if len(block_size) != data.ndim:
        raise ValueError('`block_size` must be a scalar or have the same '
                         'length as `data.shape`')

    block_size = np.array([int(i) for i in block_size])
    size_resampled = np.array(data.shape) // block_size
    size_init = size_resampled * block_size

    # trim data if necessary
    for i in range(data.ndim):
        if data.shape[i] != size_init[i]:
            data = data.swapaxes(0, i)
            data = data[:size_init[i]]
            data = data.swapaxes(0, i)

    return block_reduce(data, tuple(block_size), func=func)
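Unlike skimage's block_reduce, which pads partial blocks with cval, this backport trims the array so every block is complete. A quick comparison of the two behaviors:

import numpy as np
from skimage.measure import block_reduce as skimage_block_reduce

data = np.arange(5 * 5).reshape(5, 5)

trimmed = block_reduce(data, 2)              # backport above: trims to 4x4 first
padded = skimage_block_reduce(data, (2, 2))  # skimage: zero-pads to 6x6

print(trimmed.shape, padded.shape)  # (2, 2) (3, 3)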
Example #16
def get_patch(image, coords, offset, nodule_list, patch_flag=True):
    xyz = image[int(coords[0] - offset): int(coords[0] + offset), int(coords[1] - offset): int(coords[1] + offset),
          int(coords[2] - offset): int(coords[2] + offset)]

    if patch_flag:
        output = np.expand_dims(xyz, axis=-1)
    else:
        # resize xyz
        """
        xyz = scipy.ndimage.zoom(input=xyz, zoom=1/8, order=1) # nearest
        xyz = np.where(xyz > 0, 1.0, 0.0)
        """
        xyz = block_reduce(xyz, (9, 9, 9), np.max)
        output = np.expand_dims(xyz, axis=-1)

        output = indices_to_one_hot(output.astype(np.int32), 2)
        output = np.reshape(output, (label_size, label_size, label_size, 2))
        output = output.astype(np.float32)

        # print('------------------')
        # print(output)

        # print(output)
        # print(np.shape(output))

    nodule_list.append(output)
Example #17
def convolve(img, sigma=4):
    '''
    2D Gaussian convolution
    '''

    if img.sum() == 0:
        return img

    img_pad = np.zeros((3 * img.shape[0], 3 * img.shape[1]))
    img_pad[img.shape[0]:2 * img.shape[0], img.shape[1]:2 * img.shape[1]] = img

    x = np.arange(3 * img.shape[0])
    y = np.arange(3 * img.shape[1])
    g = spinterp.interp2d(y, x, img_pad, kind='linear')

    if img.shape[0] == 16:
        upsample = 4
        offset = -(1 - .625)
    elif img.shape[0] == 8:
        upsample = 8
        offset = -(1 - .5625)
    else:
        raise NotImplementedError
    ZZ_on = g(offset + np.arange(0, img.shape[1] * 3, 1. / upsample), offset + np.arange(0, img.shape[0] * 3, 1. / upsample))
    ZZ_on_f = gaussian_filter(ZZ_on, float(sigma), mode='constant')

    z_on_new = block_reduce(ZZ_on_f, (upsample, upsample))
    z_on_new = z_on_new / z_on_new.sum() * img.sum()
    z_on_new = z_on_new[img.shape[0]:2 * img.shape[0], img.shape[1]:2 * img.shape[1]]

    return z_on_new
Example #18
def downsample_array(a, ds):
    '''
    Downsample ndarray by factors in vector ds (same length as dimension of a).
    '''
    from skimage.measure import block_reduce
    a_downsampled = block_reduce(a, ds)

    return a_downsampled
Example #19
 def downsample(self, factor, preserve_counts=True):
     from skimage.measure import block_reduce
     geom = self.geom.downsample(factor)
     block_size = tuple([factor, factor] + [1] * (self.geom.ndim - 2))
     data = block_reduce(self.data, block_size[::-1], np.nansum)
     if not preserve_counts:
         data /= factor**2
     return self.__class__(geom, data, meta=copy.deepcopy(self.meta))
Example #20
  def featureExtractor(self):

    screen = np.array(ImageGrab.grab(bbox = (50, 120, 1250, 650)))
    small_screen = block_reduce(screen, (530 // 224, 1200 // 224, 1), np.max)
    features = np.array(self.tf.transform(small_screen[:,:,0:3])).flatten()

    feat = dict(zip(range(2000), features))

    return  feat
Example #21
def crop_and_resize(img, target_size=32, zoom=1):
    small_side = int(np.min(img.shape) * zoom)
    reduce_factor = small_side // target_size
    crop_size = target_size * reduce_factor
    mid = np.array(img.shape) // 2
    half_crop = crop_size // 2
    center = img[mid[0]-half_crop:mid[0]+half_crop,
                 mid[1]-half_crop:mid[1]+half_crop]
    return block_reduce(center, (reduce_factor, reduce_factor), np.mean)
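As written, crop_and_resize assumes the zoomed small side is an integer multiple of target_size; a quick check with hypothetical input:

import numpy as np

img = np.random.rand(128, 96)  # smallest side 96 = 3 * 32
out = crop_and_resize(img, target_size=32)
print(out.shape)  # (32, 32): central 96x96 crop, mean-pooled 3x3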
Example #22
    def block_reduce(self,block_size, how=np.ma.mean):
        '''
        geo.block_reduce(block_size, how=func)

        Returns copy of raster aggregated to a coarser resolution by combining cells with how.
        Default: how=np.ma.mean
        '''
        raster2=block_reduce(self.raster,block_size,func=how)
        return GeoRaster(raster2, self.geot, nodata_value=self.nodata_value,\
                        projection=self.projection, datatype = self.datatype)
Example #23
 def process_frames(self, data):
     logging.debug("Running Downsample data")
     if self.parameters['mode'] in self.mode_dict:
         sampler = self.mode_dict[self.parameters['mode']]
     else:
         logging.warning("Unknown downsample mode. Using 'mean'.")
         sampler = numpy.mean
     block_size = (self.parameters['bin_size'], self.parameters['bin_size'])
     result = skim.block_reduce(data[0], block_size, sampler)
     return result
Example #24
    def aggregate(self,block_size):
        '''
        geo.aggregate(block_size)

        Returns copy of raster aggregated to smaller resolution, by adding cells.
        '''
        raster2=block_reduce(self.raster,block_size,func=np.ma.sum)
        geot = self.geot
        geot = (geot[0], block_size[0] * geot[1], geot[2], geot[3],geot[4], block_size[1] * geot[-1])
        return GeoRaster(raster2, geot, nodata_value=self.nodata_value,\
                        projection=self.projection, datatype = self.datatype)
Example #25
def rebin(image, binning):
    """
    Rebin image using skimage block_reduce()

    :param image: image to rebin
    :param binning: binning factor
    :return: rebinned image
    """
    r_image = block_reduce(image, block_size =(binning, binning), func=np.mean)
    return r_image
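Typical use is 2x2 binning of a detector frame (sizes here are illustrative):

import numpy as np

frame = np.random.rand(1024, 1024)
binned = rebin(frame, 2)
print(binned.shape)  # (512, 512), each output pixel the mean of a 2x2 block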
Example #26
    def test_block_reduce_ndarray(self):
        test_array = np.arange(16).reshape((4, 4))
        assert_array_equal(test_array[0], [0, 1, 2, 3], verbose=True)
        assert_array_equal(test_array[1], [4, 5, 6, 7], verbose=True)
        assert_array_equal(test_array[2], [8, 9, 10, 11], verbose=True)
        assert_array_equal(test_array[3], [12, 13, 14, 15], verbose=True)

        mean_aggregated_array = block_reduce(test_array, (2, 2), func=np.mean)

        self.assertEqual((2, 2), mean_aggregated_array.shape)
        assert_array_equal(mean_aggregated_array[0], [(0 + 1 + 4 + 5) / 4, (2 + 3 + 6 + 7) / 4], verbose=True)
        assert_array_equal(mean_aggregated_array[1], [(8 + 9 + 12 + 13) / 4, (10 + 11 + 14 + 15) / 4], verbose=True)
Example #27
def downsample(data):
    data['_tr_X'] = np.zeros((len(data['tr_X']), 14*14), dtype='float32')
    data['_va_X'] = np.zeros((len(data['va_X']), 14*14), dtype='float32')
    data['_te_X'] = np.zeros((len(data['te_X']), 14*14), dtype='float32')

    for i in range(len(data['tr_X'])):
        data['_tr_X'][i] = block_reduce(data['tr_X'][i].reshape(data['shape_x']), block_size=(2,2), func=np.mean).flatten() 

    for i in range(len(data['va_X'])):
        data['_va_X'][i] = block_reduce(data['va_X'][i].reshape(data['shape_x']), block_size=(2,2), func=np.mean).flatten() 

    for i in range(len(data['te_X'])):
        data['_te_X'][i] = block_reduce(data['te_X'][i].reshape(data['shape_x']), block_size=(2,2), func=np.mean).flatten() 

    data['tr_X'] = data['_tr_X']
    data['va_X'] = data['_va_X']
    data['te_X'] = data['_te_X']

    data['shape_x'] = (14,14)
    data['n_x'] = 14*14
    return data
Example #28
def main():
	
	#check input args
	if len(sys.argv) == 1:
		print("Error: No PDB file given.")
		print("Usage: python downsample.py input_struct.pdb")
		exit()
		
	#open given file
	in_file = open(sys.argv[1], 'r')
	out_file = open(sys.argv[1] + '_HALF.pdb', 'w')
	
	#get chromosome positions from chain id
	pos = []
	curr = ""
	lineNum = 0
	for line in in_file:
		if line[0] == 'C':
			break
		if line[21] != curr:
			pos.append(lineNum)
			curr = line[21]
		lineNum += 1
		
	#convert to new positions by halving value
	newPos = []
	for _pos in pos:
		newPos.append(int(math.floor(_pos / 2)))
		
		
	#load in struct and get its coordinates
	coords = get_coords(sys.argv[1])
	print(coords.shape)
	
	#downsample the file by half
	half_coords = block_reduce(coords, block_size=(2, 1), func=np.mean)
	print(half_coords.shape)
	
	#and now write the new file
	count = 1
	atoms = 1
	posIndex = 1
	for i in half_coords:
		if posIndex == len(newPos):
			pass #so it continues to print the entire last chain
		elif count == newPos[posIndex]:
			posIndex += 1
			atoms = 1
		out_file.write("ATOM   %4d O    EDG %c   1    %8.3f%8.3f%8.3f  1.00 75.00  \n" % (atoms, posIndex + 64, half_coords[count-1][0], half_coords[count-1][1], half_coords[count-1][2]))
		count += 1
		atoms += 1
Example #29
def nema_data_preprocess(imagearray,resamplesize):
    """ Function performs preprocessing on the input image:
          - resample to 64x64
          - smoothing with nine-point NEMA kernel
          - calculation of UFOV and CFOV regions
        Returns: masked UFOV and CFOV arrays
    """    

    print "data preprocessing: "
    print "array size:",  np.shape(imagearray), "resamplesize: ", resamplesize
    # causes Fourier artifacts
    #imagearray = resample(resample(imagearray,resamplesize[0],axis=0),resamplesize[1],axis=1)
    if resamplesize[0]>0 and resamplesize[1]>0:
      imagearray = block_reduce(imagearray, block_size=(np.shape(imagearray)[0]/resamplesize[0],np.shape(imagearray)[1]/resamplesize[1]),func=np.sum)
    imagearray = imagearray.astype('float64')

    imagearray = nema_smooth(imagearray) 


    """ NEMA step 1: "First, any pixels at the edge of UFOV containing less
                      than 75% of the mean counts per pixel in the CFOV shall
                      be set to zero."
    """                      
    # first estimate of UFOV (use segmentation-threshold = mean value of entire image)
    threshold = set_threshold(imagearray)
    ufov = ma.masked_less(imagearray,threshold,copy=False)
    
    # use NEMA guidelines to determine UFOV
    cfov = create_cfov(ufov)
    
    # average of CFOV
    cfov_average = set_threshold(cfov)
    ufov = ma.masked_less(imagearray,0.75*cfov_average,copy=False)
    
    
    """ NEMA step 2: "Second, those pixels which now have at least one of their
        four directly abutted neighbors containing zero counts, will be also
        set to zero. The remaining non-zero pixels are the pixels to be included
        in the analysis for the UFOV.
    """
    ufov.mask=scipy.ndimage.binary_dilation(ufov.mask,iterations=1)
    # based on final UFOV, create a new CFOV
    cfov = create_cfov(ufov)

    # FIXME: inconsistent use of xy (in python y is the horizontal dimension)
    #ux, uy = get_dims(ufov)

    ufov.fill_value=0
    cfov.fill_value=0

    return ufov, cfov
Example #30
def downsample(structL, structH):

	coordsL = get_coords(structL)
	coordsH = get_coords(structH)
	
	ysL, xsL = coordsL.shape
	ysH, xsH = coordsH.shape
	
	factor = ysH // ysL
	dsarr = block_reduce(coordsH, block_size=(factor, 1), func=np.mean)
	newFactor = float(dsarr.shape[0]) / ysL
	coordsL2 = scipy.ndimage.interpolation.zoom(coordsL, (newFactor,1))
	
	return dsarr, coordsL2
Example #31
def obtainMaskVolumesList(num_mask_volumes, reshape_size, final_size):
    # load 80x80x80 masks
    maskVolumesList = loadMaskVolumes(num_mask_volumes)

    # put masks in correct format
    maskVolumesList = np.array(maskVolumesList).reshape(-1, 80, 80, 80)
    maskVolumesList = maskVolumesList.astype(float)

    # Resize cubes to reshape_size x reshape_size x reshape_size
    lower = (80 - reshape_size) // 2
    upper = lower + reshape_size
    maskVolumesList = maskVolumesList[:, lower:upper, lower:upper, lower:upper]

    block_size = reshape_size // final_size

    reshaped_masks = []
    for mask in maskVolumesList:
        reshaped_masks.append(
            block_reduce(mask,
                         block_size=(block_size, block_size, block_size),
                         func=np.amax))

    ret = []
    for mask in reshaped_masks:
        flat_mask = []
        for x in range(final_size):
            for y in range(final_size):
                for z in range(final_size):
                    flat_mask.append(mask[x][y][z])
        ret.append(flat_mask)

    ret = np.array(ret).reshape(-1, int(math.pow(final_size, 3)))
    ret = ret.astype(float)

    ret = np.where(ret == 0, 0, 1)

    return ret
Example #32
 def load_mask(self):
     ''' mask Jcam to isolate the brain from background '''
     if self.importmask:
         mask = tiff.imread(
             self.absmaskpath)  # mask must be 8bit thresholded as 0 & 255
         mask = mask / 255
         mask_savepath = self.absmaskpath
     else:  # create mask automatically from top %50 pixels in histogram
         if self.img_source == 'npy':  #cut off file extension
             mask_savepath = os.path.join(
                 self.savepath, self.filename[:-4] + '_' +
                 self.special_string + self.maskname)
         else:
             mask_savepath = os.path.join(
                 self.savepath,
                 self.filename + '_' + self.special_string + self.maskname)
         mask_mov = Jcorr.load_mov(self, frameNum=1)
         if self.transposed:
             frame = mask_mov[:, 0].reshape(
                 [int(np.sqrt(mask_mov.shape[0])),
                  int(np.sqrt(mask_mov.shape[0]))])
         else:
             if len(np.shape(mask_mov)) == 2:
                 frame = mask_mov[:, :]
             else:
                 frame = mask_mov[0, :, :]
             if self.dsfactor != 1:
                 frame = block_reduce(frame,
                                      block_size=(self.dsfactor,
                                                  self.dsfactor),
                                      func=np.mean)
         del mask_mov
         mask = Jcorr.generate_mask(self, frame, 50)
         mask_tosave = mask.astype('uint8')
         mask_tosave = mask_tosave * 255  # for historical reasons, the mask is either 0 and 255
         tiff.imsave(mask_savepath, mask_tosave)
     return mask, mask_savepath
Example #33
    def downsample(self, factor, method=np.nansum):
        """
        Down sample image by a given factor.

        The image is down sampled using `skimage.measure.block_reduce`. If the
        shape of the data is not divisible by the down sampling factor, the image
        must be padded beforehand to the correct shape.

        Parameters
        ----------
        factor : int
            Down sampling factor.
        method : np.ufunc (np.nansum), optional
            Function used to combine the image blocks.

        Returns
        -------
        image : `SkyImage`
            Down sampled image.
        """
        from skimage.measure import block_reduce

        shape = self.data.shape

        if not (np.mod(shape, factor) == 0).all():
            raise ValueError('Data shape {0} is not divisible by {1} in all axes.'
                             ' Pad image prior to downsampling to the correct'
                             ' shape.'.format(shape, factor))

        data = block_reduce(self.data, (factor, factor), method)

        if self.wcs is not None:
            wcs = get_resampled_wcs(self.wcs, factor, downsampled=True)
        else:
            wcs = None

        return self.__class__(name=self.name, data=data, wcs=wcs, unit=self.unit)
Example #34
def load_block(tile, b, feature, col_DATA, **kwargs):
    #SETUP OPTIONS
    years = kwargs.get('years', None)
    blocksize = kwargs.get('blocksize', 200)
    outpath = kwargs.get('outpath', None)
    weekly = kwargs.get('weekly', True)
    
    Seeds_B_B = None
    min_f = []
    max_f = []
    n2 = 'NDI' + str(feature+1)

    for y in years:
        n1 = tile + '_' + y
        filename = fm.joinpath(outpath, n1, 'NDI_TimeSeries', n2, 'ts.h5')

        with h5py.File(filename, 'r') as hf:
            temp = np.array(hf["ts"][:,col_DATA:])
        #temp = fm.loadh5(fm.joinpath(outpath, n1, 'NDI_TimeSeries', n2), 'ts.h5')
        #temp = temp[:,col_DATA:]

        if weekly:
            temp = block_reduce(temp, block_size=(1,7), func=np.median, cval=np.median(temp))
        
        min_f.append(np.amin(temp))
        max_f.append(np.amax(temp))
        if Seeds_B_B is None:
            Seeds_B_B = temp[b:(b+blocksize),:]
        else:
            Seeds_B_B = np.concatenate((Seeds_B_B, temp[b:(b+blocksize),:]), axis=1)
        
    min_f = min(min_f)
    max_f = max(max_f) - min_f
    Seeds_B_B = Seeds_B_B - min_f
    Seeds_B_B = Seeds_B_B / max_f
    
    return Seeds_B_B    
Example #35
def check_predictions():
    # image = cv2.imread('/home/annus/Desktop/forest_images/image_test.png')[4000:8000,4000:8000,:]
    pred_path = sys.argv[
        1]  #'../numerical_results/german_sentinel_ee/test_images_and_predictions/image_pred_6.npy'
    label = np.memmap(pred_path, dtype=np.uint8, mode='r',
                      shape=(2048, 3840))  #.transpose(1,0)
    # pl.imshow(label)
    # pl.show()
    label = convert_to_colors(label)
    from skimage.measure import block_reduce
    # label[label != 1] = 0
    # print(np.unique(label), label.shape)
    # pl.subplot(121)
    # pl.imshow(image)
    # pl.subplot(122)
    # print(label.shape)
    # label = label[:2048,:2048,:]
    print(label.shape)
    label = block_reduce(label, block_size=(64, 64, 1), func=np.max)
    print(label.shape)
    pl.imshow(label)
    pl.axis('off')
    pl.show()
    pass
Example #36
def signal_sfft(phase_data, plot=False):

    fs = 40e6
    f, t, Zxx = signal.stft(phase_data, fs, nperseg=1999, boundary=None)

    reducedZ = block_reduce(np.abs(Zxx), block_size=(4, 4), func=np.max)

    reducedf = f[0::4]
    reducedt = t[0::4]

    if plot:
        plt.figure(figsize=(16, 10))
        plt.pcolormesh(reducedt,
                       reducedf,
                       reducedZ,
                       rasterized=True,
                       linewidth=0,
                       vmin=0,
                       vmax=0.5)
        plt.title('STFT Magnitude Reduced')
        plt.ylabel('Frequency [Hz]')
        plt.xlabel('Time [ms]')
        plt.show()
    return reducedZ
Example #37
def main():
    t_kwargs = {
        'batch_size': batch_size,
        'num_workers': 1,
        'pin_memory': True,
        'drop_last': True
    }
    Xtr, Ytr = load()
    Xtr = np.pad(Xtr, ((0, 0), (0, 0), (0, 0), (0, 1)),
                 'constant',
                 constant_values=0)
    Ytr = block_reduce(Ytr, block_size=(1, 1, 30), func=np.max)[:, :, :-1]
    tr_loader = torch.utils.data.DataLoader(Data2Torch([Xtr, Ytr]),
                                            shuffle=True,
                                            **t_kwargs)
    print('finishing loading data...')

    model = Net().cuda()
    model.apply(model_init)
    print('finishing loading model...')

    inst_weight = [get_weight(Ytr)]
    Trer = Trainer(model, lr, epoch, out_model_fn)
    Trer.fit(tr_loader, inst_weight)
Example #38
def convolveKai(inData, inSizeX, inSizeY, layer):
    colorDepth = layer['in_depth']  #3
    filter_depth = layer['out_depth']  #16

    f = layer['f']

    pooledResult = np.empty([filter_depth, inSizeY // 2, inSizeX // 2])
    result = np.empty([filter_depth, inSizeY, inSizeX])
    for filterID in range(filter_depth):
        c = np.zeros([inSizeY, inSizeX])
        for colorID in range(colorDepth):
            #			c+=convolve2d(inData[colorID],f[filterID][colorID],mode='same',boundary='fill',fillvalue=0)
            c += convolve(inData[colorID],
                          f[filterID][colorID],
                          mode='constant',
                          cval=0)
        result[filterID] = c + layer['biases']['w'][str(filterID)]
        # max pool here
        pooledResult[filterID] = block_reduce(result[filterID],
                                              block_size=(2, 2),
                                              func=np.max)
        # relu step is here
        pooledResult[filterID][pooledResult[filterID] < 0] = 0
    return result, pooledResult
Example #39
def downsample_2N(image, factor, method=np.nansum, shape=None):
    """
    Down sample image by a power of two.

    The image is down sampled using `skimage.measure.block_reduce`. Only
    down sampling factors that are a power of two are allowed. The image is
    padded to a given size using the 'reflect' method before the down sampling
    is done.

    Parameters
    ----------
    image : `~numpy.ndarray`
        Image to be down sampled.
    factor : int
        Down sampling factor, must be power of two.
    method : np.ufunc (np.nansum), optional
        Function used to combine the image blocks.
    shape : tuple (None), optional
        If shape is specified, the image is padded prior to the down sampling
        symmetrically in x and y direction to the given shape.

    Returns
    -------
    image : `~numpy.ndarray`
        Down sampled image.
    """
    from skimage.measure import block_reduce
    if not np.log2(factor).is_integer():
        raise ValueError('Downsampling factor must be power of 2.')
    factor = int(factor)

    if shape is not None:
        x_pad = (shape[1] - image.shape[1]) // 2
        y_pad = (shape[0] - image.shape[0]) // 2
        image = np.pad(image, ((y_pad, y_pad), (x_pad, x_pad)), mode='reflect')
    return block_reduce(image, (factor, factor), method)
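A usage sketch with explicit pre-padding to a target shape (values illustrative):

import numpy as np

image = np.random.rand(6, 6)

# Pad symmetrically to 8x8 with 'reflect', then reduce by a factor of 4.
out = downsample_2N(image, factor=4, shape=(8, 8))
print(out.shape)  # (2, 2)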
Example #40
def get_cluster_info(img, tol=cfg.DIST_TOL):
    """ Generates mask for
    each cluster of objects
    Parameters
    ----------
    img :obj:`BinaryImage`
        mask of objects in working area
    Returns
    -------
    :obj:list of `Group`
    """
    img_data = img.data
    orig_shape = img_data.shape
    img_data = block_reduce(img_data, block_size = (cfg.SCALE_FACTOR, cfg.SCALE_FACTOR), func = np.mean)
    scaled_shape = img_data.shape

    dist_tol = tol/cfg.SCALE_FACTOR

    #find groups of adjacent foreground pixels
    groups = generate_groups(img_data, orig_shape, scaled_shape)
    groups = [g for g in groups if g.area >= cfg.SIZE_TOL/cfg.SCALE_FACTOR]
    groups = merge_groups(groups, dist_tol)

    return groups
Example #41
 def vol_s(self, tup, crop_by=0):
     img_file_path, mask_file_path, seg_file_path = tup
     img_handler = ImageHandler()
     image_arr = nib.load(img_file_path).get_data()
     mask_arr = nib.load(mask_file_path).get_data()
     block_image_arr = block_reduce(image_arr,
                                    block_size=(3, 3, 3),
                                    func=np.median).astype(np.uint8)
     vol_list = img_handler.image_to_vols(image_arr,
                                          self.stride,
                                          self.segment_size,
                                          crop_by=crop_by)
     tuples = [(np.array(vol.seg_arr.shape) - 2 * crop_by,
                vol.start_voxel + crop_by) for vol in vol_list]
     seg_arr = nib.load(seg_file_path).get_data().astype(np.uint8)
     vol_list_segs = img_handler.image_vols_to_vols(seg_arr, tuples)
     tuples = [(self.segment_size_ss, (
         (vol.start_voxel +
          (self.segment_size - self.ss_factor * self.segment_size_ss) // 2)
         // self.ss_factor)) for vol in vol_list]
     vol_list_subsampled = img_handler.image_vols_to_vols(
         block_image_arr, tuples)
     return exclude_windows_outside_mask(mask_arr, vol_list, vol_list_segs,
                                         vol_list_subsampled)
Example #42
def LoadImage():
    # Current Path
    currPath = '/Users/LeonGong/Desktop/ELEN6886/CroppedYale/'
    # Load the first image
    X_train= []

    os.chdir(currPath)
    classDirectory = glob.glob("yale*")

    # Record the image labels
    delta = [[0 for n in range(SAMPLE_EACH_CLASS*CLASSES)] for m in range(CLASSES)]

    pos = 0
    # Load images from different classes
    for i in range(len(classDirectory)):
        # List all the class directories
        filePath = currPath + classDirectory[i]
        os.chdir(filePath)
        fileList = glob.glob("*.pgm")
        # Class i
        # Exclude the first two files of each class
        for file_item in fileList[2:]:
            img = Image.open(filePath+'/'+file_item)
            img = block_reduce(np.array(img), block_size=(DOWNSAMPLE_COEFFICIENT, DOWNSAMPLE_COEFFICIENT), func=np.mean)

            # plt.imshow(img, cmap=plt.get_cmap('gray'))
            # plt.gca().axis('off')
            # plt.gcf().set_size_inches((5, 5))
            # plt.show()
       
            X_train.append(np.ndarray.flatten((np.array(img))))
            delta[i][pos] = 1
            pos += 1
    print("Delta, shape:", np.array(delta).shape)
    print("X_train, shape", np.array(X_train).shape) 
    return np.array(X_train).T, np.array(delta)
Example #43
    def __getitem__(self, idx):
        frames = []
        idx = idx * self.T
        for i in range(idx, idx + self.T):
            path = os.path.join(self.image_dir, self.image_names[i])
            image = cv2.imread(path, 1)
            if self.magnification:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
                cr_mean = np.mean(image[..., 1])
                cb_mean = np.mean(image[..., 2])
                image[..., 1] = cr_mean + (image[..., 1] -
                                           cr_mean) * self.magnification
                image[..., 2] = cb_mean + (image[..., 2] -
                                           cb_mean) * self.magnification
            else:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
            image = cv2.resize(image, self.image_size)
            image = block_reduce(image, (self.n, self.n, 1), np.mean)
            image = np.reshape(image, [1, -1, 3])
            frames.append(image)
        images_stacked = np.concatenate(frames)
        images_stacked = np.transpose(images_stacked, [2, 1, 0])

        return images_stacked
Example #44
def examples_to_dataset(examples, block_size=2):
    train_X = []  # pixels
    train_y = []  # labels
    test_X = []
    test_y = []
    for path, label in examples:
        num = re.search(r'([0-9]+)\.jpg', path)
        img_id = int(num[1])
        # read the images
        img = imread(path, as_gray=True)

        # scale down the images
        img = block_reduce(img,
                           block_size=(block_size, block_size),
                           func=np.mean)

        if img_id < 2000:
            test_X.append(img)
            test_y.append(label)
        else:
            train_X.append(img)
            train_y.append(label)
    return np.asarray(train_X).astype(np.float32), np.asarray(train_y), \
            np.asarray(test_X).astype(np.float32), np.asarray(test_y)
Example #45
 def getViewData(self):
     from skimage.measure import block_reduce
     ### Get pixel data from view
     img = glReadPixels(self._game_settings["image_clipping_area"][0],
                        self._game_settings["image_clipping_area"][1],
                        self._game_settings["image_clipping_area"][2],
                        self._game_settings["image_clipping_area"][3],
                        GL_RGB, GL_FLOAT)
     # assert(np.sum(img) > 0.0)
     ### reshape into image, colour last
     img = np.reshape(img,
                      (self._game_settings["image_clipping_area"][3],
                       self._game_settings["image_clipping_area"][2], 3))
     ### downsample image
     img = block_reduce(
         img,
         block_size=(self._game_settings["downsample_image"][0],
                     self._game_settings["downsample_image"][1],
                     self._game_settings["downsample_image"][2]),
         func=np.mean)
     ### convert to greyscale
     if (self._game_settings["convert_to_greyscale"]):
         img = np.mean(img, axis=2)
     return img
Example #46
def block_downsample(volume, factor):
    """
    Simple downsampling by averaging pixels that fall under each output voxel.
    """
    dtype = volume.dtype
    assert (np.array(volume.shape) % factor == 0).all(), \
        "Volume dimensions must be a multiple of the downsample factor."

    if not isinstance(factor, Iterable):
        factor = (factor,)*volume.ndim

    # Special case for uint8 grayscale downsampling:
    # use a uint16 intermediate value instead of float
    # (as long as the sum of the block voxels always fits in a uint16).
    if volume.dtype == np.uint8 and np.prod(factor) <= 256:
        sums = block_reduce(volume, factor, lambda a, axis: a.sum(axis, np.uint16))
        denominator = np.prod(factor)
        return (sums // denominator).astype(np.uint8)
    elif np.issubdtype(volume.dtype, np.integer):
        # numpy/scipy will convert integers to float64
        # unless we pre-convert to lower precision first.
        volume = volume.astype(np.float32, order='C')
    
    return downscale_local_mean(volume, factor).astype(dtype, copy=False)
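A hypothetical smoke test of the uint8 fast path; a 2x2x2 factor sums at most 8 values of 255, well within uint16 range:

import numpy as np

volume = np.random.randint(0, 256, size=(64, 64, 64), dtype=np.uint8)
out = block_downsample(volume, 2)  # uint16-sum fast path, then integer mean
print(out.shape, out.dtype)        # (32, 32, 32) uint8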
Example #47
def np_min_pool(x):
    return block_reduce(x, (2, 2), np.min)
Example #48
def np_max_pool_221(x):
    return block_reduce(x, (2, 2, 1), np.max)
Example #49
def np_max_pool_s(x, s):
    return block_reduce(x, (s, s, 1), np.max)
Example #50
def np_max_pool(x):
    return block_reduce(x, (2, 2), np.max)
Example #51
def np_max_441(x):
    return block_reduce(x, (4, 4, 1), np.max)
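These wrappers (#47 through #51) are fixed-window poolings over numpy arrays; for example:

import numpy as np

x = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8]])

print(np_max_pool(x))  # [[6 8]]
print(np_min_pool(x))  # [[1 3]]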
Example #52
    def load_modality(self,
                      modality,
                      normalize_volumes=True,
                      downsample=2,
                      rotate_mult=0.0,
                      shift_mult=0.0):

        # enc= [self.load_ixi(m) for m in self.modalities_to_load]
        # data = [final_image for (final_image, mod) in enc]
        # data = [self.load_ixi(m) for m in self.modalities_to_load]
        data = self.load_ixi(modality)

        # array of 3D volumes
        # To Do: normalize based on slices not volume
        #converting to float
        # for i in data:
        #     float(i)
        X = [data[i] for i in range(self.num_vols)]

        # trim the matrices and downsample: downsample x downsample -> 1x1
        for i, x in enumerate(X):
            if rotate_mult != 0:
                print('Rotating ' + modality + '. Multiplying by ' +
                      str(rotate_mult))
                rotations = [[-5.57, 2.79, -11.99], [-5.42, -18.34, -14.22],
                             [4.64, 5.80, -5.96], [-17.02, -8.70, 15.43],
                             [18.79, 17.44, 17.06], [-14.55, -4.90, 9.19],
                             [14.37, -0.58, -16.85], [-9.49, -12.53, -2.89],
                             [-16.75, -4.07, 3.23], [14.39, -16.58, 3.35],
                             [-14.05, -2.25, -10.58], [8.47, -8.95, -12.73],
                             [13.00, -10.90, -2.85], [2.61, -7.51, -6.26],
                             [-13.99, -0.38, 6.29], [10.16, -9.88, -11.89],
                             [6.76, 0.83, -19.85], [18.74, -6.70, 15.46],
                             [-3.01, -2.85, 18.45], [-17.37, -1.32, -3.48],
                             [14.67, -17.93, 18.74], [6.55, 18.19, -8.24],
                             [13.52, -4.09, 19.32], [5.27, 11.27, 4.93],
                             [2.29, 17.83, 10.07], [-11.98, 10.49, 0.02],
                             [14.49, -12.00, -17.21], [17.86, -17.38, 19.04]]
                theta = rotations[i]

                x = rotate(x,
                           rotate_mult * theta[0],
                           axes=(1, 0),
                           reshape=False,
                           order=3,
                           mode='constant',
                           cval=0.0,
                           prefilter=True)
                x = rotate(x,
                           rotate_mult * theta[1],
                           axes=(1, 2),
                           reshape=False,
                           order=3,
                           mode='constant',
                           cval=0.0,
                           prefilter=True)
                x = rotate(x,
                           rotate_mult * theta[2],
                           axes=(0, 2),
                           reshape=False,
                           order=3,
                           mode='constant',
                           cval=0.0,
                           prefilter=True)

            if shift_mult != 0:
                print('Shifting ' + modality + '. Multiplying by ' +
                      str(shift_mult))
                shfts = [[0.931, 0.719, -0.078], [0.182, -0.220, 0.814],
                         [0.709, 0.085, -0.262], [-0.898, 0.367, 0.395],
                         [-0.936, 0.591, -0.101], [0.750, 0.522, 0.132],
                         [-0.093, 0.188, 0.898], [-0.517, 0.905, -0.389],
                         [0.616, 0.599, 0.098], [-0.209, -0.215, 0.285],
                         [0.653, -0.398, -0.153], [0.428, -0.682, -0.501],
                         [-0.421, -0.929, -0.925], [-0.753, -0.492, 0.744],
                         [0.532, -0.302, 0.353], [0.139, 0.991, -0.086],
                         [-0.453, 0.657, 0.072], [0.576, 0.918, 0.242],
                         [0.889, -0.543, 0.738], [-0.307, -0.945, 0.093],
                         [0.698, -0.443, 0.037], [-0.209, 0.882, 0.014],
                         [0.487, -0.588, 0.312], [0.007, -0.789, -0.107],
                         [0.215, 0.104, 0.482], [-0.374, 0.560, -0.187],
                         [-0.227, 0.030, -0.921], [0.106, 0.975, 0.997]]
                shft = shfts[i]
                x = shift(x, [
                    shft[0] * shift_mult, shft[1] * shift_mult,
                    shft[2] * shift_mult
                ])

            if self.trim_and_downsample:
                X[i] = block_reduce(x,
                                    block_size=(1, downsample, downsample),
                                    func=np.mean)

                if self.dataset == 'BRATS':
                    # power of 2 padding
                    (_, w, h) = X[i].shape

                    w_pad_size = int(
                        math.ceil(
                            (math.pow(2, math.ceil(math.log(w, 2))) - w) / 2))
                    h_pad_size = int(
                        math.ceil(
                            (math.pow(2, math.ceil(math.log(h, 2))) - h) / 2))

                    X[i] = np.lib.pad(X[i], ((0, 0), (w_pad_size, w_pad_size),
                                             (h_pad_size, h_pad_size)),
                                      'constant',
                                      constant_values=0)

                    (_, w, h) = X[i].shape

                    # check if dimensions are even

                    if w & 1:
                        X[i] = X[i][:, 1:, :]

                    if h & 1:
                        X[i] = X[i][:, :, 1:]

            else:
                X[i] = x

        if normalize_volumes:
            for i, x in enumerate(X):
                X[i] = X[i] / np.mean(x)

        if rotate_mult > 0:
            for i, x in enumerate(X):
                X[i][X[i] < 0.25] = 0

        return X
Example #53
def main(argv):

    # parse directory name from command line argument
    # parse command line arguments
    parser = argparse.ArgumentParser(description="Process raw OPM data.")
    parser.add_argument("-i",
                        "--ipath",
                        type=str,
                        help="supply the directory to be processed")
    parser.add_argument("-d",
                        "--decon",
                        type=int,
                        default=0,
                        help="0: no deconvolution (DEFAULT), 1: deconvolution")
    parser.add_argument(
        "-f",
        "--flatfield",
        type=int,
        default=0,
        help=
        "0: No flat field (DEFAULT), 1: flat field (FIJI) 2: flat field (python)"
    )
    parser.add_argument(
        "-s",
        "--save_type",
        type=int,
        default=1,
        help="0: TIFF stack output, 1: BDV output (DEFAULT), 2: Zarr output")
    parser.add_argument(
        "-z",
        "--z_down_sample",
        type=int,
        default=1,
        help="1: No downsampling (DEFAULT), n: Nx downsampling")
    args = parser.parse_args()

    input_dir_string = args.ipath
    decon_flag = args.decon
    flatfield_flag = args.flatfield
    save_type = args.save_type
    z_down_sample = args.z_down_sample

    # https://docs.python.org/3/library/pathlib.html
    # Create Path object to directory
    input_dir_path = Path(input_dir_string)

    # create parameter array from scan parameters saved by acquisition code
    df_metadata = data_io.read_metadata(input_dir_path /
                                        Path('scan_metadata.csv'))
    root_name = df_metadata['root_name']
    scan_type = df_metadata['scan_type']
    theta = df_metadata['theta']
    scan_step = df_metadata['scan_step']
    pixel_size = df_metadata['pixel_size']
    num_t = df_metadata['num_t']
    num_y = df_metadata['num_y']
    num_z = df_metadata['num_z']
    num_ch = df_metadata['num_ch']
    num_images = df_metadata['scan_axis_positions']
    y_pixels = df_metadata['y_pixels']
    x_pixels = df_metadata['x_pixels']
    chan_405_active = df_metadata['405_active']
    chan_488_active = df_metadata['488_active']
    chan_561_active = df_metadata['561_active']
    chan_635_active = df_metadata['635_active']
    chan_730_active = df_metadata['730_active']
    active_channels = [
        chan_405_active, chan_488_active, chan_561_active, chan_635_active,
        chan_730_active
    ]
    channel_idxs = [0, 1, 2, 3, 4]
    channels_in_data = list(compress(channel_idxs, active_channels))
    n_active_channels = len(channels_in_data)
    if not (num_ch == n_active_channels):
        print('Channel setup error. Check metadata file and directory names.')
        sys.exit()

    # calculate pixel sizes of deskewed image in microns
    deskewed_x_pixel = pixel_size / 1000.
    deskewed_y_pixel = pixel_size / 1000.
    deskewed_z_pixel = pixel_size / 1000.
    print('Deskewed pixel sizes before downsampling (um). x=' +
          str(deskewed_x_pixel) + ', y=' + str(deskewed_y_pixel) + ', z=' +
          str(deskewed_z_pixel) + '.')

    # create output directory
    if decon_flag == 0 and flatfield_flag == 0:
        output_dir_path = input_dir_path / 'deskew_output'
    elif decon_flag == 0 and flatfield_flag > 0:
        output_dir_path = input_dir_path / 'deskew_flatfield_output'
    elif decon_flag == 1 and flatfield_flag == 0:
        output_dir_path = input_dir_path / 'deskew_decon_output'
    elif decon_flag == 1 and flatfield_flag > 1:
        output_dir_path = input_dir_path / 'deskew_flatfield_decon_output'
    output_dir_path.mkdir(parents=True, exist_ok=True)

    # Create TIFF if requested
    if (save_type == 0):
        # create directory for data type
        tiff_output_dir_path = output_dir_path / Path('tiff')
        tiff_output_dir_path.mkdir(parents=True, exist_ok=True)
    # Create BDV if requested
    elif (save_type == 1):
        # create directory for data type
        bdv_output_dir_path = output_dir_path / Path('bdv')
        bdv_output_dir_path.mkdir(parents=True, exist_ok=True)

        # https://github.com/nvladimus/npy2bdv
        # create BDV H5 file with sub-sampling for BigStitcher
        bdv_output_path = bdv_output_dir_path / Path(root_name + '_bdv.h5')
        bdv_writer = npy2bdv.BdvWriter(str(bdv_output_path),
                                       nchannels=num_ch,
                                       ntiles=num_y * num_z,
                                       subsamp=((1, 1, 1), (4, 8, 8), (8, 16,
                                                                       16)),
                                       blockdim=((32, 128, 128), ),
                                       compression=None)

        # create blank affine transformation to use for stage translation
        unit_matrix = np.array((
            (1.0, 0.0, 0.0, 0.0),  # change the 4. value for x_translation (px)
            (0.0, 1.0, 0.0, 0.0),  # change the 4. value for y_translation (px)
            (0.0, 0.0, 1.0,
             0.0)))  # change the 4. value for z_translation (px)
    # Create Zarr if requested
    elif (save_type == 2):
        # create directory for data type
        zarr_output_dir_path = output_dir_path / Path('zarr')
        zarr_output_dir_path.mkdir(parents=True, exist_ok=True)

        # create name for zarr directory
        zarr_output_path = zarr_output_dir_path / Path(root_name +
                                                       '_zarr.zarr')

        # calculate size of one volume
        # change step size from physical space (nm) to camera space (pixels)
        pixel_step = scan_step / pixel_size  # (pixels)

        # calculate the number of pixels scanned during stage scan
        scan_end = num_images * pixel_step  # (pixels)

        # calculate properties for final image
        ny = np.int64(
            np.ceil(scan_end +
                    y_pixels * np.cos(theta * np.pi / 180)))  # (pixels)
        nz = np.int64(np.ceil(y_pixels *
                              np.sin(theta * np.pi / 180)))  # (pixels)
        nx = np.int64(x_pixels)  # (pixels)

        # create and open zarr file
        root = zarr.open(str(zarr_output_path), mode="w")
        opm_data = root.zeros("opm_data",
                              shape=(num_t, num_y * num_z, num_ch, nz, ny, nx),
                              chunks=(1, 1, 1, 32, 128, 128),
                              dtype=np.uint16)

    # if retrospective flatfield is requested, import and open pyimagej in interactive mode
    # because BaSiC flat-fielding plugin cannot run in headless mode
    if flatfield_flag == 1:
        from image_post_processing import manage_flat_field
        import imagej
        import scyjava

        scyjava.config.add_option('-Xmx12g')
        plugins_dir = Path('/home/dps/Fiji.app/plugins')
        scyjava.config.add_option(f'-Dplugins.dir={str(plugins_dir)}')
        ij_path = Path('/home/dps/Fiji.app')
        ij = imagej.init(str(ij_path), headless=False)
        ij.ui().showUI()
        print(
            'PyimageJ approach to flat fielding will be removed soon. Switch to GPU accelerated python BASIC code (-f 2).'
        )
    elif flatfield_flag == 2:
        from image_post_processing import manage_flat_field_py

    # if decon is requested, import microvolution wrapper
    if decon_flag == 1:
        from image_post_processing import mv_decon

    # initialize counters
    timepoints_in_data = list(range(num_t))
    y_tile_in_data = list(range(num_y))
    z_tile_in_data = list(range(num_z))
    ch_in_BDV = list(range(n_active_channels))
    tile_idx = 0

    # loop over all directories. Each directory will be placed as a "tile" into the BigStitcher file
    for (y_idx, z_idx) in product(y_tile_in_data, z_tile_in_data):
        for (t_idx, ch_BDV_idx) in product(timepoints_in_data, ch_in_BDV):

            ch_idx = channels_in_data[ch_BDV_idx]

            # open stage positions file
            stage_position_filename = Path('t' + str(t_idx).zfill(4) + '_y' +
                                           str(y_idx).zfill(4) + '_z' +
                                           str(z_idx).zfill(4) + '_ch' +
                                           str(ch_idx).zfill(4) +
                                           '_stage_positions.csv')
            stage_position_path = input_dir_path / stage_position_filename
            # check to see if stage position file exists yet
            while not stage_position_path.exists():
                time.sleep(60)

            df_stage_positions = data_io.read_metadata(stage_position_path)

            stage_x = np.round(float(df_stage_positions['stage_x']), 2)
            stage_y = np.round(float(df_stage_positions['stage_y']), 2)
            stage_z = np.round(float(df_stage_positions['stage_z']), 2)
            print('y tile ' + str(y_idx + 1) + ' of ' + str(num_y) +
                  '; z tile ' + str(z_idx + 1) + ' of ' + str(num_z) +
                  '; channel ' + str(ch_BDV_idx + 1) + ' of ' +
                  str(n_active_channels))
            print('Stage location (um): x=' + str(stage_x) + ', y=' +
                  str(stage_y) + ', z=' + str(stage_z) + '.')

            # construct directory name
            current_tile_dir_path = Path(root_name + '_t' +
                                         str(t_idx).zfill(4) + '_y' +
                                         str(y_idx).zfill(4) + '_z' +
                                         str(z_idx).zfill(4) + '_ch' +
                                         str(ch_idx).zfill(4) + '_1')
            tile_dir_path_to_load = input_dir_path / current_tile_dir_path

            # https://pycro-manager.readthedocs.io/en/latest/read_data.html
            dataset = Dataset(str(tile_dir_path_to_load))
            raw_data = data_io.return_data_numpy(dataset=dataset,
                                                 time_axis=None,
                                                 channel_axis=None,
                                                 num_images=num_images,
                                                 y_pixels=y_pixels,
                                                 x_pixels=x_pixels)

            # perform flat-fielding
            if flatfield_flag == 1:
                print('Flatfield.')
                corrected_stack = manage_flat_field(raw_data, ij)
            elif flatfield_flag == 2:
                corrected_stack = manage_flat_field_py(raw_data)
            else:
                corrected_stack = raw_data
            del raw_data

            # deskew
            print('Deskew.')
            deskewed = deskew(data=np.flipud(corrected_stack),
                              theta=theta,
                              distance=scan_step,
                              pixel_size=pixel_size)
            del corrected_stack

            # downsample in z due to oversampling when going from OPM to coverslip geometry
            if z_down_sample > 1:
                print('Downsample.')
                deskewed_downsample = block_reduce(deskewed,
                                                   block_size=(z_down_sample,
                                                               1, 1),
                                                   func=np.mean)
            else:
                deskewed_downsample = deskewed
            del deskewed

            # run deconvolution on deskewed image
            if decon_flag == 1:
                print('Deconvolve.')
                deskewed_downsample_decon = mv_decon(
                    deskewed_downsample, ch_idx, deskewed_y_pixel,
                    z_down_sample * deskewed_z_pixel)
            else:
                deskewed_downsample_decon = deskewed_downsample
            del deskewed_downsample

            # save deskewed image into TIFF stack
            if (save_type == 0):
                print('Write TIFF stack')
                tiff_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.tiff'
                tiff_output_path = tiff_output_dir_path / Path(tiff_filename)
                tifffile.imwrite(str(tiff_output_path),
                                 deskewed_downsample_decon,
                                 imagej=True,
                                 resolution=(1 / deskewed_x_pixel,
                                             1 / deskewed_y_pixel),
                                 metadata={
                                     'spacing':
                                     (z_down_sample * deskewed_z_pixel),
                                     'unit': 'um',
                                     'axes': 'ZYX'
                                 })

                metadata_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.csv'
                metadata_output_path = tiff_output_dir_path / Path(
                    metadata_filename)
                tiff_stage_metadata = [{
                    'stage_x': float(stage_x),
                    'stage_y': float(stage_y),
                    'stage_z': float(stage_z)
                }]
                data_io.write_metadata(tiff_stage_metadata[0],
                                       metadata_output_path)

            elif (save_type == 1):
                # create affine transformation for stage translation
                # swap x & y from instrument to BDV
                affine_matrix = unit_matrix.copy()
                affine_matrix[0, 3] = stage_y / deskewed_y_pixel  # x-translation
                affine_matrix[1, 3] = stage_x / deskewed_x_pixel  # y-translation
                affine_matrix[2, 3] = -stage_z / (z_down_sample * deskewed_z_pixel)  # z-translation

                # save tile in BDV H5 with actual stage positions
                print('Write into BDV H5.')
                bdv_writer.append_view(
                    deskewed_downsample_decon,
                    time=0,
                    channel=ch_BDV_idx,
                    tile=tile_idx,
                    voxel_size_xyz=(deskewed_x_pixel, deskewed_y_pixel,
                                    z_down_sample * deskewed_z_pixel),
                    voxel_units='um',
                    calibration=(1, 1, (z_down_sample * deskewed_z_pixel) /
                                 deskewed_y_pixel),
                    m_affine=affine_matrix,
                    name_affine='tile ' + str(tile_idx) + ' translation')

            elif (save_type == 2):
                print('Write data into Zarr container')
                opm_data[t_idx, tile_idx,
                         ch_BDV_idx, :, :, :] = deskewed_downsample_decon
                metadata_filename = root_name + '_t' + str(t_idx).zfill(
                    3) + '_p' + str(tile_idx).zfill(4) + '_c' + str(
                        ch_idx).zfill(3) + '.csv'
                metadata_output_path = zarr_output_dir_path / Path(
                    metadata_filename)
                zarr_stage_metadata = [{
                    'stage_x': float(stage_x),
                    'stage_y': float(stage_y),
                    'stage_z': float(stage_z)
                }]
                data_io.write_metadata(zarr_stage_metadata[0],
                                       metadata_output_path)

            # free up memory
            del deskewed_downsample_decon
            gc.collect()

        tile_idx = tile_idx + 1

    if (save_type == 1):
        # write BDV xml file
        # https://github.com/nvladimus/npy2bdv
        # bdv_writer.write_xml(ntimes=num_t)
        bdv_writer.write_xml()
        bdv_writer.close()

    # shut down pyimagej
    if (flatfield_flag == 1):
        ij.getContext().dispose()

    # exit
    print('Finished.')
    sys.exit()
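Illustrative sketch (not part of the original script): the final volume shape
used for the Zarr container above follows directly from the stage-scan
geometry, and the optional z-downsampling is a plain block_reduce with
np.mean. All parameter values below are hypothetical:

import numpy as np
from skimage.measure import block_reduce

# hypothetical acquisition parameters
scan_step = 400.0       # stage step between frames (nm)
pixel_size = 115.0      # camera pixel size (nm)
theta = 30.0            # light-sheet tilt (degrees)
num_images = 200        # frames in one stage scan
y_pixels, x_pixels = 128, 256
z_down_sample = 2

# same geometry as the script: scan length plus the tilted frame footprint
pixel_step = scan_step / pixel_size                                  # (pixels)
scan_end = num_images * pixel_step                                   # (pixels)
ny = int(np.ceil(scan_end + y_pixels * np.cos(np.deg2rad(theta))))   # (pixels)
nz = int(np.ceil(y_pixels * np.sin(np.deg2rad(theta))))              # (pixels)

deskewed = np.zeros((nz, ny, x_pixels), dtype=np.float32)
downsampled = block_reduce(deskewed, block_size=(z_down_sample, 1, 1), func=np.mean)
print(deskewed.shape, '->', downsampled.shape)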
Example #54
0
def block_reduce(data, block_size, func=np.sum):
    """
    Downsample a data array by applying a function to local blocks.

    If ``data`` is not perfectly divisible by ``block_size`` along a
    given axis then the data will be trimmed (from the end) along that
    axis.

    Parameters
    ----------
    data : array_like
        The data to be resampled.

    block_size : int or array_like (int)
        The integer block size along each axis.  If ``block_size`` is a
        scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.

    func : callable, optional
        The method to use to downsample the data.  Must be a callable
        that takes in a `~numpy.ndarray` along with an ``axis`` keyword,
        which defines the axis along which the function is applied.  The
        default is `~numpy.sum`, which provides block summation (and
        conserves the data sum).

    Returns
    -------
    output : array-like
        The resampled data.

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.nddata.utils import block_reduce
    >>> data = np.arange(16).reshape(4, 4)
    >>> block_reduce(data, 2)    # doctest: +SKIP
    array([[10, 18],
           [42, 50]])

    >>> block_reduce(data, 2, func=np.mean)    # doctest: +SKIP
    array([[  2.5,   4.5],
           [ 10.5,  12.5]])
    """

    from skimage.measure import block_reduce

    data = np.asanyarray(data)

    block_size = np.atleast_1d(block_size)
    if data.ndim > 1 and len(block_size) == 1:
        block_size = np.repeat(block_size, data.ndim)

    if len(block_size) != data.ndim:
        raise ValueError('`block_size` must be a scalar or have the same '
                         'length as `data.shape`')

    block_size = np.array([int(i) for i in block_size])
    size_resampled = np.array(data.shape) // block_size
    size_init = size_resampled * block_size

    # trim data if necessary
    for i in range(data.ndim):
        if data.shape[i] != size_init[i]:
            data = data.swapaxes(0, i)
            data = data[:size_init[i]]
            data = data.swapaxes(0, i)

    return block_reduce(data, tuple(block_size), func=func)
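A minimal pure-NumPy sketch of the trim-then-reduce behaviour documented
above, restricted to 2-D arrays and square blocks so it needs no
scikit-image; block_reduce_2d is an illustrative helper, not part of the
astropy API:

import numpy as np

def block_reduce_2d(data, block, func=np.sum):
    # trim from the end so both axes divide evenly, as the docstring describes
    ny = (data.shape[0] // block) * block
    nx = (data.shape[1] // block) * block
    trimmed = data[:ny, :nx]
    # group each block-by-block tile into its own pair of axes, then reduce
    view = trimmed.reshape(ny // block, block, nx // block, block)
    return func(func(view, axis=3), axis=1)

data = np.arange(5 * 5).reshape(5, 5)     # 5 is not divisible by 2 -> trimmed to 4x4
print(block_reduce_2d(data, 2))           # 2x2 block sums of the trimmed array
print(block_reduce_2d(data, 2, np.mean))  # block means, as with func=np.mean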
Example #55
0
    def forward(self, bottom, top):

        #db
        background = [0,0,0]
        aeroplane = [128,128,128]
        bicycle = [128,0,0]
        bird = [192,192,128]
        boat = [255,69,0]
        bottle = [128,64,128]
        bus = [60,40,222]
        car = [128,128,0]
        cat = [192,128,128]
        chair = [64,64,128]
        cow = [64,0,128]
        diningtable = [64,64,0]
        dog = [0,128,192]
        horse = [32,40,0]
        motorbike = [67, 123, 222]
        person = [134, 2, 223]
        pottedplant = [22, 128, 233]
        sheep = [100, 2, 2]
        sofa = [56, 245, 32]
        train =[200, 100, 10]
        tvmonitor = [99, 89, 89]

        
        for i in range(0, bottom[0].data.shape[0]):
            top[0].data[i, 0, ...] = 1.0
            top[1].data[i, 0, 0, ...] = 41
            aux_c = np.mean(bottom[1].data[i, ...], axis=0)

            #aux_save_1 = scipy.misc.imresize(aux_c, (100,100))
            #scipy.misc.toimage(aux_save_1, high=255, low=0).save(IMAGE_FILE_BMMASK_FCN_2 + 'map_clc_{}.png'.format(self.count))
            #aux_cc = scipy.misc.imresize(np.mean(bottom[2].data[i, ...], axis = 0), (14,14), mode = 'F')
            aux_cc = block_reduce(np.mean(bottom[2].data[i, ...], axis=0), (3, 3), func=np.mean)

            #aux_cc = np.mean(bottom[2].data[i, ...], axis = 0)

            #aux_save_2 = scipy.misc.imresize(aux_cc, (100,100))
            #scipy.misc.toimage(aux_save_2, high=255, low=0).save(IMAGE_FILE_BMMASK_FCN_1 + 'map_clc_{}.png'.format(self.count))
            aux_c5 = np.mean(bottom[3].data[i, ...], axis=0)

            aux_c4 = block_reduce(np.mean(bottom[6].data[i, ...], axis=0), (2, 2), func=np.mean)
            aux_cc4 = block_reduce(np.mean(bottom[7].data[i, ...], axis=0), (3, 3), func=np.mean)
            aux_c4 = block_reduce(np.mean(bottom[8].data[i, ...], axis=0), (2, 2), func=np.mean)

            aux_ccc = (aux_c + aux_cc + aux_c5) / float(3.0)  # aux_c +
            #aux_ccc = np.maximum(aux_ccc, aux_c5)
            aux_ccc = (aux_ccc - np.min(aux_ccc)) / float(np.max(aux_ccc) - np.min(aux_ccc))
            
            #thresholding
            #aux2 = np.zeros(shape=(14*14), dtype=np.float32)
            #aux3 = np.zeros(shape=(14,14), dtype=np.float32)
            #auxi = aux_ccc.flatten()
            #idx = []
            #idx = np.argwhere(auxi > 0.5*np.max(aux_ccc))

            #aux2[idx] = 1.0
            #aux3 = np.reshape(aux2, (14,14))

            top[0].data[i, 0, ...] -= aux_ccc
            

            max_value = -np.inf
            min_value = np.inf
            aux = np.zeros(shape=(14,14), dtype=np.float32)
            aux2 = np.zeros(shape=(14,14), dtype=np.float32)
            aux3 = np.zeros(shape=(14*14), dtype=np.float32)
            aux4 = np.zeros(shape=(14*14), dtype=np.float32)
            db = 0
            for j in range(0,bottom[0].data.shape[1]):
                heat_map_flatten =bottom[0].data[i, j, ...].flatten()
                max_value_aux = np.max(heat_map_flatten)
                min_value_aux = np.min(heat_map_flatten)
                if (max_value_aux > max_value):
                    max_value = max_value_aux
                    db = j
                if (min_value_aux < min_value):
                    min_value = min_value_aux

            thres = 0.2 * max_value

            for j in range(0, bottom[0].data.shape[1]):
                if bottom[5].data[i, j] == 0.0:  # (np.max(bottom[0].data[i, j, ...]) < 0.0):
                    top[0].data[i, j + 1, ...] = 0.0
                else:
                    #aux3 = np.zeros(shape=(14,14), dtype=np.float32)
                    top[0].data[i, j + 1, ...] = (bottom[0].data[i, j, ...] - np.min(bottom[0].data[i, j, ...])) / float(np.max(bottom[0].data[i, j, ...]) - np.min(bottom[0].data[i, j, ...]))  # bottom[4].data[i,j]*((bottom[0].data[i, j, ...] - min_value)/float(max_value - min_value))
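Illustrative sketch (not part of the original layer): block_reduce with
np.mean is how forward() brings activation maps of different spatial sizes
onto one grid before fusing and normalising them. The map sizes below are
made up:

import numpy as np
from skimage.measure import block_reduce

coarse = np.random.rand(14, 14)                      # already at the target size
fine = np.random.rand(42, 42)                        # 3x denser map
fine_on_grid = block_reduce(fine, (3, 3), np.mean)   # -> (14, 14)

fused = (coarse + fine_on_grid) / 2.0
fused = (fused - fused.min()) / (fused.max() - fused.min())  # min-max normalise
print(fused.shape)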
Example #56
0
         y_mult = int(
             keras_batch_manager.tile_generator.data_dim[1] /
             keras_batch_manager.tile_generator.tile_size[1])
         z_mult = int(
             keras_batch_manager.tile_generator.data_dim[2] /
             keras_batch_manager.tile_generator.tile_size[2])
         tile_flag_shape = list(test_data.shape)
         tile_flag_shape[-1] = 1
         tile_flag = np.zeros(tile_flag_shape)
         tile_flag[keras_batch_manager.tile_generator.y_start:
                   keras_batch_manager.tile_generator.y_end,
                   keras_batch_manager.tile_generator.x_start:
                   keras_batch_manager.tile_generator.x_end, :] = 1
         # 2:3 -> capture only density part
         x_downscale = measure.block_reduce(test_data[..., 2:3],
                                            (1, y_mult, x_mult, 1),
                                            np.mean)
         tile_flag_downscale = measure.block_reduce(
             tile_flag, (1, y_mult, x_mult, 1), np.mean)
         tile = np.append(tile, x_downscale, axis=-1)
         tile = np.append(tile, tile_flag_downscale, axis=-1)
     plot_callback._x = tile
     plot_callback.on_epoch_end(0, 0)
     full_res_image[..., keras_batch_manager.tile_generator.y_start:
                    keras_batch_manager.tile_generator.y_end,
                    keras_batch_manager.tile_generator.x_start:
                    keras_batch_manager.tile_generator.x_end, :] = plot_callback._y
 # if x[0].ndim == 4:
 #     x_tile = x[self.tile_generator.z_start:self.tile_generator.z_end, self.tile_generator.y_start:self.tile_generator.y_end, self.tile_generator.x_start:self.tile_generator.x_end, :]
 # else:
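Illustrative sketch (not part of the original fragment): reducing a binary
mask with measure.block_reduce and np.mean gives, per coarse cell, the
fraction of that cell covered by the tile window, which is what the
tile_flag channel appended above encodes. The array sizes are made up:

import numpy as np
from skimage import measure

y_mult, x_mult = 4, 4                    # full-resolution cells per coarse cell
tile_flag = np.zeros((1, 32, 32, 1))     # (batch, y, x, channel)
tile_flag[:, 6:26, 6:26, :] = 1          # hypothetical tile window

coverage = measure.block_reduce(tile_flag, (1, y_mult, x_mult, 1), np.mean)
print(coverage.shape)         # (1, 8, 8, 1)
print(coverage[0, :, :, 0])   # 1.0 inside, 0.0 outside, fractional on the rim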
Example #57
0
    def derivatives(self, I):
        #I = np.swapaxes(np.swapaxes(I, 0, 2), 1, 2)
        # make the color first index
        f1 = conv(self.f1p, I)
        sf1 = np.logaddexp(f1, 0)
        # print(np.shape(sf1))
        f2 = conv(self.f2p, sf1)
        # print(np.shape(f2))
        p = block_reduce(f2, (1, self.poolsize, self.poolsize), np.max)
        # print(np.shape(p))
        f3 = conv(self.f3p, p)
        sf3 = np.logaddexp(f3, 0)
        # print(np.shape(sf3))
        f4 = conv(self.f4p, sf3)
        sf4 = np.logaddexp(f4, 0)
        F = self.Fp @ np.reshape(sf4, (self.Fp_color))
        # print(F)
        s = expit(F)
        back1 = s * (1 - s)
        dF = np.zeros((self.Fp_type, self.Fp_color, self.Fp_type))
        for (j, k, i), _ in np.ndenumerate(dF):
            dF[j, k, i] = back1[i] * (j == i) * \
                np.reshape(sf4, (self.Fp_color))[k]
        back2 = np.zeros(
            (self.f4_depth, self.f4_row, self.f4_column, self.Fp_type))
        for (e1, e2, e3, a), _ in np.ndenumerate(back2):
            back2[e1, e2, e3,
                  a] = back1[a] * self.Fp[a,
                                          self.f4_row * self.f4_column * e1 +
                                          self.f4_column * e2 + e3] * expit(
                                              f4[e1, e2, e3])
        df4 = np.zeros((self.f4p_depth, self.f4p_row, self.f4p_column,
                        self.f4p_color, self.Fp_type))
        for (b1, b2, b3, b4, a), _ in np.ndenumerate(df4):
            df4[b1, b2, b3, b4, a] = sum([
                back2[b1, e2, e3, a] * sf3[b4, e2 + b2, e3 + b3]
                for e2 in range(self.f4_row) for e3 in range(self.f4_column)
            ])
        back3 = np.zeros(
            (self.f3_depth, self.f3_row, self.f3_column, self.Fp_type))
        for (m1, m2, m3, a), _ in np.ndenumerate(back3):
            back3[m1, m2, m3, a] = sum([
                back2[e1, e2, e3, a] * self.f4p[e1, m2 - e2, m3 - e3, m1] *
                expit(f3[m1, m2, m3]) for e1 in range(self.f4_depth)
                for e2 in range(max(0, m2 - self.f4p_row +
                                    1), min(self.f4_row, m2 + 1))
                for e3 in range(max(0, m3 - self.f4p_column +
                                    1), min(self.f4_column, m3 + 1))
            ])

        df3 = np.zeros((self.f3p_depth, self.f3p_row, self.f3p_column,
                        self.f3p_color, self.Fp_type))
        for (b1, b2, b3, b4, a), _ in np.ndenumerate(df3):
            df3[b1, b2, b3, b4, a] = sum([
                back3[b1, m2, m3, a] * p[b4, m2 + b2, m3 + b3]
                for m2 in range(self.f3_row) for m3 in range(self.f3_column)
            ])
        back4 = np.zeros(
            (self.f2_depth, self.f2_row, self.f2_column, self.Fp_type))
        for (x1, x2, x3, a), _ in np.ndenumerate(back4):
            sli = f2[x1, self.poolsize * (x2 // self.poolsize):self.poolsize *
                     (x2 // self.poolsize) + self.poolsize,
                     self.poolsize * (x3 // self.poolsize):self.poolsize *
                     (x3 // self.poolsize) + self.poolsize]
            back4[x1, x2, x3, a] = sum([
                back3[m1, m2, m3, a] * self.f3p[m1, x2 // self.poolsize - m2,
                                                x3 // self.poolsize - m3, x1] *
                ((x2 % self.poolsize, x3 % self.poolsize)
                 == np.where(sli == np.max(sli)))
                for m1 in range(self.f3_depth)
                for m2 in range(max(0, x2 // self.poolsize - self.f3p_row + 1),
                                min(self.f3_row, x2 // self.poolsize + 1))
                for m3 in range(
                    max(0, x3 // self.poolsize - self.f3p_column + 1),
                    min(self.f3_column, x3 // self.poolsize + 1))
            ])

        df2 = np.zeros((self.f2p_depth, self.f2p_row, self.f2p_column,
                        self.f2p_color, self.Fp_type))
        for (b1, b2, b3, b4, a), _ in np.ndenumerate(df2):
            df2[b1, b2, b3, b4, a] = sum([
                back4[b1, x2, x3, a] * sf1[b4, x2 + b2, x3 + b3]
                for x2 in range(self.f2_row) for x3 in range(self.f2_column)
            ])
        back5 = np.zeros(
            (self.f1_depth, self.f1_row, self.f1_column, self.Fp_type))
        for (g1, g2, g3, a), _ in np.ndenumerate(back5):
            back5[g1, g2, g3, a] = sum([
                back4[x1, x2, x3, a] * self.f2p[x1, g2 - x2, g3 - x3, g1] *
                expit(f1[g1, g2, g3]) for x1 in range(self.f2_depth)
                for x2 in range(max(0, g2 - self.f2p_row +
                                    1), min(self.f2_row, g2 + 1))
                for x3 in range(max(0, g3 - self.f2p_column + 1),
                                min(self.f2_column, g3 + 1))
            ])
        df1 = np.zeros((self.f1p_depth, self.f1p_row, self.f1p_column,
                        self.f1p_color, self.Fp_type))
        for (b1, b2, b3, b4, a), _ in np.ndenumerate(df1):
            df1[b1, b2, b3, b4, a] = sum([
                back5[b1, g2, g3, a] * I[b4, g2 + b2, g3 + b3]
                for g2 in range(self.f1_row) for g3 in range(self.f1_column)
            ])
        return df1
Example #58
0
    def derivatives(self, I):
        #I = np.swapaxes(np.swapaxes(I, 0, 2), 1, 2)
        # make the color first index
        f1 = conv(self.f1p, I)
        sf1 = np.logaddexp(f1, 0)
        # print(np.shape(sf1))
        f2 = conv(self.f2p, sf1)
        # print(np.shape(f2))
        p = block_reduce(f2, (1, self.poolsize, self.poolsize), np.max)
        # print(np.shape(p))
        f3 = conv(self.f3p, p)
        sf3 = np.logaddexp(f3, 0)
        # print(np.shape(sf3))
        f4 = conv(self.f4p, sf3)
        sf4 = np.logaddexp(f4, 0)
        f5 = conv(self.f5p, sf4)
        # print(F)
        sf5 = np.logaddexp(f5, 0)

        back1 = expit(f5)
        df5 = np.zeros(
            (self.f5p_depth, self.f5p_row, self.f5p_column, self.f5p_color,
             self.f5_depth, self.f5_row, self.f5_column))
        for (b1, b2, b3, b4, a1, a2, a3), _ in np.ndenumerate(df5):
            df5[b1, b2, b3, b4, a1, a2, a3] = back1[a1, a2, a3] * \
                sf4[b4, a2 + b2, a3 + b3] * (a1 == b1)

        back2 = np.zeros((self.f4_depth, self.f4_row, self.f4_column,
                          self.f5_depth, self.f5_row, self.f5_column))
        back2shape = np.shape(back2)
        for (g1, g2, g3, a1, a2, a3), _ in np.ndenumerate(back2):
            if g2 != a2 or g3 != a3:
                continue
            back2[g1, g2, g3, a1, a2, a3] = back1[a1, a2, a3] * \
                self.f5p[a1, g2 - a2, g3 - a3, g1] * expit(f4[g1, g2, g3])
        df4 = np.zeros(
            (self.f4p_depth, self.f4p_row, self.f4p_column, self.f4p_color,
             self.f5_depth, self.f5_row, self.f5_column))
        for (b1, b2, b3, b4, a1, a2, a3), _ in np.ndenumerate(df4):
            df4[b1, b2, b3, b4, a1, a2, a3] = sum([
                back2[b1, g2, g3, a1, a2, a3] * sf3[b4, g2 + b2, g3 + b3]
                for g2 in range(back2shape[1]) for g3 in range(back2shape[2])
            ])

        back3 = np.zeros((self.f3_depth, self.f3_row, self.f3_column,
                          self.f5_depth, self.f5_row, self.f5_column))
        back3shape = np.shape(back3)
        for (m1, m2, m3, a1, a2, a3), _ in np.ndenumerate(back3):
            back3[m1, m2, m3, a1, a2, a3] = sum([
                back2[g1, g2, g3, a1, a2, a3] *
                self.f4p[g1, m2 - g2, m3 - g3, m1] * expit(f3[m1, m2, m3])
                for g1 in range(self.f4_depth)
                for g2 in range(max(0, m2 - self.f4p_row +
                                    1), min(back2shape[1], m2 + 1))
                for g3 in range(max(0, m3 - self.f4p_column +
                                    1), min(back2shape[2], m3 + 1))
            ])

        df3 = np.zeros(
            (self.f3p_depth, self.f3p_row, self.f3p_column, self.f3p_color,
             self.f5_depth, self.f5_row, self.f5_column))
        for (b1, b2, b3, b4, a1, a2, a3), _ in np.ndenumerate(df3):
            df3[b1, b2, b3, b4, a1, a2, a3] = sum([
                back3[b1, m2, m3, a1, a2, a3] * p[b4, m2 + b2, m3 + b3]
                for m2 in range(self.f3_row) for m3 in range(self.f3_column)
            ])

        back4 = np.zeros((self.f2_depth, self.f2_row, self.f2_column,
                          self.f5_depth, self.f5_row, self.f5_column))
        for (x1, x2, x3, a1, a2, a3), _ in np.ndenumerate(back4):
            sli = f2[x1, self.poolsize * (x2 // self.poolsize):self.poolsize *
                     (x2 // self.poolsize) + self.poolsize,
                     self.poolsize * (x3 // self.poolsize):self.poolsize *
                     (x3 // self.poolsize) + self.poolsize]
            back4[x1, x2, x3, a1, a2, a3] = sum([
                back3[m1, m2, m3, a1, a2, a3] *
                self.f3p[m1, x2 // self.poolsize - m2, x3 // self.poolsize -
                         m3, x1] * ((x2 % self.poolsize, x3 % self.poolsize)
                                    == np.where(sli == np.max(sli)))
                for m1 in range(self.f3_depth)
                for m2 in range(max(0, x2 // self.poolsize - self.f3p_row + 1),
                                min(self.f3_row, x2 // self.poolsize + 1))
                for m3 in range(
                    max(0, x3 // self.poolsize - self.f3p_column + 1),
                    min(self.f3_column, x3 // self.poolsize + 1))
            ])

        df2 = np.zeros(
            (self.f2p_depth, self.f2p_row, self.f2p_column, self.f2p_color,
             self.f5_depth, self.f5_row, self.f5_column))
        for (b1, b2, b3, b4, a1, a2, a3), _ in np.ndenumerate(df2):
            df2[b1, b2, b3, b4, a1, a2, a3] = sum([
                back4[b1, x2, x3, a1, a2, a3] * sf1[b4, x2 + b2, x3 + b3]
                for x2 in range(self.f2_row) for x3 in range(self.f2_column)
            ])
        back5 = np.zeros((self.f1_depth, self.f1_row, self.f1_column,
                          self.f5_depth, self.f5_row, self.f5_column))
        for (g1, g2, g3, a1, a2, a3), _ in np.ndenumerate(back5):
            back5[g1, g2, g3, a1, a2, a3] = sum([
                back4[x1, x2, x3, a1, a2, a3] *
                self.f2p[x1, g2 - x2, g3 - x3, g1] * expit(f1[g1, g2, g3])
                for x1 in range(self.f2_depth)
                for x2 in range(max(0, g2 - self.f2p_row +
                                    1), min(self.f2_row, g2 + 1))
                for x3 in range(max(0, g3 - self.f2p_column + 1),
                                min(self.f2_column, g3 + 1))
            ])
        df1 = np.zeros(
            (self.f1p_depth, self.f1p_row, self.f1p_column, self.f1p_color,
             self.f5_depth, self.f5_row, self.f5_column))
        for (b1, b2, b3, b4, a1, a2, a3), _ in np.ndenumerate(df1):
            df1[b1, b2, b3, b4, a1, a2, a3] = sum([
                back5[b1, g2, g3, a1, a2, a3] * I[b4, g2 + b2, g3 + b3]
                for g2 in range(self.f1_row) for g3 in range(self.f1_column)
            ])

        return df1
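Illustrative sketch (not part of the original examples): the pooling factor
inside back4 routes each upstream gradient to the arg-max position of its
pooling window. A small stand-alone equivalent for a single 2-D map, with
hypothetical names, that avoids the ambiguous tuple-vs-np.where comparison
used above:

import numpy as np
from skimage.measure import block_reduce

def max_pool_backward(x, grad_pooled, pool):
    # scatter each pooled gradient back to the arg-max of its pool x pool window
    grad = np.zeros_like(x)
    for i in range(x.shape[0] // pool):
        for j in range(x.shape[1] // pool):
            window = x[i * pool:(i + 1) * pool, j * pool:(j + 1) * pool]
            r, c = np.unravel_index(np.argmax(window), window.shape)
            grad[i * pool + r, j * pool + c] = grad_pooled[i, j]
    return grad

x = np.arange(16, dtype=float).reshape(4, 4)
pooled = block_reduce(x, (2, 2), np.max)          # forward pass, as in the examples
grad = max_pool_backward(x, np.ones_like(pooled), 2)
print(grad)                                       # ones at each window's max, zeros elsewhere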
Example #59
0
 def apply_pooling(inp):
     return block_reduce(inp, dimensions, np.max)
            elif(y_pred==3):#CCA---red
                testimage.paste((255,0,0),[c,r,c+windowsize_c,r+windowsize_r])
            elif(y_pred==4):#Ana---green
                testimage.paste((0,255,0),[c,r,c+windowsize_c,r+windowsize_r])
            else:#others---yellow
                testimage.paste((255,255,0),[c,r,c+windowsize_c,r+windowsize_r])
    count = count+1
    percent = percent_coral(testimage)
    testimage.save('./result_image/'+str(name_image)+' result'+str(percent)+'.jpg')
    print(("---image%d finished in %s seconds ---" % (count,(time.time()-start_time))))
    print("there are {0}% coral in this image".format(percent))
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#downsample
path_image = "./2012image/201208172_T-12-58-58_Dive_01_041.jpg"
img = imread(path_image)
img_downsample = block_reduce(img, block_size=(3, 3, 1), func=np.max)
img_new = np.uint8(img_downsample)
plt.imshow(img_new)

#%%
#step1 downsample image
path_image = "./2012image/201208172_T-12-54-11_Dive_01_032.jpg"
img = imread(path_image)
k = img[0:1536:3,0:2048:3,:]
plt.imshow(k)
plt.axis('off')
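Both downsampling cells reduce the image 3x, but not identically:
block_reduce with np.max keeps the brightest pixel of every 3x3 block,
while the strided slice keeps only every third pixel. A toy comparison:

import numpy as np
from skimage.measure import block_reduce

img = np.arange(36).reshape(6, 6)
print(block_reduce(img, block_size=(3, 3), func=np.max))  # per-block maxima
print(img[0:6:3, 0:6:3])                                  # top-left pixel of each block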
#%%
#step2 coarse search
windowsize_r = 30
windowsize_c = 30
count = 0