Example #1
def find_site(img, thresh=10, filter_kernel=(5, 5, 5)):
    """Find a connected area of high intensity, using a basic filter + threshold + connected components approach

    by: bdeverett Created on Fri May 19 14:46:55 2017

    Parameters
    ----------
    img : np.ndarray
        3D stack in which to find site (technically need not be 3D, so long as filter parameter is adjusted accordingly)
    thresh: float
        threshold for site-of-interest intensity, in number of standard deviations above the mean
    filter_kernel: tuple
        kernel for filtering of image before thresholding

    Returns
    --------
    bool array of the volume where the site was detected
    """

    import numpy as np
    from scipy.ndimage import gaussian_filter as gfilt, label

    filtered = gfilt(img, filter_kernel)
    thresholded = filtered > filtered.mean() + thresh * filtered.std()
    labelled, nlab = label(thresholded)

    if nlab == 0:
        raise Exception('Site not detected, try a lower threshold?')
    elif nlab == 1:
        return labelled.astype(bool)
    else:
        sizes = [np.sum(labelled == i) for i in range(1, nlab + 1)]
        return labelled == np.argmax(sizes) + 1
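
A minimal usage sketch for find_site on a synthetic volume with one bright blob (the synthetic array and the chosen thresh are illustrative, not from the original source):

import numpy as np

vol = np.random.rand(20, 50, 50) * 0.1    # dim random background
vol[8:12, 20:30, 20:30] += 5.0            # one bright injection-like site
mask = find_site(vol, thresh=3, filter_kernel=(2, 2, 2))
print(mask.shape, mask.sum())             # same shape as vol; True inside the site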
Example #2
    def play_to_heatmap(self, dfP):
        rusher = dfP[dfP['NflId'] == dfP['NflIdRusher']].squeeze()
        LOS = rusher.LineOfScrimmage
        offense = dfP[dfP.OnOffense]
        defense = dfP[~dfP.OnOffense]
        image = np.zeros((1, 3, 42, 54))

        # fill player location vectors
        xpos, ypos, w = self.player_vec(rusher, LOS)
        image[0, 0, xpos, ypos] = w

        for player in offense.itertuples(index=False):
            xpos, ypos, w = self.player_vec(player, LOS)
            image[0, 1, xpos, ypos] = w

        for player in defense.itertuples(index=False):
            xpos, ypos, w = self.player_vec(player, LOS)
            image[0, 2, xpos, ypos] = w

        if self.filt:  # smooth each channel with a Gaussian
            # truncate chosen so the kernel spans exactly self.width pixels
            t = (((self.width - 1) / 2) - 0.5) / self.s
            for dim in range(3):
                image[0, dim, :, :] = gfilt(image[0, dim, :, :],
                                            sigma=self.s,
                                            truncate=t)

        return image
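
The truncate expression above is the standard trick for making scipy's gaussian_filter use a kernel of an exact pixel width, since scipy sets the kernel radius to int(truncate * sigma + 0.5). A small self-contained check, with illustrative width and sigma values:

import numpy as np
from scipy.ndimage import gaussian_filter as gfilt

impulse = np.zeros(21)
impulse[10] = 1.0
width, sigma = 5, 0.5                  # illustrative values
t = (((width - 1) / 2) - 0.5) / sigma  # same formula as above
response = gfilt(impulse, sigma=sigma, truncate=t)
print(np.count_nonzero(response))      # 5 nonzero taps, matching width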
Example #3
    def play_to_heatmap(self, dfP, filt=False, test=False, s=.5, width=3, nPoints=3):
        times = np.linspace(0, 1, nPoints+2)
        rusher = dfP[dfP['NflId'] == dfP['NflIdRusher']]
        LOS = rusher.LineOfScrimmage.values[0]
        offense = dfP[dfP.OnOffense]
        defense = dfP[~dfP.OnOffense]
        x = np.zeros((1, 3, 42, 54))

        # fill player location vectors
        xpos, ypos, w = self.player_vec(rusher.squeeze(), times, LOS)
        x[0, 0, xpos, ypos] = w

        for player in offense.itertuples(index=False):
            xpos, ypos, w = self.player_vec(player, times, LOS)
            x[0, 1, xpos, ypos] = w

        for player in defense.itertuples(index=False):
            xpos, ypos, w = self.player_vec(player, times, LOS)
            x[0, 2, xpos, ypos] = w

        if filt:  # smooth each channel with a Gaussian
            # truncate chosen so the kernel spans exactly `width` pixels
            # (renamed from `w`, which player_vec reassigns above)
            t = (((width - 1) / 2) - 0.5) / s
            for dim in range(3):
                x[0, dim, :, :] = gfilt(x[0, dim, :, :], sigma=s, truncate=t)

        if test:
            return x
        else:
            tmp = np.zeros((1, 199))
            tmp[0, int(rusher.Yards.values[0]) + 99] = 1
            y = tmp.cumsum(axis=1)
            return x, y
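
The target y built above is a step-function CDF over the 199 possible yardage gains, produced by cumulatively summing a one-hot vector at index Yards + 99. A tiny standalone illustration (the yards value is made up):

import numpy as np

yards = 3                     # hypothetical rushing gain
tmp = np.zeros((1, 199))
tmp[0, yards + 99] = 1        # one-hot at the gained yardage
y = tmp.cumsum(axis=1)        # step-function CDF
print(y[0, 101], y[0, 102])   # 0.0 1.0 -- steps up at Yards + 99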
Example #4
def add_energy_scale(lineout,
                     known_energy,
                     known_bin=None,
                     rebinparam=1,
                     camerainvert=True,
                     braggorder=1,
                     **kwargs):
    """
    Return an np array of [energies, lineout], pinning the known energy either to the maximum of the lineout or to a specified bin.
    """
    if known_bin is None:
        # if known_bin is not provided, pin the energy to the lineout maximum
        # (gfilt smooths but does not change the length of the array)
        centerindex = np.argmax(gfilt(lineout, 3))
    else:
        # otherwise pin the energy to the given bin position
        centerindex = round(known_bin / rebinparam)
    indexfromcenter = np.array(range(len(lineout))) - centerindex
    if camerainvert:  # if the camera is flipped upside down, reverse the indices
        indexfromcenter = -indexfromcenter
    angle = calc_bragg_angle(known_energy, braggorder)
    return (energy_from_x_position(angle, indexfromcenter, rebinparam,
                                   braggorder), lineout)
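
The helpers calc_bragg_angle and energy_from_x_position are not shown in this snippet. As a rough, hypothetical sketch only, calc_bragg_angle presumably applies Bragg's law n*lambda = 2*d*sin(theta) with lambda = hc/E; the d-spacing below is an arbitrary placeholder, not a value from the original code:

import numpy as np

HC_EV_ANGSTROM = 12398.42  # h*c in eV*Angstrom (physical constant)
D_SPACING = 5.4            # PLACEHOLDER crystal d-spacing in Angstrom, not from the source

def calc_bragg_angle(known_energy, braggorder=1):
    # Bragg's law: n * lambda = 2 * d * sin(theta), with lambda = hc / E
    return np.arcsin(braggorder * HC_EV_ANGSTROM /
                     (2 * D_SPACING * known_energy))

print(np.degrees(calc_bragg_angle(8000.0)))  # Bragg angle for an 8 keV photon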
Example #5
def fwhm_datarun(datarun,
                 known_energy,
                 yrange=[0, -1],
                 xrange=[0, -1],
                 rebin=1,
                 fwhm_smooth=2,
                 **kwargs):
    """
    Given a 2d-array of [energies(eV),lineout], calculate fwhm of peak in the lineout.
    """
    lineout = np.sum(datarun.get_array()[yrange[0]:yrange[1],
                                         xrange[0]:xrange[1]],
                     axis=1) / datarun.photon_value
    if rebin != 1:  # rebin using oliver's rebin_spectrum function
        lineout = _rebin_spectrum(np.array(range(len(lineout))),
                                  lineout, rebin)[1]
    lineout_energyscale = add_energy_scale(lineout,
                                           known_energy,
                                           rebinparam=rebin,
                                           **kwargs)
    x, y = lineout_energyscale
    y = gfilt(y, fwhm_smooth)
    spline = UnivariateSpline(x, y - np.max(y) / 2, s=0)
    r1, r2 = spline.roots()
    return format(r2 - r1, '.3f')
Example #6
def fwhm_ev(arr2d, fwhm_smooth=2):
    """
    Given a 2d-array of [energies(eV),lineout], calculate fwhm of peak in the lineout.
    """
    x, y = arr2d
    y = gfilt(y, fwhm_smooth)
    spline = UnivariateSpline(x, y - np.max(y) / 2, s=0)
    r1, r2 = spline.roots()
    return format(r2 - r1, '.3f')
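
A quick sanity check of the spline-root FWHM technique used in the two snippets above, on a synthetic Gaussian whose analytic FWHM is 2*sqrt(2*ln 2)*sigma:

import numpy as np
from scipy.interpolate import UnivariateSpline

sigma = 1.5
x = np.linspace(-10, 10, 501)
y = np.exp(-x**2 / (2 * sigma**2))
spline = UnivariateSpline(x, y - np.max(y) / 2, s=0)
r1, r2 = spline.roots()
print(r2 - r1, 2 * np.sqrt(2 * np.log(2)) * sigma)  # both ~3.53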
Example #7
    def make_delta_baseline(self, frame):
        """
        Find the difference of a frame from the baseline, then smooth.

        :param frame: frame to subtract from the baseline.
        """
        # Subtract and smooth.
        delta = gfilt(self.baseline - frame, self.sigma)

        return delta
Example #8
def find_site(im, thresh=10, filter_kernel=(5,5,5), num_sites_to_keep=1):
    """Find a connected area of high intensity, using a basic filter + threshold + connected components approach
    
    by: bdeverett

    Parameters
    ----------
    im : np.ndarray
        3D stack in which to find site (technically need not be 3D, so long as filter parameter is adjusted accordingly)
    thresh: float
        threshold for site-of-interest intensity, in number of standard deviations above the mean
    filter_kernel: tuple
        kernel for filtering of image before thresholding
    num_sites_to_keep: int
        number of injection sites to keep; useful if there are multiple distinct sites
    
    Returns
    --------
    bool array of the volume where the site was detected
    """
    import numpy as np
    import tifffile
    from scipy.ndimage import gaussian_filter as gfilt, label
    if isinstance(im, str):  # accept a path to a TIFF stack as well
        im = tifffile.imread(im)

    filtered = gfilt(im, filter_kernel)
    thresholded = filtered > filtered.mean() + thresh * filtered.std()
    labelled, nlab = label(thresholded)

    if nlab == 0:
        raise Exception('Site not detected, try a lower threshold?')
    elif nlab == 1:
        return labelled.astype(bool)
    elif num_sites_to_keep == 1:
        sizes = [np.sum(labelled == i) for i in range(1, nlab + 1)]
        return labelled == np.argmax(sizes) + 1
    else:
        # keep the num_sites_to_keep largest connected components
        sizes = [np.sum(labelled == i) for i in range(1, nlab + 1)]
        vals = [i + 1 for i in np.argsort(sizes)[-num_sites_to_keep:][::-1]]
        return np.in1d(labelled, vals).reshape(labelled.shape)
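
A small demonstration of the top-k component selection in the final branch, on a toy labelled volume with three blobs of different sizes:

import numpy as np
from scipy.ndimage import label

toy = np.zeros((1, 10, 10), dtype=bool)
toy[0, 0:2, 0:2] = True   # component of size 4
toy[0, 4:7, 4:7] = True   # component of size 9
toy[0, 9, 9] = True       # component of size 1
labelled, nlab = label(toy)
sizes = [np.sum(labelled == i) for i in range(1, nlab + 1)]
vals = [i + 1 for i in np.argsort(sizes)[-2:][::-1]]  # the two largest labels
mask = np.in1d(labelled, vals).reshape(labelled.shape)
print(mask.sum())  # 13: only the 9- and 4-voxel components survive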
Example #9
def get_data(patch_size, nvar, dataset="BSDS", whiten=True, CNN=False):
    from scipy.ndimage.filters import gaussian_filter as gfilt

    try:
        # load a cached dataset if one exists for this configuration
        F = open("./datasets/{}_{}_{}_{}".format(patch_size, nvar, dataset,
                                                 whiten), "rb")
        dataset = pickle.load(F)
        F.close()
        # unpack the PCA model and the train/validation/test splits
        white, fit_data, fit_var, fit_test = dataset

    except Exception:
        if dataset == "bruno":  # use the Sparse Coding original images
            # load the full dataset from file and re-organize it
            data = np.reshape(read_dat("./datasets/bruno_dat.csv"),
                              [512, 512, 10])
            data = np.transpose(data, [2, 1, 0])
            # lightly smooth each image with a Gaussian filter
            data = np.array([gfilt(i, .5) for i in data])
            # min-max normalize to [0, 1]
            data = (data - data.min()) / (data.max() - data.min())

            # cut each image into patches and flatten each patch to a vector
            data = np.reshape(
                np.concatenate([IM.split_by_size(d, patch_size)
                                for d in data]), [-1, patch_size * patch_size])

        elif dataset == "MNIST":  # use MNIST digits
            data = read_dat("./../../data/MNIST/mnist_train.csv")
            lab = data[:, 0]    # the first column holds the labels
            data = data[:, 1:]  # the remaining columns are the pixel values
            data = np.reshape(data, [-1, 28 * 28])  # one vector per image
            # min-max normalize to [0, 1]
            data = (data - data.min()) / (data.max() - data.min())

        elif dataset == "BSDS":
            imlist = np.squeeze(IM.get_array_data(BSDSloc + "iids_train.txt"))
            data = [
                IM.get_filter_samples(BSDSloc + "images/train/" + i + ".jpg",
                                      size=patch_size) for i in imlist
            ]
            data = np.concatenate(data)
            # reshape into vectors with one element per pixel
            data = np.reshape(data, [-1, patch_size * patch_size])
            print("BSDS data size: {}".format(data.shape))

        else:  # otherwise make synthetic data from a given distribution
            f, g, dist = distributions.get_distribution(dataset)
            data = make_synthetic_data(dist, patch_size, nvar)

        # split off 10% for validation and 10% for testing
        LL = len(data)
        var = data[:int(LL / 10)]
        test = data[int(LL / 10):int(2 * LL / 10)]
        data = data[int(2 * LL / 10):]

        white = PCA(nvar, copy=True, whiten=whiten)  # establish the PCA decomposition
        fit_data = white.fit_transform(data)
        fit_var = white.transform(var)
        fit_test = white.transform(test)

        # shuffle each split
        fit_data = np.random.permutation(fit_data)
        fit_var = np.random.permutation(fit_var)
        fit_test = np.random.permutation(fit_test)

        # cache the PCA model and splits for next time
        F = open("./datasets/{}_{}_{}_{}".format(patch_size, nvar, dataset,
                                                 whiten), "wb")
        pickle.dump([white, fit_data, fit_var, fit_test], F)
        F.close()

    if CNN:
        # convert each split for CNN input
        fit_data = get_CNN_dat(fit_data, white, whiten)
        fit_var = get_CNN_dat(fit_var, white, whiten)
        fit_test = get_CNN_dat(fit_test, white, whiten)

    return (np.float32(fit_data), np.float32(fit_var),
            np.float32(fit_test), white)
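
A minimal sketch of the PCA-whitening step at the heart of this function, on random data (the shapes and seed are illustrative; with whiten=True, sklearn scales each component to roughly unit variance):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
data = rng.normal(size=(1000, 64))
white = PCA(n_components=16, copy=True, whiten=True)
fit_data = white.fit_transform(data)
print(fit_data.shape, fit_data.std(axis=0).round(2))  # (1000, 16), all ~1.0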
Example #10

    clh = np.array([
        equalize_adapthist(img,
                           clip_limit=0.05,
                           kernel_size=(50, 100),
                           nbins=65535) for img in stk
    ])

    power = 3  # exponent applied to the image to boost contrast
    #
    #plt.figure()
    #plt.imshow(clh[300]**power)

    thresh = 2
    filter_kernel = (5, 5, 5)

    filtered = gfilt(clh**power, filter_kernel)
    #tif.imsave("/home/wanglab/Desktop/filtered_z200-300.tif", filtered[200:300].astype("float32"))
    #
    #plt.figure()
    #plt.imshow(filtered[300], "gist_yarg")

    thresholded = filtered > filtered.mean() + thresh * filtered.std()
    labelled, nlab = label(thresholded)

    #plt.figure()
    sizes = [np.sum(labelled == i) for i in range(1, nlab + 1)]
    vals = [i + 1 for i in np.argsort(sizes)[::-1]]
    arr = np.in1d(labelled, vals).reshape(labelled.shape)

    seg_dst = os.path.join(dst, "rawdata_segmentations")
    makedir(seg_dst)
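
The equalize_adapthist call above is presumably skimage's CLAHE (skimage.exposure.equalize_adapthist); a small standalone sketch on a synthetic frame, assuming that import:

import numpy as np
from skimage.exposure import equalize_adapthist  # assumed source of the function

frame = np.random.default_rng(2).random((100, 200))
eq = equalize_adapthist(frame, clip_limit=0.05, kernel_size=(50, 100))
print(eq.shape, eq.min() >= 0, eq.max() <= 1)  # output is rescaled to [0, 1]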
Example #11
    # Now upsample back to 50 Hz.
    # Seems intuitive to continue the previous observation...
    for ii in range(CA_SAMPLING_RATE * STIMULUS_DURATION):
        ii_d = int(float(ii)*float(MOVIE_REFRESH_RATE/MOVIE_DOWNSAMPLE_FACTOR)\
                   / CA_SAMPLING_RATE)
        movie[:, :, ii] = movie_d[:, :, ii_d]
    T = movie.shape[2]

    # Centre about 128, and normalise to [-1,1]
    movie = (movie - 128.0) / 128.0

    window = np.dot(
        np.kaiser(movie.shape[0], 2.).reshape((movie.shape[0], 1)),
        np.kaiser(movie.shape[1], 2.).reshape((movie.shape[1], 1)).T)
    movie_dog = np.dstack([
        window * (gfilt(movie[:, :, t], r1) - gfilt(movie[:, :, t], r2))
        for t in range(T)
    ])
    movie_ddt = np.dstack((movie[:, :, 0:1], np.diff(movie, axis=2)))
    movie_dog_ddt = np.dstack((movie_dog[:, :, 0:1], np.diff(movie_dog,
                                                             axis=2)))

    print('Dumping movie as npy/mp4/pngs...')
    dump_movie(os.path.join(MOVIE_DIR, str(i)), movie, CA_SAMPLING_RATE)
    dump_movie(os.path.join(MOVIE_DIR, str(i)),
               movie_dog,
               CA_SAMPLING_RATE,
               movie_type='dog')
    dump_movie(os.path.join(MOVIE_DIR, str(i)),
               movie_ddt,
               CA_SAMPLING_RATE,
               movie_type='ddt')
Example #12
    movie = np.zeros((movie_d.shape[0], movie_d.shape[1],
                      CA_SAMPLING_RATE * STIMULUS_DURATION))
    # Now upsample back to 50 Hz.
    # Seems intuitive to continue the previous observation...
    for ii in range(CA_SAMPLING_RATE * STIMULUS_DURATION):
        ii_d = int(float(ii)*float(MOVIE_REFRESH_RATE/MOVIE_DOWNSAMPLE_FACTOR)\
                   / CA_SAMPLING_RATE)
        movie[:,:,ii] = movie_d[:,:,ii_d]
    T = movie.shape[2]

    # Centre about 128, and normalise to [-1,1]
    movie = (movie - 128.0) / 128.0
    
    window = np.dot(np.kaiser(movie.shape[0],2.).reshape((movie.shape[0],1)),
                    np.kaiser(movie.shape[1],2.).reshape((movie.shape[1],1)).T)
    movie_dog=np.dstack([window*(gfilt(movie[:,:,t],r1)-gfilt(movie[:,:,t],r2))
                         for t in range(T)])
    movie_ddt = np.dstack((movie[:,:,0:1], np.diff(movie, axis=2)))
    movie_dog_ddt = np.dstack((movie_dog[:,:,0:1], np.diff(movie_dog, axis=2)))

    print('Dumping movie as npy/mp4/pngs...')
    dump_movie(os.path.join(MOVIE_DIR, str(i)), movie, CA_SAMPLING_RATE)
    dump_movie(os.path.join(MOVIE_DIR, str(i)), movie_dog, CA_SAMPLING_RATE,
               movie_type='dog')
    dump_movie(os.path.join(MOVIE_DIR, str(i)), movie_ddt, CA_SAMPLING_RATE,
               movie_type='ddt')
    dump_movie(os.path.join(MOVIE_DIR, str(i)), movie_dog_ddt, CA_SAMPLING_RATE,
               movie_type='dog_ddt')
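
The windowed gfilt(..., r1) - gfilt(..., r2) term in the two snippets above is a difference-of-Gaussians band-pass. A tiny sketch on one synthetic frame (r1 and r2 are not defined in the snippet, so the sigmas below are assumed):

import numpy as np
from scipy.ndimage import gaussian_filter as gfilt

frame = np.random.default_rng(1).normal(size=(64, 64))
r1, r2 = 1.0, 4.0  # ASSUMED narrow/wide sigmas; r1, r2 are not defined in the snippet
dog = gfilt(frame, r1) - gfilt(frame, r2)
print(dog.std() < frame.std())  # True: both low and high frequencies are attenuated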

################################################################################
""" Grating scenes