Example #1
import math

from scipy.ndimage import minimum_filter1d


def _rolling_nanmin_1d(a, w=None):
    """
    Compute the rolling min for 1-D while ignoring NaNs.

    This essentially replaces:

        `np.nanmin(rolling_window(T[..., start:stop], m), axis=T.ndim)`

    Parameters
    ----------
    a : ndarray
        The input array

    w : int, default None
        The rolling window size

    Returns
    -------
    output : ndarray
        Rolling window nanmin.
    """
    if w is None:
        w = a.shape[0]

    half_window_size = int(math.ceil((w - 1) / 2))
    return minimum_filter1d(a, size=w)[half_window_size:half_window_size +
                                       a.shape[0] - w + 1]
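A quick sanity check of the snippet above (my own sketch, not from the source project; assumes the imports added above): on finite data the result matches a plain sliding-window minimum. Note that minimum_filter1d itself does not treat NaNs specially, so the NaN-ignoring behavior ultimately depends on how the caller prepares the input.

import numpy as np

a = np.array([3., 1., 4., 1., 5., 9., 2.])
w = 3
expected = np.array([a[i:i + w].min() for i in range(len(a) - w + 1)])
assert np.allclose(_rolling_nanmin_1d(a, w), expected)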
Example #2
    def _method_df2(nu, op, alpha=1.0, beta=0.5, nmin_filter=10, log=True):
        """
        Cost function: use the normalized second-order derivative of the spectra

        Parameters:
        -----------
            - nu [ndarray]: photon group boundaries
            - op [ndarray]: spectra
            - alpha [float]: in [0.0, 2.0], default 1.0
                        alpha < 1: low sensitivity to gradients in the spectra
                        alpha > 1: high sensitivity to gradients in the spectra
        """
        from scipy.ndimage.filters import minimum_filter1d
        if log:
            err = np.abs(np.gradient(np.gradient(np.log10(op))))
        else:
            err = np.abs(np.gradient(np.gradient(op)))
        err /= err.max()  # normalise to 1 so the exponentiation gives predictable results
        err = err**alpha
        if not log:
            err /= minimum_filter1d(op, nmin_filter)
        err /= err.sum()
        err_f = (err * (1 - beta) + beta / len(op))
        err_f /= err_f.sum()
        return err_f
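A hedged usage sketch for _method_df2 (the arrays are illustrative): op is a strictly positive spectrum sampled on the photon-group boundaries nu, and the returned error weights sum to 1.

import numpy as np

nu = np.linspace(0.1, 10.0, 200)
op = np.exp(-(nu - 5.0) ** 2) + 0.1   # strictly positive, so log10 is safe
weights = _method_df2(nu, op)
assert np.isclose(weights.sum(), 1.0)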
Example #3
from scipy.ndimage import minimum_filter1d, uniform_filter1d


def get_smoothed_running_minimum(timeseries, tau1=30, tau2=100):
    result = minimum_filter1d(uniform_filter1d(timeseries,
                                               tau1,
                                               mode='nearest'),
                              tau2,
                              mode='reflect')
    return result
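A minimal usage sketch (values are illustrative): the uniform filter smooths the trace, and the running minimum filter then tracks its slowly-varying lower envelope.

import numpy as np

rng = np.random.default_rng(0)
t = np.arange(1000)
trace = np.sin(t / 50.0) + 0.002 * t + 0.1 * rng.standard_normal(1000)
baseline = get_smoothed_running_minimum(trace)   # default tau1=30, tau2=100
detrended = trace - baseline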
Example #4
import numpy as np
import scipy.ndimage as filters  # provides gaussian_filter, minimum_filter1d, maximum_filter1d


def dff(C, sig_baseline=10, win_baseline=300, sig_output=3, method='maximin'):
    """
    delta F / F using maximin method from Suite2P
    inputs: C - neuropil subtracted fluorescence (neurons x timepoints)
    outputs dFF -  neurons x timepoints

    :param C:
    :param sig_baseline:
    :param win_baseline:
    :param sig_output:
    :param method:
    :return:
    """

    if method == 'maximin':  # windowed baseline estimation
        flow = filters.gaussian_filter(C, [0, sig_baseline])
        flow = filters.minimum_filter1d(flow, win_baseline, axis=1)
        flow = filters.maximum_filter1d(flow, win_baseline, axis=1)
    else:
        flow = None
        raise NotImplementedError

    C -= flow  # subtract baseline (dF)
    C /= flow  # divide by baseline (dF/F)
    return filters.gaussian_filter(C, [0, sig_output])  # smooth result
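A hedged usage sketch for dff (fake data; note that dff modifies C in place, hence the copy). The Gaussian smooths each row along time, and the min-then-max pair estimates a running baseline.

import numpy as np

rng = np.random.default_rng(0)
C = rng.random((5, 2000)) + 1.0    # strictly positive fake fluorescence
dFF = dff(C.copy())                # copy: dff subtracts/divides in place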
Example #5
File: AGS.py Project: fimay/hedp
    def _method_df2(nu, op, alpha=1.0, beta=0.5, nmin_filter=10, log=True):
        """
        Cost function: use the normalized second-order derivative of the spectra

        Parameters:
        -----------
            - nu [ndarray]: photon group boundaries
            - op [ndarray]: spectra
            - alpha [float]: in [0.0, 2.0], default 1.0
                        alpha < 1: low sensitivity to gradients in the spectra
                        alpha > 1: high sensitivity to gradients in the spectra
        """
        from scipy.ndimage.filters import minimum_filter1d
        if log:
            err = np.abs(np.gradient(np.gradient(np.log10(op))))
        else:
            err = np.abs(np.gradient(np.gradient(op)))
        err /= err.max()  # normalise to 1 so the exponentiation gives predictable results
        err = err**alpha
        if not log:
            err /= minimum_filter1d(op, nmin_filter)
        err /= err.sum()
        err_f = (err*(1-beta)+beta/len(op))
        err_f /= err_f.sum()
        return err_f
Example #6
File: util.py Project: ntlamdut/FMPy
import numpy as np


def validate_signal(t, y, t_ref, y_ref, num=1000, dx=20, dy=0.1):
    """ Validate a signal y(t) against a reference signal y_ref(t_ref) by creating a band
    around y_ref and finding the values in y outside the band

    Parameters:

        t       time of the signal
        y       values of the signal
        t_ref   time of the reference signal
        y_ref   values of the reference signal
        num     number of samples for the band
        dx      horizontal width of the band in samples
        dy      vertical distance of the band to y_ref

    Returns:

        t_band  time values of the band
        y_min   lower limit of the band
        y_max   upper limit of the band
        i_out   indices of the values in y outside the band
    """

    from scipy.ndimage.filters import maximum_filter1d, minimum_filter1d
    from scipy.interpolate import interp1d

    # re-sample the reference signal into a uniform grid
    t_band = np.linspace(start=t_ref[0], stop=t_ref[-1], num=num)

    # make t_ref strictly monotonic by adding epsilon to duplicate sample times
    for i in range(1, len(t_ref)):
        while t_ref[i - 1] >= t_ref[i]:
            t_ref[i] = t_ref[i] + 1e-13

    interp_method = 'linear' if y.dtype == np.float64 else 'zero'
    y_band = interp1d(x=t_ref, y=y_ref, kind=interp_method)(t_band)

    y_band_min = np.min(y_band)
    y_band_max = np.max(y_band)

    # calculate the width of the band
    if y_band_min == y_band_max:
        w = 0.5 if y_band_min == 0 else np.abs(y_band_min) * dy
    else:
        w = (y_band_max - y_band_min) * dy

    # calculate the lower and upper limits
    y_min = minimum_filter1d(input=y_band, size=dx) - w
    y_max = maximum_filter1d(input=y_band, size=dx) + w

    # find outliers
    y_min_i = np.interp(x=t, xp=t_band, fp=y_min)
    y_max_i = np.interp(x=t, xp=t_band, fp=y_max)
    i_out = np.logical_or(y < y_min_i, y > y_max_i)

    # do not count outliers outside the t_ref
    i_out = np.logical_and(i_out, t > t_band[0])
    i_out = np.logical_and(i_out, t < t_band[-1])

    return t_band, y_min, y_max, i_out
Example #7
import scipy.ndimage as filters  # provides gaussian_filter1d, minimum_filter1d, maximum_filter1d


def compute_sliding_minmax(array, Window, sig=2):
    if sig > 0:
        Flow = filters.gaussian_filter1d(array, sig)
    else:
        Flow = array
    Flow = filters.minimum_filter1d(Flow, Window, mode='wrap')
    Flow = filters.maximum_filter1d(Flow, Window, mode='wrap')
    return Flow
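The min-then-max pair above is a grey-scale morphological opening: the minimum filter erodes the (optionally smoothed) trace down to its local floor, and the maximum filter of the same width restores the floor's extent, giving a running baseline. A hedged usage sketch:

import numpy as np

x = np.sin(np.linspace(0, 6 * np.pi, 500)) + 0.2 * np.random.randn(500)
baseline = compute_sliding_minmax(x, Window=50)   # sig=2 pre-smoothing by default
residual = x - baseline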
Example #8
import scipy.ndimage as filters  # provides gaussian_filter, minimum_filter1d, maximum_filter1d


def center_baseline(V, sigma=100, window=500):
    ''' centers V so the baseline is at 0 '''
    Flow = filters.gaussian_filter(V.T, [0.,sigma])
    Flow = filters.minimum_filter1d(Flow, window)
    Flow = filters.maximum_filter1d(Flow, window)
    V_centered = (V.T - Flow).T
    #V_centered = (V.T - Flow.mean(axis=1)[:,np.newaxis]).T
    return V_centered
Example #9
import numpy as np
from scipy.ndimage import maximum_filter1d, minimum_filter1d


def find_reps(y, threshold, open_size, close_size):
    """
    From the Y profile of a barbell's path, determine the concentric phase of each rep.

    The algorithm is as follows:
        1. Compute the gradient (dy/dt) of the Y motion
        2. Binarize the gradient signal by a minimum threshold value to eliminate noise.
        3. Perform 1D opening by open_size using a minimum then maximum filter in series.
        4. Perform 1D closing by close_size using a maximum then minimum filter in series.

    The result is a step function that is true for every time point at which the concentric (+Y) phase of a rep
    is being performed.

    Parameters
    ----------
    y : (N) array
        Y component of the motion of the barbell path.
    threshold : float
        Minimum acceptable value of the gradient (dY/dt) to indicate a rep.
        Increasing this can help eliminate noise, but may cause a small delay after a rep begins to when it is
        counted, therefore underestimating the time to complete a rep.
    open_size : int
        Minimum length of time (in frames) that it takes to complete a rep.
        Increase this if there are false positive spikes in the rep step signal that are small in width.
    close_size : int
        Minimum length of time that could be between reps.
        Increase this if there are false breaks between reps that should be continuous.

    Returns
    -------
    (N) array
        Step signal representing when reps are performed. (1 indicates concentric phase of rep, 0 indicates no rep).
    """
    ygrad = np.gradient(y)
    rep_signal = np.where(ygrad > threshold, 1, 0)

    # Opening to remove spikes
    rep_signal = maximum_filter1d(minimum_filter1d(rep_signal, open_size),
                                  open_size)

    # Closing to connect movements (as in the step up from the jerk)
    rep_signal = minimum_filter1d(maximum_filter1d(rep_signal, close_size),
                                  close_size)

    return rep_signal
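A self-contained check on synthetic data (my construction, not from the source): three rising ramps separated by pauses should yield three contiguous blocks of ones.

import numpy as np

ramp = np.concatenate([np.linspace(0., 1., 30), np.linspace(1., 0., 30)])
y = np.concatenate([np.zeros(20), ramp] * 3 + [np.zeros(20)])
reps = find_reps(y, threshold=0.01, open_size=5, close_size=10)
# reps is 1 during each ~30-frame concentric (rising) phase, 0 elsewhere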
Example #10
from scipy.ndimage import minimum_filter1d, uniform_filter1d


def running_min(X, tau1, tau2):
    # debugging implementation:
    # return minimum_filter1d(X, tau2, mode='nearest')
    # previous implementation:
    mode = 'nearest'
    result = minimum_filter1d(uniform_filter1d(X, tau1, mode=mode),
                              tau2,
                              mode='reflect')
    return result
Example #11
    def dilate_ranges(self, ranges, angle_increment):
        # unvectorized reference implementation:
        # i_dilation = int(round(self.angle_dilation / angle_increment))
        # for i in range(len(ranges)):
        #     dilated[i] = np.min(ranges[i - i_dilation:i + i_dilation])
        i_dilation = int(round(ANGLE_DILATION / angle_increment))  # one-sided
        i_dilation = i_dilation * 2 + 1  # two-sided
        dilated = minimum_filter1d(ranges, size=i_dilation)
        return dilated
Example #12
File: dcnv.py Project: karimali34/suite2p
import numpy as np
import scipy.ndimage as filters  # provides gaussian_filter, minimum_filter1d, maximum_filter1d


def preprocess(F: np.ndarray,
               baseline: str,
               win_baseline: float,
               sig_baseline: float,
               fs: float,
               prctile_baseline: float = 0.9) -> np.ndarray:
    """ preprocesses fluorescence traces for spike deconvolution

    baseline-subtraction with window 'win_baseline'
    
    Parameters
    ----------------

    F : float, 2D array
        size [neurons x time], in pipeline uses neuropil-subtracted fluorescence

    baseline : str
        setting that describes how to compute the baseline of each trace

    win_baseline : float
        window (in seconds) for the min/max baseline filters

    sig_baseline : float
        width of Gaussian filter in seconds

    fs : float
        sampling rate per plane

    prctile_baseline : float
        percentile of trace to use as baseline if using `constant_prctile` for baseline
    
    Returns
    ----------------

    F : float, 2D array
        size [neurons x time], baseline-corrected fluorescence

    """
    win = int(win_baseline * fs)
    if baseline == 'maximin':
        Flow = filters.gaussian_filter(F, [0., sig_baseline])
        Flow = filters.minimum_filter1d(Flow, win)
        Flow = filters.maximum_filter1d(Flow, win)
    elif baseline == 'constant':
        Flow = filters.gaussian_filter(F, [0., sig_baseline])
        Flow = np.amin(Flow)
    elif baseline == 'constant_prctile':
        Flow = np.percentile(F, prctile_baseline, axis=1)
        Flow = np.expand_dims(Flow, axis=1)
    else:
        Flow = 0.

    F = F - Flow

    return F
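A hedged usage sketch for the typed preprocess above (fake data; parameter values are illustrative):

import numpy as np

F = np.random.randn(10, 9000).astype(np.float32) + 100.   # neurons x time
F_corr = preprocess(F, baseline='maximin', win_baseline=60.,
                    sig_baseline=10., fs=30.)              # win = 1800 frames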
Example #13
import numpy as np


def validate_signal(t, y, t_ref, y_ref, num=1000, dx=20, dy=0.1):
    """ Validate a signal y(t) against a reference signal y_ref(t_ref) by creating a band
    around y_ref and finding the values in y outside the band

    Parameters:

        t       time of the signal
        y       values of the signal
        t_ref   time of the reference signal
        y_ref   values of the reference signal
        num     number of samples for the band
        dx      horizontal width of the band in samples
        dy      vertical distance of the band to y_ref

    Returns:

        t_band  time values of the band
        y_min   lower limit of the band
        y_max   upper limit of the band
        i_out   indices of the values in y outside the band
    """

    from scipy.ndimage.filters import maximum_filter1d, minimum_filter1d

    # re-sample the reference signal into a uniform grid
    t_band = np.linspace(start=t_ref[0], stop=t_ref[-1], num=num)

    # sort out the duplicate samples before the interpolation
    m = np.concatenate(([True], np.diff(t_ref) > 0))

    y_band = np.interp(x=t_band, xp=t_ref[m], fp=y_ref[m])

    y_band_min = np.min(y_band)
    y_band_max = np.max(y_band)

    # calculate the width of the band
    if y_band_min == y_band_max:
        w = 0.5 if y_band_min == 0 else np.abs(y_band_min) * dy
    else:
        w = (y_band_max - y_band_min) * dy

    # calculate the lower and upper limits
    y_min = minimum_filter1d(input=y_band, size=dx) - w
    y_max = maximum_filter1d(input=y_band, size=dx) + w

    # find outliers
    y_min_i = np.interp(x=t, xp=t_band, fp=y_min)
    y_max_i = np.interp(x=t, xp=t_band, fp=y_max)
    i_out = np.logical_or(y < y_min_i, y > y_max_i)

    # do not count outliers outside the t_ref
    i_out = np.logical_and(i_out, t > t_band[0])
    i_out = np.logical_and(i_out, t < t_band[-1])

    return t_band, y_min, y_max, i_out
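A minimal driving sketch (signal values are illustrative): validate a perturbed copy of a reference curve and count the samples that leave the band.

import numpy as np

t_ref = np.linspace(0., 10., 500)
y_ref = np.sin(t_ref)
t = t_ref.copy()
y = y_ref + np.where(t > 5., 0.5, 0.)   # step disturbance after t = 5
t_band, y_min, y_max, i_out = validate_signal(t, y, t_ref, y_ref)
print(int(i_out.sum()), 'samples outside the band')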
Example #14
def _compute_alw(y, interval):
    a, b = interval
    if a == b:
        return y
    y = shift(y, -a, mode="nearest")
    b = min(b, len(y))

    # compute offset to left end of window from center
    width = int(abs(b - a)) + 1
    center = width // 2

    return minimum_filter1d(y, b - a + 1, mode="nearest", origin=-center)
Example #15
import numpy as np
from scipy.ndimage import maximum_filter1d, minimum_filter1d


def filter1d_same(a: np.ndarray, W: int, max_or_min: str, fillna=np.nan):
    out_dtype = np.full(0, fillna).dtype
    hW = (W - 1) // 2  # Half window size
    if max_or_min == 'max':
        out = maximum_filter1d(a, size=W, origin=hW)
    else:
        out = minimum_filter1d(a, size=W, origin=hW)
    if out.dtype is out_dtype:
        out[:W - 1] = fillna
    else:
        out = np.concatenate((np.full(W - 1, fillna), out[W - 1:]))
    return out
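The origin=hW trick turns the centered filter into a trailing window, so the result lines up with a pandas-style rolling max/min; the first W - 1 positions, which would otherwise be computed from reflected edge values, are overwritten with fillna. A quick check (my sketch):

import numpy as np

a = np.array([3., 1., 4., 1., 5., 9.])
out = filter1d_same(a, W=3, max_or_min='max')
print(out)   # [nan nan  4.  4.  5.  9.], same as pd.Series(a).rolling(3).max()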
Example #16
import pandas as pd
from scipy.ndimage import label, minimum_filter1d


def sliding_interval_filter(ts, size):
    """USGS HYSEP sliding interval method.

    The USGS HYSEP sliding interval method as described in `Sloto & Crouse, 1996`_.

    The flow series is filtered with scipy.ndimage.minimum_filter1d over a
    window of `size` samples.

    .. _Sloto & Crouse, 1996:
        Sloto, Ronald A., and Michele Y. Crouse. “HYSEP: A Computer Program for Streamflow Hydrograph Separation and
        Analysis.” USGS Numbered Series. Water-Resources Investigations Report. Geological Survey (U.S.), 1996.
        http://pubs.er.usgs.gov/publication/wri964040.

    :param ts: flow series (pandas.Series)
    :param size: size of the sliding window, in samples
    :return: (baseflow, quickflow) tuple of pandas.Series
    """
    # TODO: check for the presence of no-data values
    if (ts.isnull()).any():
        blocks, nfeatures = label(~ts.isnull())
        block_list = [ts[blocks == i] for i in range(1, nfeatures + 1)]
        na_df = ts[blocks == 0]
        block_bf = [
            pd.Series(data=minimum_filter1d(block, size, mode='reflect'),
                      index=block.index) for block in block_list
        ]
        baseflow = pd.concat(block_bf + [na_df], axis=0)
        baseflow.sort_index(inplace=True)
    else:
        baseflow = pd.Series(data=minimum_filter1d(ts, size, mode='reflect'),
                             index=ts.index)

    quickflow = ts - baseflow

    baseflow.name = 'baseflow'
    quickflow.name = 'quickflow'

    return baseflow, quickflow
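A hedged usage sketch (synthetic flow record; assumes the imports added above plus pandas):

import numpy as np
import pandas as pd

idx = pd.date_range('2000-01-01', periods=365, freq='D')
flow = pd.Series(5. + 3. * np.abs(np.sin(np.arange(365) / 10.)), index=idx)
baseflow, quickflow = sliding_interval_filter(flow, size=31)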
Example #17
import numpy as np
from scipy.ndimage import minimum_filter1d


def get_seam(image_energy):
    # build the cumulative energy matrix
    for i in range(1, image_energy.shape[0]):
        image_energy[i] += minimum_filter1d(image_energy[i - 1], 3)

    # trace back the seam with the minimal energy:
    seam_mask = np.ones_like(image_energy, bool)
    j_pos = np.argmin(image_energy[-1])
    seam_mask[-1, j_pos] = False
    for i in range(image_energy.shape[0] - 2, -1, -1):
        j_pos += np.argmin(image_energy[i: i + 1, max(0, j_pos - 1): min(j_pos + 2, image_energy.shape[1])]) - \
                 (j_pos != 0)
        seam_mask[i, j_pos] = False

    return seam_mask
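A hedged sketch of one seam-carving step built on get_seam (the carving line is my own, not from the source). get_seam accumulates costs into image_energy in place, so pass a copy if the raw energy map is still needed:

import numpy as np

energy = np.random.rand(6, 8)
mask = get_seam(energy.copy())                        # exactly one False per row
carved = energy[mask].reshape(energy.shape[0], -1)    # width shrinks by one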
Example #18
import numpy as np
import scipy.ndimage as filters  # provides gaussian_filter, minimum_filter1d, maximum_filter1d


def preprocess(F, ops):
    sig = ops['sig_baseline']
    win = int(ops['win_baseline'] * ops['fs'])
    if ops['baseline'] == 'maximin':
        Flow = filters.gaussian_filter(F, [0., sig])
        Flow = filters.minimum_filter1d(Flow, win)
        Flow = filters.maximum_filter1d(Flow, win)
    elif ops['baseline']=='constant':
        Flow = filters.gaussian_filter(F,    [0., sig])
        Flow = np.amin(Flow)
    elif ops['baseline']=='constant_prctile':
        Flow = np.percentile(F, ops['prctile_baseline'], axis=1)
        Flow = np.expand_dims(Flow, axis=1)
    else:
        Flow = 0.

    F = F - Flow

    return F
Example #19
import numpy as np
import scipy.ndimage as filters  # provides gaussian_filter, minimum_filter1d, maximum_filter1d


def preprocess(F, ops):
    """ preprocesses fluorescence traces for spike deconvolution

    baseline-subtraction with window 'win_baseline'
    
    Parameters
    ----------------

    F : float, 2D array
        size [neurons x time], in pipeline uses neuropil-subtracted fluorescence

    ops : dictionary
        'baseline', 'win_baseline', 'sig_baseline', 'fs',
        (optional 'prctile_baseline' needed if ops['baseline']=='constant_prctile')
    
    Returns
    ----------------

    F : float, 2D array
        size [neurons x time], baseline-corrected fluorescence

    """
    sig = ops['sig_baseline']
    win = int(ops['win_baseline'] * ops['fs'])
    if ops['baseline'] == 'maximin':
        Flow = filters.gaussian_filter(F, [0., sig])
        Flow = filters.minimum_filter1d(Flow, win)
        Flow = filters.maximum_filter1d(Flow, win)
    elif ops['baseline'] == 'constant':
        Flow = filters.gaussian_filter(F, [0., sig])
        Flow = np.amin(Flow)
    elif ops['baseline'] == 'constant_prctile':
        Flow = np.percentile(F, ops['prctile_baseline'], axis=1)
        Flow = np.expand_dims(Flow, axis=1)
    else:
        Flow = 0.

    F = F - Flow

    return F
Example #20
File: cross_check.py Project: DBDBDDB/FMPy
import numpy as np
from scipy.ndimage import maximum_filter1d, minimum_filter1d


def validate_signal(t, y, t_ref, y_ref, num=1000, dx=20, dy=0.1):
    """ Validate a signal y(t) against a reference signal y_ref(t_ref)

        t       time of the signal
        y       values of the signal
        t_ref   time of the reference signal
        y_ref   values of the reference signal

    """

    # re-sample the reference signal into a uniform grid
    t_band = np.linspace(start=t_ref[0], stop=t_ref[-1], num=num)

    # sort out the duplicate samples before the interpolation
    m = np.concatenate(([True], np.diff(t_ref) > 0))

    y_band = np.interp(x=t_band, xp=t_ref[m], fp=y_ref[m])

    y_band_min = np.min(y_band)
    y_band_max = np.max(y_band)

    # calculate the width of the band
    if y_band_min == y_band_max:
        w = 0.5 if y_band_min == 0 else np.abs(y_band_min) * dy
    else:
        w = (y_band_max - y_band_min) * dy

    # calculate the lower and upper limits
    y_min = minimum_filter1d(input=y_band, size=dx) - w
    y_max = maximum_filter1d(input=y_band, size=dx) + w

    # find outliers
    y_min_i = np.interp(x=t, xp=t_band, fp=y_min)
    y_max_i = np.interp(x=t, xp=t_band, fp=y_max)
    i_out = np.logical_or(y < y_min_i, y > y_max_i)

    return t_band, y_min, y_max, i_out
Example #21
def analyze_score(
        data_base_dir='/playpen/throat/Endoscope_Study/UNC_HN_Laryngoscopy_004/',
        fname='opticalflowscore2.txt',
        imgnames=None,
        score=None):
    print('Performing boundary detection ... ')
    if imgnames is None:
        assert (score is None)
        score = []
        imgnames = []
        fname = data_base_dir + fname
        with open(fname, 'r') as ins:
            for line in ins:
                pair = line.split()
                imgname = pair[0]
                imgnames.append(imgname)
                score.append(float(pair[1]))

    folder = data_base_dir + 'images/'
    outputfolder = data_base_dir + 'keyframes/'

    if os.path.isdir(outputfolder):
        rmtree(outputfolder)
    os.mkdir(outputfolder)

    keyframes = []
    keyframeids = []
    keyframesfilename = data_base_dir + 'keyframes.txt'
    if os.path.exists(keyframesfilename):
        os.remove(keyframesfilename)
    keyframesfile = open(keyframesfilename, 'w')

    for i in range(len(score)):
        #os.system('cp ' + folder + imgnames[i] + ' ' + outputfolder + imgnames[i])
        keyframesfile.write('%s\n' % imgnames[i])
        keyframes.append(imgnames[i])
        keyframeids.append(i)

    # boundary detection

    boundaries = []
    ignore_ends = True
    #------------------------------------ boundary detection method 2:
    if (ignore_ends):
        score[:int(len(score) * 0.1)] = np.ones_like(
            score[:int(len(score) * 0.1)])
        score[int(len(score) * 0.9):] = np.ones_like(
            score[int(len(score) * 0.9):])

    threshold = np.percentile(score, 1)
    scoreextrema = minimum_filter1d(input=score, size=301)
    localextrema = []
    localextremaids = []
    for i in range(len(score)):
        if score[i] == scoreextrema[
                i] and score[i] < threshold and score[i] < 0.8:
            boundaries.append(imgnames[i])
            localextrema.append(score[i])
            localextremaids.append(i)
    #------------------------------------- boundary detection method 3:
    # brutely divide the sequence into 100-frame chunks


#     stepsize = 100;
#     tempid = stepsize - 1;
#     while(tempid < len(score)):
#         boundaries.append(imgnames[tempid])
#         tempid += stepsize

# ----------------------------------- boundary detection results:
#plt.plot(localextremaids, localextrema, 'ro')
    print('number of boundaries = %d' % len(boundaries))
    print('boundaries :', boundaries)
    # print('boundaries motion values: ', localextrema)

    #------------------------------------ split video by boundaries
    shots = []
    boundaryid = 0
    shot = []
    for i in range(len(keyframes)):
        if boundaryid >= len(boundaries):
            shot.append(keyframes[i])
            continue

        if keyframes[i] < boundaries[boundaryid]:
            shot.append(keyframes[i])
        else:
            shots.append(shot)
            shot = []
            shot.append(keyframes[i])
            boundaryid = boundaryid + 1
    shots.append(shot)

    # print(shots)
    print('number of shots = %d' % len(shots))
    subid = 0
    min_shot_length = 40
    print('minimum shot length threshold = %d' % min_shot_length)
    for i in range(len(shots)):
        shot = shots[i]
        print('shot %d: %d frames' % (i, len(shot)), end='')
        if len(shot) > min_shot_length:
            subfolder = outputfolder + str(subid) + '/'
            os.mkdir(subfolder)
            for j in shot:
                os.system('cp ' + folder + j + ' ' + subfolder + j)
            print(' ')
            subid = subid + 1
        else:
            print('     discarded')
Example #22
h_samples, = plt.plot(sampleids, samples, 'r.')
h_scores, = plt.plot(sampleids, samplescores, 'b+')
h_rate, = plt.plot(rate)
h_low, = plt.plot(2600, rate[2600], 'b^')
h_high, = plt.plot(3770, rate[3770], 'r^')
plt.legend([h_samples, h_scores, h_rate, h_low, h_high], [
    'Samples', 'Optical Flow Scores', 'Sample Rate', 'Low Motion Part Example',
    'High Motion Part Example'
])
plt.show()
#for i in range(len(imgnames)):
#    os.system('cp ' + folder + imgnames[i] + ' ' + outputfolder + imgnames[i])
#exit()

scoreminima = minimum_filter1d(input=score, size=4)
localminima = []
keyframes = []
keyframeids = []
keyframesfilename = data_base_dir + 'keyframes.txt'
if os.path.exists(keyframesfilename):
    os.remove(keyframesfilename)
keyframesfile = open(keyframesfilename, 'w')

#for i in range(len(score)):
#    if score[i] == scoreminima[i]:
#        localminima.append(imgnames[i])
#        #os.system('cp ' + folder + imgnames[i] + ' ' + outputfolder + imgnames[i])
#        keyframesfile.write('%s\n' % imgnames[i])
#        keyframes.append(imgnames[i])
#        keyframeids.append(i)
Example #23
def _compute_bounded_globally(x, a):
    z1 = minimum_filter1d(x, a, mode="nearest")
    z2 = shift(x, -a, cval=TOP)
    z3 = compute_and_binary(z2, z1)
    z = compute_and_binary(x, z3)
    return z
Example #24
def main(args):
    data_base_dir = args.dir
    data_dir = data_base_dir + "images-raw/"
    outputfolder = data_base_dir + 'keyframes/'
    min_shot_length = args.min_shot_length
    nmins_window_size = args.nmins_window_size
    percentile_threshold = args.percentile_threshold
    absolute_threshold = args.absolute_threshold
    ignore_ends = args.ignore_ends

    if os.path.isdir(data_base_dir + 'classifiedgood'):
        rmtree(data_base_dir + 'classifiedgood')
    os.mkdir(data_base_dir + 'classifiedgood')
    if os.path.isdir(data_base_dir + 'classifiedbad'):
        rmtree(data_base_dir + 'classifiedbad')
    os.mkdir(data_base_dir + 'classifiedbad')
    if os.path.isdir(outputfolder):
        rmtree(outputfolder)
    os.mkdir(outputfolder)

    #======================
    #Load net parames
    #======================
    style_weights = "./model/weights.pretrained.caffemodel"
    test_net = caffe.Net(style_net(train=False, learn_all=False),
                         style_weights, caffe.TEST)
    test_net.forward()

    MEAN_FILE = caffe_root + 'data/ilsvrc12/imagenet_mean.binaryproto'
    Mean_blob = caffe.proto.caffe_pb2.BlobProto()
    Mean_blob.ParseFromString(open(MEAN_FILE, 'rb').read())
    # will mean blob to numpy.array
    Mean_npy = np.array(caffe.io.blobproto_to_array(Mean_blob))[0]

    Mean_npy = Mean_npy.mean(1).mean(
        1)  # average over pixels to obtain the mean (BGR) pixel values
    print('mean-subtracted values:', list(zip('BGR', Mean_npy)))

    # create transformer for the input called 'data'
    transformer = caffe.io.Transformer(
        {'data': test_net.blobs['data'].data.shape})
    transformer.set_transpose(
        'data', (2, 0, 1))  # move image channels to outermost dimension
    transformer.set_mean(
        'data', Mean_npy)  # subtract the dataset-mean value in each channel
    #transformer.set_raw_scale('data', 255)      # rescale from [0, 1] to [0, 255]
    transformer.set_channel_swap('data',
                                 (2, 1, 0))  # swap channels from RGB to BGR

    atup = ('bad', 'good')
    style_labels = list(atup)

    classifiedgood = []
    opticalflowscore = []
    sharpness = []

    filelist = os.listdir(data_dir)
    filelist.sort()
    #filelist = filelist[3000:3100]
    count = 1
    prev_of = None

    # ImageNet classification and optical flow motion estimation
    for imfile in filelist:
        if imfile.endswith(".jpg"):
            print('Classification and Motion Estimation: %d / %d' %
                  (count, len(filelist)))
            count = count + 1
            im = Image.open(data_dir + imfile)
            im = np.array(im, dtype=np.float32)
            transformed_image = transformer.preprocess('data', im)
            t = disp_preds(test_net,
                           transformed_image,
                           style_labels,
                           k=2,
                           name='style')

            if t[0] == 1:  # only do optical flow on frames classified as good
                classifiedgood.append(imfile)
                os.system("cp " + data_dir + imfile + " " + data_base_dir +
                          "classifiedgood")
                print('classified as good : ' + imfile)

                if len(opticalflowscore) == 0:
                    opticalflowscore.append(0)
                    cur_of = rgb2gray(im)
                else:
                    cur_of = rgb2gray(im)
                    flow = cv2.calcOpticalFlowFarneback(
                        prev_of, cur_of, 0.5, 3, 15, 3, 5, 1.2, 0)
                    opticalflowscore.append(
                        (np.sum(np.absolute(flow[..., 0])) +
                         np.sum(np.absolute(flow[..., 1]))) / cur_of.size)
                #if imfile == 'frame2967.jpg':
                #    print opticalflowscore[-1]
                #    temp = Image.open(data_dir + classifiedgood[-2])
                #    temp = np.array(temp, dtype=np.float32)
                #    temp = rgb2gray(temp)
                #    print temp - prev_of
                #    print imfile
                prev_of = cur_of
    # find the local minima of the optical flow motion estimation
    assert len(opticalflowscore) == len(classifiedgood)
    localminima = []
    localmin = minimum_filter1d(opticalflowscore, size=3)
    for i in range(len(opticalflowscore)):
        if localmin[i] == opticalflowscore[i]:
            print('optical flow local minimum:  ' + classifiedgood[i])
            #os.system('cp '+ folder + filelist[i] + ' ' + outputfolder + filelist[i])
            localminima.append(classifiedgood[i])

    # calculate sharpness
    print('Calculating sharpness ...')
    dl = DoubleList()
    D = {}
    for f in localminima:
        dl.append(f)
        D[f] = dl.tail
    for i in range(len(localminima)):
        img = readImage(data_dir + localminima[i])
        sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
        sharpness.append((np.sum(sobelx**2) + np.sum(sobely**2)) / img.size)
    order = np.argsort(sharpness)

    # use homography estimation to eliminate the redundant candidates
    keyframes = []
    score = []
    for i in range(len(order)):
        print('Homography estimation: %d / %d   ' % (i, len(localminima)))
        if order[i] == 0 or order[i] == len(localminima) - 1:
            keyframes.append(localminima[order[i]])
            score.append(0.9)
        else:
            ID = order[i]

            PrevName = D[localminima[ID]].prev.data
            NextName = D[localminima[ID]].next.data
            Prev = readImage(data_dir + PrevName)
            Next = readImage(data_dir + NextName)
            H = EstimateHomography(img1=Prev,
                                   img2=Next,
                                   use_builtin_ransac=True)
            warpped = cv2.warpPerspective(Prev, H,
                                          (Next.shape[1], Next.shape[0]))
            s = correlation_coefficient(warpped, Next)
            if s > 0.9:
                dl.remove_byaddress(D[localminima[ID]])
                print('Redundant: ' + localminima[ID])
            else:
                keyframes.append(localminima[order[i]])
                score.append(s)
                print('Keyframe:  ' + localminima[ID])
            #if localminima[ID] == 'frame3423.jpg':
            #    print PrevName
            #    print NextName
            #    print score[ID]
            #    print '-----------------------'

    ordkeyframes = np.argsort(keyframes)
    keyframes.sort()

    score = np.asarray(score)
    score = score[np.asarray(ordkeyframes)]

    # key frame selection
    # optical flow boundary detection
    if ignore_ends:
        opticalflowscore[:int(len(opticalflowscore) * 0.1)] = np.zeros_like(
            opticalflowscore[:int(len(opticalflowscore) * 0.1)])
        opticalflowscore[int(len(opticalflowscore) * 0.9):] = np.zeros_like(
            opticalflowscore[int(len(opticalflowscore) * 0.9):])
    boundaries = []
    threshold = np.percentile(opticalflowscore, percentile_threshold)
    opticalextrema = maximum_filter1d(opticalflowscore, nmins_window_size)
    for i in range(len(opticalflowscore)):
        if opticalflowscore[i] == opticalextrema[i] and opticalflowscore[
                i] > threshold and opticalflowscore[i] > absolute_threshold:
            boundaries.append(classifiedgood[i])
    if len(boundaries) == 0 or boundaries[-1] is not classifiedgood[-1]:
        boundaries.append(classifiedgood[-1])

    shots = []
    boundaryid = 0
    shot = []
    for i in range(len(keyframes)):
        if keyframes[i] <= boundaries[boundaryid]:
            shot.append(keyframes[i])
        else:
            shots.append(shot)
            shot = []
            shot.append(keyframes[i])
            boundaryid = boundaryid + 1
    shots.append(shot)

    print(shots)
    print(len(shots))

    final_valid_frames = 0
    subid = 0
    for i in range(len(shots)):
        shot = shots[i]
        print('shot %d: %d frames' % (i, len(shot)), end='')
        if 0 and len(shot) > min_shot_length:
            subfolder = outputfolder + str(subid) + '/'
            os.mkdir(subfolder)
            for j in shot:
                os.system('cp ' + data_dir + j + ' ' + subfolder + j)
            print(' ')
            final_valid_frames = final_valid_frames + len(shot)
            subid = subid + 1
        else:
            print('     discarded')

    # output result to file
    for i in keyframes:
        os.system('cp ' + data_dir + i + ' ' + outputfolder + i)
    resultfile = data_base_dir + 'keyframes.txt'
    if os.path.exists(resultfile):
        os.remove(resultfile)
    thefile = open(resultfile, 'w')
    for i in keyframes:
        thefile.write("%s\n" % i)

    # divide the sequence into shots
    #sub_sequences = data_base_dir + 'subsequences.txt'
    #if os.path.exists(sub_sequences):
    #    os.remove(sub_sequences)
    #subfile = open(sub_sequences, 'w')
    #for i in range(len(score)):
    #    subfile.write('%s %.4f\n' % (keyframes[i], score[i]))

    offilename = data_base_dir + 'opticalflowscore.txt'
    if os.path.exists(offilename):
        os.remove(offilename)
    offile = open(offilename, 'w')
    for i in range(len(opticalflowscore)):
        offile.write('%s %.4f\n' % (classifiedgood[i], opticalflowscore[i]))

    print('%d optical flow local minima' % len(localminima))
    print('Selected %d / %d (%.2f%%) frames as keyframes' %
          (final_valid_frames, len(filelist),
           100.0 * final_valid_frames / float(len(filelist))))
    print('Split the sequence into %d sub-sequences' % subid)
Example #25
from numpy import cumsum, random
from scipy.ndimage.filters import (gaussian_filter1d, maximum_filter1d,
                                   median_filter, minimum_filter1d,
                                   uniform_filter1d)
import pylab

if __name__ == "__main__":
    size_of_dataset = 400
    t = cumsum(random.randint(0, 10, size_of_dataset))
    dataset = cumsum(random.normal(0, 1, size_of_dataset))

    pylab.figure(figsize=(8, 8), dpi=80)
    pylab.scatter(x=t, y=dataset, c="black", s=2, label="dataset")

    pylab.plot(t, uniform_filter1d(dataset, size=20), label="Uniform size=20")
    pylab.plot(t,
               gaussian_filter1d(dataset, sigma=20),
               label=r"Gaussian $\sigma$=20")
    pylab.plot(t, minimum_filter1d(dataset, size=20), label="Minimum size=20")
    pylab.plot(t, maximum_filter1d(dataset, size=20), label="Maximum size=20")
    pylab.plot(t, median_filter(dataset, size=20), label="Median size=20")

    pylab.legend(loc='best')
    pylab.xlabel("Time")
    pylab.ylabel("Value")
    pylab.savefig("filter-application.pdf")
    pylab.savefig("filter-application.svg")
Example #26
if os.path.isdir(outputfolder):
    rmtree(outputfolder)
os.mkdir(outputfolder)

prev = rgb2gray(mpimg.imread(folder + filelist[0]))
score = np.empty([len(filelist)])
score[0] = 0
for i in range(1, len(filelist)):
    print('image  %d / %d' % (i, len(filelist)))
    cur = rgb2gray(mpimg.imread(folder + filelist[i]))
    flow = cv2.calcOpticalFlowFarneback(prev, cur, 0.5, 3, 15, 3, 5, 1.2, 0)
    score[i] = np.sum(np.absolute(flow[..., 0])) + np.sum(
        np.absolute(flow[..., 1]))
    prev = cur

#plt.figure()
#plt.plot(score)
#plt.show()

localminima = []
localmin = minimum_filter1d(score, size=3)
for i in range(len(score)):
    if localmin[i] == score[i]:
        print('keyframe  ' + filelist[i])
        #os.system('cp '+ folder + filelist[i] + ' ' + outputfolder + filelist[i])
        localminima.append(filelist[i])

thefile = open('opticalflowlocalminima.txt', 'w')
for i in localminima:
    thefile.write("%s\n" % i)