Example #1
import numpy as np
import numpy.ma as ma
from numpy import array, shape, zeros, float32, logical_not
from numpy.random import randint

def relief(data, y, threshold):
    '''
        Feature selection using the Relief algorithm.

        Parameters
        ----------
            data: numpy.array
            y: 1-dimensional list-like holding the sample labels
            threshold: float, in (0, 1)

        Returns
        -------
            valid_feature: boolean array, True for features whose weight
                reaches the threshold

        Caution: only for binary classification and discrete labels.
    '''

    if isinstance(y, list):
        y = array(y)
    # calculate_distance is an external helper (not shown here) returning the
    # pairwise distance/similarity matrix between samples
    distances = calculate_distance(data, kind='cosine')
    n_sample, n_feature = shape(data)
    weight = zeros(n_feature, dtype=float32)
    diff_nominal = lambda x, y: 0 if x == y else 1
    # per-feature value range, used to normalise numerical differences
    tmp = np.max(data, axis=0) - np.min(data, axis=0)
    tmp = np.where(tmp == 0, 1, tmp)  # avoid division by zero for constant features
    diff_numerical = lambda x, y, id: abs(x - y) / tmp[id]
    n_iter = int(n_sample * 0.8)
    for i in range(n_iter):
        index = randint(n_sample)
        sample = data[index]
        # mask=True entries are excluded: this keeps same-label samples for the
        # hit, different-label samples for the miss, and drops the sample itself
        same_label_mask = y != y[index]
        non_same_label_mask = logical_not(same_label_mask)
        same_label_mask[index] = True
        same_label_data = ma.array(distances[index], mask=same_label_mask)
        non_same_label_data = ma.array(distances[index],
                                       mask=non_same_label_mask)
        hit_id = ma.argmax(same_label_data)
        miss_id = ma.argmax(non_same_label_data)
        # print index, hit_id, miss_id
        for id_attr, (sample_attr, miss_attr, hit_attr) in enumerate(
                zip(sample, data[miss_id], data[hit_id])):
            if isinstance(sample_attr, (str, bool)):
                delta_w = diff_nominal(sample_attr, miss_attr) - diff_nominal(
                    sample_attr, hit_attr)
            else:
                delta_w = diff_numerical(sample_attr, miss_attr,
                                         id_attr) - diff_numerical(
                                             sample_attr, hit_attr, id_attr)

            weight[id_attr] += delta_w
    weight /= n_iter

    # filter the feature of lower weight than threshold
    valid_feature = weight >= threshold
    return valid_feature
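
A minimal usage sketch, assuming a suitable `calculate_distance` helper is in
scope (it is not part of the example above) and a hypothetical toy dataset:

import numpy as np

data = np.array([[0.1, 1.0, 5.0],
                 [0.2, 0.9, 4.8],
                 [0.9, 0.1, 1.0],
                 [0.8, 0.2, 1.2]])
labels = [0, 0, 1, 1]

keep = relief(data, labels, threshold=0.05)
reduced = data[:, keep]  # keep only the features whose weight passed the threshold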
Example #2
    def calculate_confusion_matrix(self, x_test, y_test):
        """Calculate the probabilities required for the confusion matrix and create a dataframe"""
        y_pred = self.model.predict_classes(x_test)
        y_test = argmax(y_test, axis=1)
        con_mat = confusion_matrix(labels=y_test, predictions=y_pred).numpy()
        con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
        classes = self.le.inverse_transform(list(range(0, self.le.encoded_labels.shape[1])))
        return pd.DataFrame(con_mat_norm, index=classes, columns=classes)
Example #3
def calculate_confusion_matrix(model, le, x_test, y_test):
    """Same computation as above, as a free function (assumes numpy's argmax
    and a TensorFlow-style confusion_matrix are imported)."""
    y_pred = model.predict_classes(x_test)
    y_test = argmax(y_test, axis=1)
    con_mat = confusion_matrix(labels=y_test, predictions=y_pred).numpy()
    con_mat_norm = np.around(con_mat.astype('float') /
                             con_mat.sum(axis=1)[:, np.newaxis],
                             decimals=2)
    classes = le.inverse_transform([0, 1, 2, 3, 4])
    return pd.DataFrame(con_mat_norm, index=classes, columns=classes)
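
The row normalisation can be sanity-checked in isolation with plain numpy
(hypothetical counts):

import numpy as np

con_mat = np.array([[8, 2],
                    [1, 9]])
con_mat_norm = np.around(con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis], decimals=2)
# each row now sums to 1.0: [[0.8, 0.2], [0.1, 0.9]]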
Example #4
def where_first(cond, *out, **kwargs):
    """Return values from `out` where `cond` is first true along axis 0."""
    import numpy as np
    import numpy.ma as ma
    last = kwargs.pop('last', False)
    if last:
        ix = cond.shape[0] - ma.argmax(cond[::-1], axis=0) - 1
        mask = Ellipsis, (ix + 1 == cond.shape[0]) & ~cond[-1]
    else:
        ix = ma.argmax(cond, axis=0)
        mask = Ellipsis, (ix == 0) & ~cond[0]
    ix = Ellipsis, ix, np.arange(ix.size)
    if not out:
        return ix, mask
    out = [ma.masked_array(a[ix], **kwargs) for a in out]
    if mask[-1].any():
        for o in out:
            o[mask] = ma.masked
    return out[0] if len(out) == 1 else out
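
A small usage example: `cond` is searched along axis 0, and the matching
entries of `vals` come back as a masked array.

import numpy as np

cond = np.array([[False, True],
                 [True,  False],
                 [True,  False]])
vals = np.arange(6).reshape(3, 2)
picked = where_first(cond, vals)
# the first True is at row 1 for column 0 and row 0 for column 1,
# so picked == masked_array([2, 1])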
Example #6
    def step(self, reward, observation):
        '''
        Given a reward and an observation, compute the next action.

        Arguments:

        reward      (float) The reward received due to the previous action
        observation (int)   The index of the most recent observation [0:num_states]

        Returns:
        next_action (int) The index of the next action to take [0:num_actions]
        '''
        next_state = observation

        if self._dynamic_alpha:
            self._alpha = self.compute_alpha(self._last_state,
                                             self._last_action)
        else:
            self._alpha = self._alpha0

        # get the index to the maximum value in the relevant row
        exploit_action = ma.argmax(self._q_table[next_state, :])

        if self._dynamic_epsilon:
            self._do_exploit, explore_action = self.two_state_decaying_eps_greedy_exploration(
                next_state, exploit_action)
        else:
            self._do_exploit, explore_action = self.constant_eps_greedy_exploration(
                next_state, exploit_action)

        if self._do_exploit or self._exploringFrozen:
            # use this max value as the action if exploiting
            next_action = exploit_action
        else:
            next_action = explore_action

        qt = self._q_table[self._last_state, self._last_action]
        qt1 = self._q_table[next_state, next_action]

        # SARSA-style update: uses the Q-value of the action actually selected
        qt_next = (1 - self._alpha) * qt + self._alpha * (reward +
                                                          self._gamma * qt1)

        if not self._policyFrozen:
            self._q_table[self._last_state, self._last_action] = qt_next

        # store off state for the next iteration
        self._last_state = next_state
        self._last_action = next_action

        # increment state visitation table
        self.update_visitation_table(next_state, next_action)
        self._epoch_num += 1

        return next_action
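
The core of the update in isolation, as a sketch with a hypothetical toy
Q-table (the class's exploration and bookkeeping helpers are omitted):

import numpy as np
import numpy.ma as ma

q_table = np.zeros((5, 3))  # 5 states, 3 actions
alpha, gamma = 0.1, 0.9
last_state, last_action = 0, 1
reward, next_state = 1.0, 2

exploit_action = ma.argmax(q_table[next_state, :])  # greedy action index
qt = q_table[last_state, last_action]
qt1 = q_table[next_state, exploit_action]
q_table[last_state, last_action] = (1 - alpha) * qt + alpha * (reward + gamma * qt1)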
Example #8
def predict(NN, image_file):
    '''
    Given an image filename, load the image and return the predicted steering
    angle in degrees.
    '''
    im = cv2.imread(image_file, 0)  # load as grayscale
    # proportion_x / proportion_y are module-level scale factors defined elsewhere
    im = cv2.resize(im, (0, 0), fx=proportion_x, fy=proportion_y)
    im = im / 255  # normalise pixel values to [0, 1]
    training_X = im.flatten()
    yHat = NN.forward(training_X)
    argmx = argmax(yHat)
    angle = np.interp(argmx, [0, 64], [-171, 30])
    return angle
Example #9
import numpy as np
import numpy.ma as ma

def make_solution(case):
    N, K = case
    increasing = np.array(list(range(N)))
    decreasing = np.array(increasing[::-1])

    # status variables
    free = np.ones(N, dtype=bool)
    left = np.zeros(N, dtype=int)
    right = np.zeros(N, dtype=int)

    # init with initial distances
    left[0:N] = increasing
    right[0:N] = decreasing

    for k in range(K):
        # print("customer {}".format(k))
        # mask with current occupancy status
        left = ma.array(left, mask=~free)
        right = ma.array(right, mask=~free)
        #print("left",left)
        #print("right", right)

        mins = ma.minimum(left, right)
        maxs = ma.maximum(left, right)
        #print("mins", mins)
        #print("maxs", maxs)

        # candidates are all stalls where mins are maximal
        max_mins = mins.max()
        candidates = ma.where(mins == max_mins)[0]
        #print(max_mins, candidates)

        # from those candidates select the one where max is also maximal
        selected = ma.argmax(maxs[candidates])
        selected = candidates[selected]
        #print(selected)

        # occupy stall and update left and right
        free[selected] = False
        if selected > 0:
            # left of selected, right distance has to be updated
            right[0:selected] = ma.minimum(decreasing[-selected:], right[0:selected])
        if selected < N - 1:
            # right of selected, left distance has to be updated
            left[selected + 1:] = ma.minimum(increasing[:N - selected - 1], left[selected + 1:])

        if k == K - 1:
            return maxs[selected], mins[selected]
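
A quick check, assuming this is the classic "Bathroom Stalls" task (N stalls,
K customers; returns the max/min free-neighbour distances of the last pick):

print(make_solution((4, 2)))  # expected (1, 0)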
Example #10
    def find_new_starting_value(self):
        """
        Function to mask values at the beginning of the array if they have greater than
        max_masked_neighbors number of masked values after them.

        Parameters
        ----------
        self.masked_neighbors : integer
            Maximum number of masked values falling after a non-masked value.
        """
        mask = ~self.xs.mask
        shift_coeffs = [-x for x in range(1, self.masked_neighbors + 1)]
        for shift in shift_coeffs:
            x_shifted = np.roll(self.xs, shift=shift, axis=1)
            mask *= ~x_shifted.mask
        start_idx = ma.argmax(mask, axis=1) # Get first True or masked value for new starting year
        for i, s_idx in enumerate(start_idx):
            self.xs.mask[i, :s_idx] = True # Mask all values before starting index
        return start_idx
Example #11
import numpy as np
import numpy.ma as ma

def _absolute_argmax(function, *, mask):
    """
    Computes the absolute maximum of a discretized function.

    Some values of the function may be masked in order not to consider them
    as maximum.

    Parameters:
        function (numpy array): Discretized function.
        mask (numpy boolean array): Masked values.

    Returns:
        tuple of int: index of the absolute maximum, one coordinate per axis
        (as returned by numpy.unravel_index).

    """
    masked_function = ma.array(function, mask=mask)

    t_max = ma.argmax(masked_function)

    t_max = np.unravel_index(t_max, function.shape)

    return t_max
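
A small usage sketch (reusing the imports above):

f = np.array([[1.0, 9.0],
              [7.0, 3.0]])
m = np.array([[False, True],
              [False, False]])  # mask out the 9.0
print(_absolute_argmax(f, mask=m))  # (1, 0): the largest unmasked value is 7.0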
Example #12
import numpy as np
import numpy.ma as ma

def maxcorr(x, y, **options):
    """
    (rmax,lag,ind) = maxcorr(x,y,**'maxlag'=int(len(x)/4)):
    Calculate the maximum lagged correlation between two 1D arrays
    Inputs:
    x,y are 1D arrays
    Options
    'maxlag' the maximum number of lagged correlations to calculate (default: 1/4 of array length)
    Output:
    r is the correlation coefficient with the maximum absolute value
    lag is the lag of the maximum correlation (positive: y lags x)
    """

    nrows = len(x)
    maxlag = int(np.floor(nrows / 4))
    if ('maxlag' in options):
        maxlag = options['maxlag']

    # use masked arrays (mask NaNs)
    x = ma.masked_invalid(x)
    y = ma.masked_invalid(y)

    lags = np.arange(-maxlag, maxlag + 1)
    rs = np.zeros(np.shape(lags))
    for ni, lag in enumerate(lags):
        if lag < 0:
            rs[ni] = ma.corrcoef(x[-lag:], y[:lag])[0, 1]
        elif lag > 0:
            rs[ni] = ma.corrcoef(x[:-lag], y[lag:])[0, 1]
        else:
            rs[ni] = ma.corrcoef(x, y)[0, 1]

    ind = ma.argmax(np.abs(rs))
    rmax = rs[ind]
    lag = lags[ind]

    return (rmax, lag, ind)
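
A quick sanity check: a pure shift should be recovered as the best lag.

import numpy as np

t = np.linspace(0, 20, 200)
x = np.sin(t)
y = np.roll(x, 5)  # y lags x by 5 samples
rmax, lag, ind = maxcorr(x, y, maxlag=20)
print(lag)  # expected: 5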
Example #13
def estimate_light(image, dark_channel):
    # rgb2gray is skimage.color.rgb2gray; haze_mask is an external helper
    Y = rgb2gray(image)
    # the mask must be passed by keyword; passed positionally it would be
    # interpreted as the dtype argument of ma.array
    masked = ma.array(Y, mask=haze_mask(dark_channel))
    (h, w) = np.unravel_index(ma.argmax(masked), Y.shape)
    light = image[h, w, :]
    return light
Example #14
def measure(mode, x, y, x0, x1, thresh=0):
    """ return a measure of y in the window x0 to x1
    """
    xt = x.view(numpy.ndarray) # strip Metaarray stuff -much faster!
    v = y.view(numpy.ndarray)
    
    xm = ma.masked_outside(xt, x0, x1).T
    ym = ma.array(v, mask = ma.getmask(xm))
    if mode == 'mean':
        r1 = ma.mean(ym)
        r2 = ma.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p': # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std': # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var': # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum': # cumulative sum
        r1 = ma.cumsum(ym) # Note: returns an array
        r2 = 0
    if mode == 'anom': # anomalies = difference from average
        r1 = ma.anom(ym) # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym)/(ma.max(xm)-ma.min(xm))
        r2 = 0
    if mode == 'latency': # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        # disabled: the code below is unreachable and references names
        # (self, i, j, thisaxis) that are not defined in this scope
        return (0, 0)
        slope = numpy.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win)/20) # look over small ranges
        for k in win: # move through the slope measurementwindow
            tb = range(k-st, k+st) # get tb array
            newa = numpy.array(self.dat[i][j, thisaxis, tb])
            ppars = numpy.polyfit(x[tb], ym[tb], 1) # do a linear fit - smooths the slope measures
            slope = numpy.append(slope, ppars[0]) # keep track of max slope
        r1 = numpy.amax(slope)
        r2 = numpy.argmax(slope)
    return(r1, r2)
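
A usage sketch with plain ndarrays (the .view(numpy.ndarray) calls are then
no-ops):

import numpy
import numpy.ma as ma

x = numpy.linspace(0, 10, 101)
y = numpy.sin(x)
peak, t_peak = measure('max', x, y, 2.0, 8.0)  # max of y in [2, 8] and its x-position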
Example #15
    def predict(self, X):
        # pick, for each sample, the model with the highest log-probability
        log_probs = array([m.log_prob(X) for m in self.models]).T

        return argmax(log_probs, axis=1)
Example #16
    def step(self, reward, observation):
        '''
        Given a reward and an observation, compute the next action.

        Arguments:

        reward      (float) The reward received due to the previous action
        observation (int)   The index of the most recent observation [0:num_states]

        Returns:
        next_action (int) The index of the next action to take [0:num_actions]
        '''
        next_state = observation
        
        if self._dynamic_alpha:
            self._alpha = self.compute_alpha(self._last_state, self._last_action)
        else:
            self._alpha = self._alpha0
            
        qt = self._q_table[self._last_state, self._last_action]
        max_qt1 = ma.max(self._q_table[next_state, :])
        
        # Q-learning update: bootstrap from the best next-state action value
        qt_next = (1-self._alpha)*qt + self._alpha*(reward + self._gamma*max_qt1)
        
        if not self._policyFrozen:
            self._q_table[self._last_state, self._last_action] = qt_next

        # update the reward history for the last state/action pair
        self.update_reward_history(self._last_state, self._last_action, reward)
        
        # get the index to the maximum value in the relevant row
        exploit_action = ma.argmax(self._q_table[next_state, :])

        if self._dynamic_epsilon:
            self._do_exploit, explore_action = self.two_state_decaying_eps_greedy_exploration(next_state, exploit_action)
        else:
            self._do_exploit, explore_action = self.constant_eps_greedy_exploration(next_state, exploit_action)
        if self._do_exploit or self._exploringFrozen:
            # use this max value as the action if exploiting
            next_action = exploit_action
            
            # if we're on policy check if there's a change
            
            old_median, new_median = self.get_reward_history_medians(self._last_state, 
                                                                    self._last_action)
           
            # store median vals for debug logging
            self._old_reward_median = old_median
            self._new_reward_median = new_median
           
            self._change_detected = not np.isnan(old_median) and not np.isnan(new_median) and old_median != new_median
            # if there's a change, reset the change detection
            if self._change_detected:
                self.reset_reward_history_state(self._last_state)
            
                # if we're using the change detection to control exploration renewal, 
                # reset the visitation table
                if self._use_change_detection:
                    self.reset_visitation_table_state(self._last_state)
                
        else:
            next_action = explore_action
            
        # store off state for the next iteration
        self._last_state = next_state
        self._last_action = next_action
        
        # increment state visitation table
        self.update_visitation_table(next_state, next_action)
        self._epoch_num += 1

        return next_action
Example #17
def famed(woa_oxyg_dh,
          woa_oxyg_dh_m,
          temp_cl,
          temp_cl_m,
          depth,
          woa_rhom=None):

    import numpy as np
    from numpy import ma

    # Main FAME computation
    # (delta_c_eq, delta_c_mar and find_closest are helper functions defined
    #  elsewhere in the FAME codebase)
    # =========================

    #	INPUTS FOR THE FAME CALCULATIONS
    #		woa_oxyg_dh   == d18sw       in seawater <time mean> shape is [102, 180, 360]    e.g. depth, lat, lon
    #		woa_oxyg_dh_m == d18sw       in seawater monthly     shape is [12,57, 180, 360]  e.g. depth, lat, lon
    #		temp_cl       == temperature in seawater <time mean> shape is [1, 102, 180, 360] e.g. time (degenerate), depth, lat, lon
    #		temp_cl_m     == temperature in seawater monthly     shape is [12, 57, 180, 360] e.g. time             , depth, lat, lon
    #		depth         == depth of the levels in meters assumed positive here

    # FAME is coded in Kelvins ...
    temp_kl = temp_cl + 273.15
    temp_kl_m = temp_cl_m + 273.15

    # Computation of equilibrium calcite from WOA fields ...
    delt_dh_init = delta_c_eq(temp_cl[...], woa_oxyg_dh)
    delt_dh_init_m = delta_c_eq(temp_cl_m, woa_oxyg_dh_m)

    # Added the computation of the equation of Marchitto everywhere, no weighting.

    # Probably useless, only written for compatibility:
    #~ d18Oca_mar   =  ma.mean(delta_c_mar(temp_cl  ,woa_oxyg_dh),axis=0)

    #  Actual calculation here
    d18Oca_mar_m = ma.mean(delta_c_mar(temp_cl_m, woa_oxyg_dh_m), axis=0)

    if not (woa_rhom is None):
        # [DENSITY] -- addition of density from WOA
        rho_m = woa_rhom
    #endif

    # i.e. auld method, d18Oca averaged over 50 meters ... == "Lukas Jonkers" methodology
    depth_50m = find_closest(depth, 50.0)
    depth_00m = find_closest(depth, 0.0)

    indx_less_50m = depth <= 50.0

    #~ if depth_50m == depth_00m : depth_50m += 1

    # NOTA: all *_lj variables have a dimension without time and depth
    #       e.g. [180,360], lat, lon

    d18Osw_lj = ma.mean(woa_oxyg_dh[indx_less_50m, ...], axis=0)
    tempcl_lj = ma.mean(temp_cl[indx_less_50m, ...], axis=0)
    d18Oca_lj = delta_c_eq(tempcl_lj, d18Osw_lj)

    d18Osw_ol = woa_oxyg_dh[depth_00m, ...]
    tempcl_ol = temp_cl[depth_00m, ...]
    d18Oca_ol = delta_c_eq(tempcl_ol, d18Osw_ol)

    if not (woa_rhom is None):
        # [DENSITY] -- addition of density from WOA
        rhom_ol = ma.mean(woa_rhom[:, depth_00m, ...], axis=0)
    #endif

    import forams_prod_l09 as fpl

    # Maximum shape of result: nb_forams, lat, lon
    max_shape_final = (len(fpl.l09_cnsts_dic), ) + d18Osw_ol.shape

    # Create a placeholder for the foram result
    delt_forams = ma.zeros(max_shape_final, np.float32)

    if not (woa_rhom is None):
        # [DENSITY] -- addition of density from WOA
        rhom_forams = ma.zeros(max_shape_final, np.float32)
    #endif

    for foram_specie in fpl.l09_cnsts_dic:

        # Rate of growth from the Lombard et al., 2009 methodology
        foram_growth = fpl.growth_rate_l09_array(foram_specie, temp_kl[...])
        foram_growth_m = fpl.growth_rate_l09_array(foram_specie, temp_kl_m)

        # Get the depth of the STD living foram in FAME
        f_dept = fpl.get_living_depth(foram_specie)

        # Find this depth as an index in the array water column
        indx_dfm = find_closest(depth, abs(float(f_dept[0])))
        #~ if indx_dfm == depth_00m : indx_dfm += 1

        indx_less_dfm = depth <= np.abs(float(f_dept[0]))

        # Shrink the FAME arrays to the foram living depth
        foram_growth = foram_growth[indx_less_dfm,
                                    ...]  # shape is depth, lat, lon
        foram_growth_m = foram_growth_m[:, indx_less_dfm,
                                        ...]  # shape is time, depth, lat, lon

        # Do the same for the equilibrium calcite from WOA
        delt_dh = delt_dh_init[indx_less_dfm, ...]  # idem
        delt_dh_m = delt_dh_init_m[:, indx_less_dfm, ...]  # idem

        # [DENSITY] -- addition of density from WOA
        if not (woa_rhom is None):
            rho_m_specie = rho_m[:, indx_less_dfm, ...]  # idem
        #endif

        # Get the location where there is SOME growth, based on a certain epsilon
        epsilon_growth = 0.1 * fpl.l09_maxgrowth_dic[foram_specie][0]  # or 0.032

        # Mask out the regions where the foram_growth is less than the epsilon
        masked_f_growth = ma.masked_less_equal(foram_growth, epsilon_growth)

        #~ nb_points_growth = (masked_f_growth * 0.0 + 1.0).filled(0.0)
        #~ if monthly is True:
        #~ nb_points_growth_m = (masked_f_growth_m * 0.0 + 1.0).filled(0.0)

        #~ nb_points_growth = ma.where(foram_growth > epsilon_growth,1,0) # 0.000001
        #~ if monthly is True:
        #~ nb_points_growth_m = ma.where(foram_growth_m > epsilon_growth,1,0) # 0.000001

        # Now sum the growth over the depth ...
        f_growth = ma.sum(masked_f_growth, axis=0)
        #~ n_growth = ma.where(ma.sum(nb_points_growth,axis=0)>0,1,0) # axis 0 = depth

        masked_f_growth_m = ma.masked_less_equal(foram_growth_m,
                                                 epsilon_growth)
        f_growth_m = ma.sum(ma.sum(masked_f_growth_m, axis=1), axis=0)
        location_max_foramprod = ma.argmax(masked_f_growth_m, axis=1)
        location_max_foramprod = ma.masked_array(
            location_max_foramprod,
            mask=masked_f_growth_m[:, :, ...].mask.all(axis=1))
        # location_max_foramprod = ma.masked_array(location_max_foramprod,mask=masked_f_growth_m[:,0,...].mask)

        # Computing the weighted sum for d18Ocalcite using growth over depth
        delt_fp = ma.sum(delt_dh * masked_f_growth, axis=0)
        delt_fp_m = ma.sum(ma.sum(delt_dh_m * masked_f_growth_m, axis=1),
                           axis=0)

        # [DENSITY] -- addition of density from WOA
        if not (woa_rhom is None):
            rho_fp_m = ma.sum(ma.sum(rho_m_specie * masked_f_growth_m, axis=1),
                              axis=0)
        #endif

        # Mask out the points where no growth occur at all, in order to avoid NaNs ...
        delt_fp = delt_fp / ma.masked_less_equal(f_growth, 0.0)
        delt_fp_m = delt_fp_m / ma.masked_less_equal(f_growth_m, 0.0)
        if not (woa_rhom is None):
            # [DENSITY] -- addition of density from WOA
            rho_fp_m = rho_fp_m / ma.masked_less_equal(f_growth_m, 0.0)
        #endif

        # Result of FAME
        Z_om_fm = delt_fp
        Z_om_fm_m = ma.masked_array(delt_fp_m,
                                    mask=ma.max(location_max_foramprod[:, ...],
                                                axis=0).mask)

        # [DENSITY] -- addition of density from WOA
        if not (woa_rhom is None):
            Z_om_rho_m = ma.masked_array(rho_fp_m,
                                         mask=ma.max(
                                             location_max_foramprod[:, ...],
                                             axis=0).mask)
        #endif

        if foram_specie == "pachy_s":
            Z_om_fm = Z_om_fm + 0.1  # in per mil
            Z_om_fm_m = Z_om_fm_m + 0.1  # in per mil

        index_for = list(fpl.l09_cnsts_dic.keys()).index(foram_specie)
        delt_forams[index_for, ...] = Z_om_fm_m

        # [DENSITY] -- addition of density from WOA
        if not (woa_rhom is None):
            rhom_forams[index_for, ...] = Z_om_rho_m
        #endif

    #endfor on foram_specie

    # For comparison with Lukas Jonkers: old method on first 50 meters
    Z_om_lj = d18Oca_lj

    # For comparison with previous figures: old method on first 00 meters
    Z_om_ol = d18Oca_ol

    # [DENSITY] -- addition of density from WOA
    if not (woa_rhom is None):
        print("Fame is used with density ...")
        return delt_forams, Z_om_ol, d18Oca_mar_m, rhom_forams, rhom_ol
    else:
        print("Fame is used without density ...")
        return delt_forams, Z_om_ol, d18Oca_mar_m
Example #19
def measure(mode, x, y, x0, x1, thresh=0):
    """ return the a measure of y in the window x0 to x1
    """
    xm = ma.masked_outside(x, x0, x1)  # .compressed()
    ym = ma.array(y, mask=ma.getmask(xm))  # .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]

        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]

    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p':  # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std':  # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var':  # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum':  # cumulative sum
        r1 = ma.cumsum(ym)  # Note: returns an array
        r2 = 0
    if mode == 'anom':  # anomalies = difference from average
        r1 = ma.anom(ym)  # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym) / (ma.max(xm) - ma.min(xm))
        r2 = 0
    if mode == 'latency':  # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090':  #measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1 * r1
        y90 = 0.9 * r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]

    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        # disabled: the code below is unreachable and references names
        # (self, i, j, thisaxis) that are not defined in this scope
        return (0, 0)
        slope = np.array([])
        win = ma.flatnotmasked_contiguous(ym)
        st = int(len(win) / 20)  # look over small ranges
        for k in win:  # move through the slope measurementwindow
            tb = range(k - st, k + st)  # get tb array
            newa = np.array(self.dat[i][j, thisaxis, tb])
            ppars = np.polyfit(
                x[tb], ym[tb],
                1)  # do a linear fit - smooths the slope measures
            slope = np.append(slope, ppars[0])  # keep track of max slope
        r1 = np.amax(slope)
        r2 = np.argmax(slope)
    return (r1, r2)
Example #20
def do_reco(base_array, wvfm_array, reco_array):
    '''
        Run a simple prompt/delayed reconstruction and fill the reco_array.
        (`intersection` is an external helper; per the comments below it
        linearly projects the threshold-crossing time between two samples.)
    '''
    # run simplistic prompt/delayed reconstruction
    wvfm_shape = wvfm_array['samples'][0, :, :, :].shape

    # find primary rising edge
    diff = np.diff(wvfm_array['samples'].reshape(wvfm_shape),
                   axis=-1)  # Nserial, Nchan, Nsamples-1
    prompt_edge = (np.argmax(diff, axis=-1)).reshape(
        wvfm_shape[:-1] + (1, ))  # Nserial, Nchan, 1

    # find pedestal (from data prior to rising edge)
    sample_idx = np.arange(wvfm_array['samples'].shape[-1]).reshape(
        (1, 1) + wvfm_shape[-1:])  # 1, 1, Nsamples
    prompt_mask = sample_idx > prompt_edge  # Nserial, Nchan, Nsamples
    masked_wvfm = ma.array(wvfm_array['samples'].reshape(wvfm_shape),
                           mask=prompt_mask)  # Nserial, Nchan, Nsamples
    ped = ma.median(masked_wvfm, axis=-1, keepdims=True)  # Nserial, Nchan, 1
    ped_mae = ma.median(np.abs(masked_wvfm - ped), axis=-1,
                        keepdims=True)  # Nserial, Nchan, 1
    diff_mae = ma.median(np.abs(ma.array(diff, mask=prompt_mask[:, :, 1:])),
                         axis=-1,
                         keepdims=True)  # Nserial, Nchan, 1

    # find prompt time using linear projection
    y1 = np.take_along_axis(wvfm_array['samples'].reshape(wvfm_shape),
                            prompt_edge,
                            axis=-1)  # Nserial, Nchan, 1
    y2 = np.take_along_axis(wvfm_array['samples'].reshape(wvfm_shape),
                            prompt_edge + 1,
                            axis=-1)  # Nserial, Nchan, 1
    prompt_time = intersection(ped, prompt_edge, prompt_edge + 1, y1,
                               y2)  # Nserial, Nchan, 1

    # find delayed signal
    falling_edge = np.argmax(prompt_mask[:, :, 1:] & (diff < 0),
                             axis=-1).reshape(wvfm_shape[:-1] +
                                              (1, ))  # Nserial, Nchan, 1
    delayed_mask = sample_idx[:, :, :
                              -1] < falling_edge  # Nserial, Nchan, Nsamples-1
    masked_diff = ma.array(diff,
                           mask=delayed_mask)  # Nserial, Nchan, Nsamples-1
    delayed_edge = (ma.argmax(masked_diff, axis=-1)).reshape(
        wvfm_shape[:-1] + (1, ))  # Nserial, Nchan, 1
    delayed_mask = sample_idx > delayed_edge  # Nserial, Nchan, Nsamples

    # find delayed time using linear projection - nix'd for the moment
    #y1 = np.take_along_axis(wvfm_array['samples'].reshape(wvfm_shape), delayed_edge, axis=-1) # Nserial, Nchan, 1
    #y2 = np.take_along_axis(wvfm_array['samples'].reshape(wvfm_shape), delayed_edge+1, axis=-1) # Nserial, Nchan, 1
    delayed_time = delayed_edge  #intersection(ped, delayed_edge, delayed_edge+1, y1, y2)

    # calculate signal amplitude (gated integral)
    masked_wvfm.mask = ~(prompt_mask &
                         (sample_idx <
                          (prompt_time + 20)))  # Nserial, Nchan, Nsamples
    prompt_sig = ma.sum(masked_wvfm - ped, axis=-1,
                        keepdims=True)  # Nserial, Nchan, 1
    # calculate delayed amplitude (also gated 20, but uses just prior sample as pedestal)
    masked_wvfm.mask = ~(delayed_mask & (sample_idx < delayed_edge + 20)
                         )  # Nserial, Nchan, Nsamples
    delayed_ped = np.take_along_axis(wvfm_array['samples'].reshape(wvfm_shape),
                                     delayed_edge - 1,
                                     axis=-1)  # Nserial, Nchan, 1
    delayed_sig = ma.sum(masked_wvfm - delayed_ped, axis=-1,
                         keepdims=True)  # Nserial, Nchan, 1

    reco_array['ped'][0] = ped[:, :, 0]
    reco_array['ped_mae'][0] = ped_mae[:, :, 0]
    reco_array['diff_mae'][0] = diff_mae[:, :, 0]
    reco_array['integral'][0] = np.sum(
        wvfm_array['samples'].reshape(wvfm_shape) - ped, axis=-1)
    reco_array['prompt_diff'][0] = np.max(diff, axis=-1)
    reco_array['prompt_t'][0] = prompt_time[:, :, 0]
    reco_array['prompt_sig'][0] = prompt_sig[:, :, 0]
    reco_array['delayed_diff'][0] = ma.max(masked_diff, axis=-1)
    reco_array['delayed_t'][0] = delayed_time[:, :, 0]
    reco_array['delayed_sig'][0] = delayed_sig[:, :, 0]
    reco_array['delayed_ped'][0] = delayed_ped[:, :, 0]
Example #21
    # fragment of a larger script: thecoeff_array, rowCount, columnCount and
    # the various the*list inputs are defined earlier in the original code
    print("creating weight array")
    for r in range(0, rowCount):
        for c in range(0, columnCount):
            var_array = ma.empty([6, 33])
            #print var_array
            for q in range(0, 33):
                #print q
                var_array[0, q] = theSPI6list[q][r, c]
                var_array[1, q] = theSPI24list[q][r, c]
                var_array[2, q] = theSMlist[q][r, c]
                var_array[3, q] = theSPI60list[q][r, c]
                var_array[4, q] = theSPI12list[q][r, c]
                var_array[5, q] = thePHDIlist[q][r, c]
            # principal component: eigenvector belonging to the largest eigenvalue
            the_eig = np.linalg.eig(ma.corrcoef(var_array))
            thecoeffs = the_eig[1][:, ma.argmax(the_eig[0])]
            thecoeff_array[r, c, :] = thecoeffs

    theSPI6co = thecoeff_array[:, :, 0].filled(-9999)
    theoutSPI6raster = ap.NumPyArrayToRaster(theSPI6co,
                                             lower_left_corner=arcpy.Point(
                                                 theXMin, theYMin),
                                             x_cell_size=theCellSize,
                                             y_cell_size=theCellSize,
                                             value_to_nodata=-9999)
    theyear = themdlist[0][4:6]
    #print theyear
    theoutSPI6raster.save(outDir + theyear + "SPI6weights.tif")
    arcpy.DefineProjection_management(outDir + theyear + "SPI6weights.tif", sr)

    theSPI24co = thecoeff_array[:, :, 1].filled(-9999)
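
The eigenvector selection can be illustrated in isolation (toy data; eigh
would also work here since a correlation matrix is symmetric):

import numpy as np
import numpy.ma as ma

corr = np.corrcoef(np.random.rand(6, 33))  # 6 variables, 33 observations
eigvals, eigvecs = np.linalg.eig(corr)
dominant = eigvecs[:, ma.argmax(eigvals)]  # loadings of the largest eigenvalue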
Example #23
def measure(mode, x, y, x0, x1, thresh=0, slopewin=1.0):
    """ return the a measure of y in the window x0 to x1
    """
    xm = ma.masked_outside(x, x0, x1)# .compressed()
    ym = ma.array(y, mask = ma.getmask(xm))# .compressed()
    if mode == 'mean':
        r1 = np.mean(ym)
        r2 = np.std(ym)
    if mode == 'max' or mode == 'maximum':
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
    if mode == 'min' or mode == 'minimum':
        r1 = ma.min(ym)
        r2 = xm[ma.argmin(ym)]
    if mode == 'minormax':
        r1p = ma.max(ym)
        r1n = ma.min(ym)
        if ma.abs(r1p) > ma.abs(r1n):
            r1 = r1p
            r2 = xm[ma.argmax(ym)]

        else:
            r1 = r1n
            r2 = xm[ma.argmin(ym)]

    if mode == 'median':
        r1 = ma.median(ym)
        r2 = 0
    if mode == 'p2p': # peak to peak
        r1 = ma.ptp(ym)
        r2 = 0
    if mode == 'std': # standard deviation
        r1 = ma.std(ym)
        r2 = 0
    if mode == 'var': # variance
        r1 = ma.var(ym)
        r2 = 0
    if mode == 'cumsum': # cumulative sum
        r1 = ma.cumsum(ym) # Note: returns an array
        r2 = 0
    if mode == 'anom': # anomalies = difference from average
        r1 = ma.anom(ym) # returns an array
        r2 = 0
    if mode == 'sum':
        r1 = ma.sum(ym)
        r2 = 0
    if mode == 'area' or mode == 'charge':
        r1 = ma.sum(ym)/(ma.max(xm)-ma.min(xm))
        r2 = 0
    if mode == 'latency': # return first point that is > threshold
        sm = ma.nonzero(ym > thresh)
        r1 = -1  # use this to indicate no event detected
        r2 = 0
        if ma.count(sm) > 0:
            r1 = sm[0][0]
            r2 = len(sm[0])
    if mode == '1090': #measure 10-90% time, also returns max
        r1 = ma.max(ym)
        r2 = xm[ma.argmax(ym)]
        y10 = 0.1*r1
        y90 = 0.9*r1
        sm1 = ma.nonzero(ym >= y10)
        sm9 = ma.nonzero(ym >= y90)
        r1 = xm[sm9] - xm[sm1]

    if mode == 'count':
        r1 = ma.count(ym)
        r2 = 0
    if mode == 'maxslope':
        slope = []
        win = ma.flatnotmasked_contiguous(ym)
        dt = x[1] - x[0]
        st = int(slopewin / dt)  # use a slopewin-duration window for the fit
        for k, w in enumerate(win):  # move through the slope measurement window
            tb = range(k - st, k + st)  # get tb array
            ppars = np.polyfit(x[tb], ym[tb], 1)  # linear fit smooths the slope measures
            slope.append(ppars[0])  # keep track of max slope
        r1 = np.max(slope)
        r2 = np.argmax(slope)
    return(r1, r2)
Example #24
    def fit(self, X, y, from_pickle=False):
        if scipy.sparse.issparse(X):
            logging.info('Converting to dense matrix.')
            X = np.array(X.todense())

        if self.estimators_generator is None:
            self.estimators_generator = StumpsClassifiersGenerator(
                n_stumps_per_attribute=10, self_complemented=True)

        # Hack CFS
        if from_pickle:
            self.estimators_generator.estimators_ = np.load(
                open(pickle_path + "estimators.pck", 'rb'))
        else:
            self.estimators_generator.fit(X,
                                          y,
                                          classes_weights=self.classes_weights)

        classification_matrix = self._binary_classification_matrix(X)

        self.chosen_columns_ = []
        self.infos_per_iteration_ = defaultdict(list)

        m, n = classification_matrix.shape
        self.n_total_hypotheses_ = n

        y_kernel_matrix = np.multiply(y.reshape((len(y), 1)),
                                      classification_matrix)

        # Hack CFS (2eme version)
        if self.classes_weights is not None:
            y_kernel_matrix = np.multiply(y_kernel_matrix.T,
                                          self.classes_weights).T

        # Initialization
        alpha = self._initialize_alphas(m)
        w = None
        self.collected_weight_vectors_ = {}
        self.collected_dual_constraint_violations_ = {}

        for k in range(
                min(
                    n, self.n_max_iterations
                    if self.n_max_iterations is not None else np.inf)):

            # Find worst weak hypothesis given alpha.
            h_values = ma.array(np.squeeze(
                np.array(alpha.T.dot(y_kernel_matrix).T)),
                                fill_value=-np.inf)
            h_values[self.chosen_columns_] = ma.masked
            worst_h_index = ma.argmax(h_values)
            logging.info("Adding voter {} to the columns, value = {}".format(
                worst_h_index, h_values[worst_h_index]))

            # Check for optimal solution. We ensure at least one complete iteration is done as the initialization
            # values might provide a degenerate initial solution.
            if h_values[
                    worst_h_index] <= self.dual_constraint_rhs + self.epsilon and len(
                        self.chosen_columns_) > 0:
                break

            # Append the weak hypothesis.
            self.chosen_columns_.append(worst_h_index)

            # Solve restricted master for new costs.
            w, alpha = self._restricted_master_problem(
                y_kernel_matrix[:, self.chosen_columns_],
                previous_w=w,
                previous_alpha=alpha)

            # We collect iteration information for later evaluation.
            if self.save_iteration_as_hyperparameter_each is not None:
                if (k + 1) % self.save_iteration_as_hyperparameter_each == 0:
                    self.collected_weight_vectors_[k] = deepcopy(w)
                    self.collected_dual_constraint_violations_[
                        k] = h_values[worst_h_index] - self.dual_constraint_rhs

        self.weights_ = w
        self.estimators_generator.estimators_ = self.estimators_generator.estimators_[
            self.chosen_columns_]

        self.learner_info_ = {}
        self.learner_info_.update(n_nonzero_weights=np.sum(
            np.asarray(self.weights_) > 1e-12))
        self.learner_info_.update(
            n_generated_columns=len(self.chosen_columns_))

        return self