Example no. 1
import csv
import numpy
from datetime import datetime as dt
import utilities  # local helper module providing smooth()

def getcatch(filen,dp,wvar,numhauls):
  #where "dp" is the site code
  #where "wvar" can be "kept", "total", "shorts", or "eggers"
  #returns haul,hauls,datetc where "hauls" is the smoothed version and "datetc" holds the datetimes
  catch=csv.reader(open(filen,'r'), delimiter=',', quotechar='|',skipinitialspace=True)
  haul,datetc=[],[]
  for row in catch:
      if len(row) and row[2]==dp: # check the length before indexing the row
        if wvar=='kept':
            haul.append(float(row[11])/float(row[10]))# normalizes by number of traps per trawl (comma separated data)
        elif wvar=='total': # as needed in Bill Doherty's case, for example
            if  row[12]=='':
                row[12]=0
            if  row[13]=='':
                row[13]=0
            haul.append((float(row[11])+float(row[12])+float(row[13]))/float(row[10]))# normalizes by number of traps per trawl (comma separated data)
        dd=row[5] #datetime 
        datetc.append(dt(int(dd[0:4]),int(dd[5:7]),int(dd[8:10]),int(dd[11:13]),int(dd[14:16]),0))
  #sort this according to time?
  haul=numpy.array(haul).transpose()  
  if numhauls>2:
    haul_smooth=utilities.smooth(haul,numhauls,'hanning')
    difflen=len(haul_smooth)-len(haul)
    hauls=haul_smooth[difflen//2:-difflen//2] # integer division; trims the padding added by smooth()
  else:
    hauls=haul  
  return haul,hauls,datetc
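# The centered trim above implies that utilities.smooth() returns an array
# window_len-1 samples longer than its input, as in the well-known SciPy
# Cookbook smoother that reflects the signal at both ends before convolving.
# A minimal sketch under that assumption (the real utilities.smooth may differ):
import numpy

def smooth(x, window_len=11, window='hanning'):
    # reflect the signal at both ends so the convolution does not shrink it;
    # the output length is len(x) + window_len - 1
    s = numpy.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]
    if window == 'flat':  # plain moving average
        w = numpy.ones(window_len)
    else:  # 'hanning', 'hamming', 'bartlett', or 'blackman'
        w = getattr(numpy, window)(window_len)
    return numpy.convolve(w / w.sum(), s, mode='valid')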
 def find_fault_time(self, window1=1, window2=4):
     # returns the first time (in seconds) at which the smoothed fault
     # indicator switches from all-zero over the preceding window1 seconds
     # to all-nonzero over the next window2 seconds; returns 0 if none found
     window_len1 = int(window1/self.hsw.step_len)
     window_len2 = int(window2/self.hsw.step_len)
     para_fault_id = np.array(self.para_fault_id)
     para_fault_id = smooth(para_fault_id, 100)
     for i in range(window_len1, len(para_fault_id)):
         if (np.array(para_fault_id[i-window_len1:i])==0).all() and \
            (np.array(para_fault_id[i:i+window_len2])!=0).all():
            return (i+1)*self.hsw.step_len
     return 0
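# A toy illustration of the windowed transition test above (hypothetical
# values, assuming step_len=1 so indices equal seconds):
#   para_fault_id (smoothed) = [0,0,0,0,1,1,1,1,1,...], window1=1, window2=4
# at i=4 the previous 1 sample is all zero and the next 4 are all nonzero,
# so the returned fault time is (4+1)*step_len = 5.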
 def estimate_mode(self):
     # runs the mode detector network over the scaled observations and stores
     # the smoothed arg-max mode sequence; the first 100 steps are pinned to
     # the known initial mode
     n_obs = self.obs / self.obs_scale
     t_len, o_len = n_obs.shape
     obs = n_obs.reshape(1, t_len, o_len)
     obs = np2tensor(obs, use_cuda=False)
     mode = self.mode_detector(obs)
     mode = mode.detach().numpy()[0]
     mode = np.argmax(mode, axis=1)
     mode[:100] = self.mode0
     mode = smooth(mode, 50)
     self.mode = mode
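# np2tensor is not shown here; presumably it is a thin numpy-to-torch wrapper.
# A hypothetical sketch, assuming PyTorch:
import torch

def np2tensor(arr, use_cuda=False):
    t = torch.from_numpy(arr).float()  # wrap the array and cast to float32
    return t.cuda() if use_cuda else t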
    def find_dominant_orientations(self):
        ''' Estimate the dominant edge orientations of the input image from a
        weighted histogram of oriented gradients. '''

        # flipud: why? I'm sure it won't work otherwise, but I don't know why.
        # It should happen in both "find_dominant_orientations" & "find_grid_lines"
        image = np.flipud(self.data['image'])

        ### computing orientations
        if image is None:
            orientations = np.array([])

        else:
            ### smoothing
            image = cv2.blur(image, (9, 9))
            image = cv2.GaussianBlur(image, (9, 9), 0)

            ### oriented gradient of the image
            # is it (dx - 1j*dy) or (dx + 1j*dy)
            # this is related to "flipud" problem mentioned above
            dx = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=9)
            dy = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=9)
            grd = dx - 1j * dy

            ### weighted histogram of oriented gradients (over the whole image)
            hist, binc = utilities.wHOG(grd, NumBin=180 * 5, Extension=False)
            hist = utilities.smooth(hist, window_len=21)

            ### finding peaks in the histogram
            orthogonal_orientations = True
            if orthogonal_orientations:
                print(
                    '\t *** WARNING: currently auto-orientation detection assumes perpendicularity...'
                )
                # if dominant orientations are orthogonal, find only one and add pi/2 to it
                peak_idx = np.argmax(hist)
                orientations = [binc[peak_idx], binc[peak_idx] + np.pi / 2]

            # wrapping each angle into [-pi/2, pi/2)
            for idx in range(len(orientations)):
                if orientations[idx] < -np.pi / 2:
                    orientations[idx] += np.pi
                elif np.pi / 2 <= orientations[idx]:
                    orientations[idx] -= np.pi

        ### setting orientations into places
        self.data['dominant_orientation'] = np.array(orientations)

        # note that the internal unit of the orientation angles is radians;
        # they are converted to degrees (and back) only for user interaction
        string = [
            '{:.1f}'.format(a * 180 / np.pi)
            for a in self.data['dominant_orientation']
        ]
        self.ui.textEdit_dominant_orientations.setText(', '.join(string))
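# utilities.wHOG is not shown; from its use above it apparently histograms the
# gradient angles weighted by gradient magnitude and returns the histogram
# together with the bin centers. A hypothetical sketch under that assumption:
import numpy as np

def wHOG(grd, NumBin=900, Extension=False):
    # grd is a complex array (dx - 1j*dy): angle gives the orientation,
    # magnitude gives the voting weight of each pixel
    ang = np.angle(grd).ravel()
    mag = np.abs(grd).ravel()
    hist, edges = np.histogram(ang, bins=NumBin, range=(-np.pi, np.pi), weights=mag)
    binc = (edges[:-1] + edges[1:]) / 2  # bin centers ('Extension' ignored in this sketch)
    return hist, binc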
 def track(self, mode, state_mu, state_sigma, obs, N):
     # steps the hybrid-state tracker over the observation sequence,
     # one particle-filter step per sample, then smooths the mode estimate
     msg = 'Tracking hybrid states...'
     self.log_msg(msg)
     self.mode0, self.state_mu0, self.state_sigma0, self.obs, self.N = mode, state_mu, state_sigma, obs, N
     length = len(obs)
     with progressbar.ProgressBar(max_value=length * self.hsw.step_len,
                                  redirect_stdout=True) as bar:
         i = 0
         while i < length:
             obs = self.obs[i]
             particle = self.last_particle()
             self.step(particle, obs)
             bar.update(round((i + 1) * self.hsw.step_len, 2))
             i += 1
     self.mode = smooth(np.array(self.mode), 50)
 def check_Z(self, window1=1, window2=2):
     # declares a fault when the smoothed residual Z was all-zero over the
     # preceding window1 seconds and at least one component stays nonzero
     # throughout the last window2 seconds
     if self.fault_time>0:
         return
     window_len1 = int(window1/self.hsw.step_len)
     window_len2 = int(window2/self.hsw.step_len)
     if len(self.Z)<=(window_len1+window_len2):
         return
     Z = np.array(self.Z)
     Z = smooth(Z, 100)
     Z1 = np.array(Z[-(window_len1+window_len2):-window_len2])
     Z2 = np.array(Z[-window_len2:])
     Z1 = np.mean(Z1!=0, 0)
     Z2 = np.mean(Z2!=0, 0)
     if (Z1==0).all() and (Z2==1).any():
         self.fault_time = self.t - window2
          msg = 'A fault occurred at {}s, estimated its magnitude at 100s, fault parameters are mu=[0 0 0 ], sigma=[1 1 1 ].'\
                .format(round(self.fault_time, 2)) # the fixed wording makes this line easy to parse with the log-processing script
         self.log_msg(msg)
         self.break_flag = True
 def plot_mode(self, file_name=None):
     data = np.array(self.mode)
     data = smooth(data, 50)
     self.hsw.plot_modes(data, file_name)
Example no. 8
   #la,lo=conversions.dm2dd([t[j,3]],[t[j,4]]) #changed this in May 2015
   la,lo=conversions.dm2dd(df['lat'].values[0],df['lon'].values[0])
   lat.append(la)
   lon.append(lo)
   if df['year_day'].values[j]+1<366.0:
     #datet.append(num2date(t[j,1]+1).replace(year=int(t[j,0])))
     datet.append(num2date(df['year_day'].values[j]+1).replace(year=int(df['year'].values[j])))
   else:
     #datet.append(num2date(t[j,1]+1).replace(year=int(t[j,0])+1))
     datet.append(num2date(df['year_day'].values[j]+1).replace(year=int(df['year'].values[j]+1)))  
 #wd=t[:,5]
 #temp=t[:,2] 
 #temp_smooth=utilities.smooth(temp,24*7,'hanning')
 wd=df['depth'].values
 temp=df['temp'].values
 temp_smooth=utilities.smooth(temp,hrs,smoothing_method)
 ax1.plot_date(datet,temp,'k-')
  ax1.plot_date(datet,temp_smooth[hrs//2-1:-hrs//2],'r-',linewidth=4) # integer division; trims the smoothing padding
 ax1.set_ylabel(str(days)+' day running average temperature (degF)',color='r',fontsize=16)
 ax1.set_ylim(ylim)
 haul,haul_smooth,datetc=getcatch(fileprefix+sc+'.csv',sc+depset,wvar,numhauls)
 ax2=ax1.twinx()
 ax2.plot(datetc,haul_smooth,'g-',linewidth=3)
 ax2.set_ylabel(str(numhauls)+' haul average catch/pot',color='g',fontsize=16)
 #ax2.set_ylim(xlim)
 
 #ax2.xaxis.set_major_locator(yrsl)
 #ax2.xaxis.set_major_formatter(majfmt)
 #ax2.xaxis.set_major_formatter(Fmt)
 
 #plt_fullmoon(datetc[-1].year,min(haul_smooth),max(haul_smooth))
        index=range(len(obsTime)))
    data = data.sort_values(by='obsTime')  # sort_index(by=...) was removed from pandas
    data.index = range(len(obsTime))
    Date = []
    for i in data.index:
        Date.append(data['obsTime'][i])
    ave_obs = round(np.mean(obsILD), 1)
    ave_wea_wind_speed = round(np.mean(wea_wind_speed), 1)
    t = [data, Date, ave_obs, ave_wea_wind_speed]
    T.append(t)
    print('e')

O, W2 = [], []  # smooth the model and observation ILD
for i in range(4):
    num = 6  # a smoothing window of 6 works best here
    ild2_smooth = smooth(T[i][0]['obsILD'], num, 'hanning')
    difflen2 = len(ild2_smooth) - len(T[i][0]['obsILD'])
    ilds2 = ild2_smooth[difflen2 // 2:-difflen2 // 2]

    wea_wind_smooth = smooth(T[i][0]['wea_wind_speed'], num, 'hanning')
    difflen4 = len(wea_wind_smooth) - len(T[i][0]['wea_wind_speed'])
    wind2 = wea_wind_smooth[difflen4 // 2:-difflen4 // 2]

    O.append(ilds2)
    W2.append(wind2)
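# The smooth-then-center-trim pattern above recurs throughout these examples;
# a small hypothetical helper that factors it out:
def smooth_trim(x, num, method='hanning'):
    s = smooth(x, num, method)
    d = len(s) - len(x)              # extra samples added by the padded convolution
    return s[d // 2: d // 2 + len(x)]  # centered slice of the original length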

fig = plt.figure()
for i in range(4):
    ax1 = fig.add_subplot(
        2,
        2,
Example no. 10
import numpy as np
import scipy.stats as sps
# also assumes a smooth() helper like the one sketched earlier in this document

def detect_events(data, cell_index, stimulus, debug_plots=False):

    stimulus_table = data.get_stimulus_table(stimulus)
    dff_trace = data.get_dff_traces()[1][cell_index, :]

    k_min = 0
    k_max = 10
    delta = 3

    dff_trace = smooth(dff_trace, 5)

    var_dict = {}
    debug_dict = {}
    for ii, fi in enumerate(stimulus_table['start'].values):

        if ii > 0 and stimulus_table.iloc[ii].start == stimulus_table.iloc[ii - 1].end:
            offset = 1
        else:
            offset = 0

        if fi + k_min >= 0 and fi + k_max <= len(dff_trace):
            trace = dff_trace[fi + k_min + 1 + offset:fi + k_max + 1 + offset]

            xx = (trace - trace[0])[delta] - (trace - trace[0])[0]
            yy = max((trace - trace[0])[delta + 2] - (trace - trace[0])[0 + 2],
                     (trace - trace[0])[delta + 3] - (trace - trace[0])[0 + 3],
                     (trace - trace[0])[delta + 4] - (trace - trace[0])[0 + 4])

            var_dict[ii] = (trace[0], trace[-1], xx, yy)
            debug_dict[fi + k_min + 1 + offset] = (ii, trace)

    xx_list, yy_list = [], []
    for _, _, xx, yy in var_dict.values():
        xx_list.append(xx)
        yy_list.append(yy)

    mu_x = np.median(xx_list)
    mu_y = np.median(yy_list)

    xx_centered = np.array(xx_list) - mu_x
    yy_centered = np.array(yy_list) - mu_y

    std_factor = 1
    std_x = 1. / std_factor * np.percentile(
        np.abs(xx_centered), [100 * (1 - 2 * (1 - sps.norm.cdf(std_factor)))])
    std_y = 1. / std_factor * np.percentile(
        np.abs(yy_centered), [100 * (1 - 2 * (1 - sps.norm.cdf(std_factor)))])

    curr_inds = []
    allowed_sigma = 4
    for ii, (xi, yi) in enumerate(zip(xx_centered, yy_centered)):
        if np.sqrt(((xi) / std_x)**2 + ((yi) / std_y)**2) < allowed_sigma:
            curr_inds.append(True)
        else:
            curr_inds.append(False)

    curr_inds = np.array(curr_inds)
    data_x = xx_centered[curr_inds]
    data_y = yy_centered[curr_inds]
    Cov = np.cov(data_x, data_y)
    Cov_Factor = np.linalg.cholesky(Cov)
    Cov_Factor_Inv = np.linalg.inv(Cov_Factor)

    #===================================================================================================================

    noise_threshold = max(allowed_sigma * std_x + mu_x,
                          allowed_sigma * std_y + mu_y)
    mu_array = np.array([mu_x, mu_y])
    yes_set, no_set = set(), set()
    for ii, (t0, tf, xx, yy) in var_dict.items():

        xi_z, yi_z = Cov_Factor_Inv.dot((np.array([xx, yy]) - mu_array))

        # Conditions in order:
        # 1) Outside noise blob
        # 2) Minimum change in df/f
        # 3) Change evoked by this trial, not previous
        # 4) At end of trace, ended up outside of noise floor

        if (np.sqrt(xi_z**2 + yi_z**2) > 4 and yy > .05 and xx < yy
                and tf > noise_threshold / 2):
            yes_set.add(ii)
        else:
            no_set.add(ii)

    assert len(var_dict) == len(stimulus_table)
    b = np.zeros(len(stimulus_table), dtype=bool)
    for yi in yes_set:
        b[yi] = True

    if debug_plots:
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 2)
        # ax[0].plot(dff_trace)
        for key, val in debug_dict.items():
            ti, trace = val
            if ti in no_set:
                ax[0].plot(np.arange(key, key + len(trace)), trace, 'b')
            elif ti in yes_set:
                ax[0].plot(np.arange(key, key + len(trace)),
                           trace,
                           'r',
                           linewidth=2)
            else:
                raise Exception

        for ii in yes_set:
            ax[1].plot([var_dict[ii][2]], [var_dict[ii][3]], 'r.')

        for ii in no_set:
            ax[1].plot([var_dict[ii][2]], [var_dict[ii][3]], 'b.')

        print('number_of_events:', b.sum())
        plt.show()

    return b
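# Hypothetical usage, assuming an Allen Brain Observatory-style dataset object
# that exposes the get_stimulus_table() and get_dff_traces() methods called above:
# events = detect_events(data_set, cell_index=12, stimulus='natural_scenes', debug_plots=True)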
Example no. 11
    data = data.sort_values(by='obsTime')  # sort_index(by=...) was removed from pandas
    data.index = range(len(obsTime))
    Date = []
    for i in data.index:
        Date.append(data['obsTime'][i])
    ave_obs = round(np.mean(obsILD), 1)
    ave_mod = round(np.mean(modILD), 1)
    ave_hycom = round(np.mean(hycom_ILD), 1)
    t = [data, Date, ave_obs, ave_mod, ave_hycom, data1, Date1, ave_fvcom]
    T.append(t)
    print('e')

H, M, O, F = [], [], [], []  # smooth the model and observation ILD
for i in range(2):
    num = 6  # a smoothing window of 6 works best here
    ild0_smooth = utilities.smooth(T[i][0]['hycom_ILD'], num, 'hanning')
    difflen0 = len(ild0_smooth) - len(T[i][0]['hycom_ILD'])
    ilds0 = ild0_smooth[difflen0 // 2:-difflen0 // 2]

    ild1_smooth = utilities.smooth(T[i][0]['modILD'], num, 'hanning')
    difflen1 = len(ild1_smooth) - len(T[i][0]['modILD'])
    ilds1 = ild1_smooth[difflen1 // 2:-difflen1 // 2]

    ild2_smooth = utilities.smooth(T[i][0]['obsILD'], num, 'hanning')
    difflen2 = len(ild2_smooth) - len(T[i][0]['obsILD'])
    ilds2 = ild2_smooth[difflen2 // 2:-difflen2 // 2]

    ild3_smooth = utilities.smooth(T[i][5]['fvcom_ILD'], num, 'hanning')
    difflen3 = len(ild3_smooth) - len(T[i][5]['fvcom_ILD'])
    ilds3 = ild3_smooth[difflen3 // 2:-difflen3 // 2]