Example #1
def visualize(time, true_y, pred_y, odefunc, itr, loss):
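    # NOTE: the figure and axes used below (fig, ax_traj, axy_phase, ay_traj,
    # az_traj, axz_phase, ayz_phase) are assumed to be created at module level.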

    if args.viz:

        ax_traj.cla()
        ax_traj.set_title('Trajectories')
        ax_traj.set_xlabel('t')
        ax_traj.set_ylabel('x')
        ax_traj.plot(time.numpy(), pred_y.numpy()[:, 0, 0], 'r--', time.numpy(), true_y.numpy()[:, 0, 0], 'k-')
        ax_traj.set_xlim(time.min(), time.max())
        ax_traj.plot(time.numpy(), pred_y.numpy()[:, 0, 1], 'r--', time.numpy(), true_y.numpy()[:, 0, 1], 'k-')
        ax_traj.legend()

        axy_phase.cla()
        axy_phase.set_title('Phase Portrait')
        axy_phase.set_xlabel('x')
        axy_phase.set_ylabel('y')
        axy_phase.plot(true_y.numpy()[:, 0, 0], true_y.numpy()[:, 0, 1], 'r--')
        axy_phase.plot(pred_y.numpy()[:, 0, 0], pred_y.numpy()[:, 0, 1], 'k-')
        if args.ode_nums == 3:
            ay_traj.cla()
            ay_traj.set_title('Trajectories')
            ay_traj.set_xlabel('t')
            ay_traj.set_ylabel('y')
            ay_traj.plot(time.numpy(), pred_y.numpy()[:, 0, 1], 'r--', time.numpy(), true_y.numpy()[:, 0, 1], 'k-')
            ay_traj.set_xlim(time.min(), time.max())
            # ax_traj.set_ylim(-2, 2)
            ay_traj.legend()
            az_traj.cla()
            az_traj.set_title('Trajectories')
            az_traj.set_xlabel('t')
            az_traj.set_ylabel('z')
            az_traj.plot(time.numpy(), pred_y.numpy()[:, 0, 2], 'r--', time.numpy(), true_y.numpy()[:, 0, 2], 'k-')
            az_traj.set_xlim(time.min(), time.max())
            # ax_traj.set_ylim(-2, 2)
            az_traj.legend()


            axz_phase.cla()
            axz_phase.set_title('Phase Portrait')
            axz_phase.set_xlabel('x')
            axz_phase.set_ylabel('z')
            axz_phase.plot(true_y.numpy()[:, 0, 0], true_y.numpy()[:, 0, 2], 'r--')
            axz_phase.plot(pred_y.numpy()[:, 0, 0], pred_y.numpy()[:, 0, 2], 'k-')

            ayz_phase.cla()
            ayz_phase.set_title('Phase Portrait')
            ayz_phase.set_xlabel('y')
            ayz_phase.set_ylabel('z')
            ayz_phase.plot(true_y.numpy()[:, 0, 1], true_y.numpy()[:, 0, 2], 'r--')
            ayz_phase.plot(pred_y.numpy()[:, 0, 1], pred_y.numpy()[:, 0, 2], 'k-')

        fig.tight_layout()
        plt.savefig('png{}/{}.jpg'.format(args.CASE, itr))

def time_bins_fea(data, x, time, num, name):
    timemin, timemax = time.min(), time.max()
    bins = [timemin + i * (timemax - timemin) / num for i in range(num + 1)]
    bins[0] -= 1.0
    for i in range(1, num + 1):
        data[f'{name}{i}'] = x[(bins[i - 1] < time) & (time <= bins[i])].mean()
    return data
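
A minimal usage sketch for time_bins_fea, assuming pandas Series inputs and a plain dict for the output row (all names here are illustrative):

import numpy as np
import pandas as pd

t = pd.Series(np.linspace(0.0, 10.0, 100))
x = pd.Series(np.sin(t))
row = time_bins_fea({}, x, t, num=4, name='feat')
print(row)  # e.g. {'feat1': ..., 'feat2': ..., 'feat3': ..., 'feat4': ...}
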
def normalization_kWh():
    df = pd.read_csv(
        '2019 CHP Raw Trend.csv')  # Comment out if the file is already loaded above
    time = df['Time'].astype('datetime64[ns]')
    delta_time = (time.max() - time.min()).days + 1
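    # ChillerKwConsumption is assumed to be defined elsewhere (e.g. a kW
    # column of df); it is not created inside this function.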
    ChillerAnnualkWh = ChillerKwConsumption.sum() / delta_time * 365
    return ChillerAnnualkWh
    def _check_time(self, t_provided):
        assert self.data is not None
        time = self.get_time()
        if t_provided < time.min() - self.dt:
            warnings.warn('provided time %s smaller than simulation start time' % str(t_provided), stacklevel=2)
        if t_provided > time.max() + self.dt:
            warnings.warn('provided time %s larger than simulation end time' % str(t_provided), stacklevel=2)
Example #5
def genImage():
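    # Assumed imports (not shown in this snippet): io, numpy as np,
    # pylab as pl, and matplotlib.gridspec as gridspec.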

    # Collecting latest data
    recent = np.load('recent.npy')
    # Converting time from seconds to hours
    time = recent[:, 0] / 3600.

    pl.figure(figsize=(8, 5))

    gs = gridspec.GridSpec(2, 2, height_ratios=[1.5, 1], width_ratios=[1, 1])

    ## Starting with the sensors

    sensorPanel = pl.subplot(gs[0, :])
    for j in range(1, 9):
        sensorPanel.plot(time, recent[:, j])

    sensorPanel.set_ylabel("Sensor resistance")
    sensorPanel.set_xlabel('Time (h)')
    sensorPanel.set_xlim(time.min() - 0.01, time.max() + 0.01)
    sensorPanel.grid(True)

    ## Temperature and humidity
    tempPanel = pl.subplot(gs[1, 0])
    tempPanel.plot(time, recent[:, 9])
    tempPanel.set_ylabel("Temperature")
    tempPanel.set_xlabel('Time (h)')
    tempPanel.set_xlim(time.min() - 0.01, time.max() + 0.01)

    humdPanel = pl.subplot(gs[1, 1])
    humdPanel.plot(time, recent[:, 10])
    humdPanel.set_ylabel("Humidity")
    humdPanel.set_xlabel('Time (h)')
    humdPanel.set_xlim(time.min() - 0.01, time.max() + 0.01)

    memdata = io.BytesIO()

    pl.tight_layout()
    pl.savefig(memdata, format='png', dpi=150)
    image = memdata.getvalue()
    pl.close()
    return image
def rawEEGplot(data, channelName):
    # time vector
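    # sf (the sampling frequency in Hz) is assumed to be defined at module level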
    time = np.arange(data.size) / sf

    # Plot the signal
    fig, ax = plt.subplots(1, 1, figsize=(12, 4))
    plt.plot(time, data, lw=1.5, color='k')
    plt.xlabel('Time (seconds)')
    plt.ylabel('Voltage')
    plt.xlim([time.min(), time.max()])
    plt.title('Raw EEG at (' + channelName + ')')
    sns.despine()
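
A minimal usage sketch with a synthetic signal (assumes the module-level sf noted above plus the numpy/matplotlib/seaborn imports the function relies on):

import numpy as np

sf = 100.0                              # assumed sampling frequency (Hz)
signal = np.random.randn(10 * int(sf))  # 10 s of synthetic data
rawEEGplot(signal, 'Cz')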
Example #8
def heat_map(request, pk):

    data = get_data(pk)

    k1 = {'intel': ['intel_pmc3']}

    k2 = {'intel': ['MEM_LOAD_RETIRED_L1D_HIT']}
    #k2 = {'intel': ['INSTRUCTIONS_RETIRED']}
    ts0 = tspl.TSPLBase(None, k1, k2, job_stats=data)

    k2 = {'intel': ['CLOCKS_UNHALTED_CORE']}
    ts1 = tspl.TSPLBase(None, k1, k2, job_stats=data)

    cpi = np.array([])
    hosts = []
    for v in ts0.data[0]:
        hosts.append(v)
        ncores = len(ts0.data[0][v])
        for k in range(ncores):
            i = np.array(ts0.data[0][v][k], dtype=float)  # np.float was removed from NumPy; use the builtin
            c = np.array(ts1.data[0][v][k], dtype=float)
            ratio = np.divide(np.diff(i), np.diff(c))
            if not cpi.size:
                cpi = np.array([ratio])
            else:
                cpi = np.vstack((cpi, ratio))
    cpi_min, cpi_max = cpi.min(), cpi.max()

    fig, ax = plt.subplots(1, 1, figsize=(8, 12), dpi=110)

    ycore = np.arange(cpi.shape[0] + 1)
    time = ts0.t / 3600.

    yhost = np.arange(len(hosts) + 1) * ncores + ncores

    fontsize = 10

    if len(yhost) > 80:
        fontsize /= 0.5 * np.log(len(yhost))

    plt.yticks(yhost - ncores / 2., hosts, size=fontsize)
    plt.pcolormesh(time, ycore, cpi, vmin=cpi_min, vmax=cpi_max)
    plt.axis([time.min(), time.max(), ycore.min(), ycore.max()])

    plt.title('L1D Load Hits per Core Clock Cycle')
    plt.colorbar()

    ax.set_xlabel('Time (hrs)')

    plt.close()

    return figure_to_response(fig)
def normalization_therm():
    df = pd.read_csv(
        '2019 CHP Raw Trend.csv')  # Comment out if the file is already loaded above
    time = df['Time'].astype('datetime64[ns]')
    delta_time = (time.max() - time.min()).days + 1
    HHWST = df['CHP HHWS Temp']
    HHWRT = df['CHP HHWR Temp']
    CHPF = df['CHP Flow']
    BoilerMBH = (
        HHWST - HHWRT
    ) * CHPF / 1000 / .80  # 80% efficiency placeholder, update to reference user defined value
    total_BoilerMBH = BoilerMBH.sum()
    BoilerAnnualTherms = (total_BoilerMBH / delta_time) * 365
    return BoilerAnnualTherms
Example #10
def format_date_axis(ax, time):
    import matplotlib.dates as mdates

    minutes = mdates.MinuteLocator(interval=10)   # every 10 minutes
    timeFmt = mdates.DateFormatter('%H:%M')

    # format the ticks
    ax.xaxis.set_major_locator(minutes)
    ax.xaxis.set_major_formatter(timeFmt)
    #ax.xaxis.set_minor_locator(minutes)

    # round the axis limits outward to whole minutes
    datemin = np.datetime64(time.min(), 'm')
    datemax = np.datetime64(time.max(), 'm') + np.timedelta64(1, 'm')
    ax.set_xlim(datemin, datemax)
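
A minimal usage sketch for format_date_axis (assumes NumPy and Matplotlib; the timestamps are illustrative):

import numpy as np
import matplotlib.pyplot as plt

time = np.arange('2023-01-01T09:00', '2023-01-01T10:00',
                 np.timedelta64(1, 'm'), dtype='datetime64[m]')
values = np.random.rand(time.size)

fig, ax = plt.subplots()
ax.plot(time, values)
format_date_axis(ax, time)
plt.show()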
Example #11
def plot(pump_plt, index, data, header, time, units, plot_pos):
    y_name = units[index]
    y_data = data[:, index]
    pump_plt.set_title(header[index])
    x_name = ''
    if plot_pos >= 4:
        x_name = 'Time (Hours)'
    pump_plt.set_xlabel(x_name)
    pump_plt.set_ylabel(y_name)
    pump_plt.grid(True)
    pump_plt.xaxis.set_major_formatter(mtick.FormatStrFormatter('%.2f'))
    pump_plt.set_ylim(y_data.min() * 0.5, y_data.max() * 1.5)
    pump_plt.set_xlim(time.min(), time.max())
    pump_plt.plot(time, y_data)
    at = AnchoredText('Mean:         ' + "{:.2f}".format(y_data.mean()) + ' ' + y_name
                      + '\n' + 'Max:           ' + "{:.2f}".format(y_data.max()) + ' ' + y_name
                      + '\n' + 'Min:            ' + "{:.2f}".format(y_data.min()) + ' ' + y_name
                      + '\n' + 'Test Time:  ' + "{:.2f}".format(time.max()) + ' ' + 'Hours'
                      + '\n' + 'On Time:    ' + "{:.2f}".format(data[:,3].max() / 3600) + ' ' + 'Hours',
                      prop=dict(size=8), frameon=True,
                      loc=1
                      )
    #at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    pump_plt.add_artist(at)
Example #12
    def steady_state_error(self, criterion=None):
        if criterion is not None:
            self.steady_state_criterion = criterion
        nt = self.nt
        dt = self.dt
        time = self.time[-nt:]
        time = time - time.min()
        pa = self.pa[-nt:]
        duration = time.max()
        loc0 = np.where(time <= duration / 2.)
        loc1 = np.where(time >= duration / 2.)
        pa0 = pa[loc0].mean()
        pa1 = pa[loc1].mean()
        error = abs((pa1 - pa0) / pa0)
        ta = self.time.max() - duration / 2
        niter = len(self.time) - 1  # was len(self.time - 1), which just equals len(self.time)
        return (error, ta, niter)
Example #13
    def _plot(self):

        time_window_start = self._current_time - self.take_ft_at_seconds
        x_min = max(0.0, time_window_start)
        x_max = max(self.take_ft_at_seconds, self._current_time)

        self.respiration_plot.setRange(xRange=[x_min, x_max])

        # copy to avoid potential race conditions with the C++ Qt timer; revisit if this proves unnecessary
        debug_time = self._time.copy()
        debug_data = self._data.copy()

        indices = np.where(
            np.logical_and(debug_time >= x_min, debug_time <= x_max))
        time = np.take(debug_time, indices[0])
        data = np.take(debug_data, indices[0])
        self.respiration_line.setData(time, data)

        averaged_data = data - np.average(data)

        # interpolate onto uniform t for FFT.
        min_delta_time = np.diff(time).min()
        uniform_time = np.arange(time.min(), time.max(), min_delta_time)
        uniform_data = np.interp(uniform_time, time, averaged_data)
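        # fft is assumed to be imported at module level (e.g. numpy.fft.fft or scipy.fftpack.fft)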

        ft = fft(uniform_data)
        num_samples = ft.shape[0]
        half_num_samples = int(num_samples / 2)
        self.ft_frequency = np.linspace(0.0, 1.0 / (2.0 * min_delta_time),
                                        half_num_samples)

        ft_result = (2.0 / num_samples) * np.abs(ft[:half_num_samples])

        self.fft_line.setData(self.ft_frequency, ft_result)

        self.fft_plot.setRange(yRange=[ft_result.min(), ft_result.max()])
    def genImage(self, data, dtI=0, dtF=0):

        # Time column (matplotlib date numbers; formatted below with DateFormatter)
        time = data[:, 1]

        pl.figure(figsize=(8, 5))

        gs = gridspec.GridSpec(2,
                               2,
                               height_ratios=[1.5, 1],
                               width_ratios=[1, 1])

        ## Starting with the sensors
        sensorPanel = pl.subplot(gs[0, :])
        hfmt = matplotlib.dates.DateFormatter('%H:%M')
        sensorPanel.xaxis.set_major_formatter(hfmt)

        for j in range(4, 12):
            sensorPanel.plot(time, data[:, j], '-', label='R%d' % (j - 3))

        maxy = np.max(data[:, 4:12])
        miny = np.min(data[:, 4:12])
        sensorPanel.set_ylim(miny * 0.95, maxy * 1.03)
        sensorPanel.set_ylabel("Sensor resistance")
        sensorPanel.set_xlabel('Time (h)')
        sensorPanel.set_xlim(time.min() - 0.003, time.max() + 0.003)
        sensorPanel.set_xticks(np.linspace(time.min(), time.max(), 5))
        sensorPanel.legend(loc='center left',
                           bbox_to_anchor=(1, 0.5),
                           prop={'size': 10.5})
        sensorPanel.grid(True)

        ## Drawing lines marking when induction happened; the markers are
        ## only drawn when both induction times were given, otherwise tI/tF
        ## would be undefined
        if dtI != 0 and dtF != 0:
            tI = matplotlib.dates.date2num(dtI)
            tF = matplotlib.dates.date2num(dtF)

            sensorPanel.plot([tI, tI], [miny * 0.5, maxy * 2],
                             '--',
                             color=(1.0, 0., 0.0),
                             lw=3.,
                             alpha=0.3,
                             zorder=-1)
            sensorPanel.plot([tF, tF], [miny * 0.5, maxy * 2],
                             '--',
                             color=(1.0, 0., 0.0),
                             lw=3.,
                             alpha=0.3,
                             zorder=-1)

        # creating the title
        Dfmt = matplotlib.dates.DateFormatter('%Y-%m-%d')
        Date0 = str(Dfmt(time[0]))
        Date1 = str(Dfmt(time[-1]))
        if Date0 == Date1:
            pl.title(Date0)
        else:
            pl.title("From %s to %s" % (Date0, Date1))

        ## Temperature and humidity
        Tstep = (data[:, 2].max() - data[:, 2].min()) / 20.
        Hstep = (data[:, 3].max() - data[:, 3].min()) / 20.

        tempPanel = pl.subplot(gs[1, 0])
        tempPanel.plot(time, data[:, 2], '-')
        tempPanel.xaxis.set_major_formatter(hfmt)
        tempPanel.set_ylabel("Temperature")
        tempPanel.set_xlabel('Time (h)')
        tempPanel.set_xlim(time.min() - 0.003, time.max() + 0.003)
        tempPanel.set_xticks(np.linspace(time.min(), time.max(), 4))
        tempPanel.set_ylim(data[:, 2].min() - Tstep, data[:, 2].max() + Tstep)
        tempPanel.set_yticks(
            np.linspace(data[:, 2].min(), data[:, 2].max(), 5, dtype=int))

        humdPanel = pl.subplot(gs[1, 1])
        humdPanel.plot(time, data[:, 3], '-')
        humdPanel.xaxis.set_major_formatter(hfmt)
        humdPanel.set_ylabel("Humidity")
        humdPanel.set_xlabel('Time (h)')
        humdPanel.set_xlim(time.min() - 0.003, time.max() + 0.003)
        humdPanel.set_xticks(np.linspace(time.min(), time.max(), 4))
        humdPanel.set_ylim(data[:, 3].min() - Hstep, data[:, 3].max() + Hstep)
        humdPanel.set_yticks(
            np.linspace(data[:, 3].min(), data[:, 3].max(), 5, dtype=int))

        pl.tight_layout()

        memdata = io.BytesIO()
        pl.savefig(memdata, format='png', dpi=400, bbox_inches='tight')

        pl.close()

        image = memdata.getvalue()
        return image
Example #15
    for i in range(len(part1)):
        for j in range(len(data1[0])):
            if part1[i, j] == 1:
                try1[i, j] = m1[i]

    # compute a single representative value for each column
    try2 = np.sum(try1, axis=0) / np.sum(np.array(try1) > 0, axis=0)
    # shift the line so zero sits in the middle
    data2 = np.where(try2 > 0, try2 - len(part1) / 2, try2)

    # Define sampling frequency and time vector
    sf = 100.
    time = np.arange(data2.size) / sf

    # Plot the signal
    fig, ax = plt.subplots(1, 1, figsize=(12, 4))
    plt.plot(time, data2, lw=1.5, color='k')
    plt.xlabel('Time (seconds)')
    plt.ylabel('Voltage')
    plt.xlim([time.min(), time.max()])
    plt.title('EEG signal')
    sns.despine()

Example #16
def extract_features(samples, print_log=False):
    global prediction

    # helper that converts a column of label indexes into one-hot labels
    def one_hot(y_):
        # Function to encode output labels from number indexes
        # e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
        y_ = y_.reshape(len(y_))
        n_values = np.max(y_) + 1
        return np.eye(n_values)[np.array(y_, dtype=np.int32)]

    #  Data loading
    # insert here (don't remove the '1')
    for sample in samples:
        sample.append(1)
    all = np.array(samples)

    if print_log:
        print('shape')
        print(all.shape)

    np.random.shuffle(all)  # mix eeg_all
    # Keep only the first `final` samples
    final = 1
    all = all[0:final]

    # Get the features
    feature_all = all[:, 0:14]
    # Get the label
    label = all[:, 14:15]

    # z-score

    if print_log:
        print(feature_all)
        print(feature_all.shape)
    no_fea = feature_all.shape[-1]
    label_all = label
    if print_log:
        print("")
        print(label_all)

    sns.set(font_scale=1.2)
    if print_log:
        print("before")
        print(feature_all)
    feature_all = preprocessing.scale(feature_all)
    if print_log:
        print("After")
        print(feature_all)

    data = feature_all

    # Define sampling frequency and time vector
    sf = 160
    time = np.arange(data.shape[0]) / sf
    if print_log:
        print(data.shape)
        print('time')
        print(time.shape)
    # Plot the signal
    fig, ax = plt.subplots(1, 1, figsize=(12, 4))
    plt.plot(time, data, lw=1.5, color='k')
    plt.xlabel('Time (seconds)')
    plt.ylabel('Voltage')
    plt.xlim([time.min(), time.max()])
    plt.title('EEG Data')
    sns.despine()

    # Define window length (0.5 seconds, in samples)
    win = 0.5 * sf
    freqs, psd = signal.welch(data, sf, nperseg=win)
    if print_log:
        print(freqs)
        print('psd')
        print(psd.shape)

    n_classes = 1
    ### CNN code
    # feature_all is the input data of the CNN
    if print_log:
        print("cnn input feature shape", feature_all.shape)
    n_fea = 14
    if print_log:
        print(n_fea)
    # label_all=one_hot(label_all)

    final = all.shape[0]
    middle_number = int(final * 3 / 4)
    if print_log:
        print("-----", middle_number)
    feature_training = feature_all[0:middle_number]
    feature_testing = feature_all[middle_number:final]
    label_training = label_all[0:middle_number]
    label_testing = label_all[middle_number:final]
    label_ww = label_all[middle_number:final]  # for the confusion matrix
    if print_log:
        print("label_testing", label_testing.shape)
    a = feature_training
    b = feature_testing
    if print_log:
        print(feature_training.shape)
        print(feature_testing.shape)

    keep = 1
    batch_size = final - middle_number
    n_group = 1
    train_fea = []
    for i in range(n_group):
        f = a[(0 + batch_size * i):(batch_size + batch_size * i)]
        train_fea.append(f)
    if print_log:
        print("Here")
        print(train_fea[0].shape)

    train_label = []
    for i in range(n_group):
        f = label_training[(0 + batch_size * i):(batch_size +
                                                 batch_size * i), :]
        train_label.append(f)
    if print_log:
        print(train_label[0].shape)

    # the CNN code
    def compute_accuracy(v_xs, v_ys):

        y_pre = sess3.run(prediction, feed_dict={xs: v_xs, keep_prob: keep})
        correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        result = sess3.run(accuracy,
                           feed_dict={
                               xs: v_xs,
                               ys: v_ys,
                               keep_prob: keep
                           })
        return result

    # create random initial weights
    def weight_variable(shape):
        # Outputs random values from a truncated normal distribution
        initial = tf.truncated_normal(shape, stddev=0.1)
        # A variable maintains state in the graph across calls to run().
        # You add a variable to the graph by constructing an instance of the class Variable.
        if print_log:
            print('shape')
            print(shape)
        return tf.Variable(initial)

    #random bias values
    def bias_variable(shape):
        # Creates a constant tensor
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def conv2d(x, W):
        # stride [1, x_movement, y_movement, 1]
        # Must have strides[0] = strides[3] = 1
        # the convolution layer: x is the input, W is the weights, and the
        # stride sets how far the filter moves in each dimension (in pixels)
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    # def max_pool_2x2(x):
    #     # stride [1, x_movement, y_movement, 1]
    #     return tf.nn.max_pool(x, ksize=[1,1,2,1], strides=[1,1,2,1], padding='SAME')
    # max pooling to reduce dimensionality; here every 1x2 window is pooled
    def max_pool_1x2(x):
        # stride [1, x_movement, y_movement, 1]
        return tf.nn.max_pool(x,
                              ksize=[1, 1, 2, 1],
                              strides=[1, 1, 2, 1],
                              padding='SAME')

    # define placeholder for inputs to network
    xs = tf.placeholder(tf.float32, [None, n_fea])  # 1*64
    ys = tf.placeholder(tf.float32,
                        [None, n_classes])  # 2 is the classes of the data
    # Lookup what is keep_prob
    keep_prob = tf.placeholder(tf.float32)
    x_image = tf.reshape(xs, [-1, 1, n_fea, 1])
    if print_log:
        print('x_image')
        print(x_image)
        print(x_image.shape)

    ## conv1 layer ##
    W_conv1 = weight_variable([1, 1, 1,
                               20])  # patch 1*1, in size is 1, out size is 2
    b_conv1 = bias_variable([20])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) +
                         b_conv1)  # output size 1*64*2
    h_pool1 = max_pool_1x2(h_conv1)  # output size 1*32x2

    ## conv2 layer ##
    # W_conv2 = weight_variable([1,1, 2, 4]) # patch 1*1, in size 2, out size 4
    # b_conv2 = bias_variable([4])
    # h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 1*32*4
    # h_pool2 = max_pool_1x2(h_conv2)                          # output size 1*16*4

    ## fc1 layer ## fc fully connected layer
    W_fc1 = weight_variable([1 * int(n_fea / 2) * 20, 120])
    b_fc1 = bias_variable([120])
    # [n_samples, 7, 7, 64] ->> [n_samples, 7*7*64]
    h_pool2_flat = tf.reshape(h_pool1, [-1, 1 * int(n_fea / 2) * 20])
    h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    ## fc2 layer ##
    W_fc2 = weight_variable([120, n_classes])
    b_fc2 = bias_variable([n_classes])
    # Multiplies matrix a by matrix b, producing a * b
    prediction = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

    # Weight regularization
    l2 = 0.001 * sum(
        tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables())
    # Mean of the errors between the prediction results and the class labels in the training data
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=prediction, labels=ys)) + l2  # Softmax loss
    # Using optimizer
    train_step = tf.train.AdamOptimizer(0.04).minimize(cross_entropy)
    # Create a session to run the nodes (tensors) of the graph
    sess3 = tf.Session()
    # Initialize all the defined variables
    init = tf.global_variables_initializer()
    # Run the initializer
    sess3.run(init)
    # Total number of array elements which trigger summarization rather than full array
    #np.set_printoptions(threshold=np.nan)
    step = 1
    while step < 1500:
        # Train the model
        for i in range(n_group):
            sess3.run(train_step,
                      feed_dict={
                          xs: train_fea[i],
                          ys: train_label[i],
                          keep_prob: keep
                      })
        # Every 5 steps, evaluate the model on the test data
        if step % 5 == 0:
            # Compute the cost using the cross entropy
            cost = sess3.run(cross_entropy,
                             feed_dict={
                                 xs: b,
                                 ys: label_testing,
                                 keep_prob: keep
                             })
            # Compute the accuracy
            acc_cnn_t = compute_accuracy(b, label_testing)
            if print_log:
                print('the step is:', step, ',the acc is', acc_cnn_t,
                      ', the cost is', cost)
        step += 1
    acc_cnn = compute_accuracy(b, label_testing)
    feature_all_cnn = sess3.run(h_fc1_drop,
                                feed_dict={
                                    xs: feature_all,
                                    keep_prob: keep
                                })
    if print_log:
        print("the shape of cnn output features", feature_all.shape,
              label_all.shape)

    ####### RNN
    tf.reset_default_graph()
    no_fea = feature_all.shape[-1]
    if print_log:
        print(no_fea)
    # The input to an LSTM layer must be 3-D:
    # feature_all.reshape(batch size, time steps, features)

    feature_all = feature_all.reshape([final, 1, no_fea])
    # argmax returns the index of the largest value along an axis of a tensor
    if print_log:
        print(tf.argmax(label_all, 1))
        print(feature_all_cnn.shape)

    # middle_number=21000
    feature_training = feature_all
    feature_testing = feature_all
    label_training = label_all
    label_testing = label_all
    # print "label_testing",label_testing
    a = feature_training
    b = feature_testing
    if print_log:
        print(feature_all)
        print(feature_testing.shape)
    # 264-dimensional vector that is passed to the next layer
    nodes = 264
    # used for weight regularization
    lameda = 0.004
    # learning rate
    lr = 0.005

    batch_size = final - middle_number
    train_fea = []
    n_group = 1
    for i in range(n_group):
        f = a[(0 + batch_size * i):(batch_size + batch_size * i)]
        train_fea.append(f)

    if print_log:
        print("here0")
        print(train_fea[0].shape)

    train_label = []
    for i in range(n_group):
        f = label_training[(0 + batch_size * i):(batch_size +
                                                 batch_size * i), :]
        train_label.append(f)
    if print_log:
        print(train_label[0].shape)

    # hyperparameters

    n_inputs = no_fea
    n_steps = 1  # time steps
    n_hidden1_units = nodes  # neurons in hidden layer
    n_hidden2_units = nodes
    n_hidden3_units = nodes
    n_hidden4_units = nodes
    n_classes = n_classes

    # tf Graph input

    x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
    y = tf.placeholder(tf.float32, [None, n_classes])

    # Define weights
    #tf.random_normal: Outputs random values from a normal distribution
    weights = {
        'in':
        tf.Variable(tf.random_normal([n_inputs, n_hidden1_units]),
                    trainable=True),
        'a':
        tf.Variable(tf.random_normal([n_hidden1_units, n_hidden1_units]),
                    trainable=True),
        'hidd2':
        tf.Variable(tf.random_normal([n_hidden1_units, n_hidden2_units])),
        'hidd3':
        tf.Variable(tf.random_normal([n_hidden2_units, n_hidden3_units])),
        'hidd4':
        tf.Variable(tf.random_normal([n_hidden3_units, n_hidden4_units])),
        'out':
        tf.Variable(tf.random_normal([n_hidden4_units, n_classes]),
                    trainable=True),
    }

    biases = {
        # tf.constant creates a 1-D tensor filled with 0.1
        'in': tf.Variable(tf.constant(0.1, shape=[n_hidden1_units])),
        'hidd2': tf.Variable(tf.constant(0.1, shape=[n_hidden2_units])),
        'hidd3': tf.Variable(tf.constant(0.1, shape=[n_hidden3_units])),
        'hidd4': tf.Variable(tf.constant(0.1, shape=[n_hidden4_units])),
        'out': tf.Variable(tf.constant(0.1, shape=[n_classes]), trainable=True)
    }

    def RNN(X, weights, biases):
        # hidden layer for input to cell
        ########################################

        # reshape the inputs to (batch_size * n_steps, n_inputs)
        X = tf.reshape(X, [-1, n_inputs])

        # feed through the stacked fully connected hidden layers; only the
        # last hidden activation is passed on to the LSTM cells below
        X_hidd1 = tf.matmul(X, weights['in']) + biases['in']
        X_hidd2 = tf.matmul(X_hidd1, weights['hidd2']) + biases['hidd2']
        X_hidd3 = tf.matmul(X_hidd2, weights['hidd3']) + biases['hidd3']
        X_hidd4 = tf.matmul(X_hidd3, weights['hidd4']) + biases['hidd4']
        X_in = tf.reshape(X_hidd4, [-1, n_steps, n_hidden4_units])

        # cell
        ##########################################

        # basic LSTM Cell.
        # 1-layer LSTM with n_hidden units.
        # creates a LSTM layer and instantiates variables for all gates.
        lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(n_hidden4_units,
                                                   forget_bias=1.0,
                                                   state_is_tuple=True)
        # 2nd layer LSTM with n_hidden units.
        lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(n_hidden4_units,
                                                   forget_bias=1.0,
                                                   state_is_tuple=True)
        # Adding an additional layer to improve the accuracy
        # RNN cell composed sequentially of multiple simple cells.

        lstm_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2],
                                                state_is_tuple=True)
        # lstm cell is divided into two parts (c_state, h_state)
        #Initializing the zero state
        init_state = lstm_cell.zero_state(batch_size, dtype=tf.float32)
        with tf.variable_scope('lstm1', reuse=tf.AUTO_REUSE):
            # 'state' is a tensor of shape [batch_size, cell_state_size]
            outputs, final_state = tf.nn.dynamic_rnn(lstm_cell,
                                                     X_in,
                                                     initial_state=init_state,
                                                     time_major=False)

        # hidden layer for output as the final results
        #############################################
        if print_log:
            print("before")
            print(outputs)
        outputs = tf.unstack(tf.transpose(
            outputs, [1, 0, 2]))  # unstack along time; outputs[-1] is the last step
        if print_log:
            print("after")
            print(outputs)
        # of the n outputs, only the last one is fed to the next layer
        results = tf.matmul(outputs[-1], weights['out']) + biases['out']

        return results, outputs[-1]

    #################################################################################################################################################
    pred, Feature = RNN(x, weights, biases)
    lamena = lameda
    l2 = lamena * sum(
        tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
    )  # L2 loss prevents this overkill neural network to overfit the data
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred,
                                                labels=y)) + l2  # Softmax loss
    train_op = tf.train.AdamOptimizer(lr).minimize(cost)
    # train_op = tf.train.AdagradOptimizer(l).minimize(cost)
    # train_op = tf.train.RMSPropOptimizer(0.00001).minimize(cost)
    # train_op = tf.train.AdagradDAOptimizer(0.01).minimize(cost)
    # train_op = tf.train.GradientDescentOptimizer(0.00001).minimize(cost)
    # pred_result =tf.argmax(pred, 1)
    label_true = tf.argmax(y, 1)
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    confusion_m = tf.confusion_matrix(tf.argmax(y, 1), tf.argmax(pred, 1))
    #starting sessions
    with tf.Session() as sess:
        if int((tf.__version__).split('.')[1]) < 12 and int(
            (tf.__version__).split('.')[0]) < 1:
            init = tf.initialize_all_variables()
        else:
            init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver()
        step = 0
        if print_log:
            print(train_fea[0])
            print(train_label[0])

        #downloaded = drive.CreateFile({'id':'10p_NuiBV2Or2sk6cm0yPLfu9tJ2lXEKg'})
        #f2 = downloaded.GetContentString()

        #filename = "/home/xiangzhang/scratch/results/rnn_acc.csv"
        #f2 = open(filename, 'wb')
        while step < 2500:
            sess.run(train_op,
                     feed_dict={
                         x: train_fea[0],
                         y: train_label[0],
                     })
            if sess.run(accuracy, feed_dict={
                    x: b,
                    y: label_testing,
            }) > 0.96:
                print("The lamda is :", lamena, ", Learning rate:", lr,
                      ", The step is:", step, ", The accuracy is: ",
                      sess.run(accuracy, feed_dict={
                          x: b,
                          y: label_testing,
                      }))

                break
            if step % 5 == 0:
                hh = sess.run(accuracy, feed_dict={
                    x: b,
                    y: label_testing,
                })
                #f2.write(str(hh)+'\n')
                print(", The step is:", step, ", The accuracy is:", hh,
                      "The cost is :",
                      sess.run(cost, feed_dict={
                          x: b,
                          y: label_testing,
                      }))
            step += 1

        ##confusion matrix
        feature_0 = sess.run(Feature, feed_dict={x: train_fea[0]})
        for i in range(1, n_group):
            feature_11 = sess.run(Feature, feed_dict={x: train_fea[i]})
            feature_0 = np.vstack((feature_0, feature_11))

        if print_log:
            print(feature_0.shape)
        feature_b = sess.run(Feature, feed_dict={x: b})
        feature_all_rnn = np.vstack((feature_0, feature_b))

        confusion_m = sess.run(confusion_m,
                               feed_dict={
                                   x: b,
                                   y: label_testing,
                               })
        if print_log:
            print(confusion_m)
        ## predicted probabilities
        # pred_prob=sess.run(pred, feed_dict={
        #             x: b,
        #             y: label_testing,
        #         })
        # # print pred_prob

        #print ("RNN train time:", time4 - time3, "Rnn test time", time5 - time4, 'RNN total time', time5 - time3)

        ##AE
    if print_log:
        print(feature_all_rnn.shape, feature_all_cnn.shape)
    new_feature_all_rnn = feature_all_rnn[0:1, :]
    if print_log:
        print(new_feature_all_rnn.shape)
    # stack the RNN features, PSD features, and CNN features horizontally
    feature_all = np.hstack((new_feature_all_rnn, psd))
    feature_all = np.hstack((feature_all, feature_all_cnn))
    if print_log:
        print(psd.shape, feature_all.shape)
    no_fea = feature_all.shape[-1]

    # feature_all =feature_all.reshape([28000,1,no_fea])
    if print_log:
        print("all features")
        print(feature_all.shape)
    # middle_number=21000
    feature_training = feature_all
    feature_testing = feature_all
    label_training = label_all
    label_testing = label_all
    # print "label_testing",label_testing
    a = feature_training
    b = feature_testing

    train_fea = feature_all

    # number of batches per epoch
    group = 1
    display_step = 10
    # an epoch is one full pass over the training samples
    training_epochs = 400

    # Network Parameters
    n_hidden_1 = 800  # 1st layer num features, should be a multiple of 8

    n_hidden_2 = 100

    n_input_ae = no_fea  # number of input features to the autoencoder
    # tf Graph input (only pictures)
    X = tf.placeholder("float", [None, n_input_ae])
    if print_log:
        print("X")
        print(X)

    weights = {
        'encoder_h1': tf.Variable(tf.random_normal([n_input_ae, n_hidden_1])),
        'encoder_h2':
        tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),  #NOT USED !!!
        'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
        'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input_ae])),
    }
    biases = {
        'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'decoder_b2': tf.Variable(tf.random_normal([n_input_ae])),
    }

    # Building the encoder
    def encoder(x):
        # Encoder hidden layer with sigmoid activation;
        # sigmoid outputs values in the range (0, 1)
        layer_1 = tf.nn.sigmoid(
            tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
        return layer_1

    # Building the decoder
    def decoder(x):
        # Decoder hidden layer with sigmoid activation
        layer_1 = tf.nn.sigmoid(
            tf.add(tf.matmul(x, weights['decoder_h2']), biases['decoder_b2']))
        return layer_1

    for ll in range(1):
        learning_rate = 0.2
        for ee in range(1):
            # Construct model
            encoder_op = encoder(X)
            if print_log:
                print("Encoder")
                print(encoder_op)
            decoder_op = decoder(encoder_op)
            # Prediction
            y_pred = decoder_op
            # Targets (Labels) are the input data, as the auto encoder tries to make output as similar as possible to the input.
            y_true = X

            # Define loss and optimizer, minimize the squared error
            cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
            # cost = tf.reduce_mean(tf.pow(y_true, y_pred))
            optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

            # Initializing the variables
            init = tf.global_variables_initializer()

            # Launch the graph
            # saves and restore variables
            saver = tf.train.Saver()
            with tf.Session() as sess1:
                sess1.run(init)
                saver = tf.train.Saver()
                # Training cycle
                for epoch in range(training_epochs):
                    # Loop over all batches
                    for i in range(group):
                        # Run optimization op (backprop) and cost op (to get loss value)
                        _, c = sess1.run([optimizer, cost], feed_dict={X: a})
                    # Display logs per epoch step
                    if epoch % display_step == 0:
                        if print_log:
                            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                                  "{:.9f}".format(c))
                if print_log:
                    print("Optimization Finished!")
                a = sess1.run(encoder_op, feed_dict={X: a})
                b = sess1.run(encoder_op, feed_dict={X: b})
    return a, label_testing
Example #17
def FitGrowth(time, cell_count, window_size, start_threshold=0.01, plot_figure=False):
    """Compute growth rate.
    
    Args:
        time: list of data point time measurements (whatever time units you like).
        cell_count: list of cell counts at each time point.
        window_size: the size of the time window (same time units as above).
        start_threshold: minimum cell count to consider.
        plot_figure: whether or not to plot.
    
    Returns:
        growth rate in 1/(time unit) where "time unit" is the unit used above.
    """
    
    def get_frame_range(times, mid_frame, window_size):
        T = times[mid_frame]
        i_range = []
        for i in range(1, len(times)):
            if (times[i-1] > T - window_size/2.0 and times[i] < T + window_size/2.0):
                i_range.append(i)

        if (len(i_range) < 2): # there are not enough frames to get a good estimation
            raise ValueError()
        return i_range

    N = len(cell_count)
    #if (N < window_size):
    #    raise Exception("The measurement time-series is too short (smaller than the windows-size)")

    t_mat = pylab.matrix(time).T
    
    # normalize the cell_count data by its minimum
    count_matrix = pylab.matrix(cell_count).T
    norm_counts = count_matrix - min(cell_count)
    c_mat = pylab.matrix(norm_counts)
    if c_mat[-1, 0] == 0:
        c_mat[-1, 0] = c_mat[c_mat > 0].min()  # pylab.find was removed; boolean indexing is equivalent

    for i in pylab.arange(N-1, 0, -1):
        if c_mat[i-1, 0] <= 0:
            c_mat[i-1, 0] = c_mat[i, 0]

    c_mat = pylab.log(c_mat)
    
    res_mat = pylab.zeros((N, 4)) # columns are: slope, offset, error, avg_value
    for i in range(N):
        try:
            # calculate the indices covered by the window
            i_range = get_frame_range(time, i, window_size)
            x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
            y = c_mat[i_range, 0]
            if min(pylab.exp(y)) < start_threshold: # the measurements are still too low to use (because of noise)
                raise ValueError()
            (a, residues) = pylab.lstsq(x, y)[0:2]
            res_mat[i, 0] = a[0]
            res_mat[i, 1] = a[1]
            res_mat[i, 2] = residues
            res_mat[i, 3] = pylab.mean(count_matrix[i_range,0])
        except ValueError:
            pass

    max_i = res_mat[:,0].argmax()
    
    abs_res_mat = pylab.array(res_mat)
    abs_res_mat[:,0] = pylab.absolute(res_mat[:,0])
    order = abs_res_mat[:,0].argsort(axis=0)
    stationary_indices = pylab.array([x for x in order if x >= max_i])  # filter() returns an iterator in Python 3
    stationary_level = res_mat[stationary_indices[0], 3]
    
    if plot_figure:
        # pylab.hold was removed from matplotlib; overplotting is now the default
        pylab.plot(time, norm_counts)
        pylab.plot(time, res_mat[:,0])
        pylab.plot([0, time.max()], [start_threshold, start_threshold], 'r--')
        i_range = get_frame_range(time, max_i, window_size)
        
        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = x * pylab.matrix(res_mat[max_i, 0:2]).T
        pylab.plot(x[:,0], pylab.exp(y), 'k:', linewidth=4)
                
        pylab.plot([0, max(time)], [stationary_level, stationary_level])
        
        pylab.yscale('log')
        pylab.legend(['OD', 'growth rate', 'threshold', 'fit', 'stationary'])
    
    return res_mat[max_i, 0], stationary_level
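
A minimal usage sketch on synthetic exponential-growth data (assumes the same pylab environment the function uses; all values are illustrative):

import numpy as np

t = np.linspace(0.0, 10.0, 50)     # time in hours
counts = 0.05 * np.exp(0.6 * t)    # exponential growth at about 0.6 per hour
rate, stationary_level = FitGrowth(t, counts, window_size=2.0)
print('growth rate: %.2f per hour' % rate)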
Example #18
def display(temp1,temp2):
    import matplotlib.pyplot as plt
    data = temp1.copy()
    traj = temp2.copy()
    MMSIs = list(set(data['mmsi1']))
    if len(MMSIs) == 2:
        mmsi1 = MMSIs[0]
        mmsi2 = MMSIs[1]
        t1min,t1max,sev1 = minmaxT(data,mmsi1)
        t2min,t2max,sev2 = minmaxT(data,mmsi2)
    else:
        mmsi1 = data['mmsi1'].unique()[0]
        mmsi2 = data['mmsi2'].unique()[0]
        time = data['time']
        t1min = time.min()
        t1max = time.max()
        sev1 = data['sev1']
        tem_mmsi21 = data[['time','sev2']]  # 'temp' was undefined; use the copied input 'data'
        tem_mmsi22 = tem_mmsi21.loc[(tem_mmsi21['sev2']>0)]
        sev2 = tem_mmsi22['sev2']
        t2min = tem_mmsi22['time'].min()
        t2max = tem_mmsi22['time'].max()
    delta_T = 120  # extend the trajectory by 120 s before and after
    dd  = traj_mmsi(traj,mmsi2,t2min,t2max)
    print(mmsi1,mmsi2)
    if dd.shape[0] >= 1:
        tmin = min(t1min,t2min)
        tmax = min(t1max,t2max)
        tem_traj1 = traj_mmsi_new(traj,mmsi1,tmin-delta_T,tmax+delta_T)
        tem_traj2 = traj_mmsi_new(traj,mmsi2,tmin-delta_T,tmax+delta_T)
        tem1  = traj_mmsi(tem_traj1,mmsi1,t1min,t1max)
        tem2  = traj_mmsi(tem_traj2,mmsi2,t2min,t2max)
        tem11  = traj_mmsi(tem_traj1,mmsi1,tmin-delta_T,t1min-2)
        tem21  = traj_mmsi(tem_traj2,mmsi2,tmin-delta_T,t2min-2)
        tem12  = traj_mmsi(tem_traj1,mmsi1,t1max+2,tmax+delta_T)
        tem22  = traj_mmsi(tem_traj2,mmsi2,t2max+2,tmax+delta_T)
    else:
        tmin = t1min
        tmax = t1max
        tem_traj1 = traj_mmsi_new(traj,mmsi1,tmin-delta_T,tmax+delta_T)
        tem_traj2 = traj_mmsi_new(traj,mmsi2,tmin-delta_T,tmax+delta_T)
        tem1  = traj_mmsi(tem_traj1,mmsi1,t1min,t1max)
        tem2  = traj_mmsi(tem_traj2,mmsi2,t2min,t2max)
        tem11  = traj_mmsi(tem_traj1,mmsi1,tmin-delta_T,t1min-2)
        tem21  = traj_mmsi(tem_traj2,mmsi2,tmin-delta_T,t1min-2)
        tem12  = traj_mmsi(tem_traj1,mmsi1,t1max+2,tmax+delta_T)
        tem22  = traj_mmsi(tem_traj2,mmsi2,t1min,tmax+delta_T)
#        print()
        

    MMSI1 = str(mmsi1)
    MMSI2 = str(mmsi2)
    font1 = {'family' : 'Times New Roman',
             'weight' : 'normal',
             'size'   : 16,
             }
#    try:
    X1,x1 = Time2StrTime(t1min,t1max)
    X2,x2 = Time2StrTime(t2min,t2max)
    T1 = x1[np.argmax(sev1.values)]
    t1 = X1[np.argmax(sev1.values)]
    T11 = x1[np.argmax(sev1.values)+1]  # reference-line time
    T2 = x2[np.argmax(sev2.values)]
    t2 = X2[np.argmax(sev2.values)]
    T21 = x2[np.argmax(sev2.values)+1]
    X3,x3 = Time2StrTime(min(t1min,t2min),max(t1max,t2max))
    tempn = data.loc[(data['time']>=min(t1min,t2min))&(data['time']<=max(t1max,t2max))]
    RD = tempn['RD(m)']
    T3 = x3[np.argmin(RD.values)]
    t3 = X3[np.argmin(RD.values)]
    T31 = x3[np.argmin(RD.values)+1]
    ####

    figsize = 5,4
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    sValue = 16
    plt.scatter(tem1['lon'],tem1['lat'],marker='x',s=sValue,norm=0.07,alpha=0.8,color='#0066CC')
    plt.scatter(tem2['lon'],tem2['lat'],marker='+',s=sValue,norm=0.07,alpha=1,color='#FF9966')
    ax.legend((MMSI1[0:5]+'****',MMSI2[0:5]+'****'),loc='lower left',prop=font1,ncol=2, bbox_to_anchor=(0,1.02,1,0.2),mode='expand')
    plt.scatter(tem11['lon'],tem11['lat'],color='#99CC33',marker='.',s=sValue,norm=0.02,alpha=1)
    plt.scatter(tem12['lon'],tem12['lat'],color='#99CC33',marker='.',s=sValue,norm=0.02,alpha=1)
    plt.scatter(tem21['lon'],tem21['lat'],color='#99CC33',marker='.',s=sValue,norm=0.02,alpha=1)
    plt.scatter(tem22['lon'],tem22['lat'],color='#99CC33',marker='.',s=sValue,norm=0.02,alpha=1)
    lon11,lat11 = lonlat(traj,mmsi1,t1)
    lon21,lat21 = lonlat(traj,mmsi2,t1)
    ax.scatter(lon11,lat11,s=sValue,c='k',marker='o')
    ax.text(lon11+0.0005,lat11-0.0005,'T1',fontsize=16,fontfamily='Times New Roman',weight='normal')
    ax.scatter(lon21,lat21,s=sValue,c='k',marker='o')
    ax.text(lon21-0.0025,lat21-0.0015,'T1',fontsize=16,fontfamily='Times New Roman',weight='normal')
    lon13,lat13 = lonlat(traj,mmsi1,t3)
    lon23,lat23 = lonlat(traj,mmsi2,t3)
    ax.scatter(lon13,lat13,s=sValue,c='k',marker='o')
    ax.text(lon13,lat13+0.0003,'T2',fontsize=16,fontfamily='Times New Roman',weight='normal')
    ax.scatter(lon23,lat23,s=sValue,c='k',marker='o')
    ax.text(lon23-0.0026,lat23-0.0019,'T2',fontsize=16,fontfamily='Times New Roman',weight='normal')      
    labels = ax.get_xticklabels() + ax.get_yticklabels()
    [label.set_fontname('Times New Roman') for label in labels]
    ax.set_xlabel('Longitude [°/E]',fontsize=17,fontname = 'Times New Roman')
    ax.set_ylabel('Latitude [°/N]',fontsize=17,fontname = 'Times New Roman') 
    plt.tick_params(labelsize=16)
    plt.grid(linestyle='--')
    plt.show()

    figsize = 5,4
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    pt1 = ax.plot(x1,sev1,ls='--',c='#0066CC',label=MMSI1[0:5]+'****')
    pt2 = ax.plot(x2,sev2,ls='--',c='#99CC33',label=MMSI2[0:5]+'****')
    plt.grid(linestyle='--')
    ax.axvline(x=T1,color='#FF3333',ls=':')
    ax.text(T11,0.22,'T1',fontsize=16,fontfamily='Times New Roman',weight='normal')
    print('T1: '+str(T1))
    labels = ax.get_xticklabels() + ax.get_yticklabels()
    [label.set_fontname('Times New Roman') for label in labels]
    ax.set_xlabel('Time',fontsize=17,fontname = 'Times New Roman')
    ax.set_ylabel('Conflict severity',fontsize=17,fontname = 'Times New Roman')
    
    ax1 = ax.twinx()
    pt3 = ax1.plot(x3,RD,color='#FF9966',linestyle='-.',linewidth=1.5,label='RD')
    ax1.axvline(x=T3,color='#FF3333',ls=':')
    ax.text(T31,0.22,'T2',fontsize=16,fontfamily='Times New Roman',weight='normal')
    print('T3: '+str(T3))
    ax1.set_ylabel('Relative distance [m]',fontsize=17,fontname = 'Times New Roman') 
    pts = pt1 + pt2
    labs = [pt.get_label() for pt in pts]
    ax.legend(pts,labs,loc='lower left',prop=font1,ncol=2, bbox_to_anchor=(0,1.02,1,0.2),mode='expand')
    ax1.legend(loc='upper right',prop=font1)
    labels = ax1.get_xticklabels() + ax1.get_yticklabels()
    [label.set_fontname('Times New Roman') for label in labels]
    ax.tick_params(labelsize=16)
    ax1.tick_params(labelsize=16)
    plt.show()
    
    figsize = 5,4
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(111)
    ln1 = ax.plot(x3,tempn['DCPA(m)'],ls='--',c='#0066CC',label = 'DCPA')
    ax.set_xlabel('Time',fontsize=17,fontname = 'Times New Roman')
    ax.set_ylabel('DCPA [m]',fontsize=17,fontname = 'Times New Roman')
    labels = ax.get_xticklabels() + ax.get_yticklabels()
    [label.set_fontname('Times New Roman') for label in labels]
    plt.grid(linestyle='--')
    ax1 = ax.twinx()
    ln2 = ax1.plot(x3,tempn['TCPA(s)'],ls='--',c='#99CC33',label = 'TCPA')
    lns = ln1 + ln2
    labs = [ln.get_label() for ln in lns]
    ax.legend(lns,labs,loc='lower left',prop=font1,ncol=2, bbox_to_anchor=(0,1.02,1,0.2),mode='expand')
    ax1.set_ylabel('TCPA [s]',fontsize=17,fontname = 'Times New Roman') 
    labels = ax1.get_xticklabels() + ax1.get_yticklabels()
    [label.set_fontname('Times New Roman') for label in labels]
    ax.tick_params(labelsize=16)
    ax1.tick_params(labelsize=16)
    ax1.axvline(x=T1,color='#FF3333',ls=':')
    ax1.text(T11,8,'T1',fontsize=16,fontfamily='Times New Roman',weight='normal')
    ax1.axvline(x=T3,color='#FF3333',ls=':')
    T32 = x3[np.argmin(RD.values)+1]
    ax1.text(T32,8,'T2',fontsize=16,fontfamily='Times New Roman',weight='normal')
    ax1.axhline(y=0,color='#FF3333',ls=':')
    plt.show()
#    except:
#        print('ValueError: arange: cannot compute length')

    return sev1
Example #19
def FitGrowth(time,
              cell_count,
              window_size,
              start_threshold=0.01,
              plot_figure=False):
    """Compute growth rate.
    
    Args:
        time: list of data point time measurements (whatever time units you like).
        cell_count: list of cell counts at each time point.
        window_size: the size of the time window (same time units as above).
        start_threshold: minimum cell count to consider.
        plot_figure: whether or not to plot.
    
    Returns:
        growth rate in 1/(time unit) where "time unit" is the unit used above.
    """
    def get_frame_range(times, mid_frame, window_size):
        T = times[mid_frame]
        i_range = []
        for i in range(1, len(times)):
            if (times[i - 1] > T - window_size / 2.0
                    and times[i] < T + window_size / 2.0):
                i_range.append(i)

        if (len(i_range) <
                2):  # there are not enough frames to get a good estimation
            raise ValueError()
        return i_range

    N = len(cell_count)
    if (N < window_size):
        raise Exception(
            "The measurement time-series is too short (smaller than the windows-size)"
        )

    t_mat = pylab.matrix(time).T

    # normalize the cell_count data by its minimum
    c_mat = pylab.matrix(cell_count).T - min(cell_count)
    if c_mat[-1, 0] == 0:
        c_mat[-1, 0] = c_mat[c_mat > 0].min()  # pylab.find was removed; boolean indexing is equivalent

    for i in pylab.arange(N - 1, 0, -1):
        if c_mat[i - 1, 0] <= 0:
            c_mat[i - 1, 0] = c_mat[i, 0]

    c_mat = pylab.log(c_mat)

    res_mat = pylab.zeros((N, 3))  # columns are: slope, offset, error
    for i in range(N):
        try:
            # calculate the indices covered by the window
            i_range = get_frame_range(time, i, window_size)
            x = pylab.hstack(
                [t_mat[i_range, 0],
                 pylab.ones((len(i_range), 1))])
            y = c_mat[i_range, 0]
            if min(
                    pylab.exp(y)
            ) < start_threshold:  # the measurements are still too low to use (because of noise)
                raise ValueError()
            (a, residues) = pylab.lstsq(x, y)[0:2]
            res_mat[i, 0] = a[0]
            res_mat[i, 1] = a[1]
            res_mat[i, 2] = residues
        except ValueError:
            pass

    max_i = res_mat[:, 0].argmax()

    if plot_figure:
        # pylab.hold was removed from matplotlib; overplotting is now the default
        pylab.plot(time, cell_count - min(cell_count))
        pylab.plot(time, res_mat[:, 0])
        pylab.plot([0, time.max()], [start_threshold, start_threshold], 'r--')
        i_range = get_frame_range(time, max_i, window_size)

        x = pylab.hstack([t_mat[i_range, 0], pylab.ones((len(i_range), 1))])
        y = x * pylab.matrix(res_mat[max_i, 0:2]).T

        pylab.plot(x[:, 0], pylab.exp(y), 'k:', linewidth=4)
        #plot(time, errors / errors.max())
        pylab.yscale('log')
        #legend(['OD', 'growth rate', 'error'])
        pylab.legend(['OD', 'growth rate', 'threshold', 'fit'])

    return res_mat[max_i, 0]
Example #20
def spread(infile, entries):
    '''
    Reads a number of entries and outputs the mean and std of the PDS and cross-spectra.
    infile is the input .zarr file (passed without the extension);
    entries is the list/array of entry names.
    '''
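    # Assumed context: star imports from numpy/pylab (size, double, arange,
    # sqrt, copy), the zarr package, a module-level `epicyclic` flag, and a
    # custom fourier class with define/FT/crossT/addFT/addcrossT/normalize.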
    nl = size(entries)
    print("spread: nl = " + str(nl))
    hfile = zarr.open(infile + ".zarr", "r")
    glo = hfile["globals"]
    time = glo["time"][:]

    nt = size(time)
    dt = (time.max() - time.min()) / double(nt)

    mdotsp = fourier()
    msp = fourier()
    lsp = fourier()
    osp = fourier()
    #   mdotsp_std = fourier() ; msp_std = fourier() ; lsp_std = fourier() ; osp_std = fourier()
    mdotsp.define(nt, dt)
    msp.define(nt, dt)
    lsp.define(nt, dt)
    osp.define(nt, dt)
    #  mdotsp_std.define(nt, dt) ; msp_std.define(nt, dt)    ;    lsp_std.define(nt, dt)   ;  osp_std.define(nt, dt)
    for k in arange(nl):
        data = hfile[entries[k]]
        if k == 0:
            vals = data.keys()
            print(vals)
        L = data['L'][:]
        M = data['M'][:]
        mdot = data['mdot'][:]
        omega = data['omega'][:]
        if epicyclic:
            omega = 2. * omega * (1. + 0.1 * sqrt(copy(L)))
        #       mdot = log(mdot) ; L = log(L) # !!! temporary!!1
        #        if alias > 1:
        #            L = L[::alias] ; M = M[::alias] ; mdot = mdot[::alias] ; omega = omega[::alias]
        if k == 0:
            mdotsp.FT(mdot)
            msp.FT(M)
            msp.crossT(mdotsp.tilde)
            lsp.FT(L)
            lsp.crossT(mdotsp.tilde)
            osp.FT(omega)
            osp.crossT(mdotsp.tilde)
            mdotsp.crossT(
                lsp.tilde, lc_fft=osp.tilde
            )  # mdotsp now also contains correlation between L and Omega!
        else:
            mdot_fft = mdotsp.addFT(mdot)
            m_fft = msp.addFT(M)
            msp.addcrossT(mdot_fft, m_fft)
            l_fft = lsp.addFT(L)
            lsp.addcrossT(mdot_fft, l_fft)
            o_fft = osp.addFT(omega)
            osp.addcrossT(mdot_fft, o_fft)
            mdotsp.addcrossT(l_fft, o_fft)
            # print('max f(mdot) = '+str(abs(mdot_fft).max()))
    mdotsp.normalize(nl)
    msp.normalize(nl)
    lsp.normalize(nl)
    osp.normalize(nl)

    return mdotsp, msp, lsp, osp
Example #21
def reloc(tid, I, relocation_traces, svd, test_points, plot=False):
    """
    reloc(tid, I, relocation_traces, plot=False) \n
    Relocation function: computes the modified composite network response on
    the stack of the sliding kurtosis.
    """
    moveoutsP = MV.p_relative_samp[:, I]
    moveoutsS = MV.s_relative_samp[:, I]
    window = np.int32(0.1 * 60. * autodet.cfg.sampling_rate)  # 6 s window, in samples
    buffer_length = int(window / 10)
    ##-----------------------------
    composite, where = autodet.clib.network_response_SP(np.mean(relocation_traces[I,:-1,:], axis=1), \
                                                                relocation_traces[I,-1,:], \
                                                                moveoutsP, \
                                                                moveoutsS, \
                                                                10, \
                                                                device=device, \
                                                                test_points=test_points)
    composite /= np.float32(I.size)
    smoothed = gaussian_filter1d(composite,
                                 np.int32(0.2 * autodet.cfg.sampling_rate))
    base = np.hstack(
        (baseline(composite[:window],
                  int(window / 20)), baseline(composite[window:], window)))
    comp_baseline = composite - base
    detection_trace = smoothed
    Q = detection_trace.max() / np.sqrt(
        np.var(detection_trace))  # quality factor of the relocation
    T0 = detection_trace[buffer_length:].argmax() + buffer_length
    mv_min = moveoutsP[where[T0], :].min()
    I2 = I[:12]
    #==================================================
    moveoutsP = MV.p_relative_samp[:, I2]
    moveoutsS = MV.s_relative_samp[:, I2]
    ##-----------------------------
    composite2, where2 = autodet.clib.network_response_SP(np.mean(relocation_traces[I2,:-1,:], axis=1), \
                                                                  relocation_traces[I2,-1,:], \
                                                                  moveoutsP, \
                                                                  moveoutsS, \
                                                                  10, \
                                                                  device=device, \
                                                                  test_points=test_points)
    composite2 /= np.float32(I2.size)
    smoothed2 = gaussian_filter1d(composite2,
                                  np.int32(0.2 * autodet.cfg.sampling_rate))
    base2 = np.hstack(
        (baseline(composite2[:window],
                  int(window / 20)), baseline(composite2[window:], window)))
    comp_baseline2 = composite2 - base2
    detection_trace = smoothed2
    Q2 = detection_trace.max() / np.sqrt(
        np.var(detection_trace))  # quality factor of the relocation
    print(
        "Reloc with 20 stations: Q={:.2f} (NR max={:.2f}) / with 12 stations: Q={:.2f} (NR max={:.2f})"
        .format(Q, smoothed.max(), Q2, smoothed2.max()))
    T0 = detection_trace[buffer_length:].argmax() + buffer_length
    mv_min2 = moveoutsP[where2[T0], :].min()
    if Q2 > Q:
        Q = Q2
        composite = composite2
        smoothed = smoothed2
        base = base2
        comp_baseline = comp_baseline2
        where = where2
        mv_min = mv_min2
        I = I2
    #detection_trace = comp_baseline
    detection_trace = smoothed
    #==================================================
    T0 = detection_trace[buffer_length:].argmax() + buffer_length
    source_index = where[T0]
    mvP = moveoutsP[source_index, :]
    mvS = moveoutsS[source_index, :]
    # useful information to determine absolute origin times:
    t_min = MV.p_relative[source_index, I].min()
    I = I2  # take only the 12 best stations to create the template
    #==================================================
    n_samples = composite.size
    #---------------------
    rho, mean_delta_r = get_delta_r(composite, where)
    #---------------------
    if plot:
        plt.figure('NR_tp{:d}'.format(tid), figsize=figsize)
        plt.plot(composite, label='CNR')
        plt.plot(smoothed, label='Smoothed CNR')
        plt.plot(comp_baseline, label='CNR without baseline')
        plt.plot(base, ls='--', label='Baseline approximation')
        plt.axvline(T0, color='k')
        plt.legend(loc='upper right', fancybox=True)
        plt.figure('reloc_tp{:d}'.format(tid), figsize=figsize)
        time = np.arange(0., svd[0].data.size / autodet.cfg.sampling_rate,
                         1. / autodet.cfg.sampling_rate)
        for s in range(I.size):
            for c in range(nc):
                plt.subplot(I.size, nc, s * nc + c + 1)
                svd_data = svd.select(station=svd.stations[I[s]])[c].data
                plt.plot(time,
                         svd_data / svd_data.max() * K[I[s], c, :].max(),
                         lw=0.5,
                         label='{}.{}\n{:.2f}'.format(svd.stations[I[s]],
                                                      net.components[c],
                                                      SNR[I[s]]))
                plt.plot(time, K[I[s], c, :])
                plt.axvline((T0 + mvP[s] - mv_min) / autodet.cfg.sampling_rate,
                            lw=1,
                            color='k')
                plt.axvline((T0 + mvS[s] - mv_min) / autodet.cfg.sampling_rate,
                            lw=1,
                            color='r')
                if s != I.size - 1:
                    plt.xticks([])
                else:
                    plt.xlabel('Time (s)')
                plt.xlim(time.min(), time.max())
                plt.legend(loc='upper right',
                           fancybox=True,
                           framealpha=0.7,
                           handlelength=0.1)
        plt.subplots_adjust(top=0.95,
                            bottom=0.05,
                            left=0.02,
                            right=0.98,
                            hspace=0.0,
                            wspace=0.1)
        plot_NR_3D(composite, where)
        plt.show()
    #-----------------------------------------------------------------------
    #                 attach SNR
    n_samples = np.int32(autodet.cfg.template_len * autodet.cfg.sampling_rate)
    n_stations = MV.s_relative.shape[-1]
    SNR_ = np.zeros(n_stations, dtype=np.float32)
    for s in range(n_stations):
        for c in range(nc):
            trace_sc = svd.select(station=net.stations[s])[c].data
            var = np.var(trace_sc)
            if var != 0.:
                id1_P = T0 + MV.p_relative_samp[source_index,
                                                s] - mv_min - n_samples // 2
                id1_S = T0 + MV.s_relative_samp[source_index,
                                                s] - mv_min - n_samples // 2
                if np.isnan(np.var(trace_sc[id1_S:id1_S + n_samples])):
                    continue
                if id1_S + n_samples > trace_sc.size:
                    continue
                snr = 0.
                snr += np.var(trace_sc[id1_P:id1_P + n_samples]) / var
                snr += np.var(trace_sc[id1_S:id1_S + n_samples]) / var
                SNR_[s] += snr
    #-----------------------------------------------------------------------
    metadata = {}
    metadata.update({'latitude': np.float32(MV.latitude[source_index])})
    metadata.update({'longitude': np.float32(MV.longitude[source_index])})
    metadata.update({'depth': np.float32(MV.depth[source_index])})
    metadata.update({'source_idx': np.int32(source_index)})
    metadata.update({'template_idx': np.int32(tid)})
    metadata.update({'Q_loc': np.float32(Q)})
    metadata.update({'loc_uncertainty': np.float32(mean_delta_r)})
    metadata.update({'peak_NR': np.float32(smoothed.max())})
    metadata.update(
        {'p_moveouts': np.float32(mvP - mv_min) / autodet.cfg.sampling_rate})
    metadata.update(
        {'s_moveouts': np.float32(mvS - mv_min) / autodet.cfg.sampling_rate})
    metadata.update({
        'duration':
        np.int32(autodet.cfg.template_len * autodet.cfg.sampling_rate)
    })
    metadata.update({'sampling_rate': np.float32(autodet.cfg.sampling_rate)})
    metadata.update({'stations': np.asarray(net.stations)[I]})
    metadata.update({'channels': np.asarray(['HHN', 'HHE', 'HHZ'])})
    metadata.update({'reference_absolute_time': np.float32(t_min)})
    metadata.update({
        'travel_times':
        np.hstack((MV.p_relative[source_index, :].reshape(-1, 1),
                   MV.s_relative[source_index, :].reshape(-1, 1)))
    })
    metadata.update({'SNR': SNR_})  # per-station SNR computed in the block above
    T = autodet.db_h5py.initialize_template(metadata)
    waveforms = np.zeros((I.size, nc, metadata['duration']), dtype=np.float32)
    # ----------------------------------------------------
    # define when the time windows start before the P/S wave
    time_before_S = np.int32(metadata['duration'] / 2)
    time_before_P = np.int32(1. * autodet.cfg.sampling_rate)
    # ----------------------------------------------------
    size_stack = svd[0].data.size
    for s in range(I.size):
        for c in range(nc):
            data = svd.select(station=svd.stations[I[s]])[c].data
            MAX = np.abs(data).max()
            if MAX != 0.:
                data /= MAX
            if c < 2:
                id1 = T0 + mvS[s] - mv_min - time_before_S
            else:
                # take only 1sec before the P-wave arrival
                id1 = T0 + mvP[s] - mv_min - time_before_P
            id2 = id1 + metadata['duration']
            if id1 > size_stack:
                continue
            elif id2 > size_stack:
                DN = id2 - size_stack
                waveforms[s, c, :] = np.hstack(
                    (data[id1:], np.zeros(DN, dtype=np.float32)))
            elif id1 < 0:
                DN = 0 - id1
                waveforms[s, c, :] = np.hstack(
                    (np.zeros(DN, dtype=np.float32), data[:id2]))
            else:
                waveforms[s, c, :] = data[id1:id2]
            T.select(station=svd.stations[I[s]])[c].data = waveforms[s, c, :]
    T.waveforms = waveforms
    # =========================================================
    # shift the moveouts so that they point to the beginning of the windows
    # (the moveouts and the reference time were stored in seconds above,
    #  whereas time_before_P/S are sample counts, hence the conversion)
    sr = metadata['sampling_rate']
    T.metadata['p_moveouts'] -= time_before_P / sr
    T.metadata['s_moveouts'] -= time_before_S / sr
    # make all our moveouts positive:
    T.metadata['p_moveouts'] += time_before_S / sr
    T.metadata['s_moveouts'] += time_before_S / sr
    # this last operation changes the reference time:
    T.metadata['reference_absolute_time'] -= time_before_S / sr
    # =========================================================
    T.metadata['stations'] = T.metadata['stations'].astype('S')
    T.metadata['channels'] = T.metadata['channels'].astype('S')
    return T, composite, where
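The "quality factor" Q used above is simply the peak of the smoothed network response measured in units of its standard deviation; a self-contained sketch of that metric (names hypothetical):

import numpy as np

def quality_factor(detection_trace):
    # peak amplitude in standard deviations: higher Q = sharper, better-constrained relocation
    return detection_trace.max() / np.sqrt(np.var(detection_trace))

trace = np.random.randn(1000)
trace[500] += 20.0  # a clear, isolated detection peak
print(quality_factor(trace))  # large Q for a sharp peak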
Example #22
def visualize(vt, true_y, pred_y, odefunc, itr, loss):

    if args.viz:

        ax_traj.cla()
        ax_traj.set_title('Trajectories')
        ax_traj.set_xlabel('t')
        ax_traj.set_ylabel('x')
        for i in range(len(indexv)):
            if i == 0:
                ax_traj.plot(vt[i].numpy(),
                             pred_y.numpy()[:indexv[i] + 1, 0, 0], 'r--',
                             vt[i].numpy(),
                             true_y.numpy()[:indexv[i] + 1, 0, 0], 'k-')
            else:
                ax_traj.plot(
                    vt[i].numpy(),
                    pred_y.numpy()[indexv[i - 1] + 1:indexv[i] + 1, 0,
                                   0], 'r--', vt[i].numpy(),
                    true_y.numpy()[indexv[i - 1] + 1:indexv[i] + 1, 0, 0],
                    'k-')
        ax_traj.legend()

        ay_traj.cla()
        ay_traj.set_title('Trajectories')
        ay_traj.set_xlabel('t')
        ay_traj.set_ylabel('y')
        for i in range(len(indexv)):
            if i == 0:
                ay_traj.plot(vt[i].numpy(),
                             pred_y.numpy()[:indexv[i] + 1, 0, 1], 'r--',
                             vt[i].numpy(),
                             true_y.numpy()[:indexv[i] + 1, 0, 1], 'k-')
            else:
                ay_traj.plot(
                    vt[i].numpy(),
                    pred_y.numpy()[indexv[i - 1] + 1:indexv[i] + 1, 0,
                                   1], 'r--', vt[i].numpy(),
                    true_y.numpy()[indexv[i - 1] + 1:indexv[i] + 1, 0, 1],
                    'k-')
        ay_traj.legend()

        if args.variable_nums == 3:
            # NOTE: this branch plots the full trajectories against a module-level
            # `time` array, rather than the piecewise `vt` segments used above
            ay_traj.cla()
            ay_traj.set_title('Trajectories')
            ay_traj.set_xlabel('t')
            ay_traj.set_ylabel('y')
            ay_traj.plot(time.numpy(),
                         pred_y.numpy()[:, 0, 1], 'r--', time.numpy(),
                         true_y.numpy()[:, 0, 1], 'k-')
            ay_traj.set_xlim(time.min(), time.max())
            # ax_traj.set_ylim(-2, 2)
            ay_traj.legend()
            az_traj.cla()
            az_traj.set_title('Trajectories')
            az_traj.set_xlabel('t')
            az_traj.set_ylabel('z')
            az_traj.plot(time.numpy(),
                         pred_y.numpy()[:, 0, 2], 'r--', time.numpy(),
                         true_y.numpy()[:, 0, 2], 'k-')
            az_traj.set_xlim(time.min(), time.max())
            # ax_traj.set_ylim(-2, 2)
            az_traj.legend()

            axz_phase.cla()
            axz_phase.set_title('Phase Portrait')
            axz_phase.set_xlabel('x')
            axz_phase.set_ylabel('z')
            axz_phase.plot(true_y.numpy()[:, 0, 0],
                           true_y.numpy()[:, 0, 2], 'r--')
            axz_phase.plot(pred_y.numpy()[:, 0, 0],
                           pred_y.numpy()[:, 0, 2], 'k-')

            ayz_phase.cla()
            ayz_phase.set_title('Phase Portrait')
            ayz_phase.set_xlabel('y')
            ayz_phase.set_ylabel('z')
            ayz_phase.plot(true_y.numpy()[:, 0, 1],
                           true_y.numpy()[:, 0, 2], 'r--')
            ayz_phase.plot(pred_y.numpy()[:, 0, 1],
                           pred_y.numpy()[:, 0, 2], 'k-')

        fig.tight_layout()
        plt.savefig('png{}/{}.jpg'.format(args.CASE, itr))
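The piecewise plotting above relies on indexv holding the last sample index of each time segment; a small sketch of that slicing convention (indexv and the data below are made up):

import numpy as np

y = np.arange(10)
indexv = [3, 6, 9]  # last index of each segment
segments = [y[:indexv[0] + 1]] + [
    y[indexv[i - 1] + 1:indexv[i] + 1] for i in range(1, len(indexv))
]
print(segments)  # [0..3], [4..6], [7..9]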
Example #23
def time_normalize(time):
    # rescale the time axis so that it spans [0, 3e6)
    max_t = time.max() + 1
    time = time * 3e6 / max_t
    return time
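A quick usage example for time_normalize (the input array is made up):

import numpy as np

t = np.array([0.0, 10.0, 99.0])
print(time_normalize(t))  # values rescaled into [0, 3e6)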
Example #24
    def fit_growth(time,
                   cell_count,
                   window_size,
                   start_threshold=0.01,
                   plot_figure=False):
        def get_frame_range(times, mid_frame, window_size):
            T = times[mid_frame]
            i_range = []
            for i in range(1, len(times)):
                if (times[i - 1] > T - window_size / 2.0
                        and times[i] < T + window_size / 2.0):
                    i_range.append(i)

            if len(i_range) < 2:
                # there are not enough frames to get a good estimation
                raise ValueError()
            return i_range

        N = len(cell_count)
        if N < window_size:
            raise Exception(
                "The measurement time-series is too short (fewer samples than the window size)"
            )

        # get the window-size in samples
        t_mat = np.matrix(time).T
        c_mat = np.matrix(cell_count).T - min(cell_count)
        if (c_mat[-1, 0] == 0):
            c_mat[-1, 0] = min(c_mat[c_mat > 0])

        for i in np.arange(N - 1, 0, -1):
            if (c_mat[i - 1, 0] <= 0):
                c_mat[i - 1, 0] = c_mat[i, 0]

        c_mat = np.log(c_mat)

        res_mat = np.zeros((N, 3))  # columns are: slope, offset, error
        for i in range(N):
            try:
                # calculate the indices covered by the window
                i_range = get_frame_range(time, i, window_size)
                x = np.hstack([t_mat[i_range, 0], np.ones((len(i_range), 1))])
                y = c_mat[i_range, 0]
                if min(np.exp(y)) < start_threshold:
                    # the measurements are still too low to use (because of noise)
                    raise ValueError()
                (a, residues) = np.linalg.lstsq(x, y, rcond=None)[0:2]
                res_mat[i, 0] = a[0]
                res_mat[i, 1] = a[1]
                res_mat[i, 2] = residues
            except ValueError:
                pass

        max_i = res_mat[:, 0].argmax()

        if plot_figure:
            # (plt.hold was removed from matplotlib; axes hold their content by default now)
            plt.plot(time, cell_count - min(cell_count))
            plt.plot(time, res_mat[:, 0])
            plt.plot([0, time.max()], [start_threshold, start_threshold], 'r--')
            i_range = get_frame_range(time, max_i, window_size)

            x = np.hstack([t_mat[i_range, 0], np.ones((len(i_range), 1))])
            y = x * np.matrix(res_mat[max_i, 0:2]).T

            plt.plot(x[:, 0], np.exp(y), 'k:', linewidth=4)

            #plot(time, errors / errors.max())
            plt.yscale('log')
            #legend(['OD', 'growth rate', 'error'])
            plt.legend(['OD', 'growth rate', 'threshold', 'fit'])

        return res_mat[max_i, 0]
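A usage sketch, assuming fit_growth is available as a standalone function (the synthetic growth curve below is made up):

import numpy as np

t = np.linspace(0, 10, 200)              # time, e.g. hours
od = 0.005 * np.exp(0.6 * t)             # exponential growth with rate 0.6
mu = fit_growth(t, od, window_size=2.0)  # sliding window of 2 time units
print(mu)  # should be close to 0.6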
Example #26
time = time + rt
ang = -16.4  # rotation angle (degrees) into along/cross-channel coordinates
ru = u * np.cos(ang * np.pi / 180.0) - v * np.sin(ang * np.pi / 180.0)
rv = v * np.cos(ang * np.pi / 180.0) + u * np.sin(ang * np.pi / 180.0)

# (here 'mlab' appears to be the matplotlib package itself, i.e. "import matplotlib as mlab")
fmt = mlab.dates.DateFormatter('%m/%d')
days = mlab.dates.DayLocator()
#
#fig=mlab.pyplot.figure()
#ax=fig.add_axes([0.1,0.1,0.8,0.8])
ax=mlab.pyplot.subplot(2, 1, 1)
pc=ax.pcolor(time,bins,rv)
ax.plot(time,p,'k')
pc.set_clim([-75, 75])
ax.set_ylim([0.4,9])
ax.set_xlim([time.min(),time.max()])
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(fmt)
ax.set_title('Mullica River Along Channel Velocity (cm/s)')
ax.set_ylabel('Bin Range (m)')
mlab.pyplot.colorbar(pc)
#plt.pcolor(time,bins,u)

ax=mlab.pyplot.subplot(2, 1,2)
pc=ax.pcolor(time,bins,ru)
ax.plot(time,p,'k')
pc.set_clim([-75, 75])
ax.set_ylim([0.4,9])
ax.set_xlim([time.min(),time.max()])
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(fmt)
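The ru/rv lines at the top of this example rotate the velocity components into along/cross-channel coordinates; a tiny self-contained check of that rotation (the helper name is hypothetical):

import numpy as np

def rotate_uv(u, v, ang_deg):
    # rotate horizontal velocity components by ang_deg degrees
    th = ang_deg * np.pi / 180.0
    return (u * np.cos(th) - v * np.sin(th),
            v * np.cos(th) + u * np.sin(th))

print(rotate_uv(1.0, 0.0, -16.4))  # unit eastward flow in rotated coordinates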
Example #27
    # ampPeaks1['posTime'] = ampPeaks1['posTime'][0:nonCoalescePeriod]
    # outFile = open("./Data/ampPeaks.csv","w+")
    # writeData(ampPeaks1, outFile)

    # Cubic Spline Fitting
    time = np.array(time)
    amp = np.array(amp)
    # amp1 = [y for (x, y) in sorted(zip(time, amp))][::-1]
    # time1 = sorted(time, reverse=True)
    # time1 = np.array(time1)
    # amp1 = np.array(amp1)

    print(len(predAmp), len(predAmpN))
    # print 'time:', time
    # print 'amp:', amp
    xnew = np.linspace(time.min(), time.max(), 1000)
    # splineFit = spline(time, amp, xnew)

    fit = interp1d(time, amp, kind='cubic')
    splineFit = fit(xnew)
    # fit2 = BarycentricInterpolator(time, amp)
    # fit1 = KroghInterpolator(time, amp)
    # splineFit1 = fit1(xnew)
    # splineFit2 = fit2(xnew)
    # fit3 = interpolate.InterpolatedUnivariateSpline(time, amp)
    # splineFit3 = fit3(xnew)
    # fit4 = interpolate.splrep(time[::-1], amp[::-1], s=10000)
    # splineFit4 = interpolate.splev(xnew[::-1], fit4, der=3)

    plt.figure(figsize=(10, 8))
    plt.scatter(ampPeaks['posTime'], ampPeaks['posAmp'], color='g')
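Most names in the fragment above (ampPeaks, predAmp, ...) are defined elsewhere; a self-contained sketch of just the cubic-spline step it performs, with made-up data:

import numpy as np
from scipy.interpolate import interp1d

time = np.array([0., 1., 2., 3., 4.])
amp = np.array([0., 0.8, 0.9, 0.1, -0.7])
xnew = np.linspace(time.min(), time.max(), 1000)
splineFit = interp1d(time, amp, kind='cubic')(xnew)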
Example #28
def plot_NR_3D(NR, source_indexes_1D):
    n_samples = NR.size
    #---------------------
    cNorm = Normalize(vmin=NR.min(), vmax=NR.max())
    cmap = plt.get_cmap('jet')
    scalarMap = ScalarMappable(norm=cNorm, cmap=cmap)
    #---------------------
    relevant_interval = get_half_peak_interval(NR)
    rho, mean_delta_r = get_delta_r(NR, source_indexes_1D)
    #---------------------
    time = np.linspace(0., NR.size / autodet.cfg.sampling_rate, NR.size)
    fig = plt.figure('weighting_function', figsize=figsize)
    ax1 = fig.add_subplot(111)
    ax1.plot(time, NR)
    ax1.tick_params('y', color='C0', labelcolor='C0')
    ax1.set_ylabel('Composite Network Response', color='C0')
    ax2 = ax1.twinx()
    ax2.plot(time[relevant_interval], rho, color='C3')
    ax2.set_ylabel('Weighting Function', color='C3')
    ax2.tick_params('y', color='C3', labelcolor='C3')
    plt.subplots_adjust(top=0.88,
                        bottom=0.3,
                        left=0.5,
                        right=0.9,
                        hspace=0.2,
                        wspace=0.2)
    plt.xlim(time.min(), time.max())
    ax1.set_xlabel('Time (s)')
    #---------------------
    colors = scalarMap.to_rgba(NR)
    colors[np.setdiff1d(np.arange(NR.size), relevant_interval), -1] = 0.05
    #---------------------
    #    PROJECTION
    # (a throwaway PlateCarree figure is created only to convert lon/lat into
    #  normalized axes coordinates via transLimits, then closed right away)
    fake_map = plt.figure()
    fake_map_axis = plt.axes(projection=PlateCarree())
    X = np.zeros(n_samples, dtype=np.float64)
    Y = np.zeros(n_samples, dtype=np.float64)
    for n in range(n_samples):
        X[n], Y[n] = fake_map_axis.transLimits.transform(
            (MV.longitude[source_indexes_1D[n]],
             MV.latitude[source_indexes_1D[n]]))
    plt.close(fig=fake_map)
    #---------------------
    fig = plt.figure('NR_map', figsize=figsize)
    ax = fig.add_subplot(111, projection='3d')
    Z = MV.depth[source_indexes_1D]
    ax.scatter(X, Y, zs=Z, c=colors)
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    ax.set_zlabel('Depth (km)')
    ax.set_xlim(X.min(), X.max())
    ax.set_ylim(Y.min(), Y.max())
    ax.set_zlim(Z.min(), Z.max())
    ax.invert_zaxis()
    #---------------------
    ax2, _ = clb.make_axes(plt.gca(),
                           shrink=0.8,
                           orientation='vertical',
                           pad=0.15,
                           aspect=40,
                           anchor=(1.1, 0.75))
    cbmin = cNorm.vmin
    cbmax = cNorm.vmax
    ticks_pos = np.arange(np.round(cbmin, decimals=1),
                          np.round(cbmax, decimals=1), 10.)
    cbar = clb.ColorbarBase(ax2, cmap=cmap, norm=cNorm,
                            label='Composite Network Response',
                            orientation='vertical',
                            boundaries=np.linspace(cbmin, cbmax, 100),
                            ticks=ticks_pos)
    ax.set_title(
        r'Location uncertainty $\Delta r$ = {:.2f}km'.format(mean_delta_r))