Example #1
def experiment_s_bst():
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(search_bst(i))

    plot_data(r, times, "BinarySearchTree", "Search", "n", "time")
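None of the helpers these experiment_* functions call are shown on this page: start, end, and offset are module-level globals, each search_*/insertion_* call times one operation at input size n, and plot_data renders the timing curve. A minimal sketch of that scaffolding, where every name below is an assumption:

import time
import matplotlib.pyplot as plt

start, end, offset = 1000, 20000, 1000  # assumed benchmark sizes

def timed(fn, *args):
    # hypothetical timing pattern the search_*/insertion_* helpers follow
    t0 = time.perf_counter()
    fn(*args)
    return time.perf_counter() - t0

def plot_data(r, times, name, operation, xlabel, ylabel, filepath=None):
    # hypothetical plotting helper shared by the experiment_* functions;
    # Example #17 passes an extra filepath argument, so saving is optional here
    plt.plot(list(r), times)
    plt.title('{}: {}'.format(operation, name))
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if filepath:
        plt.savefig(filepath)
    else:
        plt.show()
    plt.close()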
Example #2
def synthesize(model, text, sentence, prefix=''):
    src_pos = np.array([i+1 for i in range(text.shape[1])])
    src_pos = np.stack([src_pos])
    src_pos = torch.from_numpy(src_pos).to(device).long()
        
    model.to(device)
    mel, mel_postnet, duration_output, f0_output, energy_output = model(text, src_pos)
    model.to('cpu')
    
    mel_torch = mel.transpose(1, 2).detach()
    mel_postnet_torch = mel_postnet.transpose(1, 2).detach()
    mel = mel[0].cpu().transpose(0, 1).detach()
    mel_postnet = mel_postnet[0].cpu().transpose(0, 1).detach()
    f0_output = f0_output[0].detach().cpu().numpy()
    energy_output = energy_output[0].detach().cpu().numpy()

    if not os.path.exists(hp.test_path):
        os.makedirs(hp.test_path)

    Audio.tools.inv_mel_spec(mel_postnet, os.path.join(hp.test_path, '{}_griffin_lim_{}.wav'.format(prefix, sentence)))
    wave_glow = utils.get_WaveGlow()
    waveglow.inference.inference(mel_postnet_torch, wave_glow, os.path.join(
        hp.test_path, '{}_waveglow_{}.wav'.format(prefix, sentence)))

    utils.plot_data([(mel_postnet.numpy(), f0_output, energy_output)], ['Synthesized Spectrogram'], filename=os.path.join(hp.test_path, '{}_{}.png'.format(prefix, sentence)))
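A hypothetical way to call this synthesize, assuming text is a (1, seq_len) LongTensor of phoneme IDs (the sequence variable and the call shape are assumptions, not this repo's documented API):

# hypothetical usage; `sequence` would come from the project's text frontend
text = torch.from_numpy(np.array([sequence])).long().to(device)
synthesize(model, text, sentence='hello_world', prefix='step_300000')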
Example #3
def experiment_s_rbt():
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(search_rbt(i))

    plot_data(r, times, "RedBlackTree", "Search", "n", "time")
Example #4
def experiment_w_c_q_s():
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(worst_case_quick_sort(i))

    plot_data(r, times, "worst case quick sort", "Sorting", "n", "time")
Example #5
def experiment_i_rbt():
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(insertion_rbt(i))

    plot_data(r, times, "RedBlackTree", "Insertion", "n", "time")
Example #6
def synthesize(model, waveglow, melgan, text, sentence, prefix=''):
    sentence = sentence[:200]  # a long filename would raise an OSError

    src_len = torch.from_numpy(np.array([text.shape[1]])).to(device)

    with torch.no_grad():
        mel, mel_postnet, log_duration_output, f0_output, energy_output, _, _, mel_len = model(text, src_len)

    mel_torch = mel.transpose(1, 2).detach()
    mel_postnet_torch = mel_postnet.transpose(1, 2).detach()
    mel = mel[0].cpu().transpose(0, 1).detach()
    mel_postnet = mel_postnet[0].cpu().transpose(0, 1).detach()
    f0_output = f0_output[0].detach().cpu().numpy()
    energy_output = energy_output[0].detach().cpu().numpy()

    if not os.path.exists(hp.test_path):
        os.makedirs(hp.test_path)

    Audio.tools.inv_mel_spec(mel_postnet, os.path.join(hp.test_path, '{}_griffin_lim_{}.wav'.format(prefix, sentence)))
    if waveglow is not None:
        utils.waveglow_infer(mel_postnet_torch, waveglow,
                             os.path.join(hp.test_path, '{}_{}_{}.wav'.format(prefix, hp.vocoder, sentence)))
    if melgan is not None:
        utils.melgan_infer(mel_postnet_torch, melgan,
                           os.path.join(hp.test_path, '{}_{}_{}.wav'.format(prefix, hp.vocoder, sentence)))

    utils.plot_data([(mel_postnet.numpy(), f0_output, energy_output)], ['Synthesized Spectrogram'],
                    filename=os.path.join(hp.test_path, '{}_{}.png'.format(prefix, sentence)))
Example #7
def test_run():
    # Read data
    dates = pd.date_range('2009-01-01', '2012-12-31')
    symbols = ['SPY']
    df = utils.get_data(symbols, dates)
    utils.plot_data(df)

    # Compute daily returns
    daily_returns = utils.compute_daily_returns(df)
    utils.plot_data(daily_returns,
                    title="Daily Returns",
                    ylabel="Daily Returns")

    # Plot a histogram
    daily_returns.hist(bins=20)

    # Get mean and standard deviation
    mean = daily_returns['SPY'].mean()
    print("mean=", mean)
    std = daily_returns['SPY'].std()

    plt.axvline(mean, color="w", linestyle="dashed", linewidth=2)
    plt.axvline(std, color="r", linestyle="dashed", linewidth=2)
    plt.axvline(-std, color="r", linestyle="dashed", linewidth=2)
    plt.show()

    # Compute kurtosis
    print(daily_returns.kurtosis())
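compute_daily_returns is not shown in Example #7; the usual implementation divides each day's price by the previous day's, sketched here under that assumption:

def compute_daily_returns(df):
    # hypothetical helper: day-over-day percent change, first row zeroed
    daily_returns = (df / df.shift(1)) - 1
    daily_returns.iloc[0, :] = 0
    return daily_returns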
Example #8
    def lang_cluster_centroids(self, lang_num, comp, cut_small=True):
        '''Plots the cluster centroids of the clusters that lang_num is split up into.'''

        clust_dist = self._cluster_distribution(lang_num, comp, cut_small)
        clust_dist_sort = sorted(clust_dist.items(), key=lambda x: x[1], reverse=True)
        grid_size = math.ceil( np.sqrt(len(clust_dist)) )
        fig, axarr = plt.subplots(grid_size, grid_size, sharex='col', sharey='row')  
        
        i = 0
        for r in range(grid_size):
            for c in range(grid_size):
                if i < len(clust_dist_sort):
                    data = self.group_centers[clust_dist_sort[i][0]].reshape((8,40)) 
                    u.plot_data(data, ax=axarr[r,c], word_labels=False, stim_labels=False, yaxis_label=False, xaxis_label=False, title=True, title_name=str(clust_dist_sort[i][1]))
                    i += 1  
                else:
                    axarr[r,c].axis("off")
        plt.tight_layout()

        image_path = path.abspath(path.join("Boundary Analysis", "Heatmaps", self.save_path))
        if not path.exists(image_path):
            mkdir(image_path)

        if cut_small:
            image_name = comp.capitalize() + str(lang_num) + "_centroids_remove_small.png"
        else:
            image_name = comp.capitalize() + str(lang_num) + "_centroids.png"
        plt.savefig(path.join(image_path, image_name))
        plt.close()
Example #9
def experiment_i_bst():
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(insertion_bst(i))

    plot_data(r, times, "BinarySearchTree", "Insertion", "n", "time")
Example #10
def test_run():
    # Read data
    dates = pd.date_range('1990-05-01', '2016-09-30')
    symbols = ['01', '02', '03', '04']
    df = pt.get_data(symbols, dates)
    fill_missing_data(df)
    pt.plot_data(df)
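fill_missing_data in Example #10 is not shown; a plausible sketch that forward-fills gaps and then back-fills any leading NaNs, mutating the frame in place (which matches the call above not reassigning the result):

def fill_missing_data(df):
    # hypothetical helper: forward-fill first, then back-fill leading gaps
    df.ffill(inplace=True)
    df.bfill(inplace=True)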
Example #11
    def plot_group_numbers(self, save_fig=True):
        '''Plots the group centroids of the resulting clusters using the cluster index as the header of each subfigure. This representation
        is useful when generating a key for MDS plots.'''

        grid_size = math.ceil( np.sqrt(len(self.groups)) )
        fig, axarr = plt.subplots(grid_size, grid_size, sharex='col', sharey='row')

        weights = np.array([self.r[i] for i in np.unique(self.c_assign)])  # remove non-existent groups
        sort_weights = sorted(weights, reverse=True)
        index_list = []
        for w in sort_weights:
            indices = np.where(weights == w)[0]
            index_list.extend(indices)

        i = 0
        for r in range(grid_size):
            for c in range(grid_size):
                if i < len(self.groups):
                    group = self.groups[index_list[i]]
                    data = self.group_centers[index_list[i]].reshape((8,40))
                    u.plot_data(data, ax=axarr[r,c], word_labels=False, stim_labels=False, yaxis_label=False, xaxis_label=False, title=True, title_name=str(index_list[i]))
                    i += 1
                else:
                    axarr[r,c].axis("off")

        plt.tight_layout()

        if save_fig:
            plt.savefig(self.save_path + "_wgrpnum.png")

        plt.show()
        plt.close()
Example #12
def plot_alignment():
    in_dir = '../self_alignment/alignments'
    t2_in_dir = '../alignments'

    for num in range(20):
        fname = str(num) + ".npy"
        a = np.load(os.path.join(in_dir, fname))
        # Expand per-phoneme durations into a hard alignment matrix:
        # row i gets a[i] consecutive ones.
        align = np.zeros((a.shape[0], a.sum()), dtype=float)

        last = 0
        for i in range(len(a)):
            dur = a[i]  # avoid shadowing the outer loop variable
            for j in range(dur):
                align[i][last + j] = 1.0
            last += dur

        t2a = np.load(os.path.join(t2_in_dir, fname))
        t2align = np.zeros((t2a.shape[0], t2a.sum()), dtype=float)
        last = 0
        for i in range(len(t2a)):
            dur = t2a[i]
            for j in range(dur):
                t2align[i][last + j] = 1.0
            last += dur

        utils.plot_data([align, t2align], fname)
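The two expansion loops above are equivalent to a single vectorized expression; a sketch assuming the same integer duration array a:

# Repeat row i of the identity matrix a[i] times, then transpose:
# gives the same (len(a), a.sum()) hard alignment matrix.
align = np.repeat(np.eye(len(a), dtype=float), a, axis=0).T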
Example #13
def train_with_gradient_descent(dataset):
    learning_rate = 0.01
    n_iter = 5000
    n_samples = len(dataset)
    # w0, w1 = random.uniform(-1, 1), random.uniform(-1, 1)
    w0, w1 = 0, 0

    loss_val = loss(dataset, w0, w1)
    print('initial loss={}'.format(loss_val))

    for t in range(n_iter):
        grad_w0, grad_w1 = 0, 0
        for x, y in dataset:
            y_pred = linear_pred(x, w0, w1)
            grad_w0 += y_pred - y
            grad_w1 += x * (y_pred - y)
        grad_w0 *= 2 / n_samples
        grad_w1 *= 2 / n_samples
        w0 = w0 - learning_rate * grad_w0
        w1 = w1 - learning_rate * grad_w1

        loss_val = loss(dataset, w0, w1)
        if (t + 1) % 500 == 0:
            print('iter {}, loss={}'.format(t + 1, loss_val))

    plot_data(dataset, (w0, w1))
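Example #13 does not define linear_pred or loss; a minimal sketch consistent with the gradient formulas above (mean squared error for y ≈ w0 + w1·x):

def linear_pred(x, w0, w1):
    # hypothetical helper: predict with a line
    return w0 + w1 * x

def loss(dataset, w0, w1):
    # hypothetical helper: mean squared error over the dataset
    return sum((linear_pred(x, w0, w1) - y) ** 2 for x, y in dataset) / len(dataset)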
Example #14
def synthesize(model, waveglow, melgan, text, sentence, prefix=''):
    sentence = sentence[:10]  # a long filename would raise an OSError

    mean_mel, std_mel = torch.tensor(np.load(
        os.path.join(hp.preprocessed_path, "mel_stat.npy")),
                                     dtype=torch.float).to(device)
    mean_f0, std_f0 = torch.tensor(np.load(
        os.path.join(hp.preprocessed_path, "f0_stat.npy")),
                                   dtype=torch.float).to(device)
    mean_energy, std_energy = torch.tensor(np.load(
        os.path.join(hp.preprocessed_path, "energy_stat.npy")),
                                           dtype=torch.float).to(device)

    mean_mel, std_mel = mean_mel.reshape(1, -1), std_mel.reshape(1, -1)
    mean_f0, std_f0 = mean_f0.reshape(1, -1), std_f0.reshape(1, -1)
    mean_energy, std_energy = mean_energy.reshape(1, -1), std_energy.reshape(
        1, -1)

    src_len = torch.from_numpy(np.array([text.shape[1]])).to(device)

    mel, mel_postnet, log_duration_output, f0_output, energy_output, _, _, mel_len = model(
        text, src_len)

    mel_torch = mel.transpose(1, 2).detach()
    mel_postnet_torch = mel_postnet.transpose(1, 2).detach()
    f0_output = f0_output[0]
    energy_output = energy_output[0]

    mel_torch = utils.de_norm(mel_torch.transpose(1, 2), mean_mel, std_mel)
    mel_postnet_torch = utils.de_norm(mel_postnet_torch.transpose(1, 2),
                                      mean_mel, std_mel).transpose(1, 2)
    f0_output = utils.de_norm(f0_output, mean_f0,
                              std_f0).squeeze().detach().cpu().numpy()
    energy_output = utils.de_norm(energy_output, mean_energy,
                                  std_energy).squeeze().detach().cpu().numpy()

    if not os.path.exists(hp.test_path):
        os.makedirs(hp.test_path)

    Audio.tools.inv_mel_spec(
        mel_postnet_torch[0],
        os.path.join(hp.test_path,
                     '{}_griffin_lim_{}.wav'.format(prefix, sentence)))
    if waveglow is not None:
        utils.waveglow_infer(
            mel_postnet_torch, waveglow,
            os.path.join(hp.test_path,
                         '{}_{}_{}.wav'.format(prefix, hp.vocoder, sentence)))
    if melgan is not None:
        utils.melgan_infer(
            mel_postnet_torch, melgan,
            os.path.join(hp.test_path,
                         '{}_{}_{}.wav'.format(prefix, hp.vocoder, sentence)))

    utils.plot_data([
        (mel_postnet_torch[0].detach().cpu().numpy(), f0_output, energy_output)
    ], ['Synthesized Spectrogram'],
                    filename=os.path.join(hp.test_path,
                                          '{}_{}.png'.format(prefix,
                                                             sentence)))
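utils.de_norm above undoes the mean/std normalization applied during preprocessing; a one-line sketch under that assumption:

def de_norm(x, mean, std):
    # hypothetical inverse of (x - mean) / std normalization
    return x * std + mean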
Example #15
def main(args):
    # read training data
    data_frame_train = pd.read_csv(args.trainpath, dtype=np.uint8, header=None)
    NUM_TRAIN = data_frame_train.shape[0]
    NUM_PIXELS = data_frame_train.shape[1] - 1

    # read test data
    data_frame_test = pd.read_csv(args.testpath, dtype=np.uint8, header=None)
    NUM_TEST = data_frame_test.shape[0]

    # reformat data
    [images_train, labels_train] = reformat_data(data_frame_train)
    [images_test, labels_test] = reformat_data(data_frame_test)

    # plot data as sanity check
    plot_data(images_train)

    if args.algorithm == 'B':
        predictBinomial(images_train, images_test, labels_train, labels_test)
    elif args.algorithm == 'G_gray':
        predictGrayscale(images_train, images_test, labels_train, labels_test)
    elif args.algorithm == 'G_pca':
        predictPCA(images_train, images_test, labels_train, labels_test)
    elif args.algorithm == 'G_hog':
        predictHoG(images_train, images_test, labels_train, labels_test)
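reformat_data is not shown; a plausible sketch assuming the common CSV layout with the label in column 0 and one pixel per remaining column:

def reformat_data(data_frame):
    # hypothetical helper: split the label column from the pixel columns
    labels = data_frame.iloc[:, 0].to_numpy()
    images = data_frame.iloc[:, 1:].to_numpy()
    return [images, labels]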
Example #16
def experiment_a_c_i_s():
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(avg_case_insertion_sort(i))

    plot_data(r, times, "average case insertion sort", "Sorting", "n", "time")
Example #17
def experiment_mst_prim(filename, p):
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(uniform_graph_mst_prim(i, filename, p))

    plot_data(r, times, "Prim", "Minimum Spanning Tree P={}".format(p), "n",
              "time", filepath + format_p(p) + 'mstprim.png')
Example #18
def test_run():
    port_vals, daily_returns, cum_ret, sharpe_ratio = portfolio_stats(1000000, ['SPY', 'XOM', 'GOOG', 'GLD'], [.4, .4, .1, .1])
    print('Cumulative return:', cum_ret)
    print('Average daily return:', daily_returns.mean())
    print('Risk (daily std):', daily_returns.std())
    print('Sharpe ratio:', sharpe_ratio)

    plot_data(port_vals, title='Portfolio value', ylabel='Portfolio value')
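A sketch of how portfolio_stats might compute the annualized Sharpe ratio from daily returns, assuming 252 trading days and a zero risk-free rate (both assumptions):

import numpy as np

def sharpe_ratio_from_daily(daily_returns, risk_free_rate=0.0):
    # hypothetical helper: annualized Sharpe ratio
    excess = daily_returns - risk_free_rate
    return np.sqrt(252) * excess.mean() / excess.std()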
Example #19
def experiment_cc(filename, p):
    r = range(start, end, offset)
    times = []
    for i in r:
        times.append(connected_components(i, filename, p))

    plot_data(r, times, "Connected Components",
              "Connected Components P={}".format(p), "n", "time",
              filepath + format_p(p) + 'conncomponents.png')
Example #20
def test_run():
    # Read data
    dates = pd.date_range('2010-01-01', '2012-12-31')
    symbols = ['SPY', 'XOM', 'GOOG', 'GLD']
    df = utils.get_data(symbols, dates)
    utils.plot_data(df)

    # Compute a global statistic for each stock
    print(df.mean())
Example #21
def test_run():
    # Read data
    dates = pd.date_range('2000-01-01', '2012-12-31')
    symbols = ['SPY']
    df = get_data(symbols, dates)
    plot_data(df)

    # Compute daily returns
    daily_returns = compute_daily_returns(df)
    plot_data(daily_returns, title="Daily returns", ylabel="Daily returns")
Example #22
def test_run():
    # Read data
    dates = pd.date_range('2016-07-01', '2016-07-31')  # one month only
    symbols = ['01', '02']
    df = pt.get_data(symbols, dates)
    pt.plot_data(df)

    # Compute daily returns
    daily_returns = compute_daily_returns_with_pandas(df)
    pt.plot_data(daily_returns, title="Daily returns", ylabel="Daily returns")
Example #23
    def plot_last_trace(self):
        '''
        Method for plotting the most recent trace (if it exists)
        '''

        if self.last_trace:
            try:
                utils.plot_data(self.last_trace)
            except Exception:
                print(f"Cannot find trace {self.last_trace}")
        else:
            print("No trace acquired in this session")
Example #24
def p05_how_to_plot_a_histogram():
    dates = pd.date_range('2010-01-01', '2018-03-01')
    symbols = ['SPY']
    df = get_data(symbols, dates)
    plot_data(df)
    dr = compute_daily_returns(df)
    plot_data(dr,
              title='Daily Returns of {}'.format(symbols),
              xlabel='Date',
              ylabel='ratio')

    dr.hist(bins=200)
    plt.show()
Example #25
def sample_evolution(start, ns=100):
    # `start` is the initial data for the sampling chain
    sample = t.compile_function(initial_vmap, mb_size=1, monitors=[m_model], name='evaluate', train=False, mode=mode)

    data = start
    plot_data(data)

    while True:
        for k in range(ns):
            for x in sample({rbm.v: data}):  # draw a new sample
                data = x[0]

        plot_data(data)
Example #26
    def plot_file(self):
        '''
        Method for plotting a file as selected by the user
        '''

        # open up a filedialog to let the user navigate to the appropriate file
        fpath = filedialog.askopenfilename()

        if fpath:
            # if they want to display with FFT
            if self.fft_check_value.get():
                utils.plot_data_with_fft(fpath)
            else:
                utils.plot_data(fpath)
Example #27
def main():
    n = 200
    batch_n = 16
    epochs = 500
    learning_rate = 0.003
    reg_constant = 1
    x, y = create_data_2(n)
    sgd = BatchSGD(learning_rate, reg_constant)
    newton = Newton(learning_rate, reg_constant)
    plot_data(
        [sgd.weight, newton.weight],
        ['SGD classification', 'Newton classification'],
        x, y,
        'weights_data_before'
    )
    sgd_loss_hist = []
    newton_loss_hist = []
    for epoch in range(epochs):
        i = 0
        while i < n:
            newton.fit(x[i:min(i+batch_n, n)], y[i:min(i+batch_n, n)])
            sgd.fit(x[i:min(i+batch_n, n)], y[i:min(i+batch_n, n)])
            i += batch_n
        loss, accuracy = sgd.evaluate(x, y)
        sgd_loss_hist.append(loss)
        loss, accuracy = newton.evaluate(x, y)
        newton_loss_hist.append(loss)
    plot_data(
        [sgd.weight, newton.weight],
        ['SGD classification', 'Newton classification'],
        x, y,
        'weights_data_after'
    )
    plot_loss(
        [sgd_loss_hist, newton_loss_hist],
        ['Batch SGD loss', 'Newton loss'],
        'sgd_vs_newton'
    )
    loss, accuracy = sgd.evaluate(x, y)
    print(
        'SGD evaluation__________\nLoss\t: {:2.4f}\nAccuracy: {:2.4f}'.format(
            loss, accuracy)
    )
    loss, accuracy = newton.evaluate(x, y)
    print()
    print(
        'Newton evaluation_______\nLoss\t: {:2.4f}\nAccuracy: {:2.4f}'.format(
            loss, accuracy)
    )
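Example #27 treats Newton.fit as given; a minimal sketch of one regularized Newton update for logistic regression, consistent with the comparison above (all names and the L2 term are assumptions):

import numpy as np

def newton_step(w, x, y, reg_constant):
    # hypothetical single Newton update for L2-regularized logistic regression
    p = 1.0 / (1.0 + np.exp(-x @ w))         # predicted probabilities
    grad = x.T @ (p - y) + reg_constant * w  # gradient of the loss
    h = x.T @ np.diag(p * (1 - p)) @ x + reg_constant * np.eye(len(w))  # Hessian
    return w - np.linalg.solve(h, grad)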
Example #28
def test_run():
    # Read data
    dates = pd.date_range('2012-07-01', '2012-07-31')
    symbols = ['SPY', 'XOM']
    df = utils.get_data(symbols, dates)
    # utils.plot_data(df)

    # Compute daily return
    # daily_returns = utils.compute_daily_returns(df)
    # utils.plot_data(daily_returns, title="Daily returns", ylabel="Daily returns")
    # print(df)
    # print(df[:-1].values)

    cumulative_returns = utils.compute_cumulative_returns(df)
    utils.plot_data(cumulative_returns,
                    title="Cumulative returns",
                    ylabel="Cumulative returns")
Example #29
def test_run():
    start_date = '2010-01-01'
    end_date = '2010-12-31'
    dates = pd.date_range(start_date, end_date)

    #Read in more stocks
    symbols = ['GOOG', 'IBM', 'GLD']
    df = utils.get_data(symbols, dates)

    # Slice by row range (dates) using the DataFrame.loc[] selector
    # print(df.loc['2010-01-01':'2010-01-31'])

    # Slice by column (symbols)
    # print(df['GOOG'])  # a single label selects a single column
    # print(df[['IBM', 'GLD']])  # a list of labels selects multiple columns

    # Slice by row and column
    # print(df.loc['2010-03-10':'2010-03-15', ['SPY', 'IBM']])

    utils.plot_data(utils.normalize_data(df))
Example #30
def synthesize(model, condition, index):
    # long filename will result in OS Error

    src_len = torch.from_numpy(np.array([condition.shape[1]])).to(device)
    condition = condition[np.newaxis, :, :]
    condition = torch.LongTensor(condition).to(device).transpose(1, 2)
    #     print(condition)

    ap_output, sp_output, sp_postnet_output, log_duration_output, f0_output, energy_output, src_mask, ap_mask, sp_mask, variance_adaptor_output, decoder_output = model(
        condition, src_len)

    length = min(ap_output.shape[1], sp_output.shape[1], f0_output.shape[1])
    ap = ap_output[0, :length].detach().cpu().double().numpy()
    sp = sp_output[0, :length].detach().cpu().double().numpy()
    sp_postnet = sp_postnet_output[0, :length].detach().cpu().double().numpy()
    f0_output = f0_output[0, :length].detach().cpu().double().numpy()
    energy_output = energy_output[0, :length].detach().cpu().numpy()
    print(condition.transpose(1, 2)[0][2])
    print(log_duration_output)
    #     print(ap.shape,sp_postnet.shape,f0_output.shape)
    #     return utils.world_infer()
    #     y = utils.world_infer()
    #     if not os.path.exists(hp.test_path):
    #         os.makedirs(hp.test_path)

    #     Audio.tools.inv_mel_spec(mel_postnet, os.path.join(hp.test_path, '{}_griffin_lim_{}.wav'.format(prefix, sentence)))
    #     if hp.vocoder=='waveglow':
    #         melgan = utils.get_melgan()
    #         melgan.to(device)
    #         utils.waveglow_infer(mel_postnet_torch, waveglow, os.path.join(hp.test_path, '{}_{}_{}.wav'.format(prefix, hp.vocoder, sentence)))
    #     if hp.vocoder=='melgan':
    #         waveglow = utils.get_waveglow()
    #         waveglow.to(device)
    #         utils.melgan_infer(mel_postnet_torch, melgan, os.path.join(hp.test_path, '{}_{}_{}.wav'.format(prefix, hp.vocoder, sentence)))
    y = utils.world_infer(ap, sp, f0_output)
    sp_postnet = np.swapaxes(sp_postnet, 0, 1)
    utils.plot_data([(sp_postnet, f0_output, energy_output)],
                    ['Synthesized Spectrogram'],
                    filename=os.path.join(hp.test_path,
                                          'out_%03d.png' % index))
    return y
Example #31
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import utils

X, y = make_classification(n_samples=1000, n_features=2, n_redundant=0,
                           n_informative=2, random_state=0, n_clusters_per_class=1)
print(X.shape)

X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.33, random_state=100)
print(X_train.shape)
print(X_validation.shape)

utils.plot_data(X_train, y_train)

#perceptron model for binary classification
model = Sequential()
model.add(Dense(units=1, input_shape=(2,), activation='sigmoid'))

model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy'])

history = model.fit(x=X_train, y=y_train, verbose=3, epochs=1000, validation_data=(X_validation,y_validation), batch_size=10)
print(model.summary())
print(model.get_weights())

historydf = pd.DataFrame(history.history, index=history.epoch)

utils.plot_loss_accuracy(history)
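utils.plot_loss_accuracy is project-local; a minimal sketch of what it plausibly does with the Keras History object (the implementation is an assumption). Note that binary_crossentropy is the more conventional loss for a sigmoid classifier than the MSE used above.

def plot_loss_accuracy(history):
    # hypothetical helper: plot training curves from a Keras History object
    historydf = pd.DataFrame(history.history, index=history.epoch)
    historydf.plot(ylim=(0, 1))  # loss and accuracy both live in [0, 1] here
    plt.show()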
Example #32
def main(argv=None):
  if argv is not None:
    sys.argv = argv

  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument("-d", "--directory", default="featuredatas-json",
                      help="feature data location (default featuredatas-json)")
  parser.add_argument("-n", "--num-particles", type=int, default=1000,
                      help="number of particles to simulate (default 1000)")
  parser.add_argument("--range-noise", type=float, default=3.0,
                      help="range prediction noise (default 3.0m)")
  parser.add_argument("--angle-noise", type=float, default=3.0,
                      help="angle prediction noise (default 3.0 degrees)")
  parser.add_argument("--range-resolution", type=float, default=3.0,
                      help="range resolution of the sensor (default 3.0m)")
  parser.add_argument("--angular-resolution", type=float, default=1.5,
                      help="angular sensor resolution (default 1.5 degrees)")
  parser.add_argument("-t", "--timeout", type=float, default=0.01,
                      help="seconds to wait between pings (default 0.01s)")
  parser.add_argument("-r", "--fov-range", type=float, default=500.0,
                      help="range of the field of view (default 500.0m)")
  parser.add_argument("-a", "--fov-hor-angle", type=float, default=90.0,
                      help="number of degrees in field of view (default 90.0)")
  parser.add_argument("-m", "--hide-measurements", action="store_true",
                      help="hide measurements in the particle plot")
  parser.add_argument("--save-figure", action="store_true",
                      help="save figure frames as images")
  parser.add_argument("--write-latlon", action="store_true",
                      help=("Save diver position estimates as"
                            "lat/lon's to a file"))
  args = parser.parse_args()

  if not os.path.isdir(args.directory):
    sys.stdout.write("Could not find the {} directory!".format(args.directory))
    return

  if args.save_figure and not os.path.isdir("images"):
    os.makedirs("images")
  if args.write_latlon:
    latlon_file = open("diver_position_estimates.txt", 'w')

  # This assumes the input directory is of the form *-<data_format> where
  # <data_format> is either 'proto' or 'json'.
  data_format = args.directory.split('-')[-1]

  # Read first data file to get position for drawing initialization.
  feature_data = get_first_feature_data(args.directory, data_format)
  init_position = SensorPosition(feature_data.position.lat,
                                 feature_data.position.lon,
                                 feature_data.heading.heading)

  # Initialize plot.
  plt.ion()
  fig = plt.figure()
  particle_plot = fig.add_subplot(111)

  # Accumulated x- and y-displacement of the sensor (ship) in meters relative
  # to the starting position.
  acc_dx, acc_dy = 0.0, 0.0

  # Initialize our particles.
  particles = []
  for i in range(args.num_particles):
    particles.append(Particle(args.fov_range, args.fov_hor_angle,
                              args.range_noise, args.angle_noise,
                              args.range_resolution, args.angular_resolution))

  # Collection of filtered position computed from posterior particles.
  filtered_xs, filtered_ys = [], []

  particle_xs, particle_ys = utils.get_particle_positions(
    particles, acc_dx, acc_dy, init_position.heading)
  utils.plot_data(particle_xs, particle_ys, filtered_xs, filtered_ys, [], -1,
                  True, acc_dx, acc_dy, init_position.heading, particle_plot)
  plt.draw()

  # Pump. Start with uniform weights in case the first ping yields no
  # measurements to weight against.
  weights = [1.0] * args.num_particles
  last_position = None
  for i, feature_data in enumerate(
      get_feature_datas(args.directory, data_format)):
    current_position = SensorPosition(feature_data.position.lat,
                                      feature_data.position.lon,
                                      feature_data.heading.heading)
    dx, dy = 0.0, 0.0
    if last_position is not None:
      dx, dy = utils.compute_sensor_movement(last_position, current_position)
      for particle in particles:
        particle.move(last_position, current_position, dx, dy)
    acc_dx += dx
    acc_dy += dy
    measurements = get_measurements(feature_data)
    new_weights = get_weights(particles, measurements)
    if new_weights:
      weights = new_weights
    particles = resample_particles(particles, weights)
    particle_xs, particle_ys = utils.get_particle_positions(
      particles, acc_dx, acc_dy, current_position.heading)
    filtered_x, filtered_y = utils.extract_position_from_particles(
      particle_xs, particle_ys)
    filtered_lat, filtered_lon = geo.add_offsets_to_latlons(
      current_position, filtered_x - acc_dx, filtered_y - acc_dy)
    filtered_xs.append(filtered_x)
    filtered_ys.append(filtered_y)
    last_position = current_position

    utils.plot_data(particle_xs, particle_ys, filtered_xs, filtered_ys,
                    measurements, i, args.hide_measurements, acc_dx, acc_dy,
                    current_position.heading, particle_plot)
    plt.draw()

    if args.save_figure:
      plt.savefig("images/%03d.png" % i, format='png')
    if args.write_latlon:
      latlon_file.write(
        "%d,%0.9f,%0.9f\n" % (utils.date_to_sec(
          feature_data.time), filtered_lat, filtered_lon))
      latlon_file.flush()
    time.sleep(args.timeout)
  if args.write_latlon:
    latlon_file.close()
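resample_particles is assumed by Example #32; a common choice is multinomial resampling proportional to weight, sketched here (not the author's implementation):

import copy
import random

def resample_particles(particles, weights):
    # hypothetical helper: multinomial resampling proportional to weight
    if not weights:
        return particles
    total = float(sum(weights))
    probs = [w / total for w in weights]
    return [copy.deepcopy(p)
            for p in random.choices(particles, weights=probs, k=len(particles))]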