Example #1
def testEKF(inputfilename):
    try:
        with open(inputfilename, 'r') as f:
            lines = f.readlines()
    except OSError:
        print("Failed to open file %s" % inputfilename)
        return

    # Instantiate an extended Kalman filter
    fusion_EKF = EKF()
    gt_values = []
    estimations = []
    for line in lines:
        words = line.split('\t')
        sensor_type = words[0]
        measurement = {}
        measurement['sensor_type'] = sensor_type
        if sensor_type == 'L':
            measurement['x'] = float(words[1])
            measurement['y'] = float(words[2])
            pos = 3
        elif sensor_type == 'R':
            measurement['rho'] = float(words[1])
            measurement['phi'] = float(words[2])
            measurement['rho_dot'] = float(words[3])
            pos = 4
        else:
            # Skip lines with an unknown sensor type (otherwise `pos` would be unbound)
            continue
        measurement['timestamp'] = int(words[pos])
        # Accumulate ground truth values
        gt_values.append({
            'px': float(words[pos + 1]),
            'py': float(words[pos + 2]),
            'vx': float(words[pos + 3]),
            'vy': float(words[pos + 4]),
        })
        # Process the measurement
        fusion_EKF.process_measurement(measurement)
        estimations.append({
            'px': fusion_EKF.ekf.x[0],
            'py': fusion_EKF.ekf.x[1],
            'vx': fusion_EKF.ekf.x[2],
            'vy': fusion_EKF.ekf.x[3],
        })

    # Visualize the result
    plot_2d(estimations, gt_values)
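
Note: `plot_2d` is not defined in this snippet. The sketch below is a guess at what it does, assuming it overlays the estimated (px, py) track on the ground-truth track; the function body is an assumption, not the original implementation.

import matplotlib.pyplot as plt

def plot_2d(estimations, gt_values):
    # Extract position components from the dictionaries built above
    est_x = [e['px'] for e in estimations]
    est_y = [e['py'] for e in estimations]
    gt_x = [g['px'] for g in gt_values]
    gt_y = [g['py'] for g in gt_values]
    plt.plot(gt_x, gt_y, label='ground truth')   # reference trajectory
    plt.plot(est_x, est_y, label='EKF estimate')  # filtered trajectory
    plt.xlabel('px')
    plt.ylabel('py')
    plt.legend()
    plt.show()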
Example #2
def main(csv, features, iter):
    # -f degree -f betweenness -f closeness -f eigencentrality -f coreness -f layerness -f pagerank -f sum_friends_friends -f transitivity

    column_names = ["NetworkType", "SubType"] + list(features)
    isSubType = True  # use SubType as the labels for classification
    at_least = 0

    X, Y, sub_to_main_type, feature_order = init(csv, column_names, isSubType, at_least)

    N = iter

    # The network subtype of interest (one-vs-rest classification)
    one = "seed"

    X_converted, Y_converted = convert_one_to_many(X, Y, one)

    list_accuracies, list_important_features, list_auc = many_classifications(
        X_converted, Y_converted, feature_order, N
    )

    print("average accuracy: %f" % (float(sum(list_accuracies)) / float(N)))
    print("average AUC: %f" % (float(sum(list_auc)) / float(N)))

    dominant_features = plot_feature_importance(list_important_features, feature_order)

    # Pick the two most important features for plotting, making sure they differ
    first = dominant_features[0][0][0]
    second = dominant_features[1][0][0]
    if first == second:
        second = dominant_features[1][1][0]

    Y_converted_string_labels = [one if y == 1 else "non-seed" for y in Y_converted]

    x_label = first
    y_label = second
    x_index = feature_order.index(x_label)
    y_index = feature_order.index(y_label)

    plot_2d(np.array(X_converted), np.array(Y_converted_string_labels), x_index, y_index, x_label, y_label)
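
Here too `plot_2d` comes from the surrounding project. A plausible sketch, assuming it scatters the two selected feature columns colored by class label (the body is an assumption):

import matplotlib.pyplot as plt
import numpy as np

def plot_2d(X, labels, x_index, y_index, x_label, y_label):
    # One scatter series per class so the legend shows the label names
    for label in np.unique(labels):
        mask = labels == label
        plt.scatter(X[mask, x_index], X[mask, y_index], label=str(label), alpha=0.6)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.legend()
    plt.show()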
Example #3
# fig['gpmean'][0].savefig('gpr_test.png')
# GPy.plotting.show(fig, filename='basic_gp_regression_density_optimized_test')
# matplotlib.pylab.show(block=True) 

# plt.plot(X_train, Y_train, '.', label = 'train')
# plt.plot(Y_test, Y_test, '.', label = 'test')
# plt.plot(Y_test, Y_test_pred, '.', label = 'pred')
# plt.legend()
# plt.show()

# sorted_train_idx = np.argsort(X_train, axis = 0).reshape(X_train.shape[0],)
# sorted_test_idx = np.argsort(X_test, axis = 0).reshape(X_test.shape[0],)

# plt.plot(X_train[sorted_train_idx,:], Y_train[sorted_train_idx,:], '.',label = 'train')
# plt.plot(X_test[sorted_test_idx,:], f_test[sorted_test_idx,:], label = 'test')
# plt.plot(X_test[sorted_test_idx,:], Y_test_pred[sorted_test_idx,:], label = 'pred')
# plt.fill_between(X_test[sorted_test_idx,:].reshape(X_test.shape[0],), 
#             (Y_test_pred[sorted_test_idx,:] - 2 * np.sqrt(Y_test_var[sorted_test_idx,:])).reshape(X_test.shape[0],),
#             (Y_test_pred[sorted_test_idx,:] + 2 * np.sqrt(Y_test_var[sorted_test_idx,:])).reshape(X_test.shape[0],), alpha = 0.5)
# plt.legend()
# info = '_train_' + str(num_train) 
# plt.title('gpr' + info)
# plt.xlabel('X')
# plt.ylabel('Y')
# plt.savefig('gpr' + info + '.png')

if dim == 1:
    plot_1d(X_train, X_test, f_train, Y_train, f_test, Y_test, Y_test_pred, Y_test_var, None, 'gpr')
elif dim == 2:
    plot_2d(X_train, X_test, f_train, Y_train, f_test, Y_test, Y_test_pred, Y_test_var, 'gpr')
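
For the 2-D case, `plot_2d` receives the full train/test split plus the predictive mean and variance. A compact sketch of one way such a plot could look, assuming the inputs are (n, 2) arrays; everything below is an assumption about the project's plotting helper:

import matplotlib.pyplot as plt

def plot_2d(X_train, X_test, f_train, Y_train, f_test, Y_test,
            Y_test_pred, Y_test_var, name):
    # Side-by-side scatters of the true latent function and the GP mean
    fig, axes = plt.subplots(1, 2, figsize=(10, 4), sharex=True, sharey=True)
    for ax, values, title in [(axes[0], f_test, 'true f'),
                              (axes[1], Y_test_pred, 'predicted mean')]:
        sc = ax.scatter(X_test[:, 0], X_test[:, 1], c=values.ravel(), s=10)
        ax.set_title(title + ' (' + name + ')')
        fig.colorbar(sc, ax=ax)
    fig.savefig(name + '_2d.png')
    plt.close(fig)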
Example #4
def run_survey(tb, savefolder, iterator, gain=60, int_time=30):
    tb.set_sdr_gain(gain)
    freq = tb.get_sdr_frequency() / 1e6  # MHz
    freq_offset = tb.get_output_vector_bandwidth() / 2e6  # half the bandwidth, in MHz
    flo = freq - freq_offset
    fhi = freq + freq_offset
    num_chan = tb.get_num_channels()
    freq_range = np.linspace(flo, fhi, num_chan)

    #########################################
    # BEGIN COMMANDS #
    #########################################

    # Do the survey, logging to both a text file and a CSV
    file = open(os.path.join(savefolder, 'vectors.txt'), 'w')
    csvfile = open(os.path.join(savefolder, 'vectors.csv'), 'w', newline='')
    csvwriter = csv.writer(csvfile)
    file.write('Integration time ' + str(int_time) +
               ' seconds. Center frequency ' + str(freq) + ' MHz. \n \n')
    csvwriter.writerow([
        '# Integration time: %d seconds Center frequency: %f MHz' %
        (int_time, freq)
    ])
    # Write the frequency header block twice when velocity columns will also be logged
    freq_count = 2 if 'pos' in iterator.fields else 1
    csvwriter.writerow(['time'] + list(iterator.fields) +
                       [str(f) for f in freq_range] * freq_count)
    file.write(' '.join(iterator.fields) + ' Time Center Data_vector \n \n')

    contour_iter_axes = {field: [] for field in iterator.axes}
    contour_freqs = []
    contour_vels = []
    contour_data = []

    for pos in iterator:
        tb.point(pos.azimuth, pos.elevation)

        print("Observing at coordinates " + str(pos) + '.')
        apytime = get_time()
        data = tb.observe(int_time)

        #write to file
        file.write(' '.join(str(x) for x in pos) + ' ')
        file.write(str(apytime) + ' ')
        file.write(str(data) + '\n \n')

        for field in iterator.axes:
            contour_iter_axes[field].append(
                np.full(len(freq_range), getattr(pos, field)))

        contour_freqs.append(freq_range)

        vel_range = None
        if hasattr(pos, 'longitude'):
            vel_range = np.array(
                freqs_to_vel(freq, freq_range, pos.longitude, pos.latitude))
            contour_vels.append(vel_range)

        contour_data.append(data)

        apytime.format = 'fits'
        row = [str(apytime)] + [str(x) for x in pos]
        if vel_range is not None:
            row += [str(f) for f in vel_range]
        row += [str(f) for f in data]
        csvwriter.writerow(row)

        plot.plot_freq(freq, freq_range, data,
                       iterator.format_title(pos) + ' ' + str(apytime))
        plot.plt.savefig(
            os.path.join(savefolder,
                         iterator.format_filename(pos) + '_freq.pdf'))
        plot.plt.close()

        if hasattr(pos, 'longitude'):
            plot.plot_velocity(vel_range, data,
                               iterator.format_title(pos) + ' ' + str(apytime))
            plot.plt.savefig(
                os.path.join(savefolder,
                             iterator.format_filename(pos) + '_vel.pdf'))
            plot.plt.close()

        print('Data logged.')
        print()

    file.close()
    csvfile.close()

    contour_iter_axes = {x: np.array(y) for x, y in contour_iter_axes.items()}
    contour_freqs = np.array(contour_freqs)
    contour_data = np.array(contour_data)

    for field, data in contour_iter_axes.items():
        np.save(os.path.join(savefolder, 'contour_' + field + '.npy'), data)
    np.save(os.path.join(savefolder, 'contour_data.npy'), contour_data)
    np.save(os.path.join(savefolder, 'contour_freqs.npy'), contour_freqs)
    if contour_vels:
        contour_vels = np.array(contour_vels)
        np.save(os.path.join(savefolder, 'contour_vels.npy'), contour_vels)

    plot.plot_2d(contour_freqs, contour_vels, contour_data, contour_iter_axes,
                 savefolder)
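
`plot.plot_2d` belongs to the survey's plotting module and is not shown. A hedged sketch of a matching signature, assuming it renders the accumulated spectra as a frequency-versus-scan-axis power map (the body and the output filename are assumptions):

import os
import matplotlib.pyplot as plt

def plot_2d(contour_freqs, contour_vels, contour_data, contour_iter_axes, savefolder):
    # Use the first iterator axis (e.g. azimuth or galactic longitude) as y;
    # contour_vels could drive a second, velocity-axis panel and is omitted here
    axis_name, axis_values = next(iter(contour_iter_axes.items()))
    fig, ax = plt.subplots()
    # All three arrays have shape (n_pointings, num_channels)
    mesh = ax.pcolormesh(contour_freqs, axis_values, contour_data, shading='auto')
    fig.colorbar(mesh, ax=ax, label='power')
    ax.set_xlabel('frequency (MHz)')
    ax.set_ylabel(axis_name)
    fig.savefig(os.path.join(savefolder, 'contour_freq.pdf'))
    plt.close(fig)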
Example #5
# save features
utils.save_features(model_dir, "X_train", X_train, y_train)
utils.save_features(model_dir, "X_test", X_test, y_test)
utils.save_features(model_dir, "Z_train", Z_train, y_train)
utils.save_features(model_dir, "Z_test", Z_test, y_test)

# evaluation train
_, acc_svm = evaluate.svm(Z_train, y_train, Z_train, y_train)
acc_knn = evaluate.knn(Z_train, y_train, Z_train, y_train, k=5)
acc_svd = evaluate.nearsub(Z_train, y_train, Z_train, y_train, n_comp=1)
acc = {"svm": acc_svm, "knn": acc_knn, "nearsub-svd": acc_svd}
utils.save_params(model_dir, acc, name="acc_train.json")

# evaluation test
_, acc_svm = evaluate.svm(Z_train, y_train, Z_test, y_test)
acc_knn = evaluate.knn(Z_train, y_train, Z_test, y_test, k=5)
acc_svd = evaluate.nearsub(Z_train, y_train, Z_test, y_test, n_comp=1)
acc = {"svm": acc_svm, "knn": acc_knn, "nearsub-svd": acc_svd}
utils.save_params(model_dir, acc, name="acc_test.json")

# plot
plot.plot_combined_loss(model_dir)
plot.plot_heatmap(X_train, y_train, "X_train", model_dir)
plot.plot_heatmap(X_test, y_test, "X_test", model_dir)
plot.plot_heatmap(Z_train, y_train, "Z_train", model_dir)
plot.plot_heatmap(Z_test, y_test, "Z_test", model_dir)
plot.plot_2d(X_train, y_train, "X_train", model_dir)
plot.plot_2d(X_test, y_test, "X_test", model_dir)
plot.plot_2d(Z_train, y_train, "Z_train", model_dir)
plot.plot_2d(Z_test, y_test, "Z_test", model_dir)
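
As in the other examples, `plot.plot_2d` is project code. A minimal sketch under the assumption that it projects the features to two principal components and saves a labeled scatter into model_dir; the PCA choice and the filename are assumptions:

import os
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

def plot_2d(features, labels, name, model_dir):
    # Flatten any trailing dimensions, then project to 2 components
    points = PCA(n_components=2).fit_transform(features.reshape(features.shape[0], -1))
    fig, ax = plt.subplots()
    scatter = ax.scatter(points[:, 0], points[:, 1], c=labels, cmap='tab10', s=5)
    ax.legend(*scatter.legend_elements(), title='class')
    fig.savefig(os.path.join(model_dir, name + '_pca2d.png'))
    plt.close(fig)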