#     label.set_fontsize(8)

    if epsilons is not None:
        ax2 = ax.twinx()
        ax2.set_ymargin(0.)
        ax2.set_xmargin(0.)
        ax2.autoscale(False)
        ax2.plot(domain, epsilons, 'r--', alpha=0.5)
        ax2.set_ylabel("$\epsilon$-greedy schedule", color='tab:red')
        for label in ax2.yaxis.get_ticklabels():
            label.set_color('tab:red')


# %% Load all the data
path = os.path.join('data', "trial47.tsv")
frame, action_columns = load_frame(path)

# %% Analysis
ONLY_SUCCESSES = False
SHOW_REGRESSIONS = False
NORMALIZE_DISTS = True

if ONLY_SUCCESSES:
    frame = frame.loc[frame.success]

# Group rows into consecutive blocks of 251 (presumably one run per block)
groups = frame.groupby(lambda ix: ix // 251)

X = np.arange(1000).reshape(-1, 1)
epsilons = groups.apply(avg_epsilon)
# Compute the average iteration number every 100 epochs
fig, ax = plt.subplots()
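
The avg_epsilon helper applied to the groups above is not included in this listing; a minimal sketch, assuming each block of rows carries an epsilon column, might look like:

def avg_epsilon(group):
    # Hypothetical helper (not shown in the original): mean epsilon per block
    return group.epsilon.mean()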
Example #2
def test(args):
    parser = pointcloud_parser.Parser('rs', args.pcap_file)

    # frame = next(parser.generator())
    # frame = filter_bound(frame, [-10., 6.], [-4., 5.], [-3., 3])
    # frame = frame[:, [0, 1]]
    # x, y = frame[:, 0], frame[:, 1]

    # def dbscan_for_4_eps(epss):
    #     for i, eps in enumerate(epss):
    #         l_ = DBSCAN(eps=eps).fit(frame).labels_
    #         plt.subplot(2, 2, i + 1)
    #         plt.xlabel(f"eps = {eps}")
    #         plt.scatter(x, y, s=4, c=l_)

    # def compare_dbscan_kmeans():
    #     l1 = DBSCAN().fit(frame).labels_
    #     l2 = KMeans().fit(frame).labels_
    #     plt.subplot(121)
    #     plt.xlabel("DBSCAN")
    #     plt.scatter(x, y, s=4, c=l1)

    #     plt.subplot(122)
    #     plt.xlabel("KMeans")
    #     plt.scatter(x, y, s=4, c=l2)

    # # dbscan_for_4_eps([0.05, 0.1, 0.5, 1])
    # compare_dbscan_kmeans()
    # plt.show()
    # return

    frame_target = 1
    for frame_cnt, frame in enumerate(parser.generator()):
        frame = filter_bound(frame, [-10., 6.], [-4., 5.], [-3., 3])
        x, y = frame[:, 0], frame[:, 1]

        labels = mtt.run(frame[:, [0, 1]])

        if frame_cnt < frame_target:
            continue
        frame_target = int(
            input(f'Which frame do you want to jump to next (current: {frame_cnt})? '))

        plt.xlabel(f"frame_cnt = {frame_cnt}")
        plt.scatter(x, y, s=4, c=labels)

        # for i, track in enumerate(mtt.tracks):
        #     x_ = [hist.x_ for hist in track.hists]
        #     y_ = [hist.y_ for hist in track.hists]
        #     if x_ != [] and y_ != []:
        #         plt.scatter(x_, y_, marker='x')
        #         plt.text(x_[-1], y_[-1], str(id(track))[-4:])

        plt.show()

    return  # NOTE: everything below this return is unreachable scratch code

    if args.use_serialized_frame:
        for frame in load_frame():
            analyze_frame(frame, args)
            if args.mark_contour_data:
                r = input('Continue?')
            else:
                r = 'y'
            if r == 'y':
                continue
            else:
                break
        return

    with open(args.pcap_file, 'rb') as f:
        pcap = dpkt.pcap.Reader(f)
        i = 0
        for timestamp, buf in pcap:
            # 1248-byte packets carry bare lidar data; 1290-byte packets include
            # a 42-byte Ethernet/IP/UDP header that is stripped before parsing
            if len(buf) == 1248:
                frame = pointcloud_parser.parse(buf, 0)
            elif len(buf) == 1290:
                frame = pointcloud_parser.parse(buf[42:], 1)
            else:
                frame = None  # unknown packet size; skip it

            if frame is None:
                continue

            if args.reset_frames:
                save_frame(frame.astype('f4'), i)
                i += 1
                continue
            analyze_frame(frame, args)
            if args.mark_contour_data:
                r = input('Continue?')
            else:
                r = 'y'
            if r == 'y':
                i += 1
                continue
            else:
                i += 1
                break
Example #3

PATTERN_PL = path.join(DIR_PL, '*')

files_random_baselines = glob.glob(PATTERN_RB)
files_cascade = glob.glob(PATTERN_CS)
files_policy = glob.glob(PATTERN_PL)

Instance = namedtuple("Instance", 'name p seed frame')
Stats = namedtuple(
    "Stats", 'p success_rates avg_papers avg_iterations efficiency size')

instances_rb = list()

for f in files_random_baselines:
    name = f.split(path.sep)[-1][:-4]
    _, __, p, seed = name.split('_')
    frame, _ = load_frame(f)
    instance = Instance(name, float(p), int(seed), frame)
    instances_rb.append(instance)

instances_cs = list()

for f in files_cascade:
    name = f.split(path.sep)[-1][:-4]
    _, seed = name.split('_')
    frame, _ = load_frame(f)
    instance = Instance(name, 0, int(seed), frame)
    instances_cs.append(instance)

instances_pl = list()

for f in files_policy:
Example #4
def __init__(self) -> None:
    self.frames = {
        self.get_garbage_name(frame): load_frame(frame)
        for frame in self.FRAME_FILES
    }
Example #5

# %% [markdown]
# ### Worst frame SDR

# %%
results.loc[results.sdr.idxmin()][[
    'evaluation_dataset', 'model', 'source', 'chorale', 'frame', 'sdr'
]].to_frame().T

# %%
results.sort_values('sdr').head(10)[[
    'evaluation_dataset', 'model', 'source', 'chorale', 'frame', 'sdr'
]]

# %%
reference, estimate = u.load_frame('chorales_synth_v5', 'alto', 358, 39, '006',
                                   models)

# %%
Audio(reference, rate=44100, normalize=False)

# %%
np.unique(reference, return_counts=True)

# %%
u.get_snr(reference, estimate)

# %%
u.get_snr(reference, estimate, 30)

# %% [markdown]
# ## Frames that are quiet but good
Example #6
def test(FLAG):
    output_dim = 11
    valid_list = getVideoList(FLAG.valid_video_list)

    dvalid = pd.DataFrame.from_dict(valid_list)

    xtest = load_frame(FLAG.valid_pkl_file)

    # model
    scope_name = "M2"
    model = MQ2(scope_name=scope_name)
    model.build(lstm_units=[1024, 1024], max_seq_len=25, input_dim= 40960, output_dim=output_dim)

    def initialize_uninitialized(sess):
        # Initialize only the variables that do not yet have a value, so any
        # weights restored from a checkpoint are left untouched
        global_vars = tf.global_variables()
        is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
        not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]
        if len(not_initialized_vars):
            sess.run(tf.variables_initializer(not_initialized_vars))

    with tf.Session() as sess:
        if FLAG.save_dir is not None:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(FLAG.save_dir)

            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model restored %s" % ckpt.model_checkpoint_path)
                sess.run(tf.global_variables())
            else:
                print("No model checkpoint in %s" % FLAG.save_dir)
        else:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.global_variables())
        print("Initialized")


        Xtest, Xtest_end_index = pad_feature_maxlen(xtest, max_len=model.max_seq_len)
        pred, rnn_output = sess.run([model.pred, model.rnn_output],
                            feed_dict={model.x: Xtest,
                                    model.seq_end_index: Xtest_end_index,
                                    model.is_train:False})

        np.savetxt(X=pred.astype(int), fname=os.path.join(FLAG.output_dir, 'p2_result.txt'), fmt='%s')
        print("save as %s" % os.path.join(FLAG.output_dir, 'p2_result.txt'))

        if FLAG.run_tsne:
            from sklearn.manifold import TSNE
            rnn_tsne = TSNE(n_components=2, perplexity=30.0, random_state=5566).fit_transform(rnn_output)
            
            labels = np.array(dvalid.Action_labels).astype('int32')
            plt.figure(0)
            for i in range(output_dim):
                xplot = rnn_tsne[np.where(labels==i)[0]]
                plt.scatter(xplot[:,0], xplot[:,1], label=i)
            plt.legend()
            plt.title("RNN-based features")
            plt.xlabel("tSNE-1")
            plt.ylabel("tSNE-2")
            plt.tight_layout()
            # Save before plt.show(): show() can clear the active figure,
            # leaving savefig() with a blank canvas
            plt.savefig(os.path.join(FLAG.output_dir, 'rnn_tsne.png'))
            plt.show()
            print("save as %s" % os.path.join(FLAG.output_dir, 'rnn_tsne.png'))
Example #7
def stylize(args):

    # Load the model
    saved_sr_model = args.model
    print("Using %s as the model for Super Resolution.." % saved_sr_model)
    cap = cv2.VideoCapture(0)
    n_frame = 0

    # Build and load the transformation network once, outside the capture
    # loop; reloading the weights from disk on every frame would dominate
    # the measured FPS
    if args.downsample_scale == 8:
        sr_model = SR_8x_TransformerNet()
    else:
        sr_model = SR_4x_TransformerNet()
    sr_model.load_state_dict(torch.load(saved_sr_model))
    if is_cuda:
        sr_model.cuda()

    while True:
        start = time.time()
        # Capture frame from webcam
        ret_val, img = cap.read()

        # Downsample the image
        img = cv2.resize(img,
                         None,
                         fx=(1 / args.downsample_scale),
                         fy=(1 / args.downsample_scale))
        # Upsample it for presentation
        img_up = cv2.resize(img,
                            None,
                            fx=args.downsample_scale,
                            fy=args.downsample_scale,
                            interpolation=cv2.INTER_NEAREST)

        content_image = utils.load_frame(img)
        content_transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Lambda(lambda x: x.mul(255))])

        content_image = content_transform(content_image)
        content_image = content_image.unsqueeze(0)

        if is_cuda:
            content_image = content_image.cuda()
        content_image = Variable(content_image, volatile=True)

        # Pass the image through the model and obtain the output
        output = sr_model(content_image)
        if is_cuda:
            output = output.cpu()
        output_data = output.data[0]

        end = time.time()
        print("Processed frame %d" % n_frame)
        print("FPS = %f" % (1 / (end - start)))
        n_frame = n_frame + 1

        frame = output_data.clone().clamp(0, 255).numpy()
        frame = frame.transpose(1, 2, 0).astype("uint8")

        # Crop the images for presentation
        img_up = img_up[100:400, 250:550]
        frame = frame[100:400, 250:550]
        show = np.hstack((img_up, frame))
        cv2.namedWindow("output", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("output", cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
        cv2.imshow("output", show)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
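
Note that Variable(..., volatile=True) is pre-0.4 PyTorch; on modern versions the equivalent is running the forward pass inside a torch.no_grad() block.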
Example #8

import os

import matplotlib.pyplot as plt
from matplotlib.ticker import *
import numpy as np

from utils import load_frame, bootsrapping_test, frame_action_distribution, plot_dist

# %% Load the data
path_policy = os.path.join('data', "trial40_eval.tsv")
path_random = os.path.join('data', "baseline_balancedrandom_n.tsv")
path_cascade = os.path.join('data', "baseline_cascade_n.tsv")
path_exploit = os.path.join('data', "baseline_exploit_n.tsv")
path_explore = os.path.join('data', "baseline_explore_n.tsv")

policy_orig, policy_action_cols = load_frame(path_policy)
policy_orig.set_index("id", inplace=True)
random_orig, random_action_cols = load_frame(path_random)
random_orig.set_index("id", inplace=True)
cascade_orig, cascade_action_cols = load_frame(path_cascade)
cascade_orig.set_index("id", inplace=True)
exploit_orig, exploit_action_cols = load_frame(path_exploit)
exploit_orig.set_index("id", inplace=True)
explore_orig, explore_action_cols = load_frame(path_explore)
explore_orig.set_index("id", inplace=True)


# %% Analysis tools
def average_papers_read(f):
    return f.papers.mean()
Example #9
import asyncio
import curses

from canvas_constants import CANVAS_FRAME_WIDTH, MIN_CANVAS_COORDINATE
from curses_tools import draw_frame, get_frame_size
from utils import load_frame

spaceship_frame_1 = load_frame('frames/rocket/rocket_frame_1.txt')
spaceship_frame_2 = load_frame('frames/rocket/rocket_frame_2.txt')
spaceship_row_size, spaceship_col_size = get_frame_size(spaceship_frame_1)

gameover_frame = load_frame('frames/game_over.txt')
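
In this example, load_frame from utils simply reads an ASCII-art frame from disk; a minimal sketch might be:

def load_frame(path):
    # Hypothetical sketch: return the frame file's contents as one string
    with open(path, 'r') as f:
        return f.read()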


async def show_gameover(canvas):
    row_size, col_size = get_frame_size(gameover_frame)
    max_y, max_x = canvas.getmaxyx()
    frame_start_row = max_y / 2 - row_size
    frame_start_col = max_x / 2 - col_size / 2
    while True:
        draw_frame(
            canvas=canvas,
            start_row=frame_start_row,
            start_column=frame_start_col,
            text=gameover_frame,
        )
        await asyncio.sleep(0)


def get_spaceship_new_yx(
    start_row,
Example #10
def train(FLAG):

    train_list = getVideoList(FLAG.train_video_list)
    valid_list = getVideoList(FLAG.valid_video_list)

    dtrain = pd.DataFrame.from_dict(train_list)
    dvalid = pd.DataFrame.from_dict(valid_list)

    # frames
    xtrain = load_frame(FLAG.train_pkl_file)
    xtest = load_frame(FLAG.valid_pkl_file)

    # labels
    Ytrain = np.array(dtrain.Action_labels).astype('int32')
    Ytest = np.array(dvalid.Action_labels).astype('int32')
    Ytrain = one_hot_encoding(Ytrain, 11)
    Ytest = one_hot_encoding(Ytest, 11)

    # model
    scope_name = "M2"
    model = MQ2(scope_name=scope_name)
    model.build(lstm_units=[1024, 1024],
                max_seq_len=25,
                input_dim=40960,
                output_dim=11)

    # trainable variables
    train_vars = list()
    for var in tf.trainable_variables():
        if model.scope_name in var.name:
            train_vars.append(var)

    # optimizer
    learning_rate = FLAG.lr
    train_op = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                      beta1=0.5).minimize(model.loss,
                                                          var_list=train_vars)

    def initialize_uninitialized(sess):
        global_vars = tf.global_variables()
        is_not_initialized = sess.run(
            [tf.is_variable_initialized(var) for var in global_vars])
        not_initialized_vars = [
            v for (v, f) in zip(global_vars, is_not_initialized) if not f
        ]
        if len(not_initialized_vars):
            sess.run(tf.variables_initializer(not_initialized_vars))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # hyper parameters
        batch_size = 32
        epoch = 50
        early_stop_patience = 10
        min_delta = 0.0001

        # recorder
        epoch_counter = 0
        history = list()

        # re-initialize
        initialize_uninitialized(sess)

        # reset due to adding a new task
        patience_counter = 0
        current_best_val_accu = 0

        saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
        checkpoint_path = os.path.join(FLAG.save_dir, 'model.ckpt')

        # optimize when the aggregated obj
        while (patience_counter < early_stop_patience
               and epoch_counter < epoch):

            # start training
            stime = time.time()

            train_loss, train_accu = 0.0, 0.0
            for i in range(int(len(xtrain) / batch_size)):
                st = i * batch_size
                ed = (i + 1) * batch_size
                Xtrain, Xtrain_end_index = pad_feature_maxlen(
                    xtrain[st:ed], max_len=model.max_seq_len)

                # process R
                _, loss, accu, logits = sess.run(
                    [train_op, model.loss, model.accuracy, model.logits],
                    feed_dict={
                        model.x: Xtrain,
                        model.y: Ytrain[st:ed],
                        model.seq_end_index: Xtrain_end_index,
                        model.is_train: True
                    })
                train_loss += loss
                train_accu += accu
            train_loss = train_loss / (len(xtrain) / batch_size)
            train_accu = train_accu / (len(xtrain) / batch_size)

            val_loss, val_accu = 0.0, 0.0
            for i in range(int(len(xtest) / batch_size)):
                st = i * batch_size
                ed = (i + 1) * batch_size
                Xtest, Xtest_end_index = pad_feature_maxlen(
                    xtest[st:ed], max_len=model.max_seq_len)
                loss, accu, logits = sess.run(
                    [model.loss, model.accuracy, model.logits],
                    feed_dict={
                        model.x: Xtest,
                        model.y: Ytest[st:ed],
                        model.seq_end_index: Xtest_end_index,
                        model.is_train: False
                    })
                val_loss += loss
                val_accu += accu
            val_loss = val_loss / (len(xtest) / batch_size)
            val_accu = val_accu / (len(xtest) / batch_size)

            print("Epoch %s (%s), %s sec >> train loss: %.4f, train accu: %.4f, "
                  "val loss: %.4f, val accu: %.4f"
                  % (epoch_counter, patience_counter, round(time.time() - stime, 2),
                     train_loss, train_accu, val_loss, val_accu))
            history.append([train_loss, train_accu, val_loss, val_accu])

            # early stopping check
            if (val_accu - current_best_val_accu) > min_delta:
                current_best_val_accu = val_accu
                patience_counter = 0
                saver.save(sess, checkpoint_path, global_step=epoch_counter)
                print("save in %s" % checkpoint_path)
                para_dict = sess.run(model.para_dict)
                np.save(os.path.join(FLAG.save_dir, "para_dict.npy"),
                        para_dict)
                print("save in %s" %
                      os.path.join(FLAG.save_dir, "para_dict.npy"))
            else:
                patience_counter += 1

            # epoch end
            epoch_counter += 1
        # end of training
    # end of session

    df = pd.DataFrame(history)
    df.columns = ['train_loss', 'train_accu', 'val_loss', 'val_accu']
    # DataFrame.plot() creates its own figure, which plt.savefig then saves
    df[['train_loss', 'val_loss']].plot()
    plt.savefig(os.path.join(FLAG.save_dir, 'loss.png'))
    plt.close()

    df[['train_accu', 'val_accu']].plot()
    plt.savefig(os.path.join(FLAG.save_dir, 'accu.png'))
    plt.close()
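
one_hot_encoding, used to prepare the labels above, is another helper the listing omits; a minimal sketch could be:

import numpy as np

def one_hot_encoding(labels, num_classes):
    # Hypothetical sketch: map integer class labels to a one-hot matrix
    encoded = np.zeros((len(labels), num_classes), dtype='float32')
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded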
Example #11
      occupied = np.zeros((numRows, numCols), dtype=float)

    extra_mps = []
    if args.with_extra_points:
      kf = kfs_dict[kf_id]  # current keyframe
      camera_center = [kf[1], kf[2], kf[3]]
      camera_translation = [kf[4], kf[5], kf[6]]
      camera_rotation = [kf[7], kf[8], kf[9], kf[10], kf[11], kf[12], kf[13], kf[14], kf[15]]
      time_stamp = kf[0]

      camera_matrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
      mps_of_this_kf = kfs_mappoints[time_stamp]  # map points of current keyframe
      camera_to_world = get_camera_to_world(camera_translation, camera_rotation)  # tf matrix
      world_to_camera = get_world_to_camera(camera_translation, camera_rotation)  # inverse tf matrix

      kf_img = load_frame(time_stamp, 0)
      segment = get_graph_segment_for_frame(time_stamp, 0, sigma=args.sigma)  # segmentation from graph-cut algorithm
      segment_with_color = segment
      # Pack each RGB triple into a single integer id; base 256 is needed for
      # a collision-free encoding (base 255 maps distinct colors such as
      # (1, 0, 0) and (0, 255, 0) to the same id)
      segment = segment[:, :, 0] * (256**2) + segment[:, :, 1] * 256 + segment[:, :, 2]
      # get pixel coordinates of the map points

      pix_of_mps = []
      choosen_mps = []
      for mp in mps_of_this_kf:
        x, y = convert_3d_point_to_pix(mp[2], mp[3], mp[4], world_to_camera, fx, fy, cx, cy)
        if x in range(0, segment.shape[1]) and y in range(0, segment.shape[0]):
          pix_of_mps.append([x, y])
          choosen_mps.append([mp[2], mp[3], mp[4]])
      pix_of_mps = np.asarray(pix_of_mps)
      plane_equation_dict = create_plane_equation_dict(segment, choosen_mps, pix_of_mps)
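
convert_3d_point_to_pix is not part of the listing; a minimal sketch, assuming world_to_camera is a 4x4 homogeneous transform and (fx, fy, cx, cy) are pinhole intrinsics, might be:

import numpy as np

def convert_3d_point_to_pix(X, Y, Z, world_to_camera, fx, fy, cx, cy):
    # Hypothetical sketch: world point -> camera frame -> pixel coordinates
    pc = world_to_camera @ np.array([X, Y, Z, 1.0])
    x = int(round(fx * pc[0] / pc[2] + cx))
    y = int(round(fy * pc[1] / pc[2] + cy))
    return x, y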