Example #1
def bench(client, n):
    """ Benchmark n requests """
    pairs = [(x, x + 1) for x in range(n)]

    started = time.time()
    for pair in pairs:
        res, err = client.call('add', *pair)
        # assert err is None
    duration = time.time() - started
    print('Client stats:')
    util.print_stats(n, duration)
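
All of the examples on this page feed their results to a util.print_stats helper that is not itself shown. As a point of reference, here is a minimal sketch of what the two-argument form used by the client/service benchmarks might look like, assuming it only derives throughput from a request count and an elapsed time (the output format is an assumption, not the actual library code):

def print_stats(n, duration):
    """Print request count, elapsed time, and derived throughput."""
    rate = n / duration if duration else float('inf')
    print('%d requests in %.3f s (%.1f req/s)' % (n, duration, rate))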
Example #2
def bench(client, n):
    """ Benchmark n requests """
    pairs = [(x, x + 1) for x in range(n)]

    started = time.time()
    for pair in pairs:
        res, err = client.call('add', *pair)
        # assert err is None
    duration = time.time() - started
    print('Client stats:')
    util.print_stats(n, duration)
Example #3
def bench(client, n):
    """ Benchmark n requests """
    items = list(range(n))

    # Time client publish operations
    # ------------------------------
    started = time.time()
    for i in items:
        client.publish('test', i)
    duration = time.time() - started

    print('Publisher client stats:')
    util.print_stats(n, duration)
Example #4
def main(_):
  # Configuration.
  num_unrolls = FLAGS.num_steps

  if FLAGS.seed:
    tf.set_random_seed(FLAGS.seed)

  # Problem.
  problem, net_config, net_assignments = util.get_config(FLAGS.problem,
                                                         FLAGS.path, mode='test')

  # Optimizer setup.
  if FLAGS.optimizer == "Adam":
    cost_op = problem()
    problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    problem_reset = tf.variables_initializer(problem_vars)

    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    update = optimizer.minimize(cost_op)
    # get_slot_names() returns strings; reset the actual slot variables,
    # which exist only after minimize() creates them.
    optimizer_vars = [optimizer.get_slot(v, name)
                      for v in problem_vars
                      for name in optimizer.get_slot_names()]
    optimizer_reset = tf.variables_initializer(optimizer_vars)
    reset = [problem_reset, optimizer_reset]
  elif FLAGS.optimizer == "L2L":
    if FLAGS.path is None:
      logging.warning("Evaluating untrained L2L optimizer")
    optimizer = meta.MetaOptimizer(FLAGS.problem, FLAGS.num_particle, **net_config)
    meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments,
                                    model_path=FLAGS.path)
    loss, update, reset, cost_op, x_final, constant = meta_loss
  else:
    raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))
  with ms.MonitoredSession() as sess:
    # Prevent accidental changes to the graph.
    tf.get_default_graph().finalize()
    min_loss_record = []
    all_time_loss_record = []
    total_time = 0
    total_cost = 0
    x_record = [[sess.run(item) for item in x_final]]
    for _ in xrange(FLAGS.num_epochs):
      # Training.
      time, cost, constants = util.eval_run_epoch(sess, cost_op, [update], reset,
                                                  num_unrolls, x_final, constant)
      total_time += time
      all_time_loss_record.append(cost)
    with open('./{}/evaluate_record.pickle'.format(FLAGS.path), 'wb') as l_record:
      record = {'all_time_loss_record': all_time_loss_record,
                'loss': cost,
                'constants': [sess.run(item) for item in constants]}
      pickle.dump(record, l_record)
    # Results.
    util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                     total_time, FLAGS.num_epochs)
Example #5
def start_service(addr, n):
    """ Start a service """
    s = Responder(addr)
    s.register('add', lambda x, y: x + y)

    started = time.time()
    for _ in range(n):
        s.process()
    duration = time.time() - started

    time.sleep(0.1)
    print('Service stats:')
    util.print_stats(n, duration)
    return
Example #6
def start_service(addr, n):
    """ Start a service """
    s = Service(addr)

    started = time.time()
    for _ in range(n):
        msg = s.sock.recv()
        s.sock.send(msg)
    s.sock.close()
    duration = time.time() - started

    print('Raw REP service stats:\n')
    util.print_stats(n, duration)
    return
Example #7
def start_service(addr, n):
    """ Start a service """
    s = Service(addr)
    s.register('add', lambda x, y: x + y)

    started = time.time()
    for _ in range(n):
        s.process()
    duration = time.time() - started

    time.sleep(0.1)
    print('Service stats:')
    util.print_stats(n, duration)
    return
Example #8
def bench(client, n):
    """ Benchmark n requests """
    items = list(range(n))

    # Time client publish operations
    # ------------------------------
    started = time.time()
    msg = b'test line'
    for i in items:
        client.sock.send(msg)
    duration = time.time() - started

    print('Raw PUB client stats:')
    util.print_stats(n, duration)
Example #9
def bench(client, n):
    """ Benchmark n requests """
    items = list(range(n))

    # Time client publish operations
    # ------------------------------
    started = time.time()
    msg = b'test line'
    for i in items:
        client.socket.send(msg)
    duration = time.time() - started

    print('Raw PUB client stats:')
    util.print_stats(n, duration)
Example #10
def train_loop(agent,
               model,
               env,
               steps_in_iteration,
               n_iterations,
               window_length,
               history=None,
               debug=False,
               verbose=0):
    env.swap_other_players(
        [AIPlayer(agent) for i in range(len(env.other_players))])
    history = PokerHistory() if history is None else history
    opponent_agent = None
    save_agent_weights(agent)
    for i in range(n_iterations):
        print('ITERATION %s' % str(i))
        # Free up resources first
        release_memory([opponent_agent, agent])
        # Create a copy of the agent to play against us (and to free up the resources recreate our training agent as well)
        agent = build_dqn_agent(
            model(window_length, env.n_observation_dimensions, env.n_actions),
            env.n_actions, window_length, debug)
        #load_agent_weights(agent)
        opponent_agent = build_dqn_agent(
            model(window_length, env.n_observation_dimensions, env.n_actions),
            env.n_actions, window_length, debug)
        load_agent_weights(opponent_agent)
        env.swap_opponent_agent(opponent_agent)
        history = agent.fit(env,
                            nb_steps=steps_in_iteration,
                            visualize=debug,
                            log_interval=steps_in_iteration // 5,
                            verbose=verbose,
                            history=history)
        print_stats(history)
        save_agent_weights(agent)

    release_memory([opponent_agent, agent])
    # Create a copy of the agent to play against us (and to free up the resources recreate our training agent as well)
    agent = build_dqn_agent(
        model(window_length, env.n_observation_dimensions, env.n_actions),
        env.n_actions, window_length, debug)
    #load_agent_weights(agent)
    opponent_agent = build_dqn_agent(
        model(window_length, env.n_observation_dimensions, env.n_actions),
        env.n_actions, window_length, debug)
    load_agent_weights(opponent_agent)
    env.swap_opponent_agent(opponent_agent)
    return agent, history
Example #11
def start_service(addr, n):
    """ Start a service """

    s = SubService(addr)
    s.sock.set_string_option(nanomsg.SUB, nanomsg.SUB_SUBSCRIBE, 'test')

    started = time.time()
    for _ in range(n):
        msg = s.sock.recv()
    s.sock.close()
    duration = time.time() - started

    print('Raw SUB service stats:')
    util.print_stats(n, duration)
    return
Example #12
def start_service(addr, n):
    """ Start a service """

    s = Subscriber(addr)
    s.socket.setsockopt(zmq.SUBSCRIBE, b'test')  # pyzmq requires bytes on Python 3

    started = time.time()
    for _ in range(n):
        msg = s.socket.recv()
    s.socket.close()
    duration = time.time() - started

    print('Raw SUB service stats:')
    util.print_stats(n, duration)
    return
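
Examples #11 and #12 are the receiving halves of the publish benchmarks in Examples #3, #8, and #9. A minimal sketch of how the two halves might be run together, assuming the bench, start_service, and Publisher definitions above (the address and message count are arbitrary):

import time
from multiprocessing import Process

def run_pubsub_bench(addr='tcp://127.0.0.1:5555', n=10000):
    # Start the subscriber first and give it time to connect; otherwise
    # early publishes are silently dropped (the PUB/SUB slow-joiner problem).
    service = Process(target=start_service, args=(addr, n))
    service.start()
    time.sleep(0.5)
    bench(Publisher(addr), n)
    service.join()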
Example #13
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps

    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(
        FLAGS.problem, FLAGS.path)

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        update = optimizer.minimize(cost_op)
        # get_slot_names() returns strings; reset the actual slot variables,
        # which exist only after minimize() creates them.
        optimizer_vars = [optimizer.get_slot(v, name)
                          for v in problem_vars
                          for name in optimizer.get_slot_names()]
        optimizer_reset = tf.variables_initializer(optimizer_vars)
        reset = [problem_reset, optimizer_reset]
    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(**net_config)
        meta_loss = optimizer.meta_loss(problem,
                                        1,
                                        net_assignments=net_assignments)
        _, update, reset, cost_op, _ = meta_loss
    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        for _ in xrange(FLAGS.num_epochs):
            # Training.
            time, cost = util.run_epoch(sess, cost_op, [update], reset,
                                        num_unrolls)
            total_time += time
            total_cost += cost

        # Results.
        util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                         total_time, FLAGS.num_epochs)
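
The TensorFlow examples call a four-argument overload instead: util.print_stats(header, total_error, total_time, n). A plausible sketch of that form, assuming it simply reports per-epoch averages (the log-scaled error line is an assumption based on how these training loops accumulate cost):

import numpy as np

def print_stats(header, total_error, total_time, n):
    """Print a header, mean (log10) final error, and mean epoch time."""
    print(header)
    print('Log Mean Final Error: {:.2f}'.format(np.log10(total_error / n)))
    print('Mean epoch time: {:.2f} s'.format(total_time / n))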
Example #14
def bench(client, n):
    """ Benchmark n requests """
    items = list(range(n))

    # Time client publish operations
    # ------------------------------
    started = time.time()
    msg = b'x'
    for i in items:
        client.sock.send(msg)
        res = client.sock.recv()
        assert msg == res
    duration = time.time() - started

    print('Raw REQ client stats:\n')
    util.print_stats(n, duration)
Example #15
def start_service(addr, n):
    """ Start a service """

    s = SubService(addr)
    def do_something(line):
        pass
    s.subscribe('test', do_something)

    started = time.time()
    for _ in range(n):
        s.process()
    s.sock.close()
    duration = time.time() - started

    print('Subscriber service stats:\n')
    util.print_stats(n, duration)
    return
Example #16
def start_service(addr, n):
    """ Start a service """

    s = Subscriber(addr)
    if nanomsg:
        s.socket.set_string_option(nanomsg.SUB, nanomsg.SUB_SUBSCRIBE, 'test')
    else:
        s.socket.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, 'test')

    started = time.time()
    for _ in range(n):
        msg = s.socket.recv()
    s.socket.close()
    duration = time.time() - started

    print('Raw SUB service stats:')
    util.print_stats(n, duration)
    return
Example #17
def train(device, batch_size, model, trainloader, optimizer, epoch, writer):
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')

    metric_ftns = ['loss', 'correct', 'nums', 'accuracy']
    train_metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    train_metrics.reset()
    confusion_matrix = torch.zeros(2, 2)

    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        input_data = input_data.to(device)
        target = target.to(device)

        output = model(input_data)

        loss = criterion(output, target)
        loss.backward()

        optimizer.step()
        correct, nums, acc = accuracy(output, target)
        num_samples = batch_idx * batch_size + 1
        _, preds = torch.max(output, 1)
        for t, p in zip(target.cpu().view(-1), preds.cpu().view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'nums': nums,
                'loss': loss.item(),
                'accuracy': acc
            },
            writer_step=(epoch - 1) * len(trainloader) + batch_idx)
        print_stats(epoch, batch_size, num_samples, trainloader, train_metrics)
    num_samples += len(target) - 1

    print_summary(epoch, num_samples, train_metrics, mode="Training")
    print('Confusion Matrix\n{}\n'.format(confusion_matrix.cpu().numpy()))
    return train_metrics
Example #18
def mm_train(device, batch_size, a_m, i_m, trainloader, optimizer, epoch,
             writer):
    a_m.train()
    i_m.train()
    weight = torch.tensor([0.1, 0.9]).to(device)
    ce_loss = nn.CrossEntropyLoss(weight=weight, reduction='mean')
    alpha = 1

    metric_ftns = ['loss', 'correct', 'nums', 'accuracy']
    image_metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    audio_metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    image_metrics.reset()
    audio_metrics.reset()
    i_confusion_matrix = torch.zeros(2, 2)
    a_confusion_matrix = torch.zeros(2, 2)

    for batch_idx, (audio, a_label, img, i_label) in enumerate(trainloader):

        audio, img = audio.to(device), img.to(device)
        a_label, i_label = a_label.to(device), i_label.to(device)
        i_output, i_feature = i_m(img)
        a_output, a_feature = a_m(audio)

        i_ce = ce_loss(i_output, i_label)
        a_ce = ce_loss(a_output, a_label)
        csa = csa_loss(a_feature, i_feature.detach(),
                       (a_label == i_label).float())

        loss = i_ce + 0.4 * a_ce + alpha * csa
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        num_samples = batch_idx * batch_size + 1
        # image
        i_correct, i_nums, i_acc = accuracy(i_output, i_label)
        image_metrics.update_all_metrics(
            {
                'correct': i_correct,
                'nums': i_nums,
                'loss': loss.item(),
                'accuracy': i_acc
            },
            writer_step=(epoch - 1) * len(trainloader) + batch_idx)
        # audio
        a_correct, a_nums, a_acc = accuracy(a_output, a_label)
        audio_metrics.update_all_metrics(
            {
                'correct': a_correct,
                'nums': a_nums,
                'loss': loss.item(),
                'accuracy': a_acc
            },
            writer_step=(epoch - 1) * len(trainloader) + batch_idx)
        _, preds = torch.max(a_output, 1)
        for t, p in zip(a_label.cpu().view(-1), preds.cpu().view(-1)):
            a_confusion_matrix[t.long(), p.long()] += 1
        print_stats(epoch,
                    batch_size,
                    num_samples,
                    trainloader,
                    image_metrics,
                    mode="Image",
                    acc=i_acc)
        print_stats(epoch,
                    batch_size,
                    num_samples,
                    trainloader,
                    audio_metrics,
                    mode="Audio",
                    acc=a_acc)
    num_samples += len(a_output) - 1

    print_summary(epoch, num_samples, image_metrics, mode="Training Image")
    print_summary(epoch, num_samples, audio_metrics, mode="Training Audio")
    print('A_Confusion Matrix\n{}\n'.format(a_confusion_matrix.cpu().numpy()))

    return audio_metrics
Example #19
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps

    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    # problem, net_config, net_assignments = util.get_config(FLAGS.problem,
    #                                                        FLAGS.path)
    param_dict = {}
    param_dict['bs'] = FLAGS.bs
    param_dict['m'] = FLAGS.m
    param_dict['n'] = FLAGS.n
    print(param_dict)
    problem, net_config, net_assignments = util.get_config(
        FLAGS.problem,
        net_name="RNNprop",
        mode=FLAGS.mode,  # pass in mode
        num_linear_heads=1,
        init=FLAGS.init,
        path=FLAGS.path,
        param=param_dict)

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        update = optimizer.minimize(cost_op)
        # get_slot_names() returns strings; reset the actual slot variables,
        # which exist only after minimize() creates them.
        optimizer_vars = [optimizer.get_slot(v, name)
                          for v in problem_vars
                          for name in optimizer.get_slot_names()]
        optimizer_reset = tf.variables_initializer(optimizer_vars)
        reset = [problem_reset, optimizer_reset]
    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(FLAGS.num_mt, FLAGS.beta1, FLAGS.beta2,
                                       **net_config)
        # meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments)

        meta_loss, _, _, _, _, seq_step, \
            _, _, _, _, _, _ = optimizer.meta_loss(
                problem, 1, net_assignments=net_assignments)
        # These used to be individually named variables, but they triggered
        # "object never used" warnings, so underscores are used instead.

        _, update, reset, cost_op, _ = meta_loss

    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with ms.MonitoredSession() as sess:
        # with tf.Session(config=config) as sess:
        sess.run(reset)
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        loss_record = []
        for ep in xrange(FLAGS.num_epochs):
            # Training.
            time, cost = util.run_eval_epoch(sess,
                                             cost_op, [update],
                                             num_unrolls,
                                             step=seq_step)
            total_time += time

            total_cost += sum(cost) / num_unrolls
            loss_record += cost
            print(ep, cost[-1])
        # Results.
        util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                         total_time, FLAGS.num_epochs)
    with open(
            '{}/{}_eval_loss_record.pickle'.format(FLAGS.path,
                                                   FLAGS.optimizer),
            'wb') as l_record:
        pickle.dump(loss_record, l_record)
    print("Saving evaluate loss record {}".format(FLAGS.path))
Example #20
    model = FrequencyModel(inverted_vocab)
    model.fit(docs)
    models[country] = model

eval_train = pd.read_csv("./data/DialectSim_train.csv")
eval_test = pd.read_csv("./data/DialectSim_test.csv")

train_word = eval_train["word"].tolist()
train_label = eval_train["label"].tolist()
test_word = eval_test["word"].tolist()
test_label = eval_test["label"].tolist()
pred = np.array([score(w, models) for w in train_word])

linspace = np.linspace(0, 0.5, 1000)
accs = []
for t in linspace:
    accs.append(acc(train_label, pred > t))
plt.plot(linspace, accs)
plt.xlabel("Threshold")
plt.ylabel("Acc on training set")
plt.show()

t = linspace[np.argmax(accs)]
# print("Train Acc = %f" % acc(train_label, pred > t))
# print("Test Acc = %f" % acc(test_label, np.array([score(w, models) for w in test_word]) > t))
print("Training stats:")
util.print_stats(pred > t, train_label)
print("Testing stats:")
util.print_stats(
    np.array([score(w, models) for w in test_word]) > t, test_label)
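
Examples #20 and #24 pass boolean predictions and ground-truth labels instead: util.print_stats(pred > t, labels). A hedged sketch of such a classification-report variant; the exact metrics and output format are assumptions:

import numpy as np

def print_stats(pred, label):
    """Print accuracy, precision, recall, and F1 for binary predictions."""
    pred = np.asarray(pred, dtype=bool)
    label = np.asarray(label, dtype=bool)
    tp = float(np.sum(pred & label))
    fp = float(np.sum(pred & ~label))
    fn = float(np.sum(~pred & label))
    acc = float(np.mean(pred == label))
    prec = tp / (tp + fp) if tp + fp else 0.0
    rec = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * prec * rec / (prec + rec) if prec + rec else 0.0
    print('acc=%.4f precision=%.4f recall=%.4f f1=%.4f' % (acc, prec, rec, f1))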
Example #21
def process_input(in_lines, flush_stdout_):
    time1 = time.time()

    global flush_stdout
    flush_stdout = flush_stdout_

    multiline = ""
    all_lines = []
    double_form_cnt = 0
    
    for line in in_lines:
        if "#" in line:
            line = line.split("#")[0]
          
        if not line.strip():
            continue
          
        line = line.rstrip()
        
        if line.endswith("\\"):
            multiline += line  #.replace("\\", "")
            continue
        else:
            if multiline:
                line = multiline + line
            multiline = ""
        
        if ("/v" in line and ":imperf:perf" in line) \
                or ("/adjp" in line and "&adj" in line):
            double_form_cnt += 1
        
        try:
            tag_lines = expand_line(line, flush_stdout)
            check_lines(tag_lines)
        except:
            print("Exception in line: \"" + line + "\"", file=sys.stderr)
            raise
        
        if flush_stdout:
            sorted_lines = util.sort_all_lines(tag_lines)
            print("\n".join(sorted_lines))
            sys.stdout.flush()
        else:
            all_lines.extend(tag_lines)

    if "-time" in sys.argv:
        time2 = time.time()
        print("Total out_lines", len(all_lines), file=sys.stderr)
        print("Processing time: {0:.3f}".format(time2-time1), file=sys.stderr)
    
    
    if not flush_stdout:
        # print("\n".join(line for line in all_lines if "adv:" in line), file=sys.stderr)
        # print("sort 1", file=sys.stderr)
        sorted_lines = util.sort_all_lines(all_lines)
        # print("\n".join(line for line in sorted_lines if "adv:" in line), file=sys.stderr)
        sorted_lines = post_process_sorted(sorted_lines)

        if "-time" in sys.argv:
            time3 = time.time()
            print("Sorting time 1: {0:.3f}".format(time3-time2), file=sys.stderr)
    
        if "-indent" in sys.argv:
            # to sort newly promoted lemmas
            sorted_lines = util.sort_all_lines(list(set(sorted_lines)))

            if "-time" in sys.argv:
                time4 = time.time()
                print("Sorting time 2: {0:.3f}".format(time4-time3), file=sys.stderr)
          
        # print("\n-- ".join(ln for ln in sorted_lines if ln.startswith("Венед")), file=sys.stderr)

        sorted_lines = [cleanup(line) for line in sorted_lines]

        if "-wordlist" in sys.argv:
            print_word_list(sorted_lines)

        if "-indent" in sys.argv:
            if "-mfl" in sys.argv:
                with open("dict_corp_lt.txt", "w", encoding="utf-8") as f:
                    f.write("\n".join(sorted_lines))
        
            sorted_lines = util.indent_lines(sorted_lines)
    
        
            if "-stats" in sys.argv:
                util.print_stats(sorted_lines, double_form_cnt)

        if "--log-usage" in sys.argv:
            log_usage()
          
        return sorted_lines
Example #22
def main():
    assert torch.cuda.device_count() == 1 # mmdet only supports single GPU testing

    opts = parse_args()
    assert (not opts.mask_timing) or (opts.mask_timing and not opts.no_mask)

    mkdir2(opts.out_dir)

    db = COCO(opts.annot_path)
    class_names = [c['name'] for c in db.dataset['categories']]
    n_class = len(class_names)
    coco_mapping = None if opts.no_class_mapping else db.dataset.get('coco_mapping', None)
    if coco_mapping is not None:
        coco_mapping = np.asarray(coco_mapping)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']

    model = init_detector(opts)

    # warm up the GPU
    img = db.imgs[0]
    w_img, h_img = img['width'], img['height']
    _ = inference_detector(model, np.zeros((h_img, w_img, 3), np.uint8))
    torch.cuda.synchronize()

    runtime_all = []
    n_processed = 0
    n_total = 0

    for sid, seq in enumerate(tqdm(seqs)):
        frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
        
        # load all frames in advance
        frames = []
        for img in frame_list:
            img_path = join(opts.data_root, seq_dirs[sid], img['name'])
            frames.append(imread(img_path))
        n_frame = len(frames)
        n_total += n_frame
        
        timestamps = []
        results_raw = []
        results_parsed = []
        input_fidx = []
        runtime = []
        last_fidx = None
        if not opts.dynamic_schedule:
            stride_cnt = 0
        
        t_total = n_frame/opts.fps
        t_start = perf_counter()
        while 1:
            t1 = perf_counter()
            t_elapsed = t1 - t_start
            if t_elapsed >= t_total:
                break

            # identify latest available frame
            fidx_continous = t_elapsed*opts.fps
            fidx = int(np.floor(fidx_continous))
            if fidx == last_fidx:
                continue
            
            last_fidx = fidx
            if opts.dynamic_schedule:
                fidx_remainder = fidx_continous - fidx
                if fidx_remainder > 0.5:
                    continue
            else:
                if stride_cnt % opts.det_stride == 0:
                    stride_cnt = 1
                else:
                    stride_cnt += 1
                    continue

            frame = frames[fidx]
            result = inference_detector(model, frame, gpu_pre=not opts.cpu_pre, decode_mask=not opts.mask_timing)
            if opts.mask_timing:
                mask_encoded = result[2]
                result = result[:2]
                bboxes, scores, labels, _, sel = \
                    parse_det_result(result, coco_mapping, n_class, return_sel=True)
            else:
                bboxes, scores, labels, masks = \
                    parse_det_result(result, coco_mapping, n_class)

            torch.cuda.synchronize()

            t2 = perf_counter()
            t_elapsed = t2 - t_start
            if t_elapsed >= t_total:
                break
            if opts.mask_timing:
                masks = decode_mask(mask_encoded, sel)

            timestamps.append(t_elapsed)
            results_raw.append(result)
            results_parsed.append((bboxes, scores, labels, masks))
            input_fidx.append(fidx)
            runtime.append(t2 - t1)

        out_path = join(opts.out_dir, seq + '.pkl')
        if opts.overwrite or not isfile(out_path):
            pickle.dump({
                'results_raw': results_raw,
                'results_parsed': results_parsed,
                'timestamps': timestamps,
                'input_fidx': input_fidx,
                'runtime': runtime,
            }, open(out_path, 'wb'))

        runtime_all += runtime
        n_processed += len(results_raw)

    runtime_all_np = np.asarray(runtime_all)
    n_small_runtime = (runtime_all_np < 1.0/opts.fps).sum()

    out_path = join(opts.out_dir, 'time_info.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump({
            'runtime_all': runtime_all,
            'n_processed': n_processed,
            'n_total': n_total,
            'n_small_runtime': n_small_runtime,
        }, open(out_path, 'wb'))

    # convert to ms for display
    s2ms = lambda x: 1e3*x

    print(f'{n_processed}/{n_total} frames processed')
    print_stats(runtime_all_np, 'Runtime (ms)', cvt=s2ms)
    print(f'Runtime smaller than unit time interval: '
        f'{n_small_runtime}/{n_processed} '
        f'({100.0*n_small_runtime/n_processed:.4g}%)')
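
Examples #22, #23, #25, and #26 use yet another signature, print_stats(values, label, cvt=...), over arrays of per-frame runtimes. A minimal sketch, assuming cvt is a unit-conversion callable (such as the s2ms lambda above) applied before display:

import numpy as np

def print_stats(values, label='', cvt=lambda x: x):
    """Print mean/std/min/max of a numeric sequence after unit conversion."""
    v = cvt(np.asarray(values, dtype=np.float64))
    print('%s: mean %.4g, std %.4g, min %.4g, max %.4g' %
          (label, v.mean(), v.std(), v.min(), v.max()))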
Example #23
def main():
    assert torch.cuda.device_count() == 1 # mmdet only supports single GPU testing

    opts = parse_args()
    mkdir2(opts.out_dir)

    db = COCO(opts.annot_path)
    class_names = [c['name'] for c in db.dataset['categories']]
    n_class = len(class_names)
    coco_mapping = db.dataset.get('coco_mapping', None)
    if coco_mapping is not None:
        coco_mapping = np.asarray(coco_mapping)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']

    img = db.imgs[0]
    w_img, h_img = img['width'], img['height']

    mp.set_start_method('spawn')
    frame_recv, frame_send = mp.Pipe(False)
    det_res_recv, det_res_send = mp.Pipe(False)
    det_proc = mp.Process(target=det_process, args=(opts, frame_recv, det_res_send, w_img, h_img))
    det_proc.start()

    if opts.dynamic_schedule:
        runtime = pickle.load(open(opts.runtime, 'rb'))
        runtime_dist = dist_from_dict(runtime, opts.perf_factor)
        mean_rtf = runtime_dist.mean()*opts.fps

    n_total = 0
    t_det_all = []
    t_send_frame_all = []
    t_recv_res_all = []
    t_assoc_all = []
    t_forecast_all = []

    with torch.no_grad():
        kf_F = torch.eye(8)
        kf_Q = torch.eye(8)
        kf_R = 10*torch.eye(4)
        kf_P_init = 100*torch.eye(8).unsqueeze(0)

        for sid, seq in enumerate(tqdm(seqs)):
            frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
            frame_list = [join(opts.data_root, seq_dirs[sid], img['name']) for img in frame_list]
            n_frame = len(frame_list)
            n_total += n_frame
            
            timestamps = []
            results_parsed = []
            input_fidx = []
            
            processing = False
            fidx_t2 = None            # detection input index at t2
            fidx_latest = None
            tkidx = 0                 # track starting index
            kf_x = torch.empty((0, 8, 1))
            kf_P = torch.empty((0, 8, 8))
            n_matched12 = 0

            # let detector process to read all the frames
            frame_send.send(frame_list)
            # it is possible that unfetched results remain in the pipe
            while 1:
                msg = det_res_recv.recv() # wait till the detector is ready
                if msg == 'ready':
                    break
                elif isinstance(msg, Exception):
                    raise msg

            t_total = n_frame/opts.fps
            t_unit = 1/opts.fps
            t_start = perf_counter()
            while 1:
                t1 = perf_counter()
                t_elapsed = t1 - t_start
                if t_elapsed >= t_total:
                    break

                # identify latest available frame
                fidx_continous = t_elapsed*opts.fps
                fidx = int(np.floor(fidx_continous))
                if fidx == fidx_latest:
                    # algorithm is fast and has some idle time
                    wait_for_next = True
                else:
                    wait_for_next = False
                    if opts.dynamic_schedule:
                        if mean_rtf >= 1:
                            # when runtime < 1, it should always process every frame
                            fidx_remainder = fidx_continous - fidx
                            if mean_rtf < np.floor(fidx_remainder + mean_rtf):
                                # wait till next frame
                                wait_for_next = True

                if wait_for_next:
                    # sleep
                    continue

                if not processing:
                    t_start_frame = perf_counter()
                    frame_send.send((fidx, t_start_frame))
                    fidx_latest = fidx
                    processing = True
  
                # wait till query - forecast-rt-ub
                wait_time = t_unit - opts.forecast_rt_ub
                if det_res_recv.poll(wait_time): # wait
                    # new result
                    result = det_res_recv.recv() 
                    if isinstance(result, Exception):
                        raise result
                    result, t_send_frame, t_start_res = result
                    bboxes_t2, scores_t2, labels_t2, _ = \
                        parse_det_result(result, coco_mapping, n_class)
                    processing = False
                    t_det_end = perf_counter()
                    t_det_all.append(t_det_end - t_start_frame)
                    t_send_frame_all.append(t_send_frame)
                    t_recv_res_all.append(t_det_end - t_start_res)

                    # associate across frames
                    t_assoc_start = perf_counter()
                    if len(kf_x):
                        dt = fidx_latest - fidx_t2

                        kf_F = make_F(kf_F, dt)
                        kf_Q = make_Q(kf_Q, dt)
                        kf_x, kf_P = batch_kf_predict(kf_F, kf_x, kf_P, kf_Q)
                        bboxes_f = x2bbox(kf_x)
                                        
                    fidx_t2 = fidx_latest

                    n = len(bboxes_t2)
                    if n:
                        # put high scores det first for better iou matching
                        score_argsort = np.argsort(scores_t2)[::-1]
                        bboxes_t2 = bboxes_t2[score_argsort]
                        scores_t2 = scores_t2[score_argsort]
                        labels_t2 = labels_t2[score_argsort]

                        ltrb2ltwh_(bboxes_t2)

                    updated = False
                    if len(kf_x):
                        order1, order2, n_matched12, tracks, tkidx = iou_assoc(
                            bboxes_f, labels, tracks, tkidx,
                            bboxes_t2, labels_t2, opts.match_iou_th,
                            no_unmatched1=True,
                        )

                        if n_matched12:
                            kf_x = kf_x[order1]
                            kf_P = kf_P[order1]
                            kf_x, kf_P = batch_kf_update(
                                bbox2z(bboxes_t2[order2[:n_matched12]]),
                                kf_x,
                                kf_P,
                                kf_R,
                            )
                    
                            kf_x_new = bbox2x(bboxes_t2[order2[n_matched12:]])
                            n_unmatched2 = len(bboxes_t2) - n_matched12
                            kf_P_new = kf_P_init.expand(n_unmatched2, -1, -1)
                            kf_x = torch.cat((kf_x, kf_x_new))
                            kf_P = torch.cat((kf_P, kf_P_new))
                            labels = labels_t2[order2]
                            scores = scores_t2[order2]
                            updated = True

                    if not updated:
                        # start from scratch
                        kf_x = bbox2x(bboxes_t2)
                        kf_P = kf_P_init.expand(len(bboxes_t2), -1, -1)
                        labels = labels_t2
                        scores = scores_t2
                        tracks = np.arange(tkidx, tkidx + n, dtype=np.uint32)
                        tkidx += n

                    t_assoc_end = perf_counter()
                    t_assoc_all.append(t_assoc_end - t_assoc_start)

                # apply forecasting for the current query
                t_forecast_start = perf_counter()
                query_pointer = fidx + opts.eta + 1
                
                if len(kf_x):
                    dt = (query_pointer - fidx_t2)

                    kf_x_np = kf_x[:, :, 0].numpy()
                    bboxes_t3 = kf_x_np[:n_matched12, :4] + dt*kf_x_np[:n_matched12, 4:]
                    if n_matched12 < len(kf_x):
                        bboxes_t3 = np.concatenate((bboxes_t3, kf_x_np[n_matched12:, :4]))
                        
                    bboxes_t3, keep = extrap_clean_up(bboxes_t3, w_img, h_img, lt=True)
                    labels_t3 = labels[keep]
                    scores_t3 = scores[keep]
                    tracks_t3 = tracks[keep]

                else:
                    bboxes_t3 = np.empty((0, 4), dtype=np.float32)
                    scores_t3 = np.empty((0,), dtype=np.float32)
                    labels_t3 = np.empty((0,), dtype=np.int32)
                    tracks_t3 = np.empty((0,), dtype=np.int32)

                t_forecast_end = perf_counter()
                t_forecast_all.append(t_forecast_end - t_forecast_start)
                
                t3 = perf_counter()
                t_elapsed = t3 - t_start
                if t_elapsed >= t_total:
                    break

                if len(bboxes_t3):
                    ltwh2ltrb_(bboxes_t3)
                if fidx_t2 is not None:
                    timestamps.append(t_elapsed)
                    results_parsed.append((bboxes_t3, scores_t3, labels_t3, None, tracks_t3))
                    input_fidx.append(fidx_t2)

            out_path = join(opts.out_dir, seq + '.pkl')
            if opts.overwrite or not isfile(out_path):
                pickle.dump({
                    'results_parsed': results_parsed,
                    'timestamps': timestamps,
                    'input_fidx': input_fidx,
                }, open(out_path, 'wb'))

    # terminates the child process
    frame_send.send(None)

    out_path = join(opts.out_dir, 'time_info.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump({
            'n_total': n_total,
            't_det': t_det_all,
            't_send_frame': t_send_frame_all,
            't_recv_res': t_recv_res_all,
            't_assoc': t_assoc_all,
            't_forecast': t_forecast_all,
        }, open(out_path, 'wb'))
 
    # convert to ms for display
    s2ms = lambda x: 1e3*x
    print_stats(t_det_all, 'Runtime detection (ms)', cvt=s2ms)
    print_stats(t_send_frame_all, 'Runtime sending the frame (ms)', cvt=s2ms)
    print_stats(t_recv_res_all, 'Runtime receiving the result (ms)', cvt=s2ms)
    print_stats(t_assoc_all, "Runtime association (ms)", cvt=s2ms)
    print_stats(t_forecast_all, "Runtime forecasting (ms)", cvt=s2ms)
Example #24
    with open(uk_dict_file, 'wb') as handle:
        pickle.dump(uk_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)

# print(us_dict)
# input()
# print(uk_dict)
# input()
pred = np.array([
    manhattan_distance(w, us_dict, uk_dict)
    for w in tqdm(train_words, desc="AdaGram")
])

linspace = np.linspace(0, 2.0, 1000)
metrics_scores = []
for t in linspace:
    metrics_scores.append(acc(train_labels, pred > t))
plt.plot(linspace, metrics_scores)
plt.xlabel("Threshold")
plt.ylabel("Acc on training set")
plt.show()

t = linspace[np.argmax(metrics_scores)]
print("Training stats:")
util.print_stats(pred > t, train_labels)
print("Testing stats:")
util.print_stats(
    np.array([manhattan_distance(w, us_dict, uk_dict)
              for w in test_words]) > t, test_labels)

print(COUNT)
Example #25
def main():
    assert torch.cuda.device_count() == 1  # mmdet only supports single GPU testing

    opts = parse_args()

    mp.set_start_method('spawn')
    frame_recv, frame_send = mp.Pipe(False)
    det_res_recv, det_res_send = mp.Pipe(False)
    det_proc = mp.Process(target=det_process,
                          args=(opts, frame_recv, det_res_send))
    det_proc.start()

    mkdir2(opts.out_dir)

    db = COCO(opts.annot_path)
    class_names = [c['name'] for c in db.dataset['categories']]
    n_class = len(class_names)
    coco_mapping = db.dataset.get('coco_mapping', None)
    if coco_mapping is not None:
        coco_mapping = np.asarray(coco_mapping)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']

    runtime_all = []
    n_processed = 0
    n_total = 0

    t_send_frame_all = []
    t_recv_res_all = []

    for sid, seq in enumerate(tqdm(seqs)):
        frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
        frame_list = [
            join(opts.data_root, seq_dirs[sid], img['name'])
            for img in frame_list
        ]
        n_frame = len(frame_list)
        n_total += n_frame

        timestamps = []
        results_raw = []
        results_parsed = []
        input_fidx = []
        runtime = []
        last_fidx = None
        stride_cnt = 0

        # let detector process to read all the frames
        frame_send.send(frame_list)
        init_error = det_res_recv.recv()  # wait till the detector is ready
        if init_error is not None:
            raise init_error

        t_total = n_frame / opts.fps
        t_start = perf_counter()
        while 1:
            t1 = perf_counter()
            t_elapsed = t1 - t_start
            if t_elapsed >= t_total:
                break

            # identify latest available frame
            fidx = int(np.floor(t_elapsed * opts.fps))
            #   t_elapsed/t_total *n_frame
            # = t_elapsed*opts.fps
            if fidx == last_fidx:
                continue

            last_fidx = fidx
            if stride_cnt % opts.det_stride == 0:
                stride_cnt = 1
            else:
                stride_cnt += 1
                continue

            t_start_frame = perf_counter()
            frame_send.send((fidx, t_start_frame))
            result = det_res_recv.recv()  # wait
            if isinstance(result, Exception):
                raise result
            result, t_send_frame, t_start_res = result
            bboxes, scores, labels, masks = \
                parse_det_result(result, coco_mapping, n_class)

            t2 = perf_counter()
            t_send_frame_all.append(t_send_frame)
            t_recv_res_all.append(t2 - t_start_res)
            t_elapsed = t2 - t_start
            if t_elapsed >= t_total:
                break

            timestamps.append(t_elapsed)
            results_raw.append(result)
            results_parsed.append((bboxes, scores, labels, masks))

            input_fidx.append(fidx)
            runtime.append(t2 - t1)

        out_path = join(opts.out_dir, seq + '.pkl')
        if opts.overwrite or not isfile(out_path):
            pickle.dump(
                {
                    'results_raw': results_raw,
                    'results_parsed': results_parsed,
                    'timestamps': timestamps,
                    'input_fidx': input_fidx,
                    'runtime': runtime,
                }, open(out_path, 'wb'))

        runtime_all += runtime
        n_processed += len(results_raw)

    # terminates the child process
    frame_send.send(None)

    runtime_all_np = np.asarray(runtime_all)
    n_small_runtime = (runtime_all_np < 1.0 / opts.fps).sum()

    out_path = join(opts.out_dir, 'time_info.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump(
            {
                'runtime_all': runtime_all,
                'n_processed': n_processed,
                'n_total': n_total,
                'n_small_runtime': n_small_runtime,
            }, open(out_path, 'wb'))

    # convert to ms for display
    s2ms = lambda x: 1e3 * x

    print(f'{n_processed}/{n_total} frames processed')
    print_stats(runtime_all_np, 'Runtime (ms)', cvt=s2ms)
    print(f'Runtime smaller than unit time interval: '
          f'{n_small_runtime}/{n_processed} '
          f'({100.0*n_small_runtime/n_processed:.4g}%)')
    print_stats(t_send_frame_all,
                'Time spent on sending the frame (ms)',
                cvt=s2ms)
    print_stats(t_recv_res_all,
                'Time spent on receiving the result (ms)',
                cvt=s2ms)
Example #26
def main():
    assert torch.cuda.device_count() == 1  # mmdet only supports single GPU testing

    opts = parse_args()

    mkdir2(opts.out_dir)
    vis_out = bool(opts.vis_dir)
    if vis_out:
        mkdir2(opts.vis_dir)

    db = COCO(opts.annot_path)
    class_names = [c['name'] for c in db.dataset['categories']]
    n_class = len(class_names)
    coco_mapping = None if opts.no_class_mapping else db.dataset.get(
        'coco_mapping', None)
    if coco_mapping is not None:
        coco_mapping = np.asarray(coco_mapping)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']

    model = init_detector(opts)
    if opts.weights_base is not None:
        # for distillation purpose
        load_checkpoint(model, opts.weights_base)
    if opts.cpu_pre:
        img_transform = ImageTransform(
            size_divisor=model.cfg.data.test.size_divisor,
            **model.cfg.img_norm_cfg)
    else:
        img_transform = ImageTransformGPU(
            size_divisor=model.cfg.data.test.size_divisor,
            **model.cfg.img_norm_cfg)
    device = next(model.parameters()).device  # model device
    n_history = model.cfg.data.train.n_history if opts.n_history is None else opts.n_history
    n_future = model.cfg.data.train.n_future if opts.n_future is None else opts.n_future

    results_ccf = []  # instance based
    runtime_all = []
    for sid, seq in enumerate(tqdm(seqs)):
        # print(seq)
        frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
        n_frame = len(frame_list)

        # load all frames in advance
        frames = []
        for img in frame_list:
            img_path = join(opts.data_root, seq_dirs[sid], img['name'])
            frames.append(imread(img_path))

        with torch.no_grad():
            preprocessed = []
            for i in range(n_history):
                data = _prepare_data(frames[i], img_transform, model.cfg,
                                     device)
                preprocessed.append(data)
            for ii in range(n_history, n_frame - n_future):
                # target frame
                iid = frame_list[ii + n_future]['id']
                img_name = frame_list[ii + n_future]['name']
                I = frames[ii + n_future]

                t_start = perf_counter()
                # input frame
                data = _prepare_data(frames[ii], img_transform, model.cfg,
                                     device)
                # if n_history == 0:
                #     data_merge = data
                #     # print(data['img'])
                #     # print(data['img'][0].shape)
                #     # print(data['img'][0][0][0][300][300:305])
                #     # import sys
                #     # sys.exit()
                # else:
                preprocessed.append(data)
                # print(preprocessed[0]['img'][0].data_ptr())
                # print(preprocessed[2]['img'][0].data_ptr())
                # print(torch.all(preprocessed[0]['img'][0] == preprocessed[2]['img'][0]))
                imgs = [d['img'][0] for d in preprocessed]
                imgs = torch.cat(imgs, 0)
                imgs = imgs.unsqueeze(0)
                data_merge = {
                    'img': [imgs],
                    'img_meta': data['img_meta'],
                }
                # print(data_merge['img'][0][0][2][0][300][300:305])
                # import sys
                # sys.exit()
                result = model(return_loss=False,
                               rescale=True,
                               numpy_res=True,
                               **data_merge)
                bboxes, scores, labels, masks = \
                    parse_det_result(result, coco_mapping, n_class)
                # if ii == 2:
                #     print(ii, scores)
                #     import sys
                #     sys.exit()

                # if n_history != 0:
                del preprocessed[0]
                t_end = perf_counter()
                runtime_all.append(t_end - t_start)

                if vis_out:
                    vis_path = join(opts.vis_dir, seq, img_name[:-3] + 'jpg')
                    if opts.overwrite or not isfile(vis_path):
                        vis_det(I,
                                bboxes,
                                labels,
                                class_names,
                                masks,
                                scores,
                                out_scale=opts.vis_scale,
                                out_file=vis_path)

                # convert to coco fmt
                n = len(bboxes)
                if n:
                    bboxes[:, 2:] -= bboxes[:, :2]

                for i in range(n):
                    result_dict = {
                        'image_id': iid,
                        'bbox': bboxes[i],
                        'score': scores[i],
                        'category_id': labels[i],
                    }
                    if masks is not None:
                        result_dict['segmentation'] = masks[i]
                    results_ccf.append(result_dict)

    out_path = join(opts.out_dir, 'time_info.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump({
            'runtime_all': runtime_all,
            'n_total': len(runtime_all),
        }, open(out_path, 'wb'))

    # convert to ms for display
    s2ms = lambda x: 1e3 * x

    print_stats(runtime_all, 'Runtime (ms)', cvt=s2ms)

    out_path = join(opts.out_dir, 'results_ccf.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump(results_ccf, open(out_path, 'wb'))

    if not opts.no_eval:
        eval_summary = eval_ccf(db, results_ccf)
        out_path = join(opts.out_dir, 'eval_summary.pkl')
        if opts.overwrite or not isfile(out_path):
            pickle.dump(eval_summary, open(out_path, 'wb'))

    if vis_out:
        print(f'python vis/make_videos.py "{opts.vis_dir}"')
Example #27
def main(_):
  # Configuration.
  num_unrolls = FLAGS.num_steps // FLAGS.unroll_length

  # Problem.
  problem, net_config, net_assignments = util.get_config(FLAGS.problem)

  # Optimizer setup.
  optimizer = meta.MetaOptimizer(**net_config)
  minimize = optimizer.meta_minimize(
      problem, FLAGS.unroll_length,
      learning_rate=FLAGS.learning_rate,
      net_assignments=net_assignments,
      second_derivatives=FLAGS.second_derivatives)

  step, loss, update, reset, cost_op, farray, lropt, _ = minimize

  with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    # Log the graph for TensorBoard.
    graph_writer = tf.summary.FileWriter(logs_path, sess.graph)
    sess.run(tf.global_variables_initializer())
    best_evaluation = float("inf")
    start = timer()
    losstrain = []
    lrtrain = []
    losseval = []
    plotlosstrain = []
    plotlrtrain = []
    plotlosseval = []
    for e in range(FLAGS.num_epochs):
      cost, trainloss, lropttrain = util.run_epoch(sess, cost_op, farray, lropt, [step, update], reset, num_unrolls)
      print(cost)
      losstrain.append(cost)
      lrtrain.append(lropttrain)
      util.print_stats("Training Epoch {}".format(e), trainloss, timer() - start)
      saver = tf.train.Saver()
      if (e + 1) % FLAGS.logging_period == 0:
          plotlosstrain.append(cost)
          plotlrtrain.append(lropttrain)

      if (e + 1) % FLAGS.evaluation_period == 0:
        for _ in range(FLAGS.evaluation_epochs):
          evalcost, evaloss, _ = util.run_epoch(sess, cost_op, farray, lropt, [update], reset, num_unrolls)
          losseval.append(evalcost)
        if save_path is not None and evaloss < best_evaluation:
          print("Saving meta-optimizer to {}".format(save_path))
          saver.save(sess, save_path + '/model.ckpt', global_step=e + 1)
          best_evaluation = evaloss
          plotlosseval.append(evalcost)
    slengths = np.arange(FLAGS.num_steps)
    slengthlr = np.arange(FLAGS.num_steps - num_unrolls)
    np.savetxt(save_path + '/plotlosstrain.out', plotlosstrain, delimiter=',')
    np.savetxt(save_path + '/plotlrtrain.out', plotlrtrain, delimiter=',')
    np.savetxt(save_path + '/plotlosseval.out', plotlosseval, delimiter=',')
    np.savetxt(save_path + '/losstrain.out', losstrain, delimiter=',')
    np.savetxt(save_path + '/lrtrain.out', lrtrain, delimiter=',')
    np.savetxt(save_path + '/losseval.out', losseval, delimiter=',')
    plt.figure(figsize=(8, 5))
    plt.plot(slengths, np.mean(plotlosstrain, 0), 'r-', label='Training Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Training Loss')
    plt.legend()
    savefig(save_path + '/Training.png')
    plt.close()
    plt.figure(figsize=(8, 5))
    plt.plot(slengths, np.mean(plotlosseval, 0), 'b-', label='Validation Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Validation Loss')
    plt.legend()
    savefig(save_path + '/Validation.png')
    plt.close()
    plt.figure(figsize=(8, 5))
    plt.plot(slengthlr, np.mean(plotlrtrain, 0), 'r-', label='Learning Rate')
    plt.xlabel('Epoch')
    plt.ylabel('Average Learning Rate')
    plt.legend()
    savefig(save_path + '/LearningRate.png')
    plt.close()
    graph_writer.close()
Example #28
    meta_loss = optimizer.meta_loss(problem, 1, net_assignments=net_assignments)
    _, update, reset, cost_op, problem_vars = meta_loss

else:
    raise ValueError("{} is not a valid optimizer".format(optimizer))

sess = ms.MonitoredSession()  # not used as a context manager here
# Prevent accidental changes to the graph.
tf.get_default_graph().finalize()

total_time = 0
total_cost = 0

for i in xrange(num_epochs):
    # Training.
    time, cost, problem_res = util.run_epoch_test(sess, cost_op, problem_vars, [update], reset, num_unrolls)
    total_time += time
    total_cost += cost
    print('Cost at Iteration: '+str(i)+' is: '+str(total_cost))

# Results.
util.print_stats("Epoch {}".format(num_epochs), total_cost,
             total_time, num_epochs)


#%% print the results
# not quite clear how the shaping is done..
# e.g. problem_res.shape = (20, 2, 100, 4, 100)  # (nsteps, (real/imag), nx, nz, ny) -> REAL/IMAG are two separate variables which we try to optimize here
for i in range(problem_res.shape[0]):
    mycurrent_result = np.abs(problem_res[i,0,:,1,:]+1j*problem_res[i,1,:,1,:])
    plt.imshow(np.abs(np.squeeze(mycurrent_result)))
    plt.show()
Example #29
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps
    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(FLAGS.problem,
                                                           FLAGS.path,
                                                           net_name="RNNprop")

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        update = optimizer.minimize(cost_op)
        # get_slot_names() returns strings; reset the actual slot variables,
        # which exist only after minimize() creates them.
        optimizer_vars = [optimizer.get_slot(v, name)
                          for v in problem_vars
                          for name in optimizer.get_slot_names()]
        optimizer_reset = tf.variables_initializer(optimizer_vars)
        reset = [problem_reset, optimizer_reset]
    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(FLAGS.beta1, FLAGS.beta2, **net_config)
        meta_loss, _, _, step = optimizer.meta_loss(
            problem, 1, net_assignments=net_assignments)
        _, update, reset, cost_op, _ = meta_loss
    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with ms.MonitoredSession() as sess:
        sess.run(reset)
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        loss_record = []
        for e in xrange(FLAGS.num_epochs):
            # Training.
            time, cost = util.run_eval_epoch(sess,
                                             cost_op, [update],
                                             num_unrolls,
                                             step=step,
                                             unroll_len=1)
            total_time += time
            total_cost += sum(cost) / num_unrolls
            loss_record += cost

        # Results.
        util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                         total_time, FLAGS.num_epochs)

    if FLAGS.output_path is not None:
        if not os.path.exists(FLAGS.output_path):
            os.mkdir(FLAGS.output_path)
    output_file = '{}/{}_eval_loss_record.pickle-{}'.format(
        FLAGS.output_path, FLAGS.optimizer, FLAGS.problem)
    with open(output_file, 'wb') as l_record:
        pickle.dump(loss_record, l_record)
    print("Saving evaluate loss record {}".format(output_file))
Example #30
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps // FLAGS.unroll_length
    problem, net_config, net_assignments = util.get_config(FLAGS.problem)
    optimizer = meta.MetaOptimizer(**net_config)
    if FLAGS.save_path is not None:
        if not os.path.exists(FLAGS.save_path):
            os.mkdir(FLAGS.save_path)
            path = None
            # raise ValueError("Folder {} already exists".format(FLAGS.save_path))
        else:
            if os.path.exists('{}/loss-record.pickle'.format(FLAGS.save_path)):
                path = FLAGS.save_path
            else:
                path = None
    # Problem.

    # Optimizer setup.

    minimize = optimizer.meta_minimize(
        problem,
        FLAGS.unroll_length,
        learning_rate=FLAGS.learning_rate,
        net_assignments=net_assignments,
        model_path=path,
        second_derivatives=FLAGS.second_derivatives)

    step, update, reset, cost_op, x_final, test, fc_weights, fc_bias, fc_va = minimize

    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()
        best_evaluation = float("inf")
        total_time = 0
        total_cost = 0
        loss_record = []
        constants = []
        for e in xrange(FLAGS.num_epochs):
            # Training.
            time, cost, constant, weights = util.run_epoch(
                sess, cost_op, [update, step], reset, num_unrolls, test,
                [fc_weights, fc_bias, fc_va])
            cost = sum(cost) / len(cost_op)
            total_time += time
            total_cost += cost
            loss_record.append(cost)
            constants.append(constant)
            # Logging.
            if (e + 1) % FLAGS.log_period == 0:
                util.print_stats("Epoch {}".format(e + 1), total_cost,
                                 total_time, FLAGS.log_period)
                total_time = 0
                total_cost = 0

            # Evaluation.
            if (e + 1) % FLAGS.evaluation_period == 0:
                eval_cost = 0
                eval_time = 0
                for _ in xrange(FLAGS.evaluation_epochs):
                    time, cost, constant, weights = util.run_epoch(
                        sess, cost_op, [update, step], reset, num_unrolls,
                        test, [fc_weights, fc_bias, fc_va])
                    eval_time += time
                    eval_cost += sum(cost) / len(cost_op)

                util.print_stats("EVALUATION", eval_cost, eval_time,
                                 FLAGS.evaluation_epochs)

                if FLAGS.save_path is not None and eval_cost < best_evaluation:
                    print("Removing previously saved meta-optimizer")
                    for f in os.listdir(FLAGS.save_path):
                        os.remove(os.path.join(FLAGS.save_path, f))
                    print("Saving meta-optimizer to {}".format(
                        FLAGS.save_path))
                    optimizer.save(sess, FLAGS.save_path)
                    with open(os.path.join(FLAGS.save_path, 'loss_record.pickle'),
                              'wb') as l_record:
                        record = {'loss_record': loss_record,
                                  'fc_weights': sess.run(weights[0]),
                                  'fc_bias': sess.run(weights[1]),
                                  'fc_va': sess.run(weights[2]),
                                  'constant': sess.run(constant)}
                        pickle.dump(record, l_record)
                    best_evaluation = eval_cost
Example #31
def main(_):
  # Configuration.
  num_unrolls = FLAGS.num_steps // FLAGS.unroll_length


  # Problem.
  problem, net_config, net_assignments = util.get_config(
      FLAGS.problem, main_parade_path, first_batch_parade_path)

  # Optimizer setup.
  optimizer = meta.MetaOptimizer(**net_config)
  minimize = optimizer.meta_minimize(
      problem, FLAGS.unroll_length,
      learning_rate=FLAGS.learning_rate,
      net_assignments=net_assignments,
      second_derivatives=FLAGS.second_derivatives)
  step, update, reset, cost_op, _ = minimize

  with ms.MonitoredSession() as sess:
    # Prevent accidental changes to the graph.
    tf.get_default_graph().finalize()
    writer = tf.summary.FileWriter('summary')
    writer.add_graph(tf.get_default_graph())
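    # The saved graph can be inspected with: tensorboard --logdir summary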
    best_evaluation = float("inf")
    total_time = 0
    total_cost = 0
    for e in xrange(FLAGS.num_epochs):
      # Training.
      time, cost = util.run_epoch(sess, cost_op, [update, step], reset,
                                  num_unrolls)
      total_time += time
      total_cost += cost

      # Logging.
      if (e + 1) % FLAGS.log_period == 0:
        util.print_stats("Epoch {}".format(e + 1), total_cost, total_time,
                         FLAGS.log_period)
        total_time = 0
        total_cost = 0

      # Evaluation.
      if (e + 1) % FLAGS.evaluation_period == 0:
        eval_cost = 0
        eval_time = 0
        for _ in xrange(FLAGS.evaluation_epochs):
          time, cost = util.run_epoch(sess, cost_op, [update], reset,
                                      num_unrolls)
          eval_time += time
          eval_cost += cost

        util.print_stats("EVALUATION", eval_cost, eval_time,
                         FLAGS.evaluation_epochs)

        if FLAGS.save_path is not None and eval_cost < best_evaluation:
          print("Removing previously saved meta-optimizer")
          for f in os.listdir(FLAGS.save_path):
            os.remove(os.path.join(FLAGS.save_path, f))
          print("Saving meta-optimizer to {}".format(FLAGS.save_path))
          optimizer.save(sess, FLAGS.save_path)
          best_evaluation = eval_cost
Example #32
    # Prevent accidental changes to the graph.
    tf.get_default_graph().finalize()

    best_evaluation = float("inf")
    total_time = 0
    total_cost = 0
    for e in xrange(num_epochs):
        # Training.
        time, cost = util.run_epoch(sess, cost_op, [update, step], reset,
                                    num_unrolls)
        total_time += time
        total_cost += cost

        # Logging.
        if (e + 1) % log_period == 0:
            util.print_stats("Epoch {}".format(e + 1), total_cost, total_time,
                             log_period)
            # Reset the running totals only after logging, as in the other
            # examples; the original reset them on every epoch.
            total_time = 0
            total_cost = 0

        # Evaluation.
        if (e + 1) % evaluation_period == 0:
            eval_cost = 0
            eval_time = 0
            for _ in xrange(evaluation_epochs):
                time, cost = util.run_epoch(sess, cost_op, [update], reset,
                                            num_unrolls)
                eval_time += time
                eval_cost += cost

            util.print_stats("EVALUATION", eval_cost, eval_time,
                             evaluation_epochs)
Example #33
def main():
    opts = parse_args()

    mkdir2(opts.out_dir)

    db = COCO(opts.annot_path)
    class_names = [c['name'] for c in db.dataset['categories']]
    n_class = len(class_names)
    coco_mapping = None if opts.no_class_mapping else db.dataset.get('coco_mapping', None)
    if coco_mapping is not None:
        coco_mapping = np.asarray(coco_mapping)
    seqs = db.dataset['sequences']
    seq_dirs = db.dataset['seq_dirs']

    if opts.cached_res:
        cache_in_ccf = '_ccf' in basename(opts.cached_res)
        if cache_in_ccf:
            # speed up based on the assumption of sequential storage
            cache_end_idx = 0
        cached_res = pickle.load(open(opts.cached_res, 'rb'))
    else:
        assert torch.cuda.device_count() == 1 # mmdet only supports single GPU testing
        model = init_detector(opts)

    np.random.seed(opts.seed)
    runtime = pickle.load(open(opts.runtime, 'rb'))
    runtime_dist = dist_from_dict(runtime, opts.perf_factor)
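    # runtime_dist models the detector's per-frame latency, measured
    # offline and optionally scaled by perf_factor; it is sampled below
    # to simulate inference time.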

    runtime_all = []
    n_processed = 0
    n_total = 0

    for sid, seq in enumerate(tqdm(seqs)):
        frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
        n_frame = len(frame_list)
        n_total += n_frame

        if not opts.cached_res:
            # load all frames in advance
            frames = []
            for img in frame_list:
                img_path = join(opts.data_root, seq_dirs[sid], img['name'])
                frames.append(imread(img_path))
        
        timestamps = []
        results_parsed = []
        input_fidx = []
        runtime = []
        last_fidx = None
        if opts.cached_res and cache_in_ccf:
            results_raw = None
        else:
            results_raw = []
        
        t_total = n_frame/opts.fps
        t_elapsed = 0
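        # Two scheduling policies: the dynamic schedule decides per frame
        # whether to process now or wait for a fresher frame, based on the
        # detector's mean runtime; otherwise every opts.det_stride-th frame
        # is processed.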
        if opts.dynamic_schedule:
            mean_rtf = runtime_dist.mean()*opts.fps
        else:
            stride_cnt = 0

        while True:
            if t_elapsed >= t_total:
                break

            # identify the latest available frame
            fidx_continuous = t_elapsed*opts.fps
            fidx = int(np.floor(fidx_continuous))
            if fidx == last_fidx:
                # algorithm is fast and has some idle time
                fidx += 1
                if fidx == n_frame:
                    break
                t_elapsed = fidx/opts.fps
                
            last_fidx = fidx

            if opts.dynamic_schedule:
                if mean_rtf > 1:
                    # when runtime <= 1, it should always process every frame
                    fidx_remainder = fidx_continuous - fidx
                    if mean_rtf < np.floor(fidx_remainder + mean_rtf):
                        # wait till next frame
                        continue
            else:
                if stride_cnt % opts.det_stride == 0:
                    stride_cnt = 1
                else:
                    stride_cnt += 1
                    continue

            if opts.cached_res:
                img = frame_list[fidx]
                if cache_in_ccf:
                    cache_end_idx, bboxes, scores, labels, masks = \
                        result_from_ccf(cached_res, img['id'], cache_end_idx)
                    ltwh2ltrb_(bboxes)
                else:
                    result = cached_res[img['id']]
                    bboxes, scores, labels, masks = \
                        parse_det_result(result, coco_mapping, n_class)
            else:
                frame = frames[fidx]
                result = inference_detector(model, frame)
                bboxes, scores, labels, masks = \
                    parse_det_result(result, coco_mapping, n_class)

            # Sample the simulated per-frame latency.
            rt_this = runtime_dist.draw()
            t_elapsed += rt_this
            if t_elapsed >= t_total:
                break
            
            timestamps.append(t_elapsed)
            if results_raw is not None:
                results_raw.append(result)
            results_parsed.append((bboxes, scores, labels, masks))
            input_fidx.append(fidx)
            runtime.append(rt_this)

        out_path = join(opts.out_dir, seq + '.pkl')
        if opts.overwrite or not isfile(out_path):
            out_dict = {
                'results_parsed': results_parsed,
                'timestamps': timestamps,
                'input_fidx': input_fidx,
                'runtime': runtime,
            }
            if results_raw is not None:
                out_dict['results_raw'] = results_raw
            pickle.dump(out_dict, open(out_path, 'wb'))

        runtime_all += runtime
        n_processed += len(results_parsed)

    runtime_all_np = np.array(runtime_all)
    n_small_runtime = (runtime_all_np < 1.0/opts.fps).sum()

    out_path = join(opts.out_dir, 'time_info.pkl')
    if opts.overwrite or not isfile(out_path):
        pickle.dump({
            'runtime_all': runtime_all,
            'n_processed': n_processed,
            'n_total': n_total,
            'n_small_runtime': n_small_runtime,
        }, open(out_path, 'wb'))  

    # convert to ms for display
    s2ms = lambda x: 1e3*x

    print(f'{n_processed}/{n_total} frames processed')
    print_stats(runtime_all_np, 'Runtime (ms)', cvt=s2ms)
    print(f'Runtime smaller than unit time interval: '
        f'{n_small_runtime}/{n_processed} '
        f'({100.0*n_small_runtime/n_processed:.4g}%)')
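The dynamic-schedule test inside the simulation loop can also be read as a standalone predicate; a minimal sketch of the same rule, where should_wait is a hypothetical helper and mean_rtf is the detector's mean runtime expressed in frame intervals:

import numpy as np

def should_wait(fidx_continuous, mean_rtf):
    """Return True when skipping the current frame and waiting for the
    next one is expected to yield fresher output."""
    if mean_rtf <= 1:
        # A detector faster than the frame rate processes every frame.
        return False
    fidx = int(np.floor(fidx_continuous))
    remainder = fidx_continuous - fidx
    return mean_rtf < np.floor(remainder + mean_rtf)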
Example #34
def main(_):
    # Configuration.
    num_unrolls = FLAGS.num_steps

    if FLAGS.seed:
        tf.set_random_seed(FLAGS.seed)

    # Problem.
    problem, net_config, net_assignments = util.get_config(
        FLAGS.problem, FLAGS.path, FLAGS.problem_path)

    # Optimizer setup.
    if FLAGS.optimizer == "Adam":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        update = optimizer.minimize(cost_op)
        # Reset the slot variables created by minimize(); get_slot_names()
        # returns strings, not variables.
        optimizer_reset = tf.variables_initializer(optimizer.variables())
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "SGD":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
        update = optimizer.minimize(cost_op)
        # Plain SGD has no slot variables; variables() is simply empty.
        optimizer_reset = tf.variables_initializer(optimizer.variables())
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "RMSProp":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.RMSPropOptimizer(FLAGS.learning_rate)
        update = optimizer.minimize(cost_op)
        # Reset slot variables, not slot names.
        optimizer_reset = tf.variables_initializer(optimizer.variables())
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "Momentum":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate,
                                               momentum=0.9)
        update = optimizer.minimize(cost_op)
        optimizer_reset = tf.variables_initializer(optimizer.variables())
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "NAG":
        cost_op = problem()
        problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        problem_reset = tf.variables_initializer(problem_vars)
        x_op = problem_vars

        optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate,
                                               momentum=0.1,
                                               use_nesterov=True)
        update = optimizer.minimize(cost_op)
        optimizer_reset = tf.variables_initializer(optimizer.variables())
        reset = [problem_reset, optimizer_reset]

    elif FLAGS.optimizer == "L2L":
        if FLAGS.path is None:
            logging.warning("Evaluating untrained L2L optimizer")
        optimizer = meta.MetaOptimizer(**net_config)
        meta_loss = optimizer.meta_loss(problem,
                                        1,
                                        net_assignments=net_assignments)
        _, update, reset, cost_op, x_op = meta_loss
    else:
        raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

    with ms.MonitoredSession() as sess:
        # Prevent accidental changes to the graph.
        tf.get_default_graph().finalize()

        total_time = 0
        total_cost = 0
        for _ in xrange(FLAGS.num_epochs):
            # Training.
            time, cost, x_values = util.run_epoch(sess, cost_op, x_op,
                                                  [update], reset, num_unrolls)
            total_time += time
            total_cost += cost

        x_values = np.swapaxes(np.squeeze(x_values), 0, 1)
        if 'wav' in FLAGS.problem:
            np.save(os.path.join('results', '{}_wav'.format(FLAGS.optimizer)),
                    x_values)
        else:
            np.save(os.path.join('results', '{}'.format(FLAGS.optimizer)),
                    x_values)

        # print("x_values shape: {}".format(x_values.shape))
        # print("x_values: {}".format(x_values))
        # np.savetxt(os.path.join('results', '{}.txt'.format(FLAGS.optimizer)), x_values, fmt='%f')
        # Results.
        util.print_stats(
            "Epoch {}, Optimizer {}".format(FLAGS.num_epochs, FLAGS.optimizer),
            total_cost, total_time, FLAGS.num_epochs)
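The five first-order branches above differ only in how the optimizer object is constructed; everything else (cost op, variable reset, minimize) is shared. A minimal table-driven sketch of the same setup, assuming the TF 1.x APIs used above (Optimizer.variables() needs TF >= 1.4); FIRST_ORDER and build_first_order are hypothetical names:

import tensorflow as tf

# Map flag values to optimizer constructors (same choices as above).
FIRST_ORDER = {
    "Adam": lambda lr: tf.train.AdamOptimizer(lr),
    "SGD": lambda lr: tf.train.GradientDescentOptimizer(lr),
    "RMSProp": lambda lr: tf.train.RMSPropOptimizer(lr),
    "Momentum": lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.9),
    "NAG": lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.1,
                                                 use_nesterov=True),
}

def build_first_order(name, problem, learning_rate):
    """Return (cost_op, update, reset, x_op) for a baseline optimizer."""
    cost_op = problem()
    problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    problem_reset = tf.variables_initializer(problem_vars)
    optimizer = FIRST_ORDER[name](learning_rate)
    update = optimizer.minimize(cost_op)
    # Slot variables exist only after minimize() has created them.
    optimizer_reset = tf.variables_initializer(optimizer.variables())
    return cost_op, update, [problem_reset, optimizer_reset], problem_vars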
Example #35
def main(_):
  # Configuration.
  if FLAGS.seed:
    tf.set_random_seed(FLAGS.seed)

  # Problem.
  problem, net_config, net_assignments = util.get_config(
      FLAGS.problem, FLAGS.path)

  state = None
  # Optimizer setup.
  if FLAGS.optimizer == "Adam":
    cost_op = problem()
    problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    problem_reset = tf.variables_initializer(problem_vars)

    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    update = optimizer.minimize(cost_op)
    # Reset the slot variables created by minimize(); get_slot_names()
    # returns strings, not variables.
    optimizer_reset = tf.variables_initializer(optimizer.variables())
    reset = [problem_reset, optimizer_reset]
  elif FLAGS.optimizer == "SGD_MOM":
    cost_op = problem()
    problem_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    problem_reset = tf.variables_initializer(problem_vars)

    optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, 0.9)
    update = optimizer.minimize(cost_op)
    optimizer_reset = tf.variables_initializer(optimizer.variables())
    reset = [problem_reset, optimizer_reset]
  elif FLAGS.optimizer == "L2L":
    if FLAGS.path is None:
      logging.warning("Evaluating untrained L2L optimizer")
    cost_op = problem()
    optimizer = meta.MetaOptimizer(**net_config)
    # Both the load-trained and from-scratch paths built the same meta
    # loss, so the branch is collapsed; trained weights are restored via
    # tf.train.Saver further below.
    meta_loss = optimizer.meta_loss(problem, 1,
                                    net_assignments=net_assignments,
                                    load_states=False)
    _, update, reset, _, _, state = meta_loss
  else:
    raise ValueError("{} is not a valid optimizer".format(FLAGS.optimizer))

  process_id = os.getpid()
  exp_folder = os.path.join("exp", str(process_id))

  if not os.path.isdir(exp_folder):
    os.mkdir(exp_folder)

  writer = tf.summary.FileWriter(exp_folder)
  summaries = tf.summary.merge_all()

  if FLAGS.problem == "mnist":
    var_name_mlp = [
        "mlp/linear_0/w:0", "mlp/linear_0/b:0", "mlp/linear_1/w:0",
        "mlp/linear_1/b:0"
    ]
  else:
    var_name_mlp = []

  problem_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

  if var_name_mlp:
    saver_vars = [vv for vv in problem_vars if vv.name in var_name_mlp]
  else:
    saver_vars = problem_vars

  saver = tf.train.Saver(saver_vars)

  with ms.MonitoredSession() as sess:
    # a quick hack!
    regular_sess = sess._sess._sess._sess._sess
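    # (MonitoredSession wraps the raw tf.Session in several layers; the
    # raw session is needed because tf.train.Saver.save/restore cannot
    # be called on a MonitoredSession directly.)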

    # Prevent accidental changes to the graph.
    tf.get_default_graph().finalize()

    # print("Initial loss = {}".format(sess.run(cost_op)))
    # raw_input("wait")

    if FLAGS.load_trained_model == True:
      print("We are loading trained model here!")
      saver.restore(regular_sess, os.path.join(FLAGS.model_path, "model"))

    total_time = 0
    total_cost = 0
    for _ in xrange(FLAGS.num_epochs):
      time, cost = util.run_epoch_eval(
          sess,
          cost_op, 
          [update],
          reset,
          FLAGS.num_steps,
          summary_op=summaries,
          summary_writer=writer,
          run_reset=False)
      writer.flush()

      total_time += time
      total_cost += cost

    saver.save(regular_sess, os.path.join(exp_folder, "model"))
    if state is not None:
      # `final_states` was undefined in the snippet; saving the evaluated
      # optimizer state returned by meta_loss is the likely intent.
      final_states = regular_sess.run(state)
      pickle.dump(final_states,
                  open(os.path.join(exp_folder, "states.p"), "wb"))

    # Results.
    util.print_stats("Epoch {}".format(FLAGS.num_epochs), total_cost,
                     total_time, FLAGS.num_epochs)

    # This has to run at the very end to avoid an error.
    regular_sess.run(reset)