def run(training_folder):
    logging.info('Start IL...')
    dataloader = Dataloader(FLAGS.sketch_lengths, FLAGS.il_val_ratio)
    bot = make(input_dim=39,
               action_dim=9,
               arch=FLAGS.arch,
               hidden_size=FLAGS.hidden_size,
               nb_slots=FLAGS.nb_slots,
               env_arch=FLAGS.env_arch,
               dataloader=dataloader)
    if FLAGS.cuda:
        bot = bot.cuda()
    params = list(bot.parameters())
    opt = torch.optim.Adam(params, lr=FLAGS.il_lr)

    # test dataloader
    test_sketch_lengths = set(FLAGS.test_sketch_lengths) - set(
        FLAGS.sketch_lengths)
    test_dataloader = None if len(test_sketch_lengths) == 0 else Dataloader(
        test_sketch_lengths, FLAGS.il_val_ratio)

    try:
        main_loop(bot, dataloader, opt, training_folder, test_dataloader)
    except KeyboardInterrupt:
        pass
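
# A minimal sketch of the absl-style flag definitions that run() reads
# through FLAGS. The flag names come from the code above; the types and
# defaults here are illustrative assumptions.
#
# from absl import flags
# FLAGS = flags.FLAGS
# flags.DEFINE_list('sketch_lengths', ['3'], 'Sketch lengths to train on')
# flags.DEFINE_list('test_sketch_lengths', [], 'Extra sketch lengths for testing')
# flags.DEFINE_float('il_val_ratio', 0.2, 'Validation split ratio')
# flags.DEFINE_float('il_lr', 1e-3, 'Adam learning rate')
# flags.DEFINE_string('arch', 'omstack', 'Bot architecture')
# flags.DEFINE_integer('hidden_size', 128, 'Hidden size')
# flags.DEFINE_integer('nb_slots', 3, 'Number of memory slots')
# flags.DEFINE_string('env_arch', 'sketch', 'Sketch/env encoder architecture')
# flags.DEFINE_boolean('cuda', True, 'Use GPU if available')
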
def ompn_eval(bot, args):
    parsing_metric = {}
    dataloader = Dataloader(args.sketch_lengths, 0.99)
    bot.eval()
    for sketch_len in dataloader.env_names:
        parsing_metric[sketch_len] = DictList()
        data_iter = dataloader.val_iter(args.episodes,
                                        shuffle=True,
                                        env_names=[sketch_len])
        batch, batch_lens, batch_sketch_lens = next(data_iter)
        with torch.no_grad():
            _, extra_info = bot.teacherforcing_batch(batch,
                                                     batch_lens,
                                                     batch_sketch_lens,
                                                     recurrence=64)

        for batch_id, (length, sketch_length, ps) in tqdm(
                enumerate(zip(batch_lens, batch_sketch_lens, extra_info.p))):
            traj = batch[batch_id]
            traj = traj[:length]
            _gt_subtask = traj.gt_onsets
            target = point_of_change(_gt_subtask)

            # Truncate predictions to the episode length and pin the first
            # step's boundary distribution to the last slot
            ps = ps[:length]
            ps[0, :-1] = 0
            ps[0, -1] = 1
            for threshold in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7]:
                preds = get_boundaries(ps,
                                       bot.nb_slots,
                                       threshold=threshold,
                                       nb_boundaries=len(target))
                #parsing_metric[sketch_len].append({'f1_tol{}_thres{}'.format(tol, threshold):
                #                                       f1(target, preds, tol) for tol in [0, 1, 2]})
                _decoded_subtask = get_subtask_seq(length.item(),
                                                   subtask=traj.tasks.tolist(),
                                                   use_ids=np.array(preds))
                parsing_metric[sketch_len] += {
                    'task_acc_thres{}'.format(threshold):
                    (_gt_subtask.cpu() == _decoded_subtask.cpu()).tolist()
                }

            preds = automatic_get_boundaries_peak(ps,
                                                  bot.nb_slots,
                                                  nb_boundaries=len(target))
            _decoded_subtask = get_subtask_seq(length.item(),
                                               subtask=traj.tasks.tolist(),
                                               use_ids=np.array(preds))
            parsing_metric[sketch_len] += {
                'task_acc_auto':
                (_gt_subtask.cpu() == _decoded_subtask.cpu()).tolist()
            }

        parsing_metric[sketch_len].apply(lambda _t: np.mean(_t))

    return parsing_metric
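
# Usage sketch (assumptions: `bot` is a trained OMPN-style model loaded
# elsewhere; `args` only needs the two attributes read above):
#
# import argparse
# args = argparse.Namespace(sketch_lengths=['3'], episodes=64)
# metrics = ompn_eval(bot, args)
# for env_name, m in metrics.items():
#     print(env_name, dict(m.items()))
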
Example #3
def eval(self):
    dataloader = Dataloader()
    c = tf.ConfigProto()
    c.gpu_options.allow_growth = True
    with tf.Session(config=c) as sess:
        checkpoint = tf.train.latest_checkpoint("checkpoint/")
        if checkpoint:
            print("restore from: " + checkpoint)
            self.saver.restore(sess, checkpoint)
        nn = 0
        for inputs in dataloader.get_images():
            preds = sess.run(self.output,
                             feed_dict={self.input_tensor: inputs})
            dataloader.save_images(str(nn) + ".jpg", preds)
            nn += 1
Example #4
def train(self):
    self.train_step = tf.train.AdamOptimizer(3e-4).minimize(self.loss)
    dataloader = Dataloader()
    i = 0
    c = tf.ConfigProto()
    c.gpu_options.allow_growth = True
    with tf.Session(config=c) as sess:
        sess.run(tf.global_variables_initializer())
        checkpoint = tf.train.latest_checkpoint("checkpoint/")
        if checkpoint:
            print("restore from: " + checkpoint)
            self.saver.restore(sess, checkpoint)
        elif os.path.exists("vgg16.npy"):
            print("restore from vgg weights.")
            # allow_pickle is required to load object arrays on NumPy >= 1.16.4
            vgg = np.load("vgg16.npy", encoding='latin1',
                          allow_pickle=True).item()
            ops = []
            vgg_dict = [
                "conv1_1", "conv1_2", "conv2_1", "conv2_2", "conv3_1",
                "conv3_2", "conv3_3", "conv4_1", "conv4_2", "conv4_3",
                "conv5_1", "conv5_2", "conv5_3", "fc6", "fc7"
            ]
            tf_variables = {}
            for variables in tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES):
                # Skip optimizer slot variables created by Adam/RMSProp
                if "Adam" in variables.name or "RMS" in variables.name:
                    continue
                key = variables.name.split("/")[0].split(":")[0]
                if key not in vgg_dict:
                    continue
                if key not in tf_variables:
                    # The first variable of a layer is the kernel
                    tf_variables[key] = [variables]
                    ops.append(variables.assign(vgg[key][0]))
                else:
                    # The second variable of a layer is the bias
                    tf_variables[key].append(variables)
                    ops.append(variables.assign(vgg[key][1]))
            sess.run(ops)
        for inputs, labels in dataloader.generate():
            _, lo, preds = sess.run(
                [self.train_step, self.loss, self.output],
                feed_dict={
                    self.input_tensor: inputs,
                    self.label_tensor: labels
                })
            print(i, lo)
            if i % 20 == 0:
                dataloader.save_images("output.jpg", preds)
                dataloader.save_images("label.jpg", labels)
            i += 1
            if i % 100 == 99:
                self.saver.save(sess, "checkpoint/ckpt")
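
# Usage sketch: train() and eval() above are methods of a model class that
# provides self.loss, self.output, self.input_tensor, self.label_tensor and
# self.saver. The class name below is a placeholder assumption:
#
# model = SegmentationNet()
# model.train()
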
def taco_eval(bot, args):
    parsing_metric = {}
    dataloader = Dataloader(args.sketch_lengths, 0.99)
    bot.eval()
    for sketch_len in dataloader.env_names:
        parsing_metric[sketch_len] = DictList()
        data_iter = dataloader.val_iter(args.episodes,
                                        shuffle=True,
                                        env_names=[sketch_len])
        batch, batch_lens, batch_sketch_lens = next(data_iter)
        with torch.no_grad():
            parsing_res, _ = taco_decode(bot,
                                         trajs=batch,
                                         lengths=batch_lens,
                                         subtask_lengths=batch_sketch_lens,
                                         dropout_p=0.,
                                         decode=True)
        parsing_metric[sketch_len].append(parsing_res)
        parsing_metric[sketch_len].apply(lambda _t: _t[0].item())
    return parsing_metric
def main():
    parser = argparse.ArgumentParser(description='D2L Linear Regression')
    parser.add_argument('--run_mode',
                        type=str,
                        nargs='?',
                        default='mxnet',
                        help='input run_mode. "raw" or "mxnet"')
    args = parser.parse_args()
    run_mode = args.run_mode

    num_inputs = 2
    num_examples = 1000
    batch_size = 10
    lr = 0.03  # Learning rate
    num_epochs = 10  # Number of iterations

    print(f'run {run_mode} code ...')

    if run_mode == 'raw':
        data_loader = Dataloader(TRUE_W, TRUE_B, num_inputs, num_examples,
                                 batch_size)
        features, labels = data_loader.get_data()

        model = LinearModel(num_inputs, lr, batch_size)

        for epoch in range(num_epochs):
            # Assuming the number of examples is divisible by the batch size,
            # every example in the training set is used exactly once per epoch.
            # The features and labels of each mini-batch are given by X and y
            # respectively.

            for X, y in data_loader.data_iter():
                with autograd.record():
                    # Mini-batch loss for X and y
                    l = model.squared_loss(model.linreg(X), y)
                l.backward()  # Compute gradient on l with respect to [w,b]
                model.sgd()
                # sgd([w, b], lr, batch_size)  # Update parameters using their gradient

            train_l = model.squared_loss(model.linreg(features), labels)

            print('epoch %d, loss %f' % (epoch + 1, train_l.mean().asnumpy()))
    else:
        data_loader = MxDataLoader(TRUE_W, TRUE_B, num_inputs, num_examples,
                                   batch_size)
        features, labels = data_loader.get_data()

        model = MxLinearModel(lr)

        for epoch in range(num_epochs):
            for X, y in data_loader.data_iter:
                with autograd.record():
                    l = model.loss(model.net(X), y)
                l.backward()
                model.trainer.step(batch_size)

            l = model.loss(model.net(features), labels)
            print('epoch %d, loss: %f' % (epoch + 1, l.mean().asnumpy()))

    model.print_result(TRUE_W, TRUE_B)
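
# A minimal sketch of the LinearModel helpers used by the 'raw' branch above.
# The method bodies follow the classic d2l.ai implementations of linreg,
# squared_loss, and sgd; the wrapper class itself and its parameter
# initialization are assumptions, since they are not shown in this example.
from mxnet import nd

class LinearModel:
    def __init__(self, num_inputs, lr, batch_size):
        self.w = nd.random.normal(scale=0.01, shape=(num_inputs, 1))
        self.b = nd.zeros(shape=(1,))
        for param in (self.w, self.b):
            param.attach_grad()
        self.lr, self.batch_size = lr, batch_size

    def linreg(self, X):
        # Linear regression forward pass: Xw + b
        return nd.dot(X, self.w) + self.b

    def squared_loss(self, y_hat, y):
        # Halved squared error, matching the d2l.ai convention
        return (y_hat - y.reshape(y_hat.shape)) ** 2 / 2

    def sgd(self):
        # Mini-batch SGD step; call after l.backward() has filled .grad
        for param in (self.w, self.b):
            param[:] = param - self.lr * param.grad / self.batch_size
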
Example #7
from data.Dataloader import *
from utils.general_utils import *

data_path = 'C:/Users/harsh/Downloads/Assignments/Spring 2020/CS 766 Computer Vision/Project/Data/17flowers'
image_size = 256
batch_size = 2
image_format = 'jpg'

loader = Dataloader(data_path,
                    image_size,
                    batch_size=batch_size,
                    image_format=image_format,
                    validation_required=(True, 0.2, 'train_validation_split'))
trainloader, validloader = loader.get_data_loader()

generate_sample(trainloader)
Example #8
def main(training_folder):
    logging.info('Start taco...')
    dataloader = Dataloader(FLAGS.sketch_lengths, 0.2)
    model = ModularPolicy(nb_subtasks=10,
                          input_dim=39,
                          n_actions=9,
                          a_mu=dataloader.a_mu,
                          a_std=dataloader.a_std,
                          s_mu=dataloader.s_mu,
                          s_std=dataloader.s_std)
    if FLAGS.cuda:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=FLAGS.taco_lr)

    train_steps = 0
    writer = SummaryWriter(training_folder)
    train_iter = dataloader.train_iter(batch_size=FLAGS.taco_batch_size)
    nb_frames = 0
    curr_best = np.inf
    train_stats = DictList()

    # test dataloader
    test_sketch_lengths = set(FLAGS.test_sketch_lengths) - set(FLAGS.sketch_lengths)
    test_dataloader = None if len(test_sketch_lengths) == 0 else Dataloader(test_sketch_lengths, FLAGS.il_val_ratio)
    scheduler = DropoutScheduler()
    while True:
        if train_steps > FLAGS.taco_train_steps:
            logging.info('Reaching maximum steps')
            break

        if train_steps % FLAGS.taco_eval_freq == 0:
            val_metrics = evaluate_loop(dataloader, model, dropout_p=scheduler.dropout_p)
            logging_metrics(nb_frames, train_steps, val_metrics, writer, 'val')

            if test_dataloader is not None:
                test_metrics = evaluate_loop(test_dataloader, model, dropout_p=scheduler.dropout_p)
                logging_metrics(nb_frames, train_steps, test_metrics, writer, 'test')

            avg_loss = [val_metrics[env_name].loss for env_name in val_metrics]
            avg_loss = np.mean(avg_loss)
            if avg_loss < curr_best:
                curr_best = avg_loss
                logging.info('Save Best with loss: {}'.format(avg_loss))
                # Save the checkpoint
                with open(os.path.join(training_folder, 'bot_best.pkl'), 'wb') as f:
                    torch.save(model, f)

        model.train()
        train_batch, train_lengths, train_subtask_lengths = next(train_iter)
        if FLAGS.cuda:
            train_batch.apply(lambda _t: _t.cuda())
            train_lengths = train_lengths.cuda()
            train_subtask_lengths = train_subtask_lengths.cuda()
        start = time.time()
        train_outputs = teacherforce_batch(modular_p=model,
                                           trajs=train_batch,
                                           lengths=train_lengths,
                                           subtask_lengths=train_subtask_lengths,
                                           decode=False,
                                           dropout_p=scheduler.dropout_p)
        optimizer.zero_grad()
        train_outputs['loss'].backward()
        optimizer.step()
        train_steps += 1
        scheduler.step()
        nb_frames += train_lengths.sum().item()
        end = time.time()
        fps = train_lengths.sum().item() / (end - start)
        train_outputs['fps'] = torch.tensor(fps)

        train_outputs = DictList(train_outputs)
        train_outputs.apply(lambda _t: _t.item())
        train_stats.append(train_outputs)

        if train_steps % FLAGS.taco_eval_freq == 0:
            train_stats.apply(lambda _tensors: np.mean(_tensors))
            logger_str = ['[TRAIN] steps={}'.format(train_steps)]
            for k, v in train_stats.items():
                logger_str.append("{}: {:.4f}".format(k, v))
                writer.add_scalar('train/' + k, v, global_step=nb_frames)
            logging.info('\t'.join(logger_str))
            train_stats = DictList()
            writer.flush()
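
# Launch sketch (assumption: like run() above, this script reads absl-style
# FLAGS, so it would be started through absl's app runner; the output folder
# name is a placeholder):
#
# from absl import app
# app.run(lambda argv: main('exp/taco'))
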
Example #9
    elif '-l' in opt:
      LR = float(opt[1])
    elif '-d' in opt:
      DIMS = int(opt[1])
    elif '-b' in opt:
      BETA = float(opt[1])
    elif '-g' in opt:
      BVAE = 'g'

  if model is None:
    model = BVAEs[BVAE](device, z_dim=DIMS)

  data_file = np.load('data/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz', encoding='bytes')

  if action == 'train':
    train_dl = Dataloader(device, data_file['imgs'], TRAIN_BATCH)
    
    optim = torch.optim.Adam(model.parameters(), lr=LR)
    scheduler = torch.optim.lr_scheduler.StepLR(optim, S_SZ, S_MUL, verbose=True)
    name = '{}_{}_b{}_{}.tm'.format(BVAE, datetime.datetime.now().strftime('%m%d_%H_%M'), BETA, DIMS)

    try:
      loss = train(train_dl, model, optim, BETA, EPOCHS, scheduler)
      torch.save(model.state_dict(), './models/' + name)

      output.print('model', name)
      output.print('loss', loss)
    except KeyboardInterrupt:
      s = input('Do you want to save the model? ')
      if s == 'y':
        torch.save(model.state_dict(), './models/' + name)
def main(cmd_args):
    """Run the main training function."""

    parser = get_parser()
    args, _ = parser.parse_known_args(cmd_args)

    # logging info
    if args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # set random seed
    logging.info("random seed = %d" % args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    # load dictionary for debug log
    if args.dict is not None:
        with open(args.dict, "rb") as f:
            dictionary = f.readlines()
        char_list = [
            entry.decode("utf-8").split(" ")[0] for entry in dictionary
        ]
        char_list.insert(0, "<blank>")
        char_list.append("<eos>")
        args.char_list = char_list
    else:
        args.char_list = None

    with open(args.valid_json, "rb") as f:
        valid_json = json.load(f)["utts"]
    utts = list(valid_json.keys())
    idim = int(valid_json[utts[0]]["input"][0]["shape"][-1])
    odim = int(valid_json[utts[0]]["output"][0]["shape"][-1])

    logging.info("input dims: " + str(idim))
    logging.info("#output dims: " + str(odim))

    # data
    Data = Dataloader(args)

    # model
    Model = Transducer(idim, odim, args)

    # update saved model
    call_back = ModelCheckpoint(monitor='val_loss', dirpath=args.outdir)

    # train model
    trainer = Trainer(gpus=args.ngpu,
                      callbacks=[call_back],
                      max_epochs=args.epochs,
                      resume_from_checkpoint=args.resume)
    trainer.fit(Model, Data)
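
# Entry-point sketch: main() consumes the raw command-line arguments (it
# calls parse_known_args on them above), so the usual guard suffices:
#
# if __name__ == "__main__":
#     import sys
#     main(sys.argv[1:])
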
def main(training_folder):
    logging.info('Start compile...')
    dataloader = Dataloader(FLAGS.sketch_lengths, 0.2)
    model = compile.CompILE(vec_size=39,
                            hidden_size=FLAGS.hidden_size,
                            action_size=9,
                            env_arch=FLAGS.env_arch,
                            max_num_segments=FLAGS.compile_max_segs,
                            latent_dist=FLAGS.compile_latent,
                            beta_b=FLAGS.compile_beta_b,
                            beta_z=FLAGS.compile_beta_z,
                            prior_rate=FLAGS.compile_prior_rate,
                            dataloader=dataloader)
    if FLAGS.cuda:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=FLAGS.compile_lr)

    train_steps = 0
    writer = SummaryWriter(training_folder)
    train_iter = dataloader.train_iter(batch_size=FLAGS.compile_batch_size)
    nb_frames = 0
    curr_best = np.inf
    train_stats = DictList()
    while True:
        if train_steps > FLAGS.compile_train_steps:
            logging.info('Reaching maximum steps')
            break

        if train_steps % FLAGS.compile_eval_freq == 0:
            # Testing
            val_metrics = {}
            model.eval()
            for env_name in FLAGS.sketch_lengths:
                val_metrics[env_name] = DictList()
                val_iter = dataloader.val_iter(
                    batch_size=FLAGS.compile_batch_size, env_names=[env_name])
                for val_batch, val_lengths, val_sketch_lens in val_iter:
                    if FLAGS.cuda:
                        val_batch.apply(lambda _t: _t.cuda())
                        val_lengths = val_lengths.cuda()
                        val_sketch_lens = val_sketch_lens.cuda()
                    with torch.no_grad():
                        val_outputs, extra_info = model.forward(
                            val_batch, val_lengths, val_sketch_lens)
                    val_metrics[env_name].append(val_outputs)

                # Parsing
                total_lengths = 0
                total_task_corrects = 0
                val_iter = dataloader.val_iter(batch_size=FLAGS.eval_episodes,
                                               env_names=[env_name],
                                               shuffle=True)
                val_batch, val_lengths, val_sketch_lens = next(val_iter)
                if FLAGS.cuda:
                    val_batch.apply(lambda _t: _t.cuda())
                    val_lengths = val_lengths.cuda()
                    val_sketch_lens = val_sketch_lens.cuda()
                with torch.no_grad():
                    val_outputs, extra_info = model.forward(
                        val_batch, val_lengths, val_sketch_lens)
                seg = torch.stack(extra_info['segment'], dim=1).argmax(-1)
                for batch_id, (length, sketch_length, _seg) in enumerate(
                        zip(val_lengths, val_sketch_lens, seg)):
                    traj = val_batch[batch_id]
                    traj = traj[:length]
                    _gt_subtask = traj.gt_onsets
                    target = point_of_change(_gt_subtask)
                    _seg = _seg.sort()[0].cpu().tolist()  # boundaries in time order

                    # Remove the last boundary because it is trivial
                    val_metrics[env_name].append({
                        'f1_tol0': f1(target, _seg, 0),
                        'f1_tol1': f1(target, _seg, 1),
                        'f1_tol2': f1(target, _seg, 2)
                    })

                    # subtask
                    total_lengths += length.item()
                    _decoded_subtask = get_subtask_seq(
                        length.item(),
                        subtask=traj.tasks.tolist(),
                        use_ids=np.array(_seg))
                    total_task_corrects += (
                        _gt_subtask.cpu() == _decoded_subtask.cpu()).float().sum()

                # record task acc
                val_metrics[env_name].task_acc = total_task_corrects / total_lengths

                # Print parsing result
                lines = []
                lines.append('tru_ids: {}'.format(target))
                lines.append('dec_ids: {}'.format(_seg))
                logging.info('\n'.join(lines))
                val_metrics[env_name].apply(
                    lambda _t: torch.tensor(_t).float().mean().item())

            # Logger
            for env_name, metric in val_metrics.items():
                line = ['[VALID][{}] steps={}'.format(env_name, train_steps)]
                for k, v in metric.items():
                    line.append('{}: {:.4f}'.format(k, v))
                logging.info('\t'.join(line))

            mean_val_metric = DictList()
            for metric in val_metrics.values():
                mean_val_metric.append(metric)
            mean_val_metric.apply(lambda t: torch.mean(torch.tensor(t)))
            for k, v in mean_val_metric.items():
                writer.add_scalar('val/' + k, v.item(), nb_frames)
            writer.flush()

            avg_loss = [val_metrics[env_name].loss for env_name in val_metrics]
            avg_loss = np.mean(avg_loss)
            if avg_loss < curr_best:
                curr_best = avg_loss
                logging.info('Save Best with loss: {}'.format(avg_loss))
                # Save the checkpoint
                with open(os.path.join(training_folder, 'bot_best.pkl'),
                          'wb') as f:
                    torch.save(model, f)

        model.train()
        train_batch, train_lengths, train_sketch_lens = next(train_iter)
        if FLAGS.cuda:
            train_batch.apply(lambda _t: _t.cuda())
            train_lengths = train_lengths.cuda()
            train_sketch_lens = train_sketch_lens.cuda()
        train_outputs, _ = model.forward(train_batch, train_lengths,
                                         train_sketch_lens)

        optimizer.zero_grad()
        train_outputs['loss'].backward()
        optimizer.step()
        train_steps += 1
        nb_frames += train_lengths.sum().item()

        train_outputs = DictList(train_outputs)
        train_outputs.apply(lambda _t: _t.item())
        train_stats.append(train_outputs)

        if train_steps % FLAGS.compile_eval_freq == 0:
            train_stats.apply(lambda _tensors: np.mean(_tensors))
            logger_str = ['[TRAIN] steps={}'.format(train_steps)]
            for k, v in train_stats.items():
                logger_str.append("{}: {:.4f}".format(k, v))
                writer.add_scalar('train/' + k, v, global_step=nb_frames)
            logging.info('\t'.join(logger_str))
            train_stats = DictList()
            writer.flush()