Example #1
def main():
    vocab, train_iter, val_iter, test_iter = dataset_factory(
        'twitter-customer-support')

    epochs = 100
    embedding_size = 20
    hidden_size = 100
    vocab_size = len(vocab)
    padding_idx = vocab.stoi['<pad>']

    encoder = Encoder(vocab_size, embedding_size, hidden_size)
    decoder = Decoder(vocab_size, embedding_size, hidden_size)
    seq2seq = cuda(Seq2Seq(encoder, decoder, vocab_size))

    optimizer = optim.Adam(seq2seq.parameters())

    best_val_loss = None
    for epoch in range(epochs):
        # calculate train and val loss
        train_loss = train(seq2seq, optimizer, train_iter, vocab_size, 5,
                           padding_idx)
        val_loss = evaluate(seq2seq, val_iter, vocab_size, padding_idx)
        print("[Epoch=%d] train_loss %f - val_loss %f" %
              (epoch, train_loss, val_loss))

        # save model if it achieved the best val loss so far
        if best_val_loss is None or val_loss < best_val_loss:
            print('Saving model...')
            save_model(seq2seq, epoch, val_loss)
            best_val_loss = val_loss
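
This snippet leans on project helpers that are not shown (cuda, save_model, train, evaluate). A minimal sketch of the two generic ones in plain PyTorch, with names, signatures, and the checkpoint path assumed for illustration:

import torch

def cuda(module):
    # Move the model to the GPU when one is available; no-op on CPU-only machines.
    return module.cuda() if torch.cuda.is_available() else module

def save_model(model, epoch, val_loss, path='seq2seq-best.pt'):
    # Persist the weights plus enough metadata to compare checkpoints later.
    torch.save({'epoch': epoch,
                'val_loss': val_loss,
                'state_dict': model.state_dict()},
               path)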
Example #2
def main():
    args = parse_args()
    cuda = torch.cuda.is_available() and args.cuda
    torch.set_default_tensor_type(
        torch.cuda.FloatTensor if cuda else torch.FloatTensor)
    device = torch.device('cuda' if cuda else 'cpu')

    print("Using %s for training" % ('GPU' if cuda else 'CPU'))
    print('Loading dataset...', end='', flush=True)
    metadata, vocab, train_iter, val_iter, test_iter = dataset_factory(
        args, device)
    print('Done.')

    print('Saving vocab and args...', end='')
    save_vocab(vocab, args.save_path + os.path.sep + 'vocab')
    save_object(args, args.save_path + os.path.sep + 'args')
    print('Done')

    model = train_model_factory(args, metadata)
    if cuda and args.multi_gpu:
        model = nn.DataParallel(
            model,
            dim=1)  # if we were using batch_first we'd have to use dim=0
    print(model)  # print model summary

    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate,
                           amsgrad=True)

    try:
        best_val_loss = None
        for epoch in range(args.max_epochs):
            start = datetime.now()
            # calculate train and val loss
            train_loss = train(model, optimizer, train_iter, metadata,
                               args.gradient_clip)
            val_loss = evaluate(model, val_iter, metadata)
            print("[Epoch=%d/%d] train_loss %f - val_loss %f time=%s " %
                  (epoch + 1, args.max_epochs, train_loss, val_loss,
                   datetime.now() - start),
                  end='')

            # save model if it achieved the best val loss (or if save-every-epoch is selected)
            if args.save_every_epoch or best_val_loss is None or val_loss < best_val_loss:
                print('(Saving model...', end='')
                save_model(args.save_path, model, epoch + 1, train_loss,
                           val_loss)
                print('Done)', end='')
                best_val_loss = val_loss
            print()
    except (KeyboardInterrupt, BrokenPipeError):
        print('[Ctrl-C] Training stopped.')

    test_loss = evaluate(model, test_iter, metadata)
    print("Test loss %f" % test_loss)
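
save_vocab and save_object are again project-specific helpers. A plausible minimal implementation via pickle (an assumption, not code from the source repo):

import pickle

def save_object(obj, path):
    # Serialize an arbitrary Python object (here: the parsed args) to disk.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def save_vocab(vocab, path):
    # The vocab object round-trips through pickle the same way.
    save_object(vocab, path)

As a side note, os.path.join(args.save_path, 'vocab') is the more idiomatic spelling of the path concatenation used above.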
Example #3
"""
Whether to use Pangolin (to use it, first install it via the script install_thirdparty.sh).
"""
kUsePangolin = False

if kUsePangolin:
    from viewer3D import Viewer3D


if __name__ == "__main__":

    config = Config()

    dataset = dataset_factory(config.dataset_settings)

    groundtruth = groundtruth_factory(config.dataset_settings)

    cam = PinholeCamera(config.cam_settings['Camera.width'], config.cam_settings['Camera.height'],
                        config.cam_settings['Camera.fx'], config.cam_settings['Camera.fy'],
                        config.cam_settings['Camera.cx'], config.cam_settings['Camera.cy'],
                        config.DistCoef, config.cam_settings['Camera.fps'])


    num_features = 2000  # number of features to detect and track

    # select your tracker configuration (see the file feature_tracker_configs.py) 
    # LK_SHI_TOMASI, LK_FAST
    # SHI_TOMASI_ORB, FAST_ORB, ORB, BRISK, AKAZE, FAST_FREAK, SIFT, ROOT_SIFT, SURF, SUPERPOINT, FAST_TFEAT
    tracker_config = FeatureTrackerConfigs.LK_SHI_TOMASI
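
For readers unfamiliar with the camera constructor above, here is a self-contained sketch of a pinhole camera model with the same parameter list (field names and the project method are illustrative assumptions, not the project's actual class):

class PinholeCamera:
    def __init__(self, width, height, fx, fy, cx, cy, dist_coef, fps):
        self.width, self.height = width, height  # image size in pixels
        self.fx, self.fy = fx, fy                # focal lengths in pixels
        self.cx, self.cy = cx, cy                # principal point in pixels
        self.dist_coef = dist_coef               # lens distortion coefficients
        self.fps = fps                           # frame rate of the sequence

    def project(self, x, y, z):
        # Pinhole projection: map a 3D point in the camera frame to pixels.
        u = self.fx * x / z + self.cx
        v = self.fy * y / z + self.cy
        return u, v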