# --- Example n. 1 ---
def load_data_tttttttttttttt():
    """Smoke-test the data pipeline: build the sign<->id maps, then load every split.

    Calls ``load_dataset`` once per hold-out id purely for its side effects
    (exercising the loading code); nothing is returned.
    """
    from data.load_data import load_dataset
    from data.some_script import id_2_sign

    root = "/home/fanyang/PycharmProjects/SignLanguage/data/tctodd"
    _, sign_id = id_2_sign(root)

    # NOTE(review): hold_id takes the values 0, -1, ..., -8 here, while the
    # sibling training code uses hold_id in 0..8 — confirm that negative ids
    # are intentional and handled by load_dataset.
    for i in range(9):
        load_dataset(root, sign_id, hold_id=-i)
# --- Example n. 2 ---
def main():
    """Synthesize an input that maximizes the class-0 logit of a trained model.

    Loads a checkpointed ``SimpleNN1DCNN``, freezes its weights, and runs
    gradient ascent on a randomly initialized [1, 1, 22, 90] input tensor so
    that the class-0 logit grows; every 50000 steps the current input is
    rendered as a visdom heatmap and the class-0 probability is printed.
    """
    root = "/home/fanyang/PycharmProjects/SignLanguage/data/tctodd"
    id_sign, sign_id = id_2_sign(root)

    train_samples, dev_samples = load_dataset(root, sign_id, hold_id=8)

    train_dataset = SignLangDataset(sample_list=train_samples)
    dev_dataset = SignLangDataset(sample_list=dev_samples)

    # Load the trained model and freeze every weight — only the input tensor
    # is optimized below.
    model = SimpleNN1DCNN()
    model.load_state_dict(
        torch.load('/home/fanyang/PycharmProjects/SignLanguage/ckpt/model.pkl'))
    model.cuda()
    for param in model.parameters():
        param.requires_grad = False

    vis = visdom.Visdom()
    min_val, max_val = get_min_max_from_dataset(train_dataset)
    # min: -0.4535120129585266, max: 1.0
    print("min:{}, max:{}".format(min_val, max_val))
    vis.heatmap(X=train_dataset[0][0], opts={'xmin': min_val, 'xmax': max_val})

    # [22, 90] input, target label 0.
    # BUGFIX: .cuda() must be applied to the tensor *before* wrapping it in a
    # Variable — the original wrapped first, so `inputs` was a non-leaf whose
    # .grad was never populated and the SGD steps were no-ops (the original
    # `inputs._grad_fn = None` hack was an attempt to work around this).
    inputs = Variable(torch.randn(1, 1, 22, 90).cuda(), requires_grad=True)
    nn.init.normal(inputs, std=.001)

    model.optimizer = optim.SGD(params=[inputs], lr=1e-4)

    for i in count():
        logits = model(inputs)
        logits = torch.squeeze(logits)
        model.optimizer.zero_grad()
        # Backprop -1 into logit[0]: the subsequent SGD step then *increases*
        # the class-0 logit (gradient ascent via a negated gradient).
        logits[0].backward(torch.FloatTensor([-1.]).cuda())
        model.optimizer.step()
        if (i + 1) % 50000 == 0:
            vis.heatmap(X=torch.squeeze(inputs).cpu().data,
                        opts={'xmin': min_val, 'xmax': max_val,
                              'title': '%d-step' % (i + 1)})
            # `logits` is 1-D after squeeze, so softmax over dim=0 (the
            # original omitted dim, which is deprecated/ambiguous).
            print("step:%d" % (i + 1), "prob:%.7f" %
                  F.softmax(logits, dim=0)[0].cpu().data.numpy()[0])
            adjust_learning_rate(model.optimizer)
# --- Example n. 3 ---
def main():
    """Cross-validated training: one ``SimpleNN1DCNN`` per hold-out split.

    For each hold_id in 0..8, builds fresh train/dev datasets and a fresh
    model, then runs 101 epochs of training with per-epoch learning-rate
    decay and validation, checkpointing/logging under ``ckpt/hold-id-<n>``.
    """
    root = "/home/fanyang/PycharmProjects/SignLanguage/data/tctodd"
    id_sign, sign_id = id_2_sign(root)

    for hold_id in range(9):

        train_samples, dev_samples = load_dataset(root, sign_id, hold_id=hold_id)

        train_dataset = SignLangDataset(sample_list=train_samples)
        dev_dataset = SignLangDataset(sample_list=dev_samples)

        # Fresh model, loss and optimizer for every split.
        model = SimpleNN1DCNN()
        model.cuda()
        model.criterion = nn.CrossEntropyLoss()
        model.optimizer = optim.Adam(params=model.parameters())

        writer = SummaryWriter('ckpt/hold-id-%d' % hold_id)
        routine = Routine(model=model,
                          saver_dir='ckpt/hold-id-%d' % hold_id,
                          writer=writer)

        # BUGFIX: the original inner loop reused `i`, shadowing the hold-out
        # index, and spelled 101 epochs as `count()` + `if i == 100: break`.
        # A distinct counter over range(101) keeps the same epoch count
        # (0..100 inclusive) without the shadowing hazard.
        for epoch in range(101):
            train_loader = SignLangDataLoader(dataset=train_dataset,
                                              batch_size=1,
                                              shuffle=True)
            dev_loader = SignLangDataLoader(dataset=dev_dataset,
                                            batch_size=1,
                                            shuffle=True)
            routine.train_one_epoch(train_loader)

            tools.adjust_learning_rate(model.optimizer)

            routine.validation(dev_loader)
# --- Example n. 4 (fragment: enclosing function header and loop bodies are
# truncated in this scrape) ---
    print("Training models with {} layers".format(layers))
    width = 64
    lstm_width = 49

    for layers in [1, 2, 3]:
        for val_set in range(1, 8):
            # Load the data set
            dataset_options = load_data.make_default_options(train_bs=1,
                                                             train_sl=2048,
                                                             val_bs=10,
                                                             ar=False,
                                                             val_set=val_set)
            dataset_options["subject"] = subject
            train_loader, val_loader, test_loader, scaling_factors = load_data.load_dataset(
                dataset="gait_prediction_stairs",
                dataset_options=dataset_options)

            nu = train_loader.nu
            ny = train_loader.ny

            # Options for the solver
            # solver_options = nlsdp.make_stochastic_nlsdp_options(max_epochs=max_epochs, lr=5.0E-4, mu0=100, lr_decay=0.98)
            solver_options = nlsdp.make_stochastic_nlsdp_options(
                max_epochs=max_epochs,
                lr=lr,
                mu0=10,
                lr_decay=lr_decay,
                patience=patience)

            # Train an LSTM network
    layers = 2
    print("Training models with {} layers".format(layers))
    width = 64

    for subject in range(2, 10):
        for val_set in range(0, 9):

            # Load the data set
            dataset_options = load_data.make_default_options(train_bs=1,
                                                             train_sl=2048,
                                                             val_bs=10,
                                                             ar=ar,
                                                             val_set=val_set)
            dataset_options["subject"] = subject
            train_loader, val_loader, test_loader = load_data.load_dataset(
                dataset="gait_prediction_stairs",
                dataset_options=dataset_options)

            nu = train_loader.nu
            ny = train_loader.ny

            # Options for the solver
            # solver_options = nlsdp.make_stochastic_nlsdp_options(max_epochs=max_epochs, lr=5.0E-4, mu0=100, lr_decay=0.98)
            solver_options = nlsdp.make_stochastic_nlsdp_options(
                max_epochs=max_epochs,
                lr=5.0E-4,
                mu0=500,
                lr_decay=0.987,
                patience=20)

            # Train Unconstrained model - still project onto stable models for stable initial point
# --- Example n. 6 ---
from utils import pose_utils as util
import torch
import numpy as np
from tqdm import tqdm
from imageio import get_writer
from skimage.io import imsave

# Parse test-time options; force single-threaded, unshuffled, batch-1,
# no-augmentation evaluation (the inline comments say the test path supports
# nothing else).
opt = Options().parse(save=False)
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip
opt.use_first_frame = False  # NOTE(review): purpose unclear from here — confirm

dataset = load_dataset(opt)  #CreateDataset(opt)
model = load_model(opt)  #create_model(opt)
# First clip of the dataset; presumably a dict of per-frame tensors keyed by
# 'label' / 'inst' / 'image' (only 'label' and 'inst' are sliced per frame
# below) — verify against the dataset implementation.
data = dataset[0]

# Start autoregressive generation from an all-zero "previous frame".
prev_frame = torch.zeros_like(data['image'])
start_from = 0
generated = []

# Each generated frame is conditioned on the frame produced at the previous
# step (fed back in with an added batch dimension).
for i in tqdm(range(start_from, dataset.clip_length)):
    label = data['label'][i:i + 1]
    inst = None if opt.no_instance else data['inst'][i:i + 1]

    cur_frame = model.inference(label, inst, torch.unsqueeze(prev_frame,
                                                             dim=0))
    prev_frame = cur_frame.data[0]
# --- Example n. 7 (fragment: enclosing function header is truncated and the
# final model construction is cut off mid-call) ---
    layers = int(sys.argv[1])
    # layers = 3
    print("Training models with {} layers".format(layers))
    width = 200

    for subject in range(1, 50):
        for val_set in range(0, 20):

            # Load the data set
            dataset_options = load_data.make_default_options(train_bs=40,
                                                             train_sl=200,
                                                             val_set=val_set,
                                                             test_sl=2000)
            dataset_options["gain"] = 1.4
            train_loader, val_loader, test_loader = load_data.load_dataset(
                dataset="chen", dataset_options=dataset_options)

            nu = train_loader.nu
            ny = train_loader.ny

            # Options for the solver
            solver_options = nlsdp.make_stochastic_nlsdp_options(
                max_epochs=max_epochs,
                lr=0.1E-4,
                mu0=2000,
                lr_decay=0.96,
                patience=10)

            # Train Contracting implicit model ------------------------------------------------------------------------
            name = "contracting_sub{:d}_val{:d}".format(subject, val_set)
            model = diRNN.diRNN(nu,
# --- Example n. 8 ---
from options.demo_options import DemoTestOptions
from data.load_data import load_dataset
from models.load_model import load_model
import util.util as util
import torch
from imageio import get_writer
import numpy as np
from tqdm import tqdm

# Parse demo test options; force single-threaded, unshuffled, batch-1,
# no-flip evaluation (per the inline comments, the test path supports
# nothing else).
opt = DemoTestOptions().parse(save=False)
opt.nThreads = 1  # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip

dataset = load_dataset(opt)
model = load_model(opt)
#if opt.verbose:                                                  # demo changed
#    print(model)

# test whole video sequence
# 20181009: do we use first frame as input?

# First clip of the demo dataset; its exact structure is defined by the
# project's dataset class — not visible from here.
data = dataset[0]
#if opt.use_first_frame:                                          # demo changed
#    prev_frame = data['image']
#    start_from = 1
#    from skimage.io import imsave
#    imsave('results/ref.png', util.tensor2im(prev_frame))
#    generated = [util.tensor2im(prev_frame)]
if 1:  #else: