Example #1
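    # Resume from saved checkpoints when available; otherwise train from scratch.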
        print('Load Model')
    except Exception:
        print('No Model found')
        train_tcn = True
    try:
        generator.load_state_dict(torch.load('generator.pt'))
        print('Load Generator Model')
    except Exception:
        print('Generator Model Not Found')
    if train_tcn:
        model = model.cuda()
        optimizer_tcn = optim.Adam(model.parameters(), lr=lr_tcn)
        scheduler_tcn = optim.lr_scheduler.StepLR(optimizer_tcn, step_size=step_size_tcn, gamma=gamma_tcn)
        for j in range(epochs_tcn):
            avg_loss = 0
            for i in range(nsample // batch_size):
                itrain = train[i]  # [batch_size, 1, length]
                if per_datapoint:
                    # One optimizer step per time step of the sequence.
                    for k in range(itrain.size(2)):
                        optimizer_tcn.zero_grad()
                        ioutput = model(itrain)  # [batch_size, 3*n_components, length]
                        loss = LogMixGaussian(ioutput, itrain, batch_size, n_components, index=k)
                        loss.backward()
                        optimizer_tcn.step()
                else:
                    # One optimizer step per batch over the full sequence.
                    optimizer_tcn.zero_grad()
                    ioutput = model(itrain)  # [batch_size, 3*n_components, length]
                    loss = LogMixGaussian(ioutput, itrain, batch_size, n_components)
                    loss.backward()
                    optimizer_tcn.step()
            scheduler_tcn.step()  # advance the LR schedule once per epoch
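The snippet calls a LogMixGaussian loss that is not shown. Below is a minimal sketch of what such a mixture-density negative log-likelihood could look like, assuming the network emits 3*n_components channels per time step (mixture logits, means, log-scales); the channel split order and the mean reduction are assumptions, not the original implementation.

import math

import torch
import torch.nn.functional as F

def LogMixGaussian(output, target, batch_size, n_components, index=None):
    # output: [batch, 3*n_components, length]; target: [batch, 1, length]
    # batch_size is kept for signature compatibility; unused here.
    logit_pi, mu, log_sigma = output.chunk(3, dim=1)
    if index is not None:
        # Score only the time step selected by `index`.
        logit_pi = logit_pi[..., index:index + 1]
        mu = mu[..., index:index + 1]
        log_sigma = log_sigma[..., index:index + 1]
        target = target[..., index:index + 1]
    log_pi = F.log_softmax(logit_pi, dim=1)  # log mixture weights
    # Per-component Gaussian log-density, broadcasting target over components.
    log_prob = (-0.5 * ((target - mu) / log_sigma.exp()) ** 2
                - log_sigma - 0.5 * math.log(2 * math.pi))
    # Log-sum-exp over components, averaged into a scalar NLL.
    return -torch.logsumexp(log_pi + log_prob, dim=1).mean()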
Example #2
    test_target = torch.transpose(test_target, 1, 2)
    print(input.size(), target.size())
    # build the model
    input_size = 2  # dimension of each sequence element
    num_hidden = 8  # num hidden units per layer
    levels = 10  # num layers
    channel_sizes = [num_hidden] * levels
    kernel_size = 8
    # Use the TCN specified in tcn.py
    seq = TCN(input_size, input_size, channel_sizes, kernel_size, dropout=0.0)
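    # Assuming the usual TCN(input_size, output_size, ...) wrapper from tcn.py,
    # this maps a 2-feature sequence to a 2-feature sequence of the same length.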
    if use_cuda:
        seq.cuda()
    seq.double()
    criterion = nn.MSELoss()
    # use LBFGS as the optimizer, since the whole dataset fits in memory
    optimizer = optim.LBFGS(seq.parameters(), lr=0.08)
    #begin to train
    best_loss = 1e8

    EPOCHS = 100

    for i in range(EPOCHS):
        print('EPOCH: ', i)

        def closure():
            # LBFGS calls this closure to re-evaluate the loss and gradients.
            optimizer.zero_grad()
            out = seq(input)
            loss = criterion(out, target)
            print('loss:', loss.item())
            loss.backward()
            return loss

        optimizer.step(closure)
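Unlike first-order optimizers such as Adam, torch.optim.LBFGS may evaluate the objective several times per update, which is why its step() takes a closure that recomputes the forward pass and gradients and returns the loss.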
Example #3
mse = nn.MSELoss()

# Move models to computing device
c3d.to(device)
tcn.to(device)
ap.to(device)
rn.to(device)

logSoftmax.to(device)
ctc.to(device)
mse.to(device)

# Define Optimizers
c3d_optim = torch.optim.AdamW(c3d.parameters(), lr=args.lr)
rn_optim = torch.optim.AdamW(rn.parameters(), lr=args.lr)
tcn_optim = torch.optim.AdamW(tcn.parameters(), lr=args.lr)
ap_optim = torch.optim.AdamW(ap.parameters(), lr=args.lr)

# Define Schedulers (halve each LR every 3000 steps)
c3d_scheduler = StepLR(c3d_optim, step_size=3000, gamma=0.5)
rn_scheduler = StepLR(rn_optim, step_size=3000, gamma=0.5)
tcn_scheduler = StepLR(tcn_optim, step_size=3000, gamma=0.5)
ap_scheduler = StepLR(ap_optim, step_size=3000, gamma=0.5)

# Load Saved Models & Optimizers & Schedulers
if args.checkpoint is not None:
    my_load(c3d, "c3d.pkl")
    my_load(tcn, "tcn.pkl")
    my_load(ap, "ap.pkl")
    my_load(rn, "rn.pkl")
    my_load(c3d_optim, "c3d_optim.pkl")
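my_load is a project-local helper that is not shown. A plausible minimal sketch, assuming each .pkl file holds a state_dict written with torch.save(); the real helper may also handle device mapping or missing files:

import torch

def my_load(obj, path):
    # Restore a saved state_dict; works for nn.Module, Optimizer,
    # and LR scheduler alike, since all expose load_state_dict().
    obj.load_state_dict(torch.load(path))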
Example #4
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
# TemporalConvNet is assumed to come from the accompanying TCN implementation.

train_x = np.load('train_x_ele_24.npy')
train_y = np.load('train_y_ele_24.npy')
valid_x = np.load('valid_x_ele_24.npy')
valid_y = np.load('valid_y_ele_24.npy')

print(train_x.shape, train_y.shape)
print(valid_x.shape, valid_y.shape)

model = TemporalConvNet(past_seq_len=36,
                        input_feature_num=2,
                        future_seq_len=12,
                        output_feature_num=1,
                        num_channels=[16] * 6,
                        kernel_size=3,
                        dropout=0.2).cuda()
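# With 6 levels and kernel_size 3, a standard two-convs-per-block TCN has a
# receptive field of 1 + 2*(3-1)*(2**6 - 1) = 253 steps, comfortably covering
# the 36-step input history (assuming the usual Bai et al. residual design).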
opt = torch.optim.Adam(model.parameters(), lr=0.01)
loss_fn = nn.MSELoss()


def train_epoch(model, x, y, opt, loss_fn, x_test, y_test):
    batch_size = 512
    model.train()
    total_loss = 0
    train_loader = DataLoader(TensorDataset(x, y),
                              batch_size=batch_size,
                              shuffle=True)
    batch_idx = 0
    for x_batch, y_batch in train_loader:
        # Move the batch to the GPU where the model lives.
        x_batch, y_batch = x_batch.cuda(), y_batch.cuda()
        opt.zero_grad()
        yhat = model(x_batch)
        loss = loss_fn(yhat, y_batch)
        loss.backward()
        opt.step()
        total_loss += loss.item()
        batch_idx += 1
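The shown portion of train_epoch never touches its x_test and y_test arguments, so a validation pass presumably followed in the truncated remainder. A minimal sketch of what such an epoch-end evaluation could look like (the helper name and reduction are assumptions, not the original code):

def evaluate(model, x_test, y_test, loss_fn):
    # Hypothetical epoch-end validation pass on the held-out arrays.
    model.eval()
    with torch.no_grad():
        yhat = model(x_test.cuda())
        return loss_fn(yhat, y_test.cuda()).item()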