# Beispiel #1 (Example #1) — scraped snippet separator; score: 0
            # NOTE(review): fragment — the enclosing training loop begins outside this excerpt.
            pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
            # count correct predictions (moved to CPU so the running sum is a plain int)
            correct += pred.eq(y.data.view_as(pred)).long().cpu().sum().item()

            # accumulator
            train_loss += loss.item()

    # fill stats
    # periodic progress print: every epoch for the first 20, then every 200th
    if epoch < 20 or epoch%200 == 0:
        print("train last batch {} of {}: recon_loss {:.3f}".format(i,len(train_loader),loss))
    # per-sample averages over the samples actually seen (train_num), not len(dataset)
    train_accuracy = correct / train_num 
    train_loss /= train_num
    epoch_train_loss.append(train_loss)
    epoch_train_acc.append(train_accuracy) 
    
    # VALIDATION
    # switch to inference mode and reset per-epoch validation accumulators
    model.eval()
    correct = 0
    val_loss = 0
    val_num = 0
    for i, (XI, XB,  y) in enumerate(val_loader):
        # pick the input modality matching the model's front-end — XI presumably
        # the image-like input for the CNN header, XB otherwise; confirm upstream
        if model.header == 'CNN':
            x = XI
        else:
            x = XB
        x, y = x.to(device), y.long().to(device)
        # stop at the trailing partial batch; `break` assumes it is the loader's last batch
        if x.size()[0] != batch_size:
#             print("batch {} size {} < {}, skip".format(i, x.size()[0], batch_size))
            break
        val_num += x.size(0)
        x_decoded, latent, output = model(x)
# Beispiel #2 (Example #2) — scraped snippet separator; score: 0
    # NOTE(review): fragment — train_accuracy_B, correct_I, etc. accumulate in a
    # training loop above this excerpt. All running sums are normalized by
    # train_num (samples actually seen), not len(dataset).
    train_loss_B /= train_num
    epoch_train_loss_B.append(train_loss_B)
    epoch_train_acc_B.append(train_accuracy_B)

    train_accuracy_I = correct_I / train_num
    train_loss_I /= train_num
    epoch_train_loss_I.append(train_loss_I)
    epoch_train_acc_I.append(train_accuracy_I)

    train_loss_C /= train_num
    epoch_train_loss_C.append(train_loss_C)

    # total loss is appended without the /= train_num normalization applied to the
    # per-branch losses above — TODO confirm this is intended
    epoch_train_loss_tot.append(train_loss_tot)

    # VALIDATION
    # both branch models to inference mode (disables dropout, freezes batch-norm stats)
    model_B.eval()
    model_I.eval()

    # reset per-epoch validation accumulators
    correct_B = 0
    val_loss_B = 0
    correct_I = 0
    val_loss_I = 0
    val_loss_C = 0
    val_loss_tot = 0
    val_num = 0

    for i, (XI, XB, y) in enumerate(val_loader):
        XI, XB, y = XI.to(device), XB.to(device), y.long().to(device)

        # stop at the trailing partial batch; assumes it is the loader's last batch
        if XI.size()[0] != batch_size:
            break
# Beispiel #3 (Example #3) — scraped snippet separator; score: 0
                           # NOTE(review): fragment — the constructor call these
                           # keyword arguments belong to begins above this excerpt.
                           print_every=print_every,
                           clip=clip,
                           max_grad_norm=max_grad_norm,
                           dload=logDir,
                           model_name=model_name_B,
                           header=header_B,
                           device=device)
# Load pretrained B-branch weights; map tensors to CPU when CUDA is unavailable.
model_B_pretrained_dir = logDir + model_name_B + '.pt'
if device == torch.device('cpu'):
    model_B_pretrained.load_state_dict(
        torch.load(model_B_pretrained_dir, map_location=torch.device('cpu')))
else:
    model_B_pretrained.load_state_dict(torch.load(model_B_pretrained_dir))

# move to the target device and switch to inference mode
model_B_pretrained.to(device)
model_B_pretrained.eval()

print("load model from")
print(model_name_B)

# Build the I-branch model. VRAEC is a project-local class — presumably a
# variational recurrent autoencoder + classifier; confirm against its definition.
model_I = VRAEC(num_class=num_class,
                sequence_length=sequence_length_I,
                number_of_features=number_of_features_I,
                hidden_size=hidden_size,
                hidden_layer_depth=hidden_layer_depth,
                latent_length=latent_length,
                batch_size=batch_size,
                learning_rate=learning_rate,
                n_epochs=n_epochs,
                dropout_rate=dropout_rate,
                cuda=cuda,
                # NOTE(review): the argument list is truncated here in this excerpt
    # fill stats
    # NOTE(review): fragment — correct_B, train_num, the loss accumulators and the
    # training loop that fills them are above this excerpt.
    train_accuracy_B = correct_B / train_num  # len(train_loader.dataset)
    train_loss_B /= train_num  #len(train_loader.dataset)
    epoch_train_loss_B.append(train_loss_B)
    epoch_train_acc_B.append(train_accuracy_B)

    train_accuracy_I = correct_I / train_num  # len(train_loader.dataset)
    train_loss_I /= train_num  #len(train_loader.dataset)
    epoch_train_loss_I.append(train_loss_I)
    epoch_train_acc_I.append(train_accuracy_I)

    train_loss_C /= train_num
    epoch_train_loss_C.append(train_loss_C)

    # VALIDATION
    # branch models to inference mode, then reset per-epoch accumulators
    model_B.eval()
    model_I.eval()

    correct_B = 0
    val_loss_B = 0
    correct_I = 0
    val_loss_I = 0
    val_loss_C = 0
    val_loss_tot = 0
    val_num = 0

    for i, (XI, XB, y) in enumerate(val_loader):
        XI, XB, y = XI.to(device), XB.to(device), y.long().to(device)

        if XI.size()[0] != batch_size:
            #             print("batch {} size {} < {}, skip".format(i, x.size()[0], batch_size))
        # NOTE(review): the excerpt is garbled here — the `if` body and the print
        # call / format string this `.format(...)` belongs to are missing.
        .format(loss_B, loss_I, loss_C, recon_loss_B, kl_loss_B, cl_loss_B,
                recon_loss_I, kl_loss_I, cl_loss_I))

    # fill stats
    # NOTE(review): fragment — near-duplicate of the stats block earlier in this
    # dump; the C-branch normalization present in the other copies is absent here.
    train_accuracy_B = correct_B / train_num  # len(train_loader.dataset)
    train_loss_B /= train_num  #len(train_loader.dataset)
    epoch_train_loss_B.append(train_loss_B)
    epoch_train_acc_B.append(train_accuracy_B)

    train_accuracy_I = correct_I / train_num  # len(train_loader.dataset)
    train_loss_I /= train_num  #len(train_loader.dataset)
    epoch_train_loss_I.append(train_loss_I)
    epoch_train_acc_I.append(train_accuracy_I)

    # VALIDATION
    # branch models to inference mode, then reset per-epoch accumulators
    model_B.eval()
    model_I.eval()

    correct_B = 0
    val_loss_B = 0
    correct_I = 0
    val_loss_I = 0
    val_loss_tot = 0
    val_num = 0

    for i, (XI, XB, y) in enumerate(val_loader):
        XI, XB, y = XI.to(device), XB.to(device), y.long().to(device)

        # skip the trailing partial batch; `break` assumes it is the loader's last batch
        if XI.size()[0] != batch_size:
            #             print("batch {} size {} < {}, skip".format(i, x.size()[0], batch_size))
            break
                           # NOTE(review): fragment — the constructor call these
                           # keyword arguments belong to begins above this excerpt.
                           learning_rate=learning_rate,
                           n_epochs=n_epochs,
                           dropout_rate=dropout_rate,
                           cuda=cuda,
                           model_name=model_name_I,
                           header=header_I,
                           device=device)
model_I_pretrained.to(device)

# Load pretrained I-branch weights; map tensors to CPU when CUDA is unavailable.
model_I_pretrained_dir = logDir + model_name_I + '.pt'
if device == torch.device('cpu'):
    model_I_pretrained.load_state_dict(
        torch.load(model_I_pretrained_dir, map_location=torch.device('cpu')))
else:
    model_I_pretrained.load_state_dict(torch.load(model_I_pretrained_dir))
# inference mode: disables dropout, freezes batch-norm statistics
model_I_pretrained.eval()

print("load model I from")
print(model_name_I)

# Initialize training settings
# Only model_B's parameters go to the optimizer here — TODO confirm model_I is
# trained elsewhere or intentionally frozen.
optimB = optim.Adam(model_B.parameters(), lr=learning_rate)
cl_loss_fn = nn.NLLLoss()  # classification loss; expects log-probabilities
recon_loss_fn = nn.MSELoss()  # reconstruction loss

training_start = datetime.now()

# create empty lists to fill stats later
epoch_train_loss_B = []
epoch_train_acc_B = []
epoch_val_loss_B = []
# Beispiel #7 (Example #7) — scraped snippet separator; score: 0
            # NOTE(review): fragment — the training loop and the construction of
            # `loss` begin above this excerpt.
            # accumulate the scalar loss before the backward/optimizer step
            train_loss += loss.item()

            loss.backward()
            optimizer.step()

    # if epoch < 20 or epoch%200 == 0:
    #     print("train last batch: recon_loss {:.3f}".format(loss))

    # fill stats
    # per-sample averages over the samples actually seen (train_num)
    train_accuracy = correct / train_num
    train_loss /= train_num
    epoch_train_loss.append(train_loss)
    epoch_train_acc.append(train_accuracy) 
    
    # VALIDATION
    # switch to inference mode and reset per-epoch validation accumulators
    model.eval()
    correct = 0
    val_loss = 0
    val_num = 0
    for i, (XI, XB,  y) in enumerate(val_loader):
        # pick the input modality matching the model's front-end — confirm XI/XB
        # semantics against the dataset definition
        if model.header == 'CNN':
            x = XI
        else:
            x = XB
        x, y = x.to(device), y.long().to(device)
        # stop at the trailing partial batch; `break` assumes it is the loader's last batch
        if x.size()[0] != batch_size:
            break
        val_num += x.size(0)
        x_decoded, latent, output = model(x)

        # construct loss function