def testZeroOut(self, test_inputs, test_targets, criterion, net_gru_mse, net_gru_dtw, net_gru_dilate):
        """Print the first test sample's de-batched predictions and the three
        losses (MSE, soft-DTW, DILATE) for each trained network.

        Args:
            test_inputs: input batch tensor; assumes (batch, seq, features)
                layout -- TODO confirm against the data pipeline.
            test_targets: target batch tensor, same batch layout.
            criterion: MSE loss callable (target, prediction).
            net_gru_mse, net_gru_dtw, net_gru_dilate: trained networks.
        """
        ## Testing for first input/target loss

        criterion_softdtw = SoftDTW(gamma=self.gamma, normalize=True)

        # Run each network exactly once and reuse the outputs; the original
        # re-ran every forward pass for each printed loss (4x per net).
        mse_out = net_gru_mse(test_inputs)
        dtw_out = net_gru_dtw(test_inputs)
        dilate_out = net_gru_dilate(test_inputs)

        zero_inputs = test_inputs.detach().cpu().numpy()[0, :, self.idx_tgt_col]
        zero_targets = test_targets.detach().cpu().numpy()[0, :, :]

        # .detach().cpu() already moves data to host; the original's extra
        # .to(self.device) before it was a no-op and has been dropped.
        zero_mse_pred = mse_out.detach().cpu().numpy()[0, :, :]
        zero_dtw_pred = dtw_out.detach().cpu().numpy()[0, :, :]
        zero_dilate_pred = dilate_out.detach().cpu().numpy()[0, :, :]

        print(f"zero input:{zero_inputs}")
        print(f"zero targets:{zero_targets}")
        print(f"zero mse:{zero_mse_pred}")
        print(f"zero dtw:{zero_dtw_pred}")
        print(f"zero dilate:{zero_dilate_pred}")

        print(f"mse net mse: {criterion(test_targets, mse_out)}, "
              f"dtw: {criterion_softdtw(test_targets, mse_out)}, "
              f"dilate: {dilate_loss(mse_out, test_targets, alpha=self.alpha, gamma=self.gamma, device=self.device)} ")

        print(f"dtw net mse: {criterion(test_targets, dtw_out)}, "
              f"dtw: {criterion_softdtw(test_targets, dtw_out)}, "
              f"dilate: {dilate_loss(dtw_out, test_targets, alpha=self.alpha, gamma=self.gamma, device=self.device)}, ")

        print(f"dilate net mse: {criterion(test_targets, net_gru_dilate(test_inputs))}, "
              f"dtw: {criterion_softdtw(test_targets, dilate_out)}, "
              f"dilate: {dilate_loss(dilate_out, test_targets, alpha=self.alpha, gamma=self.gamma, device=self.device)}, ")
## Example #2
## 0
    def train_model(self,
                    net,
                    batch_size,
                    loss_type,
                    learning_rate,
                    epochs=1000,
                    gamma=0.001,
                    print_every=50,
                    eval_every=50,
                    verbose=1,
                    Lambda=1,
                    alpha=0.5):
        """Train ``net`` on ``self.trainloader`` with the selected loss.

        Args:
            net: model to optimize; called as ``net(inputs)``.
            batch_size: forwarded to ``self.eval_model`` for reporting.
            loss_type: one of ``'mse'``, ``'dtw'`` (soft-DTW) or ``'dilate'``.
            learning_rate: Adam learning rate.
            epochs: number of passes over the training loader.
            gamma: smoothing parameter for soft-DTW / DILATE.
            print_every: log + evaluate every this many epochs when verbose.
            eval_every, Lambda: currently unused; kept for interface
                compatibility with callers.
            verbose: when truthy, print progress and run evaluation.
            alpha: shape/temporal trade-off of the DILATE loss.

        Raises:
            ValueError: if ``loss_type`` is not a supported loss name.

        Side effects: updates ``net`` parameters in place, prints progress,
        and calls ``self.eval_model`` on ``self.testloader``.
        """
        # Fail fast: the original left `loss` unbound for an unrecognized
        # loss_type and crashed later with UnboundLocalError at backward().
        if loss_type not in ('mse', 'dtw', 'dilate'):
            raise ValueError(f"unsupported loss_type: {loss_type!r}")

        optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
        criterion = torch.nn.MSELoss()
        criterion_softdtw = SoftDTW(gamma=gamma, normalize=True)

        for epoch in range(epochs):
            for i, data in enumerate(self.trainloader, 0):

                inputs, target = data

                # as_tensor avoids the copy-and-warn behavior of
                # torch.tensor() when the loader already yields tensors.
                inputs = torch.as_tensor(inputs,
                                         dtype=torch.float32).to(self.device)
                target = torch.as_tensor(target,
                                         dtype=torch.float32).to(self.device)

                # forward + backward + optimize
                outputs = net(inputs)
                # Defaults so the verbose print below always has values,
                # even for losses that don't produce shape/temporal terms.
                loss_mse, loss_shape, loss_temporal = torch.tensor(
                    0), torch.tensor(0), torch.tensor(0)

                if loss_type == 'dtw':
                    loss = torch.mean(criterion_softdtw(outputs, target))
                elif loss_type == 'mse':
                    loss_mse = criterion(target, outputs)
                    loss = loss_mse
                else:  # 'dilate'
                    loss, loss_shape, loss_temporal = dilate_loss(
                        outputs, target, alpha, gamma, self.device)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            if verbose and epoch % print_every == 0:
                print('epoch ', epoch, ' loss ', loss.item(),
                      ' loss shape ', loss_shape.item(), ' loss temporal ',
                      loss_temporal.item())
                self.eval_model(net,
                                self.testloader,
                                batch_size,
                                gamma,
                                verbose=1)
## Example #3
## 0
def train_model(net,
                batch_size,
                loss_type,
                learning_rate,
                epochs=1000,
                gamma=0.001,
                print_every=50,
                eval_every=50,
                verbose=1,
                Lambda=1,
                alpha=0.5,
                target_mean=0,
                target_std=0):
    """Train ``net`` on the module-level ``trainloader`` with the chosen loss.

    Args:
        net: model to optimize; called as ``net(inputs)``.
        batch_size: currently unused; kept for interface compatibility.
        loss_type: one of ``'mse'``, ``'dtw'`` (soft-DTW) or ``'dilate'``.
        learning_rate: Adam learning rate.
        epochs: number of passes over ``trainloader``.
        gamma: smoothing parameter for soft-DTW / DILATE.
        print_every: log + evaluate every this many epochs when verbose.
        eval_every, Lambda: currently unused; kept for interface compatibility.
        verbose: when truthy, print progress and run evaluation.
        alpha: shape/temporal trade-off of the DILATE loss.
        target_mean, target_std: de-normalization constants for the debug
            plots drawn after epoch 300.

    Raises:
        ValueError: if ``loss_type`` is not a supported loss name.

    Side effects: updates ``net`` in place, prints progress, shows matplotlib
    figures after epoch 300, and calls the module-level ``eval_model``.
    """
    # Fail fast: the original left `loss` unbound for an unrecognized
    # loss_type and crashed later with UnboundLocalError at backward().
    if loss_type not in ('mse', 'dtw', 'dilate'):
        raise ValueError(f"unsupported loss_type: {loss_type!r}")

    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    criterion = torch.nn.MSELoss()
    criterion_softdtw = SoftDTW(gamma=gamma, normalize=True)

    for epoch in range(epochs):
        for i, data in enumerate(trainloader, 0):

            # CustomDS batches carry a third element; plain loaders yield
            # (inputs, target). Unpack by length instead of the original
            # nested bare excepts, which on a malformed batch would silently
            # reuse the previous iteration's data.
            if len(data) == 3:
                inputs, target, _ = data
            else:
                inputs, target = data

            # as_tensor avoids the copy-and-warn behavior of torch.tensor()
            # when the loader already yields tensors.
            inputs = torch.as_tensor(inputs, dtype=torch.float32).to(device)
            target = torch.as_tensor(target, dtype=torch.float32).to(device)

            # forward + backward + optimize
            outputs = net(inputs)
            # Defaults so the prints below always have values, even for
            # losses that don't produce shape/temporal terms.
            loss_mse, loss_shape, loss_temporal = torch.tensor(
                0), torch.tensor(0), torch.tensor(0)

            if loss_type == 'dtw':
                loss = torch.mean(criterion_softdtw(outputs, target))
            elif loss_type == 'mse':
                loss_mse = criterion(target, outputs)
                loss = loss_mse
            else:  # 'dilate'
                loss, loss_shape, loss_temporal = dilate_loss(
                    outputs, target, alpha, gamma, device)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Debug plots once training has progressed: input / target /
            # prediction per batch element, de-normalized to data scale.
            if epoch > 300:
                # Materialize host copies once per batch instead of once
                # per sample as the original did.
                preds_batch = outputs.detach().cpu().numpy()
                inputs_batch = inputs.detach().cpu().numpy()
                target_batch = target.detach().cpu().numpy()
                for z in range(len(preds_batch)):
                    preds_arr = preds_batch[z, :, :] * target_std + target_mean
                    input_arr = inputs_batch[
                        z, :, idx_tgt_col] * target_std + target_mean
                    target_arr = target_batch[
                        z, :, :] * target_std + target_mean

                    plt.plot(range(0, len(input_arr)),
                             input_arr,
                             label='input',
                             linewidth=1)

                    # Prepend the last input point so the target and
                    # prediction curves connect visually to the input curve.
                    plt.plot(range(
                        len(input_arr) - 1,
                        len(input_arr) + len(target_arr)),
                             np.concatenate([
                                 input_arr[len(input_arr) - 1:len(input_arr)],
                                 target_arr.ravel()
                             ]),
                             label='target',
                             linewidth=1)

                    plt.plot(range(
                        len(input_arr) - 1,
                        len(input_arr) + len(target_arr)),
                             np.concatenate([
                                 input_arr[len(input_arr) - 1:len(input_arr)],
                                 preds_arr.ravel()
                             ]),
                             label='prediction',
                             linewidth=1)
                    # Fixed original typo: the title printed a stray literal
                    # "f" before the loss name (f"f{loss_type}...").
                    plt.title(
                        f"{loss_type}: {loss.item()}, loss shape: {loss_shape.item()}, loss temporal: {loss_temporal.item()}"
                    )
                    plt.show()

        if verbose and epoch % print_every == 0:
            print('epoch ', epoch, ' loss ', loss.item(), ' loss shape ',
                  loss_shape.item(), ' loss temporal ',
                  loss_temporal.item())
            eval_model(net, testloader, gamma, verbose=1)
## Example #4
## 0
# Pull one batch from the test generator for manual inspection of the
# three trained networks. NOTE(review): gen_test, device, the nets, and the
# normalization constants are module-level globals defined elsewhere.
test_inputs, test_targets = next(gen_test)

test_inputs = torch.tensor(test_inputs, dtype=torch.float32).to(device)
test_targets = torch.tensor(test_targets, dtype=torch.float32).to(device)
criterion = torch.nn.MSELoss()

nets = [net_gru_mse, net_gru_dtw, net_gru_dilate]
nets_name = ["net_gru_mse", "net_gru_dtw", "net_gru_dilate"]

######################################################################################################################
############################################## RAN ABOVE ON CONSOLE ##################################################
######################################################################################################################

## Testing for first input/target loss

criterion_softdtw = SoftDTW(gamma=gamma, normalize=True)

# Undo the standardization (x * std + mean) so printed values are in the
# original data scale. NOTE(review): assumes inputs/targets were normalized
# with target_log_mean / target_log_std upstream -- confirm with the
# preprocessing code. Only the first batch element ([0, ...]) is inspected.
zero_inputs = test_inputs.detach().cpu().numpy()[
    0, :, idx_tgt_col] * target_log_std + target_log_mean
zero_targets = test_targets.detach().cpu().numpy()[
    0, :, :] * target_log_std + target_log_mean
# One forward pass per network; .to(device) before .detach().cpu() is a
# no-op round-trip kept as-is here (see review note).
zero_mse_pred = net_gru_mse(test_inputs).to(device).detach().cpu().numpy()[
    0, :, :] * target_log_std + target_log_mean
zero_dtw_pred = net_gru_dtw(test_inputs).to(device).detach().cpu().numpy()[
    0, :, :] * target_log_std + target_log_mean
zero_dilate_pred = net_gru_dilate(test_inputs).to(
    device).detach().cpu().numpy()[0, :, :] * target_log_std + target_log_mean

print(f"zero input:{zero_inputs}")
print(f"zero targets:{zero_targets}")
print(f"zero mse:{zero_mse_pred}")