Example #1
def train_stage2(H_config, embedder, supervisor, generator):

    # Dataset
    data_set = SensorSignalDataset(root_dir=dataset_dir, transform=None)
    data_loader = DataLoader(dataset=data_set,
                             batch_size=H_config["batch_size"],
                             shuffle=False,
                             num_workers=1)

    # Loss
    criterion = SupervisedLoss()

    # Set models to training mode
    embedder.train()
    supervisor.train()
    generator.train()

    # Optimizer
    # models_param = [generator.parameters(), supervisor.parameters()]
    # optimizer = torch.optim.Adam(params=itertools.chain(*models_param), lr=learning_rate)

    # Jointly optimize the generator and supervisor parameters
    optimizer = torch.optim.Adam(
        [{'params': generator.parameters()},
         {'params': supervisor.parameters()}],
        lr=H_config["learning rate2"])

    print('Start Training with Supervised Loss Only')

    num_epochs = H_config["num_epochs"]

    for epoch in range(num_epochs):

        training_loss = 0.0

        for i, inputs in enumerate(data_loader):

            X = inputs[0].to(CUDA_DEVICES)

            optimizer.zero_grad()

            H = embedder(X, None)
            H_hat_supervise = supervisor(H, None)

            # One-step-ahead supervised loss: the supervisor's output at step t
            # is matched against the embedding at step t + 1
            loss = criterion(H_hat_supervise[:, :-1, :], H[:, 1:, :])
            loss.backward()
            optimizer.step()

            training_loss += loss.item() * X.size(0)

        training_loss = training_loss / len(data_set)

        # Log roughly five times over the run (guard against num_epochs < 5)
        if epoch % max(1, num_epochs // 5) == 0:
            print(f'epoch: {epoch}/{num_epochs}, '
                  f's_loss: {np.round(np.sqrt(training_loss), 4)}')

        tune.report(loss=training_loss)
    print('Finish Training with Supervised Loss Only')
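SupervisedLoss is not defined in this snippet. Judging from the call on the shifted slices H_hat_supervise[:, :-1, :] and H[:, 1:, :], a minimal sketch is a mean-squared error on the one-step-ahead latent prediction, as in the TimeGAN training scheme this code follows; the class below is an assumption, not the repository's code.

import torch.nn as nn

class SupervisedLoss(nn.Module):
    """Hypothetical sketch: MSE between the supervisor's one-step-ahead
    prediction and the next latent code."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, h_hat_supervise, h_next):
        # h_hat_supervise: supervisor output for steps 0 .. T-2
        # h_next:          embedder output for steps 1 .. T-1
        return self.mse(h_hat_supervise, h_next)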
Example #2
def train_stage1(H_config, embedder, recovery):

    # Dataset
    data_set = SensorSignalDataset(root_dir=dataset_dir, transform=None)
    data_loader = DataLoader(dataset=data_set,
                             batch_size=H_config["batch_size"],
                             shuffle=False,
                             num_workers=1)

    # Loss
    criterion = EmbedderLoss()

    # Set models to training mode
    embedder.train()
    recovery.train()

    # Optimizer
    # models_param = [embedder.parameters(), recovery.parameters()]
    # optimizer = torch.optim.Adam(params=itertools.chain(*models_param), lr=learning_rate)
    # Jointly optimize the embedder and recovery parameters
    optimizer = torch.optim.Adam(
        [{'params': embedder.parameters()},
         {'params': recovery.parameters()}],
        lr=H_config["learning rate1"])

    print('Start Embedding Network Training')

    num_epochs = H_config["num_epochs"]

    for epoch in range(num_epochs):

        training_loss = 0.0

        for i, inputs in enumerate(data_loader):

            X = inputs[0].to(CUDA_DEVICES)

            optimizer.zero_grad()

            H = embedder(X, None)
            outputs = recovery(H, None)

            # `loss_only` is the raw reconstruction error (logged below);
            # `loss` is the variant actually backpropagated
            loss_only, loss = criterion(outputs, X)
            loss.backward()
            optimizer.step()

            training_loss += loss_only.item() * X.size(0)

        training_loss = training_loss / len(data_set)

        # Log roughly five times over the run (guard against num_epochs < 5)
        if epoch % max(1, num_epochs // 5) == 0:
            print(f'epoch: {epoch}/{num_epochs}, '
                  f'e_loss: {np.round(np.sqrt(training_loss), 4)}')

        tune.report(loss=training_loss)

    print('Finish Embedding Network Training')
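EmbedderLoss returns two values: loss_only is accumulated for reporting (its square root is printed above), while loss drives the backward pass. A sketch assuming the TimeGAN convention of scaling the reconstruction MSE before optimization; the scale factor is an assumption.

import torch
import torch.nn as nn

class EmbedderLoss(nn.Module):
    """Hypothetical sketch: raw reconstruction MSE for logging plus a scaled
    variant for the backward pass (factor 10, following TimeGAN)."""

    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, x_tilde, x):
        loss_only = self.mse(x_tilde, x)     # reported value
        loss = 10 * torch.sqrt(loss_only)    # optimized value
        return loss_only, loss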
Example #3
def Generate_data():

    data_set = SensorSignalDataset(root_dir=dataset_dir, transform=None)
    data_loader = DataLoader(dataset=data_set,
                             batch_size=config.getint('generate_data',
                                                      'batch_size'),
                             shuffle=True,
                             num_workers=1)

    model_path = PATH_TO_WEIGHTS + '/' + date_dir + '/' + classification_dir

    generator = torch.load(model_path + '/' + generator_name)
    supervisor = torch.load(model_path + '/' + supervisor_name)
    recovery = torch.load(model_path + '/' + recovery_name)

    generator = generator.cuda(CUDA_DEVICES)
    supervisor = supervisor.cuda(CUDA_DEVICES)
    recovery = recovery.cuda(CUDA_DEVICES)

    generator.eval()
    supervisor.eval()
    recovery.eval()

    data_names = 1  # running file index; Save_Data returns the next index

    # Disable autograd during generation; only forward passes are needed here
    with torch.no_grad():
        for inputs in data_loader:

            # Each batch carries the signal plus the two min/max pairs needed
            # to undo the two normalization passes
            X, min_val1, max_val1, min_val2, max_val2 = inputs[:5]

            z_batch_size, z_seq_len, z_dim = X.shape
            Z = random_generator(z_batch_size, z_seq_len, z_dim)
            Z = Z.to(CUDA_DEVICES)

            min_val1 = min_val1.to(CUDA_DEVICES)
            max_val1 = max_val1.to(CUDA_DEVICES)
            min_val2 = min_val2.to(CUDA_DEVICES)
            max_val2 = max_val2.to(CUDA_DEVICES)

            # Noise -> latent -> supervised latent -> signal space
            E_hat = generator(Z, None)
            H_hat = supervisor(E_hat, None)
            X_hat = recovery(H_hat, None)

            # Invert the two scalers in reverse order of application
            X_hat = ReMinMaxScaler2(X_hat, min_val2, max_val2)
            X_hat = ReMinMaxScaler1(X_hat, min_val1, max_val1)

            data_names = Save_Data(X_hat, data_names)
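random_generator has to return a noise tensor matching the batch shape of X, since the result is moved to the GPU and fed straight to the generator. A minimal sketch assuming i.i.d. uniform noise in [0, 1), as in the reference TimeGAN implementation:

import torch

def random_generator(batch_size, seq_len, dim):
    # Hypothetical sketch: uniform noise Z ~ U[0, 1) with shape
    # (batch, seq_len, features), mirroring the shape of a real batch
    return torch.rand(batch_size, seq_len, dim)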
Example #4
def train_test_dataloader(dataset_dir="", mode='test'):

    data_set = SensorSignalDataset(root_dir=dataset_dir, transform=None)

    train_dataset_size = int(
        config.getfloat(mode, 'trainset_percentage') * len(data_set))
    test_dataset_size = len(data_set) - train_dataset_size
    train_dataset, test_dataset = torch.utils.data.random_split(
        data_set, [train_dataset_size, test_dataset_size])

    train_data_loader = DataLoader(dataset=train_dataset,
                                   batch_size=config.getint(mode, 'batch_size'),
                                   shuffle=True,
                                   num_workers=1)
    test_data_loader = DataLoader(dataset=test_dataset,
                                  batch_size=config.getint(mode, 'batch_size'),
                                  shuffle=True,
                                  num_workers=1)

    return train_data_loader, test_data_loader
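The split ratio and batch size come from the ConfigParser section named by mode. A usage sketch; the [test] values and the dataset path are illustrative, since the real configuration file is not part of these snippets:

import configparser

# Illustrative stand-in for the module-level `config` the function reads
config = configparser.ConfigParser()
config.read_string("""
[test]
trainset_percentage = 0.8
batch_size = 32
""")

train_loader, test_loader = train_test_dataloader(dataset_dir='path/to/dataset',
                                                  mode='test')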
Example #5
def train_stage3(embedder, recovery, generator, supervisor, discriminator):

    print('Start Joint Training')

    # Dataset
    data_set = SensorSignalDataset(root_dir=dataset_dir, transform=None)
    data_loader = DataLoader(dataset=data_set,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=1)

    # generator loss
    Gloss_criterion = JointGloss()
    Eloss_criterion = JointEloss()
    Dloss_criterion = JointDloss()

    # model
    embedder.train()
    recovery.train()
    generator.train()
    supervisor.train()
    discriminator.train()

    # optimizer
    # models_paramG = [generator.parameters(), supervisor.parameters()]
    # optimizerG = torch.optim.Adam(params=itertools.chain(*models_paramG), lr=learning_rate)

    optimizerG = torch.optim.Adam(
        [{'params': generator.parameters()},
         {'params': supervisor.parameters()}],
        lr=learning_rate3)

    # models_paramE = [embedder.parameters(), recovery.parameters()]
    # optimizerE = torch.optim.Adam(params=itertools.chain(*models_paramE), lr=learning_rate)

    optimizerE = torch.optim.Adam(
        [{'params': embedder.parameters()},
         {'params': recovery.parameters()}],
        lr=learning_rate4)

    optimizerD = torch.optim.Adam(params=discriminator.parameters(),
                                  lr=learning_rate5)

    for epoch in range(num_epochs):

        training_loss_G = 0.0
        training_loss_U = 0.0
        training_loss_S = 0.0
        training_loss_V = 0.0
        training_loss_E0 = 0.0
        training_loss_D = 0.0

        # Discriminator training: five passes over the data per epoch
        for _ in range(5):
            for i, inputs in enumerate(data_loader):

                X = inputs[0].to(CUDA_DEVICES)
                optimizerD.zero_grad()

                z_batch_size, z_seq_len, z_dim = X.shape
                Z = random_generator(z_batch_size, z_seq_len, z_dim)
                Z = Z.to(CUDA_DEVICES)

                E_hat = generator(Z, None)
                Y_fake_e = discriminator(E_hat, None)
                H_hat = supervisor(E_hat, None)
                Y_fake = discriminator(H_hat, None)

                H = embedder(X, None)
                Y_real = discriminator(H, None)

                lossD = Dloss_criterion(Y_real, Y_fake, Y_fake_e)

                # Train discriminator (only when the discriminator does not work well)
                if lossD > 0.15:
                    lossD.backward()
                    optimizerD.step()
                    training_loss_D += lossD.item() * X.size(0)

        # Generator and embedder training: a single pass per epoch
        for _ in range(1):
            for inputs in data_loader:

                X = inputs[0].to(CUDA_DEVICES)

                optimizerG.zero_grad()
                optimizerE.zero_grad()

                # Train generator
                z_batch_size, z_seq_len, z_dim = X.shape
                Z = random_generator(z_batch_size, z_seq_len, z_dim)
                Z = Z.to(CUDA_DEVICES)

                E_hat = generator(Z, None)
                H_hat = supervisor(E_hat, None)
                Y_fake = discriminator(H_hat, None)
                Y_fake_e = discriminator(E_hat, None)
                H = embedder(X, None)
                X_tilde = recovery(H, None)
                H_hat_supervise = supervisor(H, None)
                X_hat = recovery(H_hat, None)

                lossG, loss_U, loss_S, loss_V = Gloss_criterion(
                    Y_fake, Y_fake_e, H[:, 1:, :], H_hat_supervise[:, :-1, :],
                    X, X_hat)

                lossG.backward()
                optimizerG.step()

                training_loss_G += lossG.item() * X.size(0)
                training_loss_U += loss_U.item() * X.size(0)
                training_loss_S += loss_S.item() * X.size(0)
                training_loss_V += loss_V.item() * X.size(0)

                # Train embedder

                H = embedder(X, None)
                X_tilde = recovery(H, None)
                H_hat_supervise = supervisor(H, None)

                lossE, lossE_0 = Eloss_criterion(X_tilde, X, H[:, 1:, :],
                                                 H_hat_supervise[:, :-1, :])

                lossE.backward()
                optimizerE.step()

                training_loss_E0 += lossE_0.item() * X.size(0)


        training_loss_G = 0.5 * (training_loss_G / len(data_set))
        training_loss_U = 0.5 * (training_loss_U / len(data_set))
        training_loss_S = 0.5 * (training_loss_S / len(data_set))
        training_loss_V = 0.5 * (training_loss_V / len(data_set))
        training_loss_E0 = 0.5 * (training_loss_E0 / len(data_set))
        training_loss_D = training_loss_D / len(data_set)

        # Print and checkpoint roughly five times over the run
        if epoch % max(1, num_epochs // 5) == 0:
            print(f'step: {epoch}/{num_epochs}, '
                  f'd_loss: {np.round(training_loss_D, 4)}, '
                  f'g_loss_u: {np.round(training_loss_U, 4)}, '
                  f'g_loss_s: {np.round(np.sqrt(training_loss_S), 4)}, '
                  f'g_loss_v: {np.round(training_loss_V, 4)}, '
                  f'e_loss_t0: {np.round(np.sqrt(training_loss_E0), 4)}')

            epoch_embedder_name = str(epoch) + "_" + embedder_name
            epoch_recovery_name = str(epoch) + "_" + recovery_name
            epoch_generator_name = str(epoch) + "_" + generator_name
            epoch_supervisor_name = str(epoch) + "_" + supervisor_name
            epoch_discriminator_name = str(epoch) + "_" + discriminator_name

            # save model
            today = date.today()
            save_time = today.strftime("%d_%m_%Y")
            output_dir = config.get(
                'train', 'model_path') + '/' + save_time + '/' + config.get(
                    'train', 'classification_dir') + '/'
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            torch.save(embedder, output_dir + epoch_embedder_name)
            torch.save(recovery, output_dir + epoch_recovery_name)
            torch.save(generator, output_dir + epoch_generator_name)
            torch.save(supervisor, output_dir + epoch_supervisor_name)
            torch.save(discriminator, output_dir + epoch_discriminator_name)

    print('Finish Joint Training')
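The three joint criteria are not shown. Dloss_criterion receives the discriminator's outputs on real embeddings, supervised fakes, and raw generator fakes; a sketch of JointDloss assuming BCE-with-logits terms as in TimeGAN (the gamma weight on the raw-generator branch is an assumption):

import torch
import torch.nn as nn

class JointDloss(nn.Module):
    """Hypothetical sketch: real embeddings are pushed toward 1, both kinds
    of fakes toward 0, with a weight gamma on the raw-generator branch."""

    def __init__(self, gamma=1.0):
        super().__init__()
        self.bce = nn.BCEWithLogitsLoss()
        self.gamma = gamma

    def forward(self, y_real, y_fake, y_fake_e):
        loss_real = self.bce(y_real, torch.ones_like(y_real))
        loss_fake = self.bce(y_fake, torch.zeros_like(y_fake))
        loss_fake_e = self.bce(y_fake_e, torch.zeros_like(y_fake_e))
        return loss_real + loss_fake + self.gamma * loss_fake_e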
Example #6
    # Reconstructed opening (the snippet begins mid-call): scatter of the
    # synthetic samples, assuming 2-D t-SNE coordinates in `tsne_results`
    ax.scatter(tsne_results[anal_sample_no:, 0],
               tsne_results[anal_sample_no:, 1],
               c=colors[anal_sample_no:], alpha=0.2, label="Synthetic")
  
    ax.legend()
      
    plt.title('t-SNE plot')
    plt.xlabel('x-tsne')
    plt.ylabel('y-tsne')
    plt.savefig(pic_path + "/" + pic_name, bbox_inches='tight')
    # plt.show()



if __name__ == '__main__':


  real_dataset = SensorSignalDataset(root_dir=real_dataset_dir, transform=None)
  synthetic_dataset = SensorSignalDataset(root_dir=synthetic_dataset_dir, transform=None)

  real_data_loader = DataLoader(dataset=real_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
  synthetic_data_loader = DataLoader(dataset=synthetic_dataset, batch_size=batch_size, shuffle=False, num_workers=1)

  real_data_loader_iter = iter(real_data_loader)
  synthetic_data_loader_iter = iter(synthetic_data_loader)
    
  real_data = next(real_data_loader_iter)
  synthetic_data = next(synthetic_data_loader_iter)

  real_data = real_data[0].numpy()
  synthetic_data = synthetic_data[0].numpy()

  if not os.path.exists(pic_path):
    os.makedirs(pic_path)
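The plotting snippet is cut off at both ends: the t-SNE fit and the scatter of the real samples are missing. A sketch of the typical preparation in the TimeGAN-style visualization; prep_data, the flattening scheme, and the color choices are assumptions consistent with the reconstructed scatter call above:

import numpy as np
from sklearn.manifold import TSNE

# Use an equal number of real and synthetic samples
anal_sample_no = min(len(real_data), len(synthetic_data))

# Flatten each (seq_len, dim) sequence into one feature vector per sample
prep_data = np.concatenate(
    (real_data[:anal_sample_no].reshape(anal_sample_no, -1),
     synthetic_data[:anal_sample_no].reshape(anal_sample_no, -1)))

# Joint 2-D embedding of real and synthetic samples
tsne_results = TSNE(n_components=2, perplexity=40).fit_transform(prep_data)

# Red for real, blue for synthetic; indices [anal_sample_no:] are synthetic
colors = ['red'] * anal_sample_no + ['blue'] * anal_sample_no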