def Generate_data():
    """Synthesize sensor-signal sequences with the trained TimeGAN networks.

    Loads the saved generator/supervisor/recovery models, runs noise through
    noise -> latent -> supervised latent -> feature space for every batch of
    the dataset, undoes both normalization passes, and writes each batch to
    disk via Save_Data.  Relies on module-level configuration (dataset_dir,
    config, PATH_TO_WEIGHTS, date_dir, classification_dir, *_name,
    CUDA_DEVICES).
    """
    dataset = SensorSignalDataset(root_dir=dataset_dir, transform=None)
    loader = DataLoader(dataset=dataset,
                        batch_size=config.getint('generate_data',
                                                 'batch_size'),
                        shuffle=True,
                        num_workers=1)

    # Trained weights live under <PATH_TO_WEIGHTS>/<date_dir>/<classification_dir>.
    model_path = '/'.join((PATH_TO_WEIGHTS, date_dir, classification_dir))

    generator = torch.load(model_path + '/' + generator_name).cuda(CUDA_DEVICES)
    supervisor = torch.load(model_path + '/' + supervisor_name).cuda(CUDA_DEVICES)
    recovery = torch.load(model_path + '/' + recovery_name).cuda(CUDA_DEVICES)

    # Inference only: put every network in eval mode.
    for network in (generator, supervisor, recovery):
        network.eval()

    # Running file index; Save_Data consumes and returns the next one.
    data_names = 1

    for batch in loader:
        X, min_val1, max_val1, min_val2, max_val2 = batch[:5]

        # Draw noise with the same (batch, seq_len, dim) shape as the data.
        n_batch, n_seq, n_dim = X.shape
        Z = random_generator(n_batch, n_seq, n_dim)
        Z = Z.to(CUDA_DEVICES)

        min_val1 = min_val1.to(CUDA_DEVICES)
        max_val1 = max_val1.to(CUDA_DEVICES)
        min_val2 = min_val2.to(CUDA_DEVICES)
        max_val2 = max_val2.to(CUDA_DEVICES)

        # noise -> latent -> supervised latent -> feature space
        X_hat = recovery(supervisor(generator(Z, None), None), None)

        # Undo the two normalization passes in reverse application order.
        X_hat = ReMinMaxScaler2(X_hat, min_val2, max_val2)
        X_hat = ReMinMaxScaler1(X_hat, min_val1, max_val1)

        data_names = Save_Data(X_hat, data_names)
    def train():
        """Run the three TimeGAN training phases, then return synthetic data.

        NOTE(review): this def is nested inside Generate_data in the scraped
        source and is never called there -- it likely belongs at module level
        in the original project; verify against the upstream file.
        """
        # --- Phase 1: embedding (autoencoder) network ----------------------
        print('Start Embedding Network Training')

        for step in range(iterations):
            X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
            step_e_loss = train_step_embedder(X_mb)

            if step % 100 == 0:
                print('step: ' + str(step) + '/' + str(iterations)
                      + ', e_loss: ' + str(np.round(np.sqrt(step_e_loss), 4)))

        print('Finish Embedding Network Training')

        # --- Phase 2: generator with supervised loss only ------------------
        print('Start Training with Supervised Loss Only')

        for step in range(iterations):
            X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
            step_gen_s_loss = train_step_generator_s(X_mb)

            if step % 100 == 0:
                print('step: ' + str(step) + '/' + str(iterations)
                      + ', s_loss: ' + str(np.round(np.sqrt(step_gen_s_loss), 4)))

        print('Finish Training with Supervised Loss Only')

        # --- Phase 3: joint adversarial training ---------------------------
        print('Start Joint Training')

        for step in range(iterations):
            # Generator/embedder side is updated twice per discriminator step.
            for _ in range(2):
                X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
                Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
                emb_T0_loss, emb_loss, g_loss_u, gen_s_loss, g_loss_v = train_step_joint(X_mb, Z_mb)

            # One discriminator update on a fresh mini-batch.
            X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
            Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
            d_loss = train_step_discriminator(X_mb, Z_mb)

            if step % 100 == 0:
                print('step: ' + str(step) + '/' + str(iterations)
                      + ', d_loss: ' + str(np.round(d_loss, 4))
                      + ', g_loss_u: ' + str(np.round(g_loss_u, 4))
                      + ', g_loss_s: ' + str(np.round(np.sqrt(gen_s_loss), 4))
                      + ', g_loss_v: ' + str(np.round(g_loss_v, 4))
                      + ', e_loss_t0: ' + str(np.round(np.sqrt(emb_T0_loss), 4)))

        print('Finish Joint Training')

        # --- Synthetic data generation --------------------------------------
        Z_mb = random_generator(no, z_dim, ori_time, max_seq_len)
        E_hat_generated = generator_model(Z_mb)
        H_hat_generated = supervisor_model(E_hat_generated)
        generated_data_curr = recovery_model(H_hat_generated)

        # Trim each generated sequence back to its original length.
        generated_data = [
            generated_data_curr[i, :ori_time[i], :] for i in range(no)
        ]

        # Renormalization back into the original value range.
        generated_data = generated_data * max_val
        generated_data = generated_data + min_val

        return generated_data
# ---- Exemple #3 (separator from the scraped source page; original score: 0) ----
def train_stage3(embedder, recovery, generator, supervisor, discriminator):
    """Stage-3 joint TimeGAN training (PyTorch variant).

    Per epoch: five passes over the loader for the discriminator (updated
    only while its loss exceeds 0.15), then one pass that updates
    generator+supervisor (adversarial + supervised + moment losses) and
    embedder+recovery (reconstruction + supervised losses).  Periodically
    prints running losses and checkpoints all five networks.

    Relies on module-level names: dataset_dir, batch_size,
    learning_rate3/4/5, num_epochs, CUDA_DEVICES, config, the Joint*loss
    criteria and the *_name checkpoint file names.
    """

    print('Start Joint Training')

    # Dataset
    data_set = SensorSignalDataset(root_dir=dataset_dir, transform=None)
    data_loader = DataLoader(dataset=data_set,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=1)

    # generator loss
    Gloss_criterion = JointGloss()
    Eloss_criterion = JointEloss()
    Dloss_criterion = JointDloss()

    # model
    embedder.train()
    recovery.train()
    generator.train()
    supervisor.train()
    discriminator.train()

    # optimizer
    # models_paramG = [generator.parameters(), supervisor.parameters()]
    # optimizerG = torch.optim.Adam(params=itertools.chain(*models_paramG), lr=learning_rate)

    # One optimizer drives generator and supervisor jointly.
    optimizerG = torch.optim.Adam([{
        'params': generator.parameters()
    }, {
        'params': supervisor.parameters()
    }],
                                  lr=learning_rate3)

    # models_paramE = [embedder.parameters(), recovery.parameters()]
    # optimizerE = torch.optim.Adam(params=itertools.chain(*models_paramE), lr=learning_rate)

    # Likewise, embedder and recovery share one optimizer.
    optimizerE = torch.optim.Adam([{
        'params': embedder.parameters()
    }, {
        'params': recovery.parameters()
    }],
                                  lr=learning_rate4)

    optimizerD = torch.optim.Adam(params=discriminator.parameters(),
                                  lr=learning_rate5)

    for epoch in range(num_epochs):

        # Running sums of (loss * batch_size); averaged over the dataset below.
        training_loss_G = 0.0
        training_loss_U = 0.0
        training_loss_S = 0.0
        training_loss_V = 0.0
        training_loss_E0 = 0.0
        training_loss_D = 0.0

        # Discriminator training
        for _ in range(5):
            for i, inputs in enumerate(data_loader):

                X = inputs[0].to(CUDA_DEVICES)
                optimizerD.zero_grad()

                # Noise with the same (batch, seq_len, dim) shape as X.
                z_batch_size, z_seq_len, z_dim = X.shape
                Z = random_generator(z_batch_size, z_seq_len, z_dim)
                Z = Z.to(CUDA_DEVICES)

                E_hat = generator(Z, None)
                Y_fake_e = discriminator(E_hat, None)
                H_hat = supervisor(E_hat, None)
                Y_fake = discriminator(H_hat, None)

                H = embedder(X, None)
                Y_real = discriminator(H, None)

                lossD = Dloss_criterion(Y_real, Y_fake, Y_fake_e)

                # Train discriminator (only when the discriminator does not work well)
                # NOTE(review): training_loss_D only accumulates on batches where
                # an update actually happened, so the printed d_loss understates
                # the true average once the discriminator becomes strong.
                if lossD > 0.15:
                    lossD.backward()
                    optimizerD.step()
                    training_loss_D += lossD.item() * X.size(0)

        # Generator training (twice more than discriminator training)
        # NOTE(review): despite the comment, this loop runs once while the
        # discriminator loop above runs five times -- confirm intended ratio.
        for _ in range(1):
            for inputs in data_loader:

                X = inputs[0].to(CUDA_DEVICES)

                optimizerG.zero_grad()
                optimizerE.zero_grad()

                # Train generator
                z_batch_size, z_seq_len, z_dim = X.shape
                Z = random_generator(z_batch_size, z_seq_len, z_dim)
                Z = Z.to(CUDA_DEVICES)

                E_hat = generator(Z, None)
                H_hat = supervisor(E_hat, None)
                Y_fake = discriminator(H_hat, None)
                Y_fake_e = discriminator(E_hat, None)
                H = embedder(X, None)
                X_tilde = recovery(H, None)
                H_hat_supervise = supervisor(H, None)
                X_hat = recovery(H_hat, None)

                # Supervised term compares H at t+1 with the supervisor's
                # one-step-ahead prediction from H at t.
                lossG, loss_U, loss_S, loss_V = Gloss_criterion(
                    Y_fake, Y_fake_e, H[:, 1:, :], H_hat_supervise[:, :-1, :],
                    X, X_hat)

                # NOTE(review): backward() also populates grads on embedder,
                # recovery and discriminator, but only generator/supervisor are
                # stepped here; the stale grads are discarded by the zero_grad()
                # calls at the start of the next pass -- verify this is intended.
                lossG.backward()
                optimizerG.step()

                training_loss_G += lossG.item() * X.size(0)
                training_loss_U += loss_U.item() * X.size(0)
                training_loss_S += loss_S.item() * X.size(0)
                training_loss_V += loss_V.item() * X.size(0)

                # Train embedder

                # Recompute activations after the generator step so the
                # embedder update uses a fresh graph.
                H = embedder(X, None)
                X_tilde = recovery(H, None)
                H_hat_supervise = supervisor(H, None)

                lossE, lossE_0 = Eloss_criterion(X_tilde, X, H[:, 1:, :],
                                                 H_hat_supervise[:, :-1, :])

                lossE.backward()
                optimizerE.step()

                training_loss_E0 += lossE_0.item() * X.size(0)

        # # Discriminator training
        # for i, inputs in enumerate(data_loader):

        #   X = inputs[0].to(CUDA_DEVICES)
        #   optimizerD.zero_grad()

        #   z_batch_size, z_seq_len, z_dim = X.shape
        #   Z = random_generator(z_batch_size, z_seq_len, z_dim)
        #   Z = Z.to(CUDA_DEVICES)

        #   E_hat = generator(Z, None)
        #   Y_fake_e = discriminator(E_hat, None)
        #   H_hat = supervisor(E_hat, None)
        #   Y_fake = discriminator(H_hat, None)

        #   H = embedder(X, None)
        #   Y_real = discriminator(H, None)

        #   lossD = Dloss_criterion(Y_real, Y_fake, Y_fake_e)

        #   # Train discriminator (only when the discriminator does not work well)
        #   if lossD > 0.15:
        #     lossD.backward()
        #     optimizerD.step()
        #     training_loss_D += lossD.item() * X.size(0)

        # NOTE(review): origin of the 0.5 scaling factor is unclear -- verify.
        training_loss_G = 0.5 * (training_loss_G / len(data_set))
        training_loss_U = 0.5 * (training_loss_U / len(data_set))
        training_loss_S = 0.5 * (training_loss_S / len(data_set))
        training_loss_V = 0.5 * (training_loss_V / len(data_set))
        training_loss_E0 = 0.5 * (training_loss_E0 / len(data_set))
        training_loss_D = training_loss_D / len(data_set)

        # Print multiple checkpoints
        # NOTE(review): np.round(num_epochs / 5) is 0.0 when num_epochs < 5,
        # making this modulo raise ZeroDivisionError.
        if epoch % (np.round(num_epochs / 5)) == 0:
            print('step: ' + str(epoch) + '/' + str(num_epochs) +
                  ', d_loss: ' + str(np.round(training_loss_D, 4)) +
                  ', g_loss_u: ' + str(np.round(training_loss_U, 4)) +
                  ', g_loss_s: ' + str(np.round(np.sqrt(training_loss_S), 4)) +
                  ', g_loss_v: ' + str(np.round(training_loss_V, 4)) +
                  ', e_loss_t0: ' +
                  str(np.round(np.sqrt(training_loss_E0), 4)))

            epoch_embedder_name = str(epoch) + "_" + embedder_name
            epoch_recovery_name = str(epoch) + "_" + recovery_name
            epoch_generator_name = str(epoch) + "_" + generator_name
            epoch_supervisor_name = str(epoch) + "_" + supervisor_name
            epoch_discriminator_name = str(epoch) + "_" + discriminator_name

            # save model
            today = date.today()
            save_time = today.strftime("%d_%m_%Y")
            output_dir = config.get(
                'train', 'model_path') + '/' + save_time + '/' + config.get(
                    'train', 'classification_dir') + '/'
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            # Whole-module serialization (not state_dict): loading these files
            # requires the original class definitions to be importable.
            torch.save(embedder, f'{output_dir+epoch_embedder_name}')
            torch.save(recovery, f'{output_dir+epoch_recovery_name}')
            torch.save(generator, f'{output_dir+epoch_generator_name}')
            torch.save(supervisor, f'{output_dir+epoch_supervisor_name}')
            torch.save(discriminator, f'{output_dir+epoch_discriminator_name}')

    print('Finish Joint Training')
# ---- Exemple #4 (separator from the scraped source page; original score: 0) ----
def timegan(ori_data, parameters):
    """TimeGAN function (TensorFlow 1.x graph/session API).

    Use original data as training set to generater synthetic data (time-series)

    Args:
      - ori_data: original time-series data
      - parameters: TimeGAN network parameters

    Returns:
      - generated_data: generated time-series data
    """
    # Initialization on the Graph
    tf.reset_default_graph()

    # Basic Parameters
    no, seq_len, dim = np.asarray(ori_data).shape

    # Maximum sequence length and each sequence length
    ori_time, max_seq_len = extract_time(ori_data)

    def MinMaxScaler(data):
        """Min-Max Normalizer.

        Args:
          - data: raw data

        Returns:
          - norm_data: normalized data
          - min_val: minimum values (for renormalization)
          - max_val: maximum values (for renormalization)
        """
        min_val = np.min(np.min(data, axis=0), axis=0)
        data = data - min_val

        max_val = np.max(np.max(data, axis=0), axis=0)
        norm_data = data / (max_val + 1e-7)

        return norm_data, min_val, max_val

    # Normalization
    ori_data, min_val, max_val = MinMaxScaler(ori_data)

    # Build a RNN networks

    # Network Parameters
    hidden_dim = parameters["hidden_dim"]
    num_layers = parameters["num_layer"]
    iterations = parameters["iterations"]
    batch_size = parameters["batch_size"]
    module_name = parameters["module"]
    z_dim = dim
    gamma = 1

    # Input place holders
    X = tf.placeholder(tf.float32, [None, max_seq_len, dim], name="myinput_x")
    Z = tf.placeholder(tf.float32, [None, max_seq_len, z_dim],
                       name="myinput_z")
    T = tf.placeholder(tf.int32, [None], name="myinput_t")

    def embedder(X, T):
        """Embedding network between original feature space to latent space.

        Args:
          - X: input time-series features
          - T: input time information

        Returns:
          - H: embeddings
        """
        with tf.variable_scope("embedder", reuse=tf.AUTO_REUSE):
            e_cell = tf.nn.rnn_cell.MultiRNNCell(
                [rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])
            e_outputs, e_last_states = tf.nn.dynamic_rnn(e_cell,
                                                         X,
                                                         dtype=tf.float32,
                                                         sequence_length=T)
            H = tf.contrib.layers.fully_connected(e_outputs,
                                                  hidden_dim,
                                                  activation_fn=tf.nn.sigmoid)
        return H

    def recovery(H, T):
        """Recovery network from latent space to original space.

        Args:
          - H: latent representation
          - T: input time information

        Returns:
          - X_tilde: recovered data
        """
        with tf.variable_scope("recovery", reuse=tf.AUTO_REUSE):
            r_cell = tf.nn.rnn_cell.MultiRNNCell(
                [rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])
            r_outputs, r_last_states = tf.nn.dynamic_rnn(r_cell,
                                                         H,
                                                         dtype=tf.float32,
                                                         sequence_length=T)
            X_tilde = tf.contrib.layers.fully_connected(
                r_outputs, dim, activation_fn=tf.nn.sigmoid)
        return X_tilde

    def generator(Z, T):
        """Generator function: Generate time-series data in latent space.

        Args:
          - Z: random variables
          - T: input time information

        Returns:
          - E: generated embedding
        """
        with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
            e_cell = tf.nn.rnn_cell.MultiRNNCell(
                [rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])
            e_outputs, e_last_states = tf.nn.dynamic_rnn(e_cell,
                                                         Z,
                                                         dtype=tf.float32,
                                                         sequence_length=T)
            E = tf.contrib.layers.fully_connected(e_outputs,
                                                  hidden_dim,
                                                  activation_fn=tf.nn.sigmoid)
        return E

    def supervisor(H, T):
        """Generate next sequence using the previous sequence.

        Args:
          - H: latent representation
          - T: input time information

        Returns:
          - S: generated sequence based on the latent representations generated by the generator
        """
        with tf.variable_scope("supervisor", reuse=tf.AUTO_REUSE):
            # One layer fewer than the other networks (per the TimeGAN design).
            e_cell = tf.nn.rnn_cell.MultiRNNCell([
                rnn_cell(module_name, hidden_dim)
                for _ in range(num_layers - 1)
            ])
            e_outputs, e_last_states = tf.nn.dynamic_rnn(e_cell,
                                                         H,
                                                         dtype=tf.float32,
                                                         sequence_length=T)
            S = tf.contrib.layers.fully_connected(e_outputs,
                                                  hidden_dim,
                                                  activation_fn=tf.nn.sigmoid)
        return S

    def discriminator(H, T):
        """Discriminate the original and synthetic time-series data.

        Args:
          - H: latent representation
          - T: input time information

        Returns:
          - Y_hat: classification results between original and synthetic time-series
        """
        with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
            d_cell = tf.nn.rnn_cell.MultiRNNCell(
                [rnn_cell(module_name, hidden_dim) for _ in range(num_layers)])
            d_outputs, d_last_states = tf.nn.dynamic_rnn(d_cell,
                                                         H,
                                                         dtype=tf.float32,
                                                         sequence_length=T)
            Y_hat = tf.contrib.layers.fully_connected(d_outputs,
                                                      1,
                                                      activation_fn=None)
        return Y_hat

    # Embedder & Recovery
    H = embedder(X, T)
    X_tilde = recovery(H, T)

    # Generator
    E_hat = generator(Z, T)
    H_hat = supervisor(E_hat, T)
    H_hat_supervise = supervisor(H, T)

    # Synthetic data
    X_hat = recovery(H_hat, T)

    # Discriminator
    Y_fake = discriminator(H_hat, T)
    Y_real = discriminator(H, T)
    Y_fake_e = discriminator(E_hat, T)

    # Variables
    e_vars = [
        v for v in tf.trainable_variables() if v.name.startswith("embedder")
    ]
    r_vars = [
        v for v in tf.trainable_variables() if v.name.startswith("recovery")
    ]
    g_vars = [
        v for v in tf.trainable_variables() if v.name.startswith("generator")
    ]
    s_vars = [
        v for v in tf.trainable_variables() if v.name.startswith("supervisor")
    ]
    d_vars = [
        v for v in tf.trainable_variables()
        if v.name.startswith("discriminator")
    ]

    # Discriminator loss
    D_loss_real = tf.losses.sigmoid_cross_entropy(tf.ones_like(Y_real), Y_real)
    D_loss_fake = tf.losses.sigmoid_cross_entropy(tf.zeros_like(Y_fake),
                                                  Y_fake)
    D_loss_fake_e = tf.losses.sigmoid_cross_entropy(tf.zeros_like(Y_fake_e),
                                                    Y_fake_e)
    D_loss = D_loss_real + D_loss_fake + gamma * D_loss_fake_e

    # Generator loss
    # 1. Adversarial loss
    G_loss_U = tf.losses.sigmoid_cross_entropy(tf.ones_like(Y_fake), Y_fake)
    G_loss_U_e = tf.losses.sigmoid_cross_entropy(tf.ones_like(Y_fake_e),
                                                 Y_fake_e)

    # 2. Supervised loss
    G_loss_S = tf.losses.mean_squared_error(H[:, 1:, :],
                                            H_hat_supervise[:, :-1, :])

    # 3. Two Momments
    G_loss_V1 = tf.reduce_mean(
        tf.abs(
            tf.sqrt(tf.nn.moments(X_hat, [0])[1] + 1e-6) -
            tf.sqrt(tf.nn.moments(X, [0])[1] + 1e-6)))
    G_loss_V2 = tf.reduce_mean(
        tf.abs((tf.nn.moments(X_hat, [0])[0]) - (tf.nn.moments(X, [0])[0])))

    G_loss_V = G_loss_V1 + G_loss_V2

    # 4. Summation
    G_loss = G_loss_U + gamma * G_loss_U_e + 100 * tf.sqrt(
        G_loss_S) + 100 * G_loss_V

    # Embedder network loss
    E_loss_T0 = tf.losses.mean_squared_error(X, X_tilde)
    E_loss0 = 10 * tf.sqrt(E_loss_T0)
    E_loss = E_loss0 + 0.1 * G_loss_S

    # optimizer
    E0_solver = tf.train.AdamOptimizer().minimize(E_loss0,
                                                  var_list=e_vars + r_vars)
    E_solver = tf.train.AdamOptimizer().minimize(E_loss,
                                                 var_list=e_vars + r_vars)
    D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=d_vars)
    G_solver = tf.train.AdamOptimizer().minimize(G_loss,
                                                 var_list=g_vars + s_vars)
    GS_solver = tf.train.AdamOptimizer().minimize(G_loss_S,
                                                  var_list=g_vars + s_vars)

    # TimeGAN training
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # 1. Embedding network training
    print("Start Embedding Network Training")

    for itt in range(iterations):
        # Set mini-batch
        X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
        # Train embedder
        _, step_e_loss = sess.run([E0_solver, E_loss_T0],
                                  feed_dict={
                                      X: X_mb,
                                      T: T_mb
                                  })
        # Checkpoint
        if itt % 1000 == 0:
            print("step: " + str(itt) + "/" + str(iterations) + ", e_loss: " +
                  str(np.round(np.sqrt(step_e_loss), 4)))

    print("Finish Embedding Network Training")

    # 2. Training only with supervised loss
    print("Start Training with Supervised Loss Only")

    for itt in range(iterations):
        # Set mini-batch
        X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
        # Random vector generation
        Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
        # Train generator
        _, step_g_loss_s = sess.run([GS_solver, G_loss_S],
                                    feed_dict={
                                        Z: Z_mb,
                                        X: X_mb,
                                        T: T_mb
                                    })
        # Checkpoint
        if itt % 1000 == 0:
            print("step: " + str(itt) + "/" + str(iterations) + ", s_loss: " +
                  str(np.round(np.sqrt(step_g_loss_s), 4)))

    print("Finish Training with Supervised Loss Only")

    # 3. Joint Training
    print("Start Joint Training")

    for itt in range(iterations):
        # Generator training (twice more than discriminator training)
        for kk in range(2):
            # Set mini-batch
            X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
            # Random vector generation
            Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
            # Train generator
            _, step_g_loss_u, step_g_loss_s, step_g_loss_v = sess.run(
                [G_solver, G_loss_U, G_loss_S, G_loss_V],
                feed_dict={
                    Z: Z_mb,
                    X: X_mb,
                    T: T_mb
                },
            )
            # Train embedder
            _, step_e_loss_t0 = sess.run([E_solver, E_loss_T0],
                                         feed_dict={
                                             Z: Z_mb,
                                             X: X_mb,
                                             T: T_mb
                                         })

        # Discriminator training
        # Set mini-batch
        X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
        # Random vector generation
        Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
        # Check discriminator loss before updating
        check_d_loss = sess.run(D_loss, feed_dict={X: X_mb, T: T_mb, Z: Z_mb})
        # BUGFIX: step_d_loss was previously only assigned inside the branch
        # below, so the checkpoint print raised NameError on any iteration
        # (e.g. itt == 0) where the update was skipped. Record the measured
        # loss unconditionally; it is refreshed if the solver runs.
        step_d_loss = check_d_loss
        # Train discriminator (only when the discriminator does not work well)
        if check_d_loss > 0.15:
            _, step_d_loss = sess.run([D_solver, D_loss],
                                      feed_dict={
                                          X: X_mb,
                                          T: T_mb,
                                          Z: Z_mb
                                      })

        # Print multiple checkpoints
        if itt % 1000 == 0:
            print("step: " + str(itt) + "/" + str(iterations) + ", d_loss: " +
                  str(np.round(step_d_loss, 4)) + ", g_loss_u: " +
                  str(np.round(step_g_loss_u, 4)) + ", g_loss_s: " +
                  str(np.round(np.sqrt(step_g_loss_s), 4)) + ", g_loss_v: " +
                  str(np.round(step_g_loss_v, 4)) + ", e_loss_t0: " +
                  str(np.round(np.sqrt(step_e_loss_t0), 4)))
    print("Finish Joint Training")

    # Synthetic data generation
    Z_mb = random_generator(no, z_dim, ori_time, max_seq_len)
    generated_data_curr = sess.run(X_hat,
                                   feed_dict={
                                       Z: Z_mb,
                                       X: ori_data,
                                       T: ori_time
                                   })

    generated_data = list()

    # NOTE(review): each trimmed sequence is appended 50 times, replicating
    # the synthetic set 50x versus the stock TimeGAN implementation -- looks
    # deliberate (dataset amplification) but verify against the caller.
    for _ in range(50):
        for i in range(no):
            temp = generated_data_curr[i, :ori_time[i], :]
            generated_data.append(temp)

    # Renormalization
    generated_data = generated_data * max_val
    generated_data = generated_data + min_val

    return generated_data
# ---- Exemple #5 (separator from the scraped source page; original score: 0) ----
    def train():
        """Train a TimeGAN variant with both temporal and static features,
        then synthesize data.

        Phases: (1a) static embedder, (1b) temporal embedder, (2) supervised
        generator pre-training, (3) joint adversarial training of both the
        temporal and static branches.  Finally generates `no` sequences,
        renormalizes them and concatenates the (repeated) static features
        along the feature axis.

        NOTE(review): closes over names from an enclosing scope not visible
        here (iterations, ori_data, ori_data_static, the train_step_*
        functions, generator/supervisor/recovery models, max_val/min_val,
        etc.) -- verify against the full source file.
        """
        #1. Embedding static network training
        print('Start Static Embedding Network Training')

        for itt in range(iterations):
            # Set mini-batch
            _, X_mb_static, _ = batch_generator_with_static(
                ori_data, ori_data_static, ori_time, batch_size)

            # Train embedder
            step_e_loss = train_step_embedder_static(X_mb_static)

            # Checkpoint
            if itt % 1000 == 0:
                print('step: ' + str(itt) + '/' + str(iterations) +
                      ', e_loss: ' + str(np.round(np.sqrt(step_e_loss), 4)))

        print('Finish static Embedding Network Training')

        #1. Embedding network training

        print('Start Embedding Network Training')
        for itt in range(iterations):
            # Set mini-batch
            X_mb, _, T_mb = batch_generator_with_static(
                ori_data, ori_data_static, ori_time, batch_size)
            # Train embedder
            step_e_loss = train_step_embedder(X_mb)

            # Checkpoint
            if itt % 1000 == 0:
                print('step: ' + str(itt) + '/' + str(iterations) +
                      ', e_loss: ' + str(np.round(np.sqrt(step_e_loss), 4)))

        #2. Training only with supervised loss
        print('Start Training with Supervised Loss Only')

        for itt in range(iterations):
            # Set mini-batch
            X_mb, X_mb_static, T_mb = batch_generator_with_static(
                ori_data, ori_data_static, ori_time, batch_size)
            # Train generator
            step_gen_s_loss = train_step_generator_s(X_mb, X_mb_static)

            # Checkpoint
            if itt % 1000 == 0:
                print('step: ' + str(itt) + '/' + str(iterations) +
                      ', s_loss: ' +
                      str(np.round(np.sqrt(step_gen_s_loss), 4)))

        print('Finish Training with Supervised Loss Only')

        # 3. Joint Training
        print('Start Joint Training')

        for itt in range(iterations):

            # Generator training (twice more than discriminator training)
            for kk in range(2):
                # Set mini-batch
                X_mb, X_mb_static, T_mb = batch_generator_with_static(
                    ori_data, ori_data_static, ori_time, batch_size)
                # Random vector generation
                Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
                Z_mb_static = random_generator_static(batch_size, z_dim_static,
                                                      T_mb, max_seq_len)
                # Train generator and embedder
                emb_T0_loss, emb_loss, g_loss_u, gen_s_loss, g_loss_v, emb_T0_loss_static, g_loss_v_static = train_step_joint_both(
                    X_mb, X_mb_static, Z_mb, Z_mb_static)

            # Discriminator training
            # Set mini-batch
            X_mb, X_mb_static, T_mb = batch_generator_with_static(
                ori_data, ori_data_static, ori_time, batch_size)
            # Random vector generation
            Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
            Z_mb_static = random_generator_static(batch_size, z_dim_static,
                                                  T_mb, max_seq_len)
            #train discriminator
            d_loss, d_loss_static = train_step_discriminator_both(
                X_mb, X_mb_static, Z_mb, Z_mb_static)

            # Print multiple checkpoints
            if itt % 200 == 0:
                print('step: ' + str(itt) + '/' + str(iterations) +
                      ', d_loss: ' + str(np.round(d_loss, 4)) +
                      ', g_loss_u: ' + str(np.round(g_loss_u, 4)) +
                      ', g_loss_s: ' + str(np.round(np.sqrt(gen_s_loss), 4)) +
                      ', g_loss_v: ' + str(np.round(g_loss_v, 4)) +
                      ', e_loss_t0: ' +
                      str(np.round(np.sqrt(emb_T0_loss), 4)) +
                      ', d_loss_static: ' + str(np.round(d_loss_static, 4)) +
                      ', g_loss_v_static: ' +
                      str(np.round(g_loss_v_static, 4)))

        print('Finish Joint Training')

        ## Synthetic data generation
        Z_mb = random_generator(no, z_dim, ori_time, max_seq_len)
        Z_mb_static = random_generator_static(no, z_dim_static, ori_time,
                                              max_seq_len)

        # generate in latent dim
        E_hat_generated = generator_model(Z_mb)
        E_hat_generated_static = generator_model_static(Z_mb_static)

        # repeat for seq_len for static values
        E_hat_generated_static_ = tf.expand_dims(E_hat_generated_static,
                                                 axis=1)
        E_hat_generated_static_ = tf.repeat(E_hat_generated_static_,
                                            seq_len,
                                            axis=1)

        # join static and temporal together
        E_hat_generated_mix = tf.concat(
            [E_hat_generated, E_hat_generated_static_], axis=2)

        H_hat_generated = supervisor_model(E_hat_generated_mix)

        # map up to original dimension
        generated_data_curr = recovery_model(H_hat_generated)
        generated_data_curr_static = recovery_model_static(
            E_hat_generated_static)

        generated_data_static = list()

        for i in range(no):
            temp = generated_data_curr_static[i, :]
            generated_data_static.append(temp)

        # Renormalization
        generated_data_static = generated_data_static * max_val_static
        generated_data_static = generated_data_static + min_val_static

        # Tile the static features across the sequence length so they can be
        # stacked with the temporal features below.
        generated_data_seq = np.array(
            [[generated_data_static[i] for _ in range(seq_len)]
             for i in range(no)])

        generated_data = list()

        # Trim each generated sequence back to its original length.
        for i in range(no):
            temp = generated_data_curr[i, :ori_time[i], :]
            generated_data.append(temp)

        # Renormalization

        generated_data = generated_data * max_val
        generated_data = generated_data + min_val

        # Stack temporal and (repeated) static features depth-wise.
        generated_data = np.dstack((generated_data, generated_data_seq))

        return generated_data
# ---- Exemple #6 (separator from the scraped source page; original score: 0) ----
# Normalize real images to be between -1 and 1 for the discriminator.
#MAXVAL = 2**14 - 1
max_val = np.amax(real_images, axis=(1, 2), keepdims=True)
scaled_real = real_images / max_val
real_images = scaled_real - np.mean(scaled_real, axis=(1, 2), keepdims=True)

if args.augment:  # generate more training data through augmentation
    synthetic_images = utils.augment_images(synthetic_images)
    real_images = utils.augment_images(real_images)

# Crop-based alternatives kept for reference:
#synth_crops = utils.random_crop_generator(synthetic_images, args.crop_size, args.batch_size)
#real_crops = utils.random_crop_generator(real_images, args.crop_size, args.batch_size)
#synth_crops = utils.center_crop_generator(synthetic_images, args.crop_size, args.batch_size)
#real_crops = utils.center_crop_generator(real_images, args.crop_size, args.batch_size)
synth_crops = utils.random_generator(synthetic_images, args.batch_size)
real_crops = utils.random_generator(real_images, args.batch_size)

# Select optimizer to use during training.
optimizer_choice = args.optimizer.lower()
if optimizer_choice == 'sgd':
    optimizer = SGD(args.lr, momentum=0.9)
elif optimizer_choice == 'adam':
    optimizer = Adam(args.lr, amsgrad=True)
else:
    raise ValueError('Not a valid optimizer. Choose between SGD or Adam.')

# build SimGAN model from the full (uncropped) image dimensions
#sim_gan = SimGAN(input_shape=(args.crop_size, args.crop_size, 1),
height, width = real_images.shape[1], real_images.shape[2]
# NOTE(review): this SimGAN(...) call is truncated in this chunk — its
# remaining keyword arguments continue past the visible region.
sim_gan = SimGAN(input_shape=(height, width, 1),
                 optimizer=optimizer,
 def __init__(self):
     """Initialize the instance and assign a randomly generated key."""
     super(MetaCreate, self).__init__()
     # NOTE(review): random_generator() is called with no arguments here,
     # unlike the sequence-noise helpers elsewhere in this file — confirm
     # which random_generator is in scope for this class.
     self.key = random_generator()
    def train():
        """Run the three-phase TimeGAN training loop, then synthesize data.

        Phase 1 trains the embedding network alone, phase 2 trains the
        generator with the supervised loss only, and phase 3 jointly trains
        generator, embedder and discriminator.  Afterwards, synthetic
        sequences are generated from random noise and renormalized back to
        the original data scale.

        Relies on names from the enclosing scope: ``iterations``,
        ``batch_generator``, ``ori_data``, ``ori_time``, ``batch_size``,
        ``seq_len``, ``dim``, ``z_dim``, ``max_seq_len``, ``no``,
        ``min_val``/``max_val`` and the ``train_step_*`` closures.

        Returns:
            Generated data: one sequence per original sample, trimmed to
            that sample's length and renormalized.
        """
        # 1. Embedding network training
        print('Start Embedding Network Training')

        for itt in range(iterations):
            # Set mini-batch
            X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
            # NOTE(review): assumes batch_generator returns an array-like
            # supporting .reshape — confirm against its definition.
            X_mb = X_mb.reshape(batch_size, seq_len, dim)

            # Train embedder
            step_e_loss = train_step_embedder(X_mb)

            # Checkpoint
            if itt % 1000 == 0:
                print('step: ' + str(itt) + '/' + str(iterations) +
                      ', e_loss: ' + str(np.round(np.sqrt(step_e_loss), 4)))
        print('Finish Embedding Network Training')

        # 2. Training only with supervised loss
        print('Start Training with Supervised Loss Only')

        for itt in range(iterations):
            # Set mini-batch
            X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
            # Random vector generation
            Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
            # Train generator
            step_g_loss_s = train_step_supervised(X_mb, Z_mb)
            # Checkpoint
            if itt % 1000 == 0:
                print('step: ' + str(itt) + '/' + str(iterations) +
                      ', s_loss: ' + str(np.round(np.sqrt(step_g_loss_s), 4)))

        print('Finish Training with Supervised Loss Only')

        # 3. Joint Training
        print('Start Joint Training')

        for itt in range(iterations):
            # Generator training (twice more than discriminator training)
            for kk in range(2):
                # Set mini-batch
                X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
                # Random vector generation
                Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)
                # Train generator
                step_g_loss_u, step_g_loss_s, step_g_loss_v = train_step_generator(
                    X_mb, Z_mb)
                # Train embedder
                step_e_loss_t0 = train_step_joint_embed(X_mb, Z_mb)

            # Discriminator training
            # Set mini-batch
            X_mb, T_mb = batch_generator(ori_data, ori_time, batch_size)
            # Random vector generation
            Z_mb = random_generator(batch_size, z_dim, T_mb, max_seq_len)

            step_d_loss = train_step_discriminator(X_mb, Z_mb)

            # Print multiple checkpoints
            if itt % 1000 == 0:
                print('step: ' + str(itt) + '/' + str(iterations) +
                      ', d_loss: ' + str(np.round(step_d_loss, 4)) +
                      ', g_loss_u: ' + str(np.round(step_g_loss_u, 4)) +
                      ', g_loss_s: ' +
                      str(np.round(np.sqrt(step_g_loss_s), 4)) +
                      ', g_loss_v: ' + str(np.round(step_g_loss_v, 4)) +
                      ', e_loss_t0: ' +
                      str(np.round(np.sqrt(step_e_loss_t0), 4)))
        print('Finish Joint Training')

        ## Synthetic data generation
        Z_mb = random_generator(no, z_dim, ori_time, max_seq_len)

        # NOTE(review): other snippets name these generator_model /
        # supervisor_model / recovery_model — confirm which are in scope.
        generated = generator(Z_mb)
        supervised = supervisor(generated)
        generated_data_curr = recovery(supervised)

        generated_data = list()

        # Trim each generated sequence to its original sample length.
        for i in range(no):
            temp = generated_data_curr[i, :ori_time[i], :]
            generated_data.append(temp)

        # Renormalization: undo the training-time min-max scaling.
        # NOTE(review): list * ndarray relies on implicit NumPy conversion;
        # works only when all trimmed sequences share a shape — confirm.
        generated_data = generated_data * max_val
        generated_data = generated_data + min_val

        return generated_data