Example 1
import time
import numpy as np
from random import shuffle

def __readfile(self, names, c_size, m_size, labels, shuffle_ind, flag):
    N = len(names)
    # each .npy file stores a pickled dict; `[()]` unwraps the 0-d object array
    data_temp = np.load(names[0], allow_pickle=True)
    GRID_IN = data_temp[()][labels[0]][...]
    Y_LABEL = data_temp[()][labels[1]][..., None]
    # second target: sign-flipped copy of input channel 1
    Y2 = np.copy(-1 * GRID_IN[..., 1][..., None])
    # permute the selected channels across the sample axis (200 presumably matches the per-file sample count)
    rnd_ind = du.shuffle_ind(200)
    GRID_IN[..., shuffle_ind] = GRID_IN[rnd_ind, ..., shuffle_ind]
    return [GRID_IN], [Y_LABEL, Y2]
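
Both this method and the training loops below rely on du.shuffle_ind, which is not shown in any of the examples. A minimal sketch of what it plausibly does, assuming (from usage alone, not from the source) that it returns a permuted index array of length N:

import numpy as np

def shuffle_ind(N):
    # indices 0..N-1 in random order
    ind = np.arange(N)
    np.random.shuffle(ind)
    return ind

With this definition, du.shuffle_ind(batch_size)[:train_size] draws train_size distinct samples from a stored batch. The example then continues inside the training routine:
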
    train_size = 100  # number of samples drawn from each stored batch
    record = 999      # best validation loss so far; initialized to a large sentinel

    for i in range(epochs):
        print('epoch = {}'.format(i))
        start_time = time.time()
        shuffle(trainfile64)
        shuffle(trainfile96)
        trainfiles = trainfile64[:160] + trainfile96[:160]  # 160 files from each pool
        # shuffling at epoch begin
        shuffle(trainfiles)
        # loop over batches
        for j, name in enumerate(trainfiles):

            # ----- import batch data subset ----- #
            inds = du.shuffle_ind(batch_size)[:train_size]
            temp_batch = np.load(name, allow_pickle=True)[()]
            X = temp_batch['batch'][inds, ...]
            # ------------------------------------ #

            # ----- D training ----- #
            # Latent space sampling
            Wf = np.random.normal(0.0, 1.0, size=[train_size, latent_size])
            # soft labels with noise: fake targets ~0.1, real targets ~0.9
            dummy_bad = np.ones(train_size) * 0.1 + np.random.uniform(
                -0.02, 0.02, train_size)
            dummy_good = np.ones(train_size) * 0.9 + np.random.uniform(
                -0.02, 0.02, train_size)
            # get G output (channels-last)
            g_in = [Wf, X[..., input_flag]]  # [latent vectors, conditional inputs]
            g_out = G_style.predict(g_in)    # predict() returns an np.ndarray
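
Example 1 stops right after the generator forward pass. The discriminator update that would normally follow pairs g_out with the conditional inputs and trains against the soft labels built above; a minimal sketch, borrowing D and inout_flag from Example 4 (neither appears in this excerpt):

            # fake pairs: G output stacked with its conditional inputs (channels-last)
            d_in_fake = np.concatenate((g_out, X[..., input_flag]), axis=-1)
            d_in_true = X[..., inout_flag]
            D.trainable = True
            d_loss_fake = D.train_on_batch(d_in_fake, dummy_bad)   # soft "fake" targets ~0.1
            d_loss_true = D.train_on_batch(d_in_true, dummy_good)  # soft "real" targets ~0.9
            D.trainable = False  # re-freeze D before the combined generator step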
Example 3
    tol = 0           # counts epochs without validation improvement
    train_size = 100  # number of samples drawn from each stored batch
    for i in range(epochs):
        print('epoch = {}'.format(i))
        if i == 0:
            record = G.evaluate_generator(gen_valid, verbose=1)
            print('Initial validation loss: {}'.format(record))

        start_time = time.time()
        # learning rate schedule

        # shuffling at epoch begin
        shuffle(trainfiles)
        # loop over batches
        for j, name in enumerate(trainfiles):
            inds = du.shuffle_ind(batch_size)[:train_size]  # random subset of the batch
            # dynamic soft labels for a two-class (fake/real) softmax head;
            # keras.utils.to_categorical would truncate these floats to class 0,
            # so build the two-column soft targets explicitly (column 1 = "real")
            y_bad = np.ones(train_size) * 0.1 + np.random.uniform(
                -0.02, 0.02, train_size)
            y_good = np.ones(train_size) - y_bad
            dummy_bad = np.stack([y_good, y_bad], axis=-1)   # fake samples: ~[0.9, 0.1]
            dummy_good = np.stack([y_bad, y_good], axis=-1)  # real samples: ~[0.1, 0.9]

            # import batch data
            temp_batch = np.load(name, allow_pickle=True)[()]
            X = temp_batch['batch'][inds, ...]

            # get G output
            g_in = X[..., input_flag]
            g_out = G.predict([g_in])  # predict() returns an np.ndarray
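
Example 3 initializes record (best validation loss) and tol (a patience counter), but the excerpt ends before either is used. A plausible end-of-epoch check under those assumptions; max_tol is a hypothetical patience limit, not a name from the source:

        # end of epoch: validate, checkpoint on improvement, stop on stagnation
        record_temp = G.evaluate_generator(gen_valid, verbose=1)
        if record_temp < record:
            record = record_temp
            tol = 0
            G.save(model_path)
        else:
            tol += 1
            if tol >= max_tol:
                print('Early stopping at epoch {}'.format(i))
                break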
Example 4
            # import batch data
            temp_batch = np.load(name, allow_pickle=True)[()]
            X = temp_batch['batch']

            # D training: unfreeze D for its own update
            # (D is typically kept frozen inside the combined G+D model)
            D.trainable = True
            g_in = X[..., input_flag]
            g_out = G.predict([g_in])  # predict() returns an np.ndarray

            d_in_fake = np.concatenate((g_out, g_in), axis=-1)  # channel last
            d_in_true = X[..., inout_flag]
            if mix:
                d_in = np.concatenate((d_in_fake, d_in_true),
                                      axis=0)  # batch size doubled
                d_target = np.concatenate((dummy_bad, dummy_good), axis=0)
                d_shuffle_ind = du.shuffle_ind(2 * batch_size)
                d_loss1 = D.train_on_batch(d_in[d_shuffle_ind, ...],
                                           d_target[d_shuffle_ind, ...])
                d_loss2 = 0
            else:
                d_loss1 = D.train_on_batch(d_in_true, dummy_good)
                d_loss2 = D.train_on_batch(d_in_fake, dummy_bad)

            d_loss_sum = d_loss1 + d_loss2
            D_LOSS[i * L_train + j] = d_loss_sum
            # overwrite the on-disk loss history every step
            LOSS = {'D_LOSS': D_LOSS}
            np.save(hist_path, LOSS)

            if j % 50 == 0:
                print('\t{} step loss = {}'.format(j, d_loss_sum))
        print('Save to {}'.format(model_path))
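
Example 4 prints the save path but the excerpt cuts off before the save itself. The announced step is presumably a per-epoch checkpoint; a one-line sketch, assuming model_path names a Keras model file and that the generator is what gets written (the source does not show which model is saved):

        G.save(model_path)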