Example #1
def pretrain_phase2(model: ADEC,
                    seeds,
                    groups,
                    label,
                    batch_size,
                    epochs=1000,
                    save_interval=200,
                    save_path='./images',
                    early_stopping=False):
    n_epochs = tqdm.tqdm_notebook(range(epochs))
    total_batches = seeds.shape[0] // batch_size
    EARLY_STOPPING_THRESHOLD = 1e-4
    PATIENCE = 20
    last_ae_loss = 10e10
    p_count = 0
    groups_label = get_group_label(groups, label)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    epochs_for_disc = epochs // 3
    for epoch in n_epochs:
        offset = 0
        losses = []
        random_idx = np.random.randint(0, seeds.shape[0], seeds.shape[0])
        seeds_shuffle = seeds[random_idx, :]
        for batch_iter in range(total_batches):
            # Take sequential batches from the shuffled copy; the last batch
            # reuses the first batch_size samples so every batch is full-sized
            imgs = seeds_shuffle[offset:offset + batch_size, :] if (
                batch_iter <
                (total_batches - 1)) else seeds_shuffle[:batch_size, :]
            offset += batch_size

            loss = model.pretrain_on_batch_phase2(imgs, None)
            losses.append(loss)

        avg_loss = avg_losses(losses)
        wandb.log({'pretrain_phase2_losses': avg_loss})

        if epoch % save_interval == 0 or (epoch == epochs - 1):
            # Save the visualization of latent space
            latent = model.encoder.predict(seeds)
            latent_space_img = visualize_latent_space(
                latent,
                groups_label,
                10,
                is_save=True,
                save_path=f'{save_path}/latent_{epoch}.png')

            wandb.log({
                'latent_space':
                [wandb.Image(latent_space_img, caption="Latent space")]
            })

        if early_stopping:
            if last_ae_loss - avg_loss['res_ae_loss'] < EARLY_STOPPING_THRESHOLD:
                p_count += 1
                if p_count == PATIENCE:
                    print(f'No improvement after {PATIENCE} epochs. Stop!')
                    break  # Stop training
            else:
                p_count = 0  # Reset patience whenever the loss improves
            last_ae_loss = avg_loss['res_ae_loss']
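Every loop in these examples reduces the per-batch results with an avg_losses helper that is not included in the excerpts. Judging from how the result is used (indexed with keys such as 'res_ae_loss', 'ae_loss', 'd_loss'), it averages a list of loss dictionaries key-wise; a minimal sketch under that assumption:

import numpy as np

def avg_losses(losses):
    # Average a list of per-batch losses. Each element is assumed to be either
    # a dict of scalars (e.g. {'ae_loss': ..., 'd_loss': ...}) or a plain scalar.
    if not losses:
        return {}
    if isinstance(losses[0], dict):
        return {key: float(np.mean([batch[key] for batch in losses]))
                for key in losses[0]}
    return float(np.mean(losses))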
Example #2
def train(model: ACAI,
          x_train,
          y_train,
          x_test,
          batch_size,
          epochs=1000,
          save_interval=200,
          save_path='./images'):
    n_epochs = tqdm.tqdm_notebook(range(epochs))
    total_batches = x_train.shape[0] // batch_size
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    for epoch in n_epochs:
        offset = 0
        losses = []
        random_idx = np.random.randint(0, x_train.shape[0], x_train.shape[0])
        x_train_shuffle = x_train[random_idx, :]
        for batch_iter in range(total_batches):
            # Take sequential batches from the shuffled copy;
            # the last batch takes whatever samples remain
            imgs = x_train_shuffle[offset:offset + batch_size, :] if (
                batch_iter < (total_batches - 1)) else x_train_shuffle[offset:, :]
            offset += batch_size

            loss = train_on_batch(imgs, None, model)
            losses.append(loss)

        avg_loss = avg_losses(losses)
        # wandb.log({'losses': avg_loss})

        if epoch % save_interval == 0 or (epoch == epochs - 1):
            sampled_imgs = model.autoencoder(x_test[:100])
            res_img = make_image_grid(sampled_imgs.numpy(), (28, 28),
                                      str(epoch), save_path)

            latent = model.encoder(x_train, training=False).numpy()
            latent_space_img = visualize_latent_space(
                latent,
                y_train,
                10,
                is_save=True,
                save_path=f'./latent_space/{epoch}.png')
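make_image_grid is likewise not defined in these excerpts; from the call sites it receives a batch of decoded images, the target image shape, a name, and an output directory. A rough sketch assuming a square-ish grid saved with matplotlib (the file-naming pattern is a guess):

import math
import os

import matplotlib.pyplot as plt
import numpy as np

def make_image_grid(images, img_shape, name, save_path):
    # Tile a batch of images into a grid and save it as a grayscale PNG.
    images = np.asarray(images).reshape((-1,) + tuple(img_shape))
    n = images.shape[0]
    rows = int(math.ceil(math.sqrt(n)))
    cols = int(math.ceil(n / rows))
    grid = np.zeros((rows * img_shape[0], cols * img_shape[1]))
    for i, img in enumerate(images):
        r, c = divmod(i, cols)
        grid[r * img_shape[0]:(r + 1) * img_shape[0],
             c * img_shape[1]:(c + 1) * img_shape[1]] = img
    os.makedirs(save_path, exist_ok=True)
    plt.imsave(os.path.join(save_path, f'{name}.png'), grid, cmap='gray')
    return grid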
Example #3
def interaction_process():
    # ... environment rollout that collects `obs` and `rewards` (omitted in this excerpt) ...
    obs = tf.convert_to_tensor(obs, dtype=tf.float32)
    rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
    rewards = discount_rewards(rewards)
    return obs, rewards


def train():
    obs, rewards = interaction_process()
    with tf.GradientTape() as tape:
        # REINFORCE objective: log-probabilities of taken actions weighted by discounted rewards
        log_prob = tf.transpose(tf.math.log(model(obs)[1])) * rewards
        loss = -tf.reduce_sum(log_prob)
    gradient = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradient, model.trainable_variables))
    return loss.numpy(), rewards


if __name__ == '__main__':
    Epochs = 10000
    losses = []
    for epoch in range(Epochs):
        loss, rewards = train()
        losses.append(loss)
        print('epochs:{}, loss:{}'.format(epoch, loss))
        if epoch % 1000 == 0:
            plot_df = df[price_col].copy()  # copy so the added column doesn't touch df
            portfolio = [10000]
            portfolio.extend(rewards)
            portfolio.append(np.nan)
            plot_df.loc[:, 'rewards'] = portfolio
            plot_df.to_csv('./data_for_analysis/%s_df.csv' % epoch)
    model.save('./model/policy_gradient.h5')
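interaction_process above depends on a discount_rewards function that is not shown. In a typical REINFORCE setup it replaces each reward with the discounted sum of rewards from that step onward; a sketch under that assumption (gamma is a hypothetical hyperparameter, and many implementations also standardize the result):

import numpy as np

def discount_rewards(rewards, gamma=0.99):
    # Replace each reward with the discounted sum of all rewards from that step onward.
    rewards = np.asarray(rewards, dtype=np.float32)
    discounted = np.zeros_like(rewards)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        discounted[t] = running
    return discounted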
Example #4
def cluster(model: ADEC,
            seeds,
            groups,
            label,
            batch_size,
            epochs=1000,
            save_interval=200,
            save_path='./images'):
    n_epochs = tqdm.tqdm_notebook(range(epochs))
    total_batches = seeds.shape[0] // batch_size
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    groups_label = get_group_label(groups, label)

    # Using kmeans result to initialize clusters for model
    latent = model.encoder.predict(seeds)
    kmeans = KMeans(n_clusters=model.n_clusters, n_init=100)
    init_cluster_pred = kmeans.fit_predict(latent)
    _, _, init_f1 = genome_acc(groups, init_cluster_pred, label,
                               model.n_clusters)
    print('Initialized performance: ', init_f1)

    model.cluster.get_layer(name='clustering').set_weights(
        [kmeans.cluster_centers_])
    # Check model cluster performance
    non_wh_cluster_res = model.cluster.predict(seeds).argmax(1)
    _, _, init_f1_1 = genome_acc(groups, non_wh_cluster_res, label,
                                 model.n_clusters)
    print('Model cluster performance: ', init_f1_1)

    stop = False
    last_cluster_pred = np.copy(non_wh_cluster_res)
    for epoch in n_epochs:
        offset = 0
        losses = []

        if epoch % save_interval == 0 or (epoch == epochs - 1):
            # Save the visualization of latent space
            latent = model.encoder.predict(seeds)
            latent_space_img = visualize_latent_space(
                latent,
                groups_label,
                model.n_clusters,
                is_save=True,
                save_path=f'{save_path}/latent_{epoch}.png')

            # Log the clustering performance
            cluster_res = model.cluster.predict(seeds)
            y_pred = cluster_res.argmax(1)
            _, _, f1 = genome_acc(groups, y_pred, label, model.n_clusters)

            try:
                wandb.log({
                    'latent_space':
                    [wandb.Image(latent_space_img, caption="Latent space")],
                    'cluster_f1':
                    f1
                })
            except Exception:
                print('cluster_f1: ', f1)

            # Fraction of samples whose cluster assignment changed since the last check
            delta_label = np.sum(y_pred != last_cluster_pred).astype(
                np.float32) / y_pred.shape[0]
            if epoch > 0 and delta_label < model.tol:
                # Assignments have converged below tolerance; stop training
                stop = True
                break

            # Keep the hard assignments for the next delta_label comparison
            last_cluster_pred = np.copy(y_pred)

            # Update target distribution from the soft assignments
            targ_dist = model.target_distribution(cluster_res)

        is_alternate = False
        for batch_iter in range(total_batches):
            # Take sequential batches; the last batch reuses the first batch_size samples
            imgs = seeds[offset:offset + batch_size, :] if (
                batch_iter < (total_batches - 1)) else seeds[:batch_size, :]
            y_cluster = targ_dist[offset:offset + batch_size, :] if (
                batch_iter <
                (total_batches - 1)) else targ_dist[:batch_size, :]
            offset += batch_size

            if batch_iter < int(2 * total_batches / 3) and total_batches >= 3:
                is_alternate = True
            else:
                is_alternate = False

            loss = model.train_on_batch(imgs, y_cluster, is_alternate)
            losses.append(loss)

        avg_loss = avg_losses(losses)
        try:
            wandb.log({'clustering_losses': avg_loss})
        except Exception:
            pass

        if stop:
            # Reach stop condition, stop training
            break
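model.target_distribution above turns the soft cluster assignments into the self-training target. DEC-style models typically use the squared, frequency-normalized assignment for this; whether ADEC does exactly the same is an assumption, but the standard formulation looks like this:

import numpy as np

def target_distribution(q):
    # q: soft assignments of shape (n_samples, n_clusters).
    # Sharpen each assignment and normalize by cluster frequency (DEC-style).
    weight = q ** 2 / q.sum(axis=0)
    return (weight.T / weight.sum(axis=1)).T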
Example #5
def train(model: AdversarialAutoencoder,
          x_train,
          y_train,
          batch_size,
          epochs=1000,
          save_interval=200,
          save_path='./images'):
    # latents = 5 * np.random.normal(size=(100, model.latent_dim))
    latents = 5 * np.random.uniform(-1, 1, size=(100, model.latent_dim))
    n_epochs = tqdm.tqdm_notebook(range(epochs))
    half_batch = int(batch_size / 2)
    total_batches = x_train.shape[0] // batch_size
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    for epoch in n_epochs:
        losses = []
        for batch_iter in range(total_batches):
            # Randomly choose a half batch of real images for the discriminator
            idx = np.random.randint(0, x_train.shape[0], half_batch)
            imgs = x_train[idx]

            # Train discriminator
            ## Get fake and real latent feature
            latent_fake = model.encoder.predict(imgs)
            latent_real = 5 * np.random.normal(size=(half_batch,
                                                     model.latent_dim))
            d_real = np.ones((half_batch, 1))
            d_fake = np.zeros((half_batch, 1))

            ## train
            d_loss_real = model.discriminator.train_on_batch(
                latent_real, d_real)
            d_loss_fake = model.discriminator.train_on_batch(
                latent_fake, d_fake)
            d_loss = d_loss_real + d_loss_fake

            idx = np.random.randint(0, x_train.shape[0], batch_size)
            imgs = x_train[idx]

            # Train autoencoder
            ae_loss = model.autoencoder.train_on_batch(imgs, imgs)

            # Train generator
            g_real = np.ones((batch_size, 1))
            g_loss = model.generator.train_on_batch(imgs, g_real)

            loss = {'ae_loss': ae_loss, 'g_loss': g_loss, 'd_loss': d_loss}
            losses.append(loss)

        avg_loss = avg_losses(losses)
        # wandb.log({'losses': avg_loss})

        if epoch % save_interval == 0 or (epoch == epochs - 1):
            sampled_imgs = model.decoder(latents, training=False)
            res_img = make_image_grid(sampled_imgs.numpy(), (28, 28),
                                      str(epoch), save_path)

            latent = model.encoder.predict(x_train)
            latent_space_img = visualize_latent_space(
                latent,
                y_train,
                10,
                is_save=True,
                save_path=f'./latent_space/{epoch}.png')
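model.generator above is trained on (imgs, ones), so it is presumably the encoder chained with a frozen copy of the discriminator, compiled with binary cross-entropy: training it pushes encoded latents toward the discriminator's "real" label. A sketch of that conventional wiring, assuming Keras models (names and optimizer settings are illustrative):

import tensorflow as tf
from tensorflow import keras

def build_generator(encoder, discriminator):
    # Freeze the discriminator inside this combined model; the standalone
    # discriminator keeps training normally because it was compiled earlier.
    discriminator.trainable = False
    img = keras.Input(shape=encoder.input_shape[1:])
    validity = discriminator(encoder(img))
    generator = keras.Model(img, validity, name='generator')
    generator.compile(optimizer=keras.optimizers.Adam(1e-4),
                      loss='binary_crossentropy')
    return generator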
Example #6
def run():
    # learning rate
    lr = 1e-2
    accs, losses = [], []

    # 784 => 256
    w1 = tf.Variable(tf.random.normal([784, 256], stddev=0.1))
    b1 = tf.Variable(tf.zeros([256]))
    # 256 => 128
    w2 = tf.Variable(tf.random.normal([256, 128], stddev=0.1))
    b2 = tf.Variable(tf.zeros([128]))
    # 128 => 10
    w3 = tf.Variable(tf.random.normal([128, 10], stddev=0.1))
    b3 = tf.Variable(tf.zeros([10]))

    for step, (x, y) in enumerate(train_db):

        # [b, 28, 28] => [b, 784]
        x = tf.reshape(x, (-1, 784))

        with tf.GradientTape() as tape:

            # layer1.
            h1 = x @ w1 + b1
            h1 = tf.nn.relu(h1)
            # layer2
            h2 = h1 @ w2 + b2
            h2 = tf.nn.relu(h2)
            # output
            out = h2 @ w3 + b3
            # out = tf.nn.relu(out)

            # compute loss
            # [b, 10] - [b, 10]
            loss = tf.square(y - out)
            # [b, 10] => scalar
            loss = tf.reduce_mean(loss)

        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        for p, g in zip([w1, b1, w2, b2, w3, b3], grads):
            p.assign_sub(lr * g)

        # print
        if step % 100 == 0:
            print(step, 'loss:', float(loss))
            losses.append(float(loss))

        if step % 100 == 0:
            # evaluate/test
            total, total_correct = 0., 0

            for x, y in test_db:
                # [b, 28, 28] => [b, 784], same flattening as in training
                x = tf.reshape(x, (-1, 784))
                # layer1
                h1 = x @ w1 + b1
                h1 = tf.nn.relu(h1)
                # layer2
                h2 = h1 @ w2 + b2
                h2 = tf.nn.relu(h2)
                # output
                out = h2 @ w3 + b3
                # [b, 10] => [b]
                pred = tf.argmax(out, axis=1)
                # convert one_hot y to number y
                y = tf.argmax(y, axis=1)
                # bool type
                correct = tf.equal(pred, y)
                # bool tensor => int tensor => numpy
                total_correct += tf.reduce_sum(tf.cast(
                    correct, dtype=tf.int32)).numpy()
                total += x.shape[0]

            print(step, 'Evaluate Acc:', total_correct / total)

            accs.append(total_correct / total)

    plt.figure()
    x = [i * 100 for i in range(len(losses))]
    plt.plot(x, losses, color='C0', marker='s', label='train')
    plt.ylabel('MSE')
    plt.xlabel('Step')
    plt.legend()
    plt.savefig('train.svg')

    plt.figure()
    plt.plot(x, accs, color='C1', marker='s', label='test')
    plt.ylabel('Accuracy')
    plt.xlabel('Step')
    plt.legend()
    plt.savefig('test.svg')
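run() assumes pre-batched train_db and test_db datasets whose labels are one-hot float vectors (the MSE loss above requires [b, 10] targets). A sketch of how they might be built from MNIST with tf.data (the batch size of 128 is an assumption):

import tensorflow as tf

def preprocess(x, y):
    # Scale pixels to [0, 1] and one-hot encode labels as floats to match the MSE loss.
    x = tf.cast(x, tf.float32) / 255.0
    y = tf.one_hot(y, depth=10, dtype=tf.float32)
    return x, y

(x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()
train_db = tf.data.Dataset.from_tensor_slices(
    (x_tr, y_tr)).shuffle(10000).map(preprocess).batch(128)
test_db = tf.data.Dataset.from_tensor_slices(
    (x_te, y_te)).map(preprocess).batch(128)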