    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Baseline evaluation before any training
    prec, rec, f1, auprc = _eval(sess, model, x_val, y_val)
    print('Prec:{:.4f}  |  Rec:{:.4f}  |  F1:{:.4f}  |  Eval_auprc:{:.4f}'.
          format(prec, rec, f1, auprc))
    sys.stdout.flush()
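    # NOTE: `_eval`, `_shuffle`, and `DataInput` are project helpers defined elsewhere
    # in this repo. A minimal sketch of what `_shuffle` and `DataInput` are assumed to
    # do (hypothetical, for illustration only -- not the repo's actual implementation):
    #
    #   def _shuffle(x, y):
    #       perm = np.random.permutation(len(x))
    #       return x[perm], y[perm]
    #
    #   class DataInput:
    #       """Iterates over `data` in order, yielding (step_index, batch) pairs."""
    #       def __init__(self, data, batch_size):
    #           self.data, self.batch_size = data, batch_size
    #       def __iter__(self):
    #           for step, start in enumerate(range(0, len(self.data), self.batch_size)):
    #               yield step, self.data[start:start + self.batch_size]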

    # Start training
    start_time = time.time()
    for i in range(nb_epochs):
        print('==== Training epoch {} ===='.format(i))
        sys.stdout.flush()

        # shuffle for each epoch
        x_train, y_train = _shuffle(x_train, y_train)

        loss_dis_sum, loss_gen_sum, loss_enc_sum = 0., 0., 0.
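        # Alternate updates: the discriminator is trained on every batch except each
        # d_g_iter-th one, where the generator and encoder are trained instead;
        # batch 0 always updates the discriminator.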
        for j, batch_data in DataInput(x_train, train_batch_size):
            # Update discriminator
            if (j % d_g_iter != 0) or (j == 0):
                loss_dis, loss_gen, loss_enc = model.train(sess,
                                                           batch_data,
                                                           learning_rate,
                                                           train_d=True)

            # Update generator and encoder
            else:
                loss_dis, loss_gen, loss_enc = model.train(sess,
                                                           batch_data,
                                                           learning_rate,
                                                           train_d=False)
    # model.restore(sess, 'train_logs_5%_0.8623/talkingdata/cross-e/0.9/1/ckpt')
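    # Total parameter count: np.prod over each variable's static shape gives its
    # element count; summing over tf.trainable_variables() gives the model size.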
    print('Total params: ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))

    start_time = time.time()
    auroc, prec, rec, f1 = _eval(sess, model, x_val, y_val)
    print('Eval_auc:{:.4f} | prec:{:.4f} | rec:{:.4f} | f1:{:.4f}\tTime Cost:{:.4f}'.format(
        auroc, prec, rec, f1, time.time() - start_time))
    sys.stdout.flush()

    # Start training
    start_time = time.time()
    for i in range(nb_epochs):
        print('==== Training epoch {} ===='.format(i))
        sys.stdout.flush()

        # shuffle for each epoch
        x, y = _shuffle(x, y)
        loss_dis_sum, loss_gen_sum, loss_enc_sum = 0., 0., 0.
        for j, batch_data in DataInput(x, train_batch_size):
            # train D
            if (j % d_g_iter != 0) or (j == 0):
                loss_dis, loss_gen, loss_enc = model.train(sess, batch_data, learning_rate, train_d=True)
            # train G, E
            else:
                loss_dis, loss_gen, loss_enc = model.train(sess, batch_data, learning_rate, train_d=False)
            loss_dis_sum += loss_dis
            loss_gen_sum += loss_gen
            loss_enc_sum += loss_enc

            # Log more often once validation AUROC is already high
            print_iter = 1000 if best_auroc > 0.90 else 2000
            if j % print_iter == 0:
                print('== Epoch {}  Batch {}\tLoss_dis:{:.4f}  Loss_gen:{:.4f}  loss_enc:{:.4f} =='
                      .format(i, j, loss_dis_sum, loss_gen_sum, loss_enc_sum))