Example #1
 # Module-level imports assumed: os, csv, datetime, numpy as np, pandas as pd,
 # and sklearn.metrics (precision_recall_fscore_support, confusion_matrix).
 def evaluation(self):
     a = 0
     scores, y_pred_test = self.isolation_forest()
     # print("##########scores###########")
     # print(scores)
     self.testy = np.load("test_y.npy")
     # print("##############testy##############")
     # print(self.testy)
     auroc = do_roc(scores=scores,
                    true_labels=self.testy,
                    file_name='roc',
                    directory='data',
                    plot=False)
     print('AUROC:', auroc)
     auprc = do_prc(scores, self.testy, file_name='auprc', directory='data')
     auprg = do_prg(scores, self.testy, file_name='auprg', directory='data')
     per = get_percentile(scores, self.dataset, self.anomaly_type, a)
     print(per)
     y_pred = (scores >= per).astype(int)
     precision, recall, f1, _ = precision_recall_fscore_support(
         self.testy.astype(int), y_pred.astype(int), average='binary')
     cm = confusion_matrix(self.testy,
                           y_pred,
                           labels=None,
                           sample_weight=None)
     print('confusion matrix:', cm)
     # create the results folders if they do not exist
     if not os.path.exists('results/'):
         os.makedirs('results/')
     if not os.path.exists('results/tf_results'):
         os.makedirs('results/tf_results')
     # write results on csv file
     csv_name = 'results/tf_results/results_' + self.dataset + '_' + self.anomaly_type + '.csv'
     exists = os.path.isfile(csv_name)
     if not exists:
         columns = [
             'auroc', 'auprc', 'auprg', 'precision', 'recall', 'f1', 'date'
         ]
         df = pd.DataFrame(columns=columns)
         df.to_csv(csv_name, index=False)  # header only; no index column, so appended rows align
     new_data = [
         auroc,
         auprc,
         auprg,
         precision,
         recall,
         f1,
         datetime.datetime.now(),
     ]
     with open(csv_name, 'a', newline='') as csv_file:
         filewriter = csv.writer(csv_file)
         filewriter.writerow(new_data)
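
The metric helpers do_roc and do_prc are project utilities not shown in these snippets. Below is a minimal sklearn-based sketch of what they plausibly compute, assuming they return AUROC and the area under the precision-recall curve and that the file_name/directory/plot arguments only control optional saving (the project's actual implementation may differ):

from sklearn.metrics import roc_auc_score, average_precision_score

def do_roc(scores, true_labels, file_name='roc', directory='data', plot=False):
    # AUROC from raw anomaly scores (higher score = more anomalous)
    return roc_auc_score(true_labels, scores)

def do_prc(scores, true_labels, file_name='prc', directory='data', plot=False):
    # Area under the precision-recall curve
    return average_precision_score(true_labels, scores)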
Example #2
def train_and_test(nb_epochs, weight, method, degree, random_seed):
    """ Runs the Bigan on the KDD dataset

    Note:
        Saves summaries on tensorboard. To display them, please use cmd line
        tensorboard --logdir=model.training_logdir() --port=number
    Args:
        nb_epochs (int): number of epochs
        weight (float, optional): weight for the anomaly score composition
        method (str, optional): 'fm' for ``Feature Matching`` or "cross-e"
                                     for ``cross entropy``, "efm" etc.
        anomalous_label (int): int in range 0 to 10, is the class/digit
                                which is considered outlier
    """
    logger = logging.getLogger("BiGAN.train.kdd.{}".format(method))

    # Placeholders
    input_pl = tf.placeholder(tf.float32,
                              shape=data.get_shape_input(),
                              name="input")
    is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
    learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")

    # Data
    trainx, trainy = data.get_train()
    trainx_copy = trainx.copy()
    testx, testy = data.get_test()
    trainx_org, trainy_org = data.get_train_org()

    print('samples:', trainx.shape, testx.shape, trainx_org.shape)

    # Parameters
    starting_lr = network.learning_rate
    batch_size = network.batch_size
    latent_dim = network.latent_dim
    ema_decay = 0.9999

    rng = np.random.RandomState(RANDOM_SEED)
    nr_batches_train = int(trainx.shape[0] / batch_size)
    nr_batches_test = int(testx.shape[0] / batch_size)
    nr_batches_train_org = int(trainx_org.shape[0] / batch_size)

    logger.info('Building training graph...')

    logger.warn("The BiGAN is training with the following parameters:")
    display_parameters(batch_size, starting_lr, ema_decay, weight, method,
                       degree)

    gen = network.decoder
    enc = network.encoder
    dis = network.discriminator

    with tf.variable_scope('encoder_model'):
        z_gen = enc(input_pl, is_training=is_training_pl)

    with tf.variable_scope('generator_model'):
        z = tf.random_normal([batch_size, latent_dim])
        x_gen = gen(z, is_training=is_training_pl)

    with tf.variable_scope('discriminator_model'):
        l_encoder, inter_layer_inp = dis(z_gen,
                                         input_pl,
                                         is_training=is_training_pl)
        l_generator, inter_layer_rct = dis(z,
                                           x_gen,
                                           is_training=is_training_pl,
                                           reuse=True)

    with tf.name_scope('loss_functions'):
        # discriminator
        loss_dis_enc = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(l_encoder), logits=l_encoder))
        loss_dis_gen = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(l_generator), logits=l_generator))
        loss_discriminator = loss_dis_gen + loss_dis_enc
        # generator
        loss_generator = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(l_generator), logits=l_generator))
        # encoder
        loss_encoder = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(l_encoder), logits=l_encoder))

    with tf.name_scope('optimizers'):
        # control op dependencies for batch norm and trainable variables
        tvars = tf.trainable_variables()
        dvars = [var for var in tvars if 'discriminator_model' in var.name]
        gvars = [var for var in tvars if 'generator_model' in var.name]
        evars = [var for var in tvars if 'encoder_model' in var.name]

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_ops_gen = [
            x for x in update_ops if ('generator_model' in x.name)
        ]
        update_ops_enc = [x for x in update_ops if ('encoder_model' in x.name)]
        update_ops_dis = [
            x for x in update_ops if ('discriminator_model' in x.name)
        ]

        optimizer_dis = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='dis_optimizer')
        optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='gen_optimizer')
        optimizer_enc = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='enc_optimizer')

        with tf.control_dependencies(update_ops_gen):
            gen_op = optimizer_gen.minimize(loss_generator, var_list=gvars)
        with tf.control_dependencies(update_ops_enc):
            enc_op = optimizer_enc.minimize(loss_encoder, var_list=evars)
        with tf.control_dependencies(update_ops_dis):
            dis_op = optimizer_dis.minimize(loss_discriminator, var_list=dvars)

        # Exponential Moving Average for estimation
        dis_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_dis = dis_ema.apply(dvars)

        with tf.control_dependencies([dis_op]):
            train_dis_op = tf.group(maintain_averages_op_dis)

        gen_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_gen = gen_ema.apply(gvars)

        with tf.control_dependencies([gen_op]):
            train_gen_op = tf.group(maintain_averages_op_gen)

        enc_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_enc = enc_ema.apply(evars)

        with tf.control_dependencies([enc_op]):
            train_enc_op = tf.group(maintain_averages_op_enc)

    with tf.name_scope('summary'):
        with tf.name_scope('dis_summary'):
            tf.summary.scalar('loss_discriminator', loss_discriminator,
                              ['dis'])
            tf.summary.scalar('loss_dis_encoder', loss_dis_enc, ['dis'])
            tf.summary.scalar('loss_dis_gen', loss_dis_gen, ['dis'])

        with tf.name_scope('gen_summary'):
            tf.summary.scalar('loss_generator', loss_generator, ['gen'])
            tf.summary.scalar('loss_encoder', loss_encoder, ['gen'])

        sum_op_dis = tf.summary.merge_all('dis')
        sum_op_gen = tf.summary.merge_all('gen')

    logger.info('Building testing graph...')

    with tf.variable_scope('encoder_model'):
        z_gen_ema = enc(input_pl,
                        is_training=is_training_pl,
                        getter=get_getter(enc_ema),
                        reuse=True)

    with tf.variable_scope('generator_model'):
        reconstruct_ema = gen(z_gen_ema,
                              is_training=is_training_pl,
                              getter=get_getter(gen_ema),
                              reuse=True)

    with tf.variable_scope('discriminator_model'):
        l_encoder_ema, inter_layer_inp_ema = dis(z_gen_ema,
                                                 input_pl,
                                                 is_training=is_training_pl,
                                                 getter=get_getter(dis_ema),
                                                 reuse=True)
        l_generator_ema, inter_layer_rct_ema = dis(z_gen_ema,
                                                   reconstruct_ema,
                                                   is_training=is_training_pl,
                                                   getter=get_getter(dis_ema),
                                                   reuse=True)
    with tf.name_scope('Testing'):
        with tf.variable_scope('Reconstruction_loss'):
            delta = input_pl - reconstruct_ema
            delta_flat = tf.contrib.layers.flatten(delta)
            gen_score = tf.norm(delta_flat,
                                ord=degree,
                                axis=1,
                                keep_dims=False,
                                name='epsilon')

        with tf.variable_scope('Discriminator_loss'):
            if method == "cross-e":
                dis_score = tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=tf.ones_like(l_generator_ema),
                    logits=l_generator_ema)

            elif method == "fm":
                fm = inter_layer_inp_ema - inter_layer_rct_ema
                fm = tf.contrib.layers.flatten(fm)
                dis_score = tf.norm(fm,
                                    ord=degree,
                                    axis=1,
                                    keep_dims=False,
                                    name='d_loss')

            else:
                # Guard against unsupported methods so dis_score is always defined
                raise ValueError("Unknown method for the discriminator score: {}".format(method))

            dis_score = tf.squeeze(dis_score)

        with tf.variable_scope('Score'):
            list_scores = (1 - weight) * gen_score + weight * dis_score

    logdir = create_logdir(weight, method, random_seed)

    sv = tf.train.Supervisor(logdir=logdir,
                             save_summaries_secs=None,
                             save_model_secs=120)

    logger.info('Start training...')
    with sv.managed_session() as sess:

        logger.info('Initialization done')
        writer = tf.summary.FileWriter(logdir, sess.graph)
        train_batch = 0
        epoch = 0

        while not sv.should_stop() and epoch < nb_epochs:

            lr = starting_lr
            begin = time.time()

            # construct randomly permuted minibatches
            trainx = trainx[rng.permutation(
                trainx.shape[0])]  # shuffling dataset
            trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]
            train_loss_dis, train_loss_gen, train_loss_enc = [0, 0, 0]

            # training
            for t in range(nr_batches_train):

                display_progression_epoch(t, nr_batches_train)
                ran_from = t * batch_size
                ran_to = (t + 1) * batch_size

                # train discriminator
                feed_dict = {
                    input_pl: trainx[ran_from:ran_to],
                    is_training_pl: True,
                    learning_rate: lr
                }

                _, ld, sm = sess.run(
                    [train_dis_op, loss_discriminator, sum_op_dis],
                    feed_dict=feed_dict)
                train_loss_dis += ld
                writer.add_summary(sm, train_batch)

                # train generator and encoder
                feed_dict = {
                    input_pl: trainx_copy[ran_from:ran_to],
                    is_training_pl: True,
                    learning_rate: lr
                }
                _, _, le, lg, sm = sess.run([
                    train_gen_op, train_enc_op, loss_encoder, loss_generator,
                    sum_op_gen
                ],
                                            feed_dict=feed_dict)
                train_loss_gen += lg
                train_loss_enc += le
                writer.add_summary(sm, train_batch)

                train_batch += 1

            train_loss_gen /= nr_batches_train
            train_loss_enc /= nr_batches_train
            train_loss_dis /= nr_batches_train

            logger.info('Epoch terminated')
            print(
                "Epoch %d | time = %ds | loss gen = %.4f | loss enc = %.4f | loss dis = %.4f "
                % (epoch, time.time() - begin, train_loss_gen, train_loss_enc,
                   train_loss_dis))

            epoch += 1

        logger.warn('Training evaluation...')

        inds = rng.permutation(trainx_org.shape[0])
        trainx_org = trainx_org[inds]  # shuffling  dataset
        trainy_org = trainy_org[inds]  # shuffling  dataset
        scores = []
        inference_time = []

        # Create scores
        for t in range(nr_batches_train_org):

            # construct randomly permuted minibatches
            ran_from = t * batch_size
            ran_to = (t + 1) * batch_size
            begin_val_batch = time.time()

            feed_dict = {
                input_pl: trainx_org[ran_from:ran_to],
                is_training_pl: False
            }

            scores += sess.run(list_scores, feed_dict=feed_dict).tolist()
            inference_time.append(time.time() - begin_val_batch)

        logger.info('Train evaluation : mean inference time is %.4f' %
                    (np.mean(inference_time)))

        ran_from = nr_batches_train_org * batch_size
        ran_to = (nr_batches_train_org + 1) * batch_size
        size = trainx_org[ran_from:ran_to].shape[0]
        fill = np.ones([batch_size - size, 70])

        batch = np.concatenate([trainx_org[ran_from:ran_to], fill], axis=0)
        feed_dict = {input_pl: batch, is_training_pl: False}

        batch_score = sess.run(list_scores, feed_dict=feed_dict).tolist()

        scores += batch_score[:size]

        # Scores above the 80th percentile (the top 20%) are flagged as anomalous
        per = np.percentile(scores, 80)

        y_pred_org = scores.copy()
        y_pred_org = np.array(y_pred_org)

        inds = (y_pred_org < per)
        inds_comp = (y_pred_org >= per)

        y_pred_org[inds] = 0
        y_pred_org[inds_comp] = 1

        roc_auc = do_roc(scores, trainy_org, "train_", "Results/", True)
        prc_auc = do_prc(scores, trainy_org, "train_", "Results/", True)
        #prg_auc = do_prg(scores, trainy_org, "train_", "Results/", True)

        precision, recall, f1, _ = precision_recall_fscore_support(
            trainy_org, y_pred_org, average='binary')

        print("Testing : Prec = %.4f | Rec = %.4f | F1 = %.4f " %
              (precision, recall, f1))

        save_results(scores,
                     trainy_org,
                     'bigan',
                     'sia_train_air_4',
                     'fm',
                     '0.5',
                     '1',
                     2018,
                     'outlier',
                     0.1,
                     step=-1)

        logger.warn('Testing evaluation...')

        inds = rng.permutation(testx.shape[0])
        testx = testx[inds]  # shuffling  dataset
        testy = testy[inds]  # shuffling  dataset
        scores = []
        inference_time = []

        # Create scores
        for t in range(nr_batches_test):

            # construct randomly permuted minibatches
            ran_from = t * batch_size
            ran_to = (t + 1) * batch_size
            begin_val_batch = time.time()

            feed_dict = {
                input_pl: testx[ran_from:ran_to],
                is_training_pl: False
            }

            scores += sess.run(list_scores, feed_dict=feed_dict).tolist()
            inference_time.append(time.time() - begin_val_batch)

        logger.info('Testing : mean inference time is %.4f' %
                    (np.mean(inference_time)))

        ran_from = nr_batches_test * batch_size
        ran_to = (nr_batches_test + 1) * batch_size
        size = testx[ran_from:ran_to].shape[0]
        fill = np.ones([batch_size - size, 70])

        batch = np.concatenate([testx[ran_from:ran_to], fill], axis=0)
        feed_dict = {input_pl: batch, is_training_pl: False}

        batch_score = sess.run(list_scores, feed_dict=feed_dict).tolist()

        scores += batch_score[:size]

        # Scores above the 80th percentile (the top 20%) are flagged as anomalous
        per = np.percentile(scores, 80)

        y_pred = scores.copy()
        y_pred = np.array(y_pred)

        inds = (y_pred < per)
        inds_comp = (y_pred >= per)

        y_pred[inds] = 0
        y_pred[inds_comp] = 1

        roc_auc = do_roc(scores, testy, "test_", "Results/", True)
        prc_auc = do_prc(scores, testy, "test_", "Results/", True)
        #prg_auc = do_prg(scores, testy, "test_", "Results/", True)

        precision, recall, f1, _ = precision_recall_fscore_support(
            testy, y_pred, average='binary')

        print("Testing : Prec = %.4f | Rec = %.4f | F1 = %.4f " %
              (precision, recall, f1))

        # SIA metrics calculation
        TN_train, FN_train, FP_train, TP_train = my_confusion_matrix(
            trainy_org, y_pred_org)
        TN_test, FN_test, FP_test, TP_test = my_confusion_matrix(testy, y_pred)

        overall_train, average_train, sens_train, spec_train, ppr_train = derive_metric(
            TN_train, FN_train, FP_train, TP_train)
        overall_test, average_test, sens_test, spec_test, ppr_test = derive_metric(
            TN_test, FN_test, FP_test, TP_test)

        print('Train Metrics:', overall_train, average_train, sens_train,
              spec_train, ppr_train)
        print('Test Metrics:', overall_test, average_test, sens_test,
              spec_test, ppr_test)

        save_results(scores,
                     testy,
                     'bigan',
                     'sia_test_air_4',
                     'fm',
                     '0.5',
                     '1',
                     2018,
                     'outlier',
                     0.1,
                     step=-1)
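
Both of these TensorFlow 1.x examples evaluate the test graph through a get_getter helper so that the encoder, generator, and discriminator run with their exponential-moving-average (EMA) weights. A minimal sketch of such a helper, assuming it follows the usual TF1 custom-getter idiom (the project's actual implementation may differ):

def get_getter(ema):
    """Custom variable getter that substitutes EMA shadow variables when available."""
    def ema_getter(getter, name, *args, **kwargs):
        var = getter(name, *args, **kwargs)
        ema_var = ema.average(var)  # None if ema.apply() was never called on this variable
        return ema_var if ema_var else var
    return ema_getter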
Example #3
def train_and_test(nb_epochs, weight, method, degree, random_seed, label):
    """ Runs the Bigan on the KDD dataset

    Note:
        Saves summaries on tensorboard. To display them, please use cmd line
        tensorboard --logdir=model.training_logdir() --port=number
    Args:
        nb_epochs (int): number of epochs
        weight (float, optional): weight for the anomaly score composition
        method (str, optional): 'fm' for ``Feature Matching`` or "cross-e"
                                     for ``cross entropy``, "efm" etc.
        anomalous_label (int): int in range 0 to 10, is the class/digit
                                which is considered outlier
    """
    logger = logging.getLogger("GAN.train.mnist.{}.{}".format(method,label))

    # Placeholders
    input_pl = tf.placeholder(tf.float32, shape=data.get_shape_input(), name="input")
    is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
    learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")

    # Data
    trainx, trainy = data.get_train(label, True)
    trainx_copy = trainx.copy()
    testx, testy = data.get_test(label, True)

    # Parameters
    starting_lr = network.learning_rate
    batch_size = network.batch_size
    latent_dim = network.latent_dim
    ema_decay = 0.999

    rng = np.random.RandomState(RANDOM_SEED)
    nr_batches_train = int(trainx.shape[0] / batch_size)
    nr_batches_test = int(testx.shape[0] / batch_size)

    logger.info('Building training graph...')

    logger.warn("The GAN is training with the following parameters:")
    display_parameters(batch_size, starting_lr, ema_decay, weight, method, degree, label)

    gen = network.generator
    dis = network.discriminator

    # Sample noise from random normal distribution
    random_z = tf.random_normal([batch_size, latent_dim], mean=0.0, stddev=1.0, name='random_z')
    # Generate images with generator
    generator = gen(random_z, is_training=is_training_pl)
    # Pass real and fake images into discriminator separately
    real_d, inter_layer_real = dis(input_pl, is_training=is_training_pl)
    fake_d, inter_layer_fake = dis(generator, is_training=is_training_pl, reuse=True)

    with tf.name_scope('loss_functions'):
        # Calculate separate losses for the discriminator on real and fake images
        real_discriminator_loss = tf.losses.sigmoid_cross_entropy(tf.constant(1, shape=[batch_size]), real_d, scope='real_discriminator_loss')
        fake_discriminator_loss = tf.losses.sigmoid_cross_entropy(tf.constant(0, shape=[batch_size]), fake_d, scope='fake_discriminator_loss')
        # Add discriminator losses
        discriminator_loss = real_discriminator_loss + fake_discriminator_loss
        # Calculate loss for generator by flipping label on discriminator output
        generator_loss = tf.losses.sigmoid_cross_entropy(tf.constant(1, shape=[batch_size]), fake_d, scope='generator_loss')

    with tf.name_scope('optimizers'):
        # control op dependencies for batch norm and trainable variables
        dvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
        gvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')

        update_ops_gen = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator')
        update_ops_dis = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='discriminator')

        optimizer_dis = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5, name='dis_optimizer')
        optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5, name='gen_optimizer')

        with tf.control_dependencies(update_ops_gen): # attached op for moving average batch norm
            gen_op = optimizer_gen.minimize(generator_loss, var_list=gvars)
        with tf.control_dependencies(update_ops_dis):
            dis_op = optimizer_dis.minimize(discriminator_loss, var_list=dvars)

        dis_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_dis = dis_ema.apply(dvars)

        with tf.control_dependencies([dis_op]):
            train_dis_op = tf.group(maintain_averages_op_dis)

        gen_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_gen = gen_ema.apply(gvars)

        with tf.control_dependencies([gen_op]):
            train_gen_op = tf.group(maintain_averages_op_gen)

    with tf.name_scope('training_summary'):
        with tf.name_scope('dis_summary'):
            tf.summary.scalar('real_discriminator_loss', real_discriminator_loss, ['dis'])
            tf.summary.scalar('fake_discriminator_loss', fake_discriminator_loss, ['dis'])
            tf.summary.scalar('discriminator_loss', discriminator_loss, ['dis'])

        with tf.name_scope('gen_summary'):
            tf.summary.scalar('loss_generator', generator_loss, ['gen'])

        with tf.name_scope('image_summary'):
            tf.summary.image('reconstruct', generator, 8, ['image'])
            tf.summary.image('input_images', input_pl, 8, ['image'])


        sum_op_dis = tf.summary.merge_all('dis')
        sum_op_gen = tf.summary.merge_all('gen')
        sum_op_im = tf.summary.merge_all('image')

    logger.info('Building testing graph...')

    with tf.variable_scope("latent_variable"):
        z_optim = tf.get_variable(name='z_optim', shape=[batch_size, latent_dim], initializer=tf.truncated_normal_initializer())
        reinit_z = z_optim.initializer
    # EMA
    generator_ema = gen(z_optim, is_training=is_training_pl, getter=get_getter(gen_ema), reuse=True)
    # Pass real and fake images into the discriminator separately, using its EMA weights
    real_d_ema, inter_layer_real_ema = dis(input_pl, is_training=is_training_pl, getter=get_getter(dis_ema), reuse=True)
    fake_d_ema, inter_layer_fake_ema = dis(generator_ema, is_training=is_training_pl, getter=get_getter(dis_ema), reuse=True)

    with tf.name_scope('error_loss'):
        delta = input_pl - generator_ema
        delta_flat = tf.contrib.layers.flatten(delta)
        gen_score = tf.norm(delta_flat, ord=degree, axis=1, keep_dims=False, name='epsilon')

    with tf.variable_scope('Discriminator_loss'):
        if method == "cross-e":
            dis_score = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(fake_d_ema), logits=fake_d_ema)

        elif method == "fm":
            fm = inter_layer_real_ema - inter_layer_fake_ema
            fm = tf.contrib.layers.flatten(fm)
            dis_score = tf.norm(fm, ord=degree, axis=1, keep_dims=False,
                                name='d_loss')

        else:
            # Guard against unsupported methods so dis_score is always defined
            raise ValueError("Unknown method for the discriminator score: {}".format(method))

        dis_score = tf.squeeze(dis_score)

    with tf.variable_scope('Total_loss'):
        loss = (1 - weight) * gen_score + weight * dis_score

    with tf.variable_scope("Test_learning_rate"):
        step = tf.Variable(0, trainable=False)
        boundaries = [200, 300]
        values = [0.01, 0.001, 0.0005]
        learning_rate_invert = tf.train.piecewise_constant(step, boundaries, values)
        reinit_lr = tf.variables_initializer(
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                              scope="Test_learning_rate"))

    with tf.name_scope('Test_optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate_invert).minimize(loss, global_step=step, var_list=[z_optim], name='optimizer')
        reinit_optim = tf.variables_initializer(
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                              scope='Test_optimizer'))

    reinit_test_graph_op = [reinit_z, reinit_lr, reinit_optim]

    with tf.name_scope("Scores"):
        list_scores = loss

    logdir = create_logdir(method, weight, label, random_seed)

    sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=None,
                             save_model_secs=120)

    logger.info('Start training...')
    with sv.managed_session() as sess:

        logger.info('Initialization done')

        writer = tf.summary.FileWriter(logdir, sess.graph)

        train_batch = 0
        epoch = 0

        while not sv.should_stop() and epoch < nb_epochs:

            lr = starting_lr

            begin = time.time()
            trainx = trainx[rng.permutation(trainx.shape[0])]  # shuffling unl dataset
            trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]

            train_loss_dis, train_loss_gen = [0, 0]
            # training
            for t in range(nr_batches_train):
                display_progression_epoch(t, nr_batches_train)

                # construct randomly permuted minibatches
                ran_from = t * batch_size
                ran_to = (t + 1) * batch_size

                # train discriminator
                feed_dict = {input_pl: trainx[ran_from:ran_to],
                             is_training_pl:True,
                             learning_rate:lr}
                _, ld, sm = sess.run([train_dis_op, discriminator_loss, sum_op_dis], feed_dict=feed_dict)
                train_loss_dis += ld
                writer.add_summary(sm, train_batch)

                # train generator
                feed_dict = {input_pl: trainx_copy[ran_from:ran_to],
                             is_training_pl:True,
                             learning_rate:lr}
                _, lg, sm = sess.run([train_gen_op, generator_loss, sum_op_gen], feed_dict=feed_dict)
                train_loss_gen += lg
                writer.add_summary(sm, train_batch)

                if t % FREQ_PRINT == 0:  # inspect reconstruction
                    # use a separate name so the minibatch counter t is not overwritten
                    rand_idx = np.random.randint(0, 4000)
                    ran_from = rand_idx
                    ran_to = rand_idx + batch_size
                    sm = sess.run(sum_op_im, feed_dict={input_pl: trainx[ran_from:ran_to], is_training_pl: False})
                    writer.add_summary(sm, train_batch)

                train_batch += 1

            train_loss_gen /= nr_batches_train
            train_loss_dis /= nr_batches_train

            logger.info('Epoch terminated')
            print("Epoch %d | time = %ds | loss gen = %.4f | loss dis = %.4f "
                  % (epoch, time.time() - begin, train_loss_gen, train_loss_dis))

            epoch += 1

        logger.warn('Testing evaluation...')
        inds = rng.permutation(testx.shape[0])
        testx = testx[inds]  # shuffling unl dataset
        testy = testy[inds]
        scores = []
        inference_time = []

        # testing
        for t in range(nr_batches_test):
            # construct randomly permuted minibatches
            ran_from = t * batch_size
            ran_to = (t + 1) * batch_size
            begin_val_batch = time.time()

            # invert the gan
            feed_dict = {input_pl: testx[ran_from:ran_to],
                         is_training_pl:False}

            for _ in range(STEPS_NUMBER):
                _ = sess.run(optimizer, feed_dict=feed_dict)
            scores += sess.run(list_scores, feed_dict=feed_dict).tolist()
            inference_time.append(time.time() - begin_val_batch)
            sess.run(reinit_test_graph_op)

        logger.info('Testing : mean inference time is %.4f' % (
            np.mean(inference_time)))
        ran_from = nr_batches_test * batch_size
        ran_to = (nr_batches_test + 1) * batch_size
        size = testx[ran_from:ran_to].shape[0]
        fill = np.ones([batch_size - size, 28, 28, 1])

        batch = np.concatenate([testx[ran_from:ran_to], fill], axis=0)
        feed_dict = {input_pl: batch,
                     is_training_pl: False}

        for _ in range(STEPS_NUMBER):
            _ = sess.run(optimizer, feed_dict=feed_dict)
        batch_score = sess.run(list_scores, feed_dict=feed_dict).tolist()

        scores += batch_score[:size]

        prc_auc = do_prc(scores, testy,
                         file_name=r'gan/mnist/{}/{}/{}'.format(method, weight, label),
                         directory=r'results/gan/mnist/{}/{}/'.format(method, weight))

        print("Testing | PRC AUC = {:.4f}".format(prc_auc))