Example #1
def train_and_test(nb_epochs, weight, method, degree, random_seed, nc):
    """ Runs the Bigan on the KDD dataset

    Note:
        Saves summaries on tensorboard. To display them, please use cmd line
        tensorboard --logdir=model.training_logdir() --port=number
    Args:
        nb_epochs (int): number of epochs
        weight (float, optional): weight for the anomaly score composition
        method (str, optional): 'fm' for ``Feature Matching`` or "cross-e"
                                     for ``cross entropy``, "efm" etc.
        anomalous_label (int): int in range 0 to 10, is the class/digit
                                which is considered outlier
    """
    logger = logging.getLogger("BiGAN.train.kdd.{}".format(method))

    data.set_nc(nc)
    print("get_shape", data.get_shape_input()[1])
    # Placeholders
    input_pl = tf.placeholder(tf.float32,
                              shape=data.get_shape_input(),
                              name="input")
    is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
    learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")

    # Data
    trainx, trainy = data.get_train()
    trainx_copy = trainx.copy()
    testx, testy = data.get_test()

    # Parameters
    starting_lr = network.learning_rate
    batch_size = network.batch_size
    latent_dim = network.latent_dim
    ema_decay = 0.9999

    rng = np.random.RandomState(RANDOM_SEED)
    nr_batches_train = int(trainx.shape[0] / batch_size)
    nr_batches_test = int(testx.shape[0] / batch_size)

    logger.info('Building training graph...')

    logger.warning("The BiGAN is training with the following parameters:")
    display_parameters(batch_size, starting_lr, ema_decay, weight, method,
                       degree)

    gen = network.decoder
    enc = network.encoder
    dis = network.discriminator

    with tf.variable_scope('encoder_model'):
        z_gen = enc(input_pl, is_training=is_training_pl)

    with tf.variable_scope('generator_model'):
        z = tf.random_normal([batch_size, latent_dim])
        x_gen = gen(z, is_training=is_training_pl)

    with tf.variable_scope('discriminator_model'):
        l_encoder, inter_layer_inp = dis(z_gen,
                                         input_pl,
                                         is_training=is_training_pl)
        l_generator, inter_layer_rct = dis(z,
                                           x_gen,
                                           is_training=is_training_pl,
                                           reuse=True)

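    # BiGAN losses: the discriminator is trained to output 1 on encoder pairs
    # (E(x), x) and 0 on generator pairs (z, G(z)); the generator and the
    # encoder are each trained against the flipped labels.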
    with tf.name_scope('loss_functions'):
        # discriminator
        loss_dis_enc = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(l_encoder), logits=l_encoder))
        loss_dis_gen = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(l_generator), logits=l_generator))
        loss_discriminator = loss_dis_gen + loss_dis_enc
        # generator
        loss_generator = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(l_generator), logits=l_generator))
        # encoder
        loss_encoder = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(l_encoder), logits=l_encoder))

    with tf.name_scope('optimizers'):
        # control op dependencies for batch norm and trainable variables
        tvars = tf.trainable_variables()
        dvars = [var for var in tvars if 'discriminator_model' in var.name]
        gvars = [var for var in tvars if 'generator_model' in var.name]
        evars = [var for var in tvars if 'encoder_model' in var.name]

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_ops_gen = [
            x for x in update_ops if ('generator_model' in x.name)
        ]
        update_ops_enc = [x for x in update_ops if ('encoder_model' in x.name)]
        update_ops_dis = [
            x for x in update_ops if ('discriminator_model' in x.name)
        ]

        optimizer_dis = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='dis_optimizer')
        optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='gen_optimizer')
        optimizer_enc = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='enc_optimizer')

        with tf.control_dependencies(update_ops_gen):
            gen_op = optimizer_gen.minimize(loss_generator, var_list=gvars)
        with tf.control_dependencies(update_ops_enc):
            enc_op = optimizer_enc.minimize(loss_encoder, var_list=evars)
        with tf.control_dependencies(update_ops_dis):
            dis_op = optimizer_dis.minimize(loss_discriminator, var_list=dvars)

        # Exponential Moving Average for estimation
        dis_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_dis = dis_ema.apply(dvars)

        with tf.control_dependencies([dis_op]):
            train_dis_op = tf.group(maintain_averages_op_dis)

        gen_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_gen = gen_ema.apply(gvars)

        with tf.control_dependencies([gen_op]):
            train_gen_op = tf.group(maintain_averages_op_gen)

        enc_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_enc = enc_ema.apply(evars)

        with tf.control_dependencies([enc_op]):
            train_enc_op = tf.group(maintain_averages_op_enc)

    with tf.name_scope('summary'):
        with tf.name_scope('dis_summary'):
            tf.summary.scalar('loss_discriminator', loss_discriminator,
                              ['dis'])
            tf.summary.scalar('loss_dis_encoder', loss_dis_enc, ['dis'])
            tf.summary.scalar('loss_dis_gen', loss_dis_gen, ['dis'])

        with tf.name_scope('gen_summary'):
            tf.summary.scalar('loss_generator', loss_generator, ['gen'])
            tf.summary.scalar('loss_encoder', loss_encoder, ['gen'])

        sum_op_dis = tf.summary.merge_all('dis')
        sum_op_gen = tf.summary.merge_all('gen')

    logger.info('Building testing graph...')

    with tf.variable_scope('encoder_model'):
        z_gen_ema = enc(input_pl,
                        is_training=is_training_pl,
                        getter=get_getter(enc_ema),
                        reuse=True)

    with tf.variable_scope('generator_model'):
        reconstruct_ema = gen(z_gen_ema,
                              is_training=is_training_pl,
                              getter=get_getter(gen_ema),
                              reuse=True)

    with tf.variable_scope('discriminator_model'):
        l_encoder_ema, inter_layer_inp_ema = dis(z_gen_ema,
                                                 input_pl,
                                                 is_training=is_training_pl,
                                                 getter=get_getter(dis_ema),
                                                 reuse=True)
        l_generator_ema, inter_layer_rct_ema = dis(z_gen_ema,
                                                   reconstruct_ema,
                                                   is_training=is_training_pl,
                                                   getter=get_getter(dis_ema),
                                                   reuse=True)
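
    # Anomaly score: a reconstruction term ||x - G(E(x))|| of order `degree`
    # combined with a discriminator-based term ('cross-e' or 'fm'),
    # mixed by `weight` below.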
    with tf.name_scope('Testing'):
        with tf.variable_scope('Reconstruction_loss'):
            delta = input_pl - reconstruct_ema
            delta_flat = tf.contrib.layers.flatten(delta)
            gen_score = tf.norm(delta_flat,
                                ord=degree,
                                axis=1,
                                keep_dims=False,
                                name='epsilon')

        with tf.variable_scope('Discriminator_loss'):
            if method == "cross-e":
                dis_score = tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=tf.ones_like(l_generator_ema),
                    logits=l_generator_ema)

            elif method == "fm":
                fm = inter_layer_inp_ema - inter_layer_rct_ema
                fm = tf.contrib.layers.flatten(fm)
                dis_score = tf.norm(fm,
                                    ord=degree,
                                    axis=1,
                                    keep_dims=False,
                                    name='d_loss')

            dis_score = tf.squeeze(dis_score)

        with tf.variable_scope('Score'):
            list_scores = (1 - weight) * gen_score + weight * dis_score

    logdir = create_logdir(weight, method, random_seed)

    sv = tf.train.Supervisor(logdir=logdir,
                             save_summaries_secs=None,
                             save_model_secs=120)

    logger.info('Start training...')
    with sv.managed_session() as sess:

        logger.info('Initialization done')
        writer = tf.summary.FileWriter(logdir, sess.graph)
        train_batch = 0
        epoch = 0

        while not sv.should_stop() and epoch < nb_epochs:

            lr = starting_lr
            begin = time.time()

            # construct randomly permuted minibatches
            trainx = trainx[rng.permutation(
                trainx.shape[0])]  # shuffling dataset
            trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]
            train_loss_dis, train_loss_gen, train_loss_enc = [0, 0, 0]

            # training
            for t in range(nr_batches_train):

                display_progression_epoch(t, nr_batches_train)
                ran_from = t * batch_size
                ran_to = (t + 1) * batch_size

                # train discriminator
                feed_dict = {
                    input_pl: trainx[ran_from:ran_to],
                    is_training_pl: True,
                    learning_rate: lr
                }

                _, ld, sm = sess.run(
                    [train_dis_op, loss_discriminator, sum_op_dis],
                    feed_dict=feed_dict)
                train_loss_dis += ld
                writer.add_summary(sm, train_batch)

                # train generator and encoder
                feed_dict = {
                    input_pl: trainx_copy[ran_from:ran_to],
                    is_training_pl: True,
                    learning_rate: lr
                }
                _, _, le, lg, sm = sess.run([
                    train_gen_op, train_enc_op, loss_encoder, loss_generator,
                    sum_op_gen
                ],
                                            feed_dict=feed_dict)
                train_loss_gen += lg
                train_loss_enc += le
                writer.add_summary(sm, train_batch)

                train_batch += 1

            train_loss_gen /= nr_batches_train
            train_loss_enc /= nr_batches_train
            train_loss_dis /= nr_batches_train

            logger.info('Epoch terminated')
            print(
                "Epoch %d | time = %ds | loss gen = %.4f | loss enc = %.4f | loss dis = %.4f "
                % (epoch, time.time() - begin, train_loss_gen, train_loss_enc,
                   train_loss_dis))

            epoch += 1

        logger.warning('Testing evaluation...')

        inds = rng.permutation(testx.shape[0])
        testx = testx[inds]  # shuffling  dataset
        testy = testy[inds]  # shuffling  dataset
        scores = []
        inference_time = []

        # Create scores
        for t in range(nr_batches_test):

            # construct randomly permuted minibatches
            ran_from = t * batch_size
            ran_to = (t + 1) * batch_size
            begin_val_batch = time.time()

            feed_dict = {
                input_pl: testx[ran_from:ran_to],
                is_training_pl: False
            }

            scores += sess.run(list_scores, feed_dict=feed_dict).tolist()
            inference_time.append(time.time() - begin_val_batch)

        logger.info('Testing : mean inference time is %.4f' %
                    (np.mean(inference_time)))

        ran_from = nr_batches_test * batch_size
        ran_to = (nr_batches_test + 1) * batch_size
        size = testx[ran_from:ran_to].shape[0]
        fill = np.ones([batch_size - size, data.get_shape_input()[1]])

        batch = np.concatenate([testx[ran_from:ran_to], fill], axis=0)
        feed_dict = {input_pl: batch, is_training_pl: False}

        batch_score = sess.run(list_scores, feed_dict=feed_dict).tolist()

        scores += batch_score[:size]

        # Scores above the 80th percentile (the highest 20%) are labeled anomalous
        per = np.percentile(scores, 80)

        y_pred = scores.copy()
        y_pred = np.array(y_pred)

        inds = (y_pred < per)
        inds_comp = (y_pred >= per)

        y_pred[inds] = 0
        y_pred[inds_comp] = 1

        precision, recall, f1, _ = precision_recall_fscore_support(
            testy, y_pred, average='binary')

        print("Testing(%d) : Prec = %.4f | Rec = %.4f | F1 = %.4f " %
              (nc, precision, recall, f1))
        content = "Testing(%d) : Prec = %.4f | Rec = %.4f | F1 = %.4f " % (
            nc, precision, recall, f1)
        with open('./output.txt', 'a') as out_file:
            out_file.write(content + "\r\n")
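
These examples call a get_getter helper that is not defined in the listing. A minimal sketch of such a helper, consistent with how it is used above (an assumption, not the source's own code), returns a TF1 custom getter that substitutes a variable's exponential moving average when one has been registered:

def get_getter(ema):
    """Custom getter that swaps variables for their EMA shadow copies."""
    def ema_getter(getter, name, *args, **kwargs):
        var = getter(name, *args, **kwargs)   # the original variable
        ema_var = ema.average(var)            # its moving average, if applied
        return ema_var if ema_var is not None else var
    return ema_getter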
Example #2
def train_and_test(nb_epochs, weight, method, degree, random_seed):
    """ Runs the Bigan on the KDD dataset

    Note:
        Saves summaries on tensorboard. To display them, please use cmd line
        tensorboard --logdir=model.training_logdir() --port=number
    Args:
        nb_epochs (int): number of epochs
        weight (float, optional): weight for the anomaly score composition
        method (str, optional): 'fm' for ``Feature Matching`` or "cross-e"
                                     for ``cross entropy``, "efm" etc.
        anomalous_label (int): int in range 0 to 10, is the class/digit
                                which is considered outlier
    """
    logger = logging.getLogger("GAN.train.kdd.{}".format(method))

    # Placeholders
    input_pl = tf.placeholder(tf.float32,
                              shape=data.get_shape_input(),
                              name="input")
    is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
    learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")

    # Data
    trainx, trainy = data.get_train()
    trainx_copy = trainx.copy()
    testx, testy = data.get_test()

    # Parameters
    starting_lr = network.learning_rate
    batch_size = network.batch_size
    latent_dim = network.latent_dim
    ema_decay = 0.9999

    rng = np.random.RandomState(RANDOM_SEED)
    nr_batches_train = int(trainx.shape[0] / batch_size)
    nr_batches_test = int(testx.shape[0] / batch_size)

    logger.info('Building training graph...')

    logger.warning("The GAN is training with the following parameters:")
    display_parameters(batch_size, starting_lr, ema_decay, weight, method,
                       degree)

    gen = network.generator
    dis = network.discriminator

    # Sample noise from random normal distribution
    random_z = tf.random_normal([batch_size, latent_dim],
                                mean=0.0,
                                stddev=1.0,
                                name='random_z')
    # Generate images with generator
    generator = gen(random_z, is_training=is_training_pl)
    # Pass real and fake images into discriminator separately
    real_d, inter_layer_real = dis(input_pl, is_training=is_training_pl)
    fake_d, inter_layer_fake = dis(generator,
                                   is_training=is_training_pl,
                                   reuse=True)

    with tf.name_scope('loss_functions'):
        # Calculate separate losses for the discriminator on real and fake samples
        real_discriminator_loss = tf.losses.sigmoid_cross_entropy(
            tf.constant(1, shape=[batch_size]),
            real_d,
            scope='real_discriminator_loss')
        fake_discriminator_loss = tf.losses.sigmoid_cross_entropy(
            tf.constant(0, shape=[batch_size]),
            fake_d,
            scope='fake_discriminator_loss')
        # Add discriminator losses
        discriminator_loss = real_discriminator_loss + fake_discriminator_loss
        # Calculate loss for generator by flipping label on discriminator output
        generator_loss = tf.losses.sigmoid_cross_entropy(
            tf.constant(1, shape=[batch_size]), fake_d, scope='generator_loss')

    with tf.name_scope('optimizers'):
        # control op dependencies for batch norm and trainable variables
        dvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope='discriminator')
        gvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope='generator')

        update_ops_gen = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                           scope='generator')
        update_ops_dis = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                           scope='discriminator')

        optimizer_dis = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='dis_optimizer')
        optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='gen_optimizer')

        with tf.control_dependencies(
                update_ops_gen):  # attached op for moving average batch norm
            gen_op = optimizer_gen.minimize(generator_loss, var_list=gvars)
        with tf.control_dependencies(update_ops_dis):
            dis_op = optimizer_dis.minimize(discriminator_loss, var_list=dvars)

        dis_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_dis = dis_ema.apply(dvars)

        with tf.control_dependencies([dis_op]):
            train_dis_op = tf.group(maintain_averages_op_dis)

        gen_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_gen = gen_ema.apply(gvars)

        with tf.control_dependencies([gen_op]):
            train_gen_op = tf.group(maintain_averages_op_gen)

    with tf.name_scope('training_summary'):
        with tf.name_scope('dis_summary'):
            tf.summary.scalar('real_discriminator_loss',
                              real_discriminator_loss, ['dis'])
            tf.summary.scalar('fake_discriminator_loss',
                              fake_discriminator_loss, ['dis'])
            tf.summary.scalar('discriminator_loss', discriminator_loss,
                              ['dis'])

        with tf.name_scope('gen_summary'):
            tf.summary.scalar('loss_generator', generator_loss, ['gen'])

        sum_op_dis = tf.summary.merge_all('dis')
        sum_op_gen = tf.summary.merge_all('gen')

    logger.info('Building testing graph...')

    with tf.variable_scope("latent_variable"):
        z_optim = tf.get_variable(
            name='z_optim',
            shape=[batch_size, latent_dim],
            initializer=tf.truncated_normal_initializer())
        reinit_z = z_optim.initializer
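    # Test-time inversion: z_optim is optimized per test batch so that
    # gen(z_optim) reconstructs the input; the reinit_* ops (grouped into
    # reinit_test_graph_op below) reset the latent, the step counter and the
    # Adam state between batches.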
    # EMA
    generator_ema = gen(z_optim,
                        is_training=is_training_pl,
                        getter=get_getter(gen_ema),
                        reuse=True)
    # Pass real and fake images into discriminator separately
    real_d_ema, inter_layer_real_ema = dis(input_pl,
                                           is_training=is_training_pl,
                                           getter=get_getter(dis_ema),
                                           reuse=True)
    fake_d_ema, inter_layer_fake_ema = dis(generator_ema,
                                           is_training=is_training_pl,
                                           getter=get_getter(dis_ema),
                                           reuse=True)

    with tf.name_scope('error_loss'):
        delta = input_pl - generator_ema
        delta_flat = tf.contrib.layers.flatten(delta)
        gen_score = tf.norm(delta_flat,
                            ord=degree,
                            axis=1,
                            keep_dims=False,
                            name='epsilon')

    with tf.variable_scope('Discriminator_loss'):
        if method == "cross-e":
            dis_score = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(fake_d_ema), logits=fake_d_ema)

        elif method == "fm":
            fm = inter_layer_real_ema - inter_layer_fake_ema
            fm = tf.contrib.layers.flatten(fm)
            dis_score = tf.norm(fm,
                                ord=degree,
                                axis=1,
                                keep_dims=False,
                                name='d_loss')

        dis_score = tf.squeeze(dis_score)

    with tf.variable_scope('Total_loss'):
        loss = (1 - weight) * gen_score + weight * dis_score

    with tf.variable_scope("Test_learning_rate"):
        step = tf.Variable(0, trainable=False)
        boundaries = [300, 400]
        values = [0.01, 0.001, 0.0005]
        learning_rate_invert = tf.train.piecewise_constant(
            step, boundaries, values)
        reinit_lr = tf.variables_initializer(
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                              scope="Test_learning_rate"))

    with tf.name_scope('Test_optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate_invert).minimize(
            loss, global_step=step, var_list=[z_optim], name='optimizer')
        reinit_optim = tf.variables_initializer(
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                              scope='Test_optimizer'))

    reinit_test_graph_op = [reinit_z, reinit_lr, reinit_optim]

    with tf.name_scope("Scores"):
        list_scores = loss

    logdir = create_logdir(method, weight, random_seed)

    sv = tf.train.Supervisor(logdir=logdir,
                             save_summaries_secs=None,
                             save_model_secs=120)

    logger.info('Start training...')
    with sv.managed_session() as sess:

        logger.info('Initialization done')

        writer = tf.summary.FileWriter(logdir, sess.graph)

        train_batch = 0
        epoch = 0

        while not sv.should_stop() and epoch < nb_epochs:

            lr = starting_lr

            begin = time.time()
            trainx = trainx[rng.permutation(
                trainx.shape[0])]  # shuffling unl dataset
            trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]

            train_loss_dis, train_loss_gen = [0, 0]
            # training
            for t in range(nr_batches_train):
                display_progression_epoch(t, nr_batches_train)

                # construct randomly permuted minibatches
                ran_from = t * batch_size
                ran_to = (t + 1) * batch_size

                # train discriminator
                feed_dict = {
                    input_pl: trainx[ran_from:ran_to],
                    is_training_pl: True,
                    learning_rate: lr
                }
                _, ld, sm = sess.run(
                    [train_dis_op, discriminator_loss, sum_op_dis],
                    feed_dict=feed_dict)
                train_loss_dis += ld
                writer.add_summary(sm, train_batch)

                # train generator
                feed_dict = {
                    input_pl: trainx_copy[ran_from:ran_to],
                    is_training_pl: True,
                    learning_rate: lr
                }
                _, lg, sm = sess.run(
                    [train_gen_op, generator_loss, sum_op_gen],
                    feed_dict=feed_dict)
                train_loss_gen += lg
                writer.add_summary(sm, train_batch)

                train_batch += 1

            train_loss_gen /= nr_batches_train
            train_loss_dis /= nr_batches_train

            logger.info('Epoch terminated')
            print(
                "Epoch %d | time = %ds | loss gen = %.4f | loss dis = %.4f " %
                (epoch, time.time() - begin, train_loss_gen, train_loss_dis))

            epoch += 1

        logger.warning('Testing evaluation...')
        inds = rng.permutation(testx.shape[0])
        testx = testx[inds]  # shuffling unl dataset
        testy = testy[inds]
        scores = []
        inference_time = []

        # Testing
        for t in range(nr_batches_test):

            # construct randomly permuted minibatches
            ran_from = t * batch_size
            ran_to = (t + 1) * batch_size
            begin_val_batch = time.time()

            # invert the gan
            feed_dict = {
                input_pl: testx[ran_from:ran_to],
                is_training_pl: False
            }

            for step in range(STEPS_NUMBER):
                _ = sess.run(optimizer, feed_dict=feed_dict)
            scores += sess.run(list_scores, feed_dict=feed_dict).tolist()
            inference_time.append(time.time() - begin_val_batch)
            sess.run(reinit_test_graph_op)

        logger.info('Testing : mean inference time is %.4f' %
                    (np.mean(inference_time)))
        ran_from = nr_batches_test * batch_size
        ran_to = (nr_batches_test + 1) * batch_size
        size = testx[ran_from:ran_to].shape[0]
        fill = np.ones([batch_size - size, 121])

        batch = np.concatenate([testx[ran_from:ran_to], fill], axis=0)
        feed_dict = {input_pl: batch, is_training_pl: False}

        for step in range(STEPS_NUMBER):
            _ = sess.run(optimizer, feed_dict=feed_dict)
        batch_score = sess.run(list_scores, feed_dict=feed_dict).tolist()

        scores += batch_score[:size]

        per = np.percentile(scores, 80)

        y_pred = scores.copy()
        y_pred = np.array(y_pred)

        inds = (y_pred < per)
        inds_comp = (y_pred >= per)

        y_pred[inds] = 0
        y_pred[inds_comp] = 1

        precision, recall, f1, _ = precision_recall_fscore_support(
            testy, y_pred, average='binary')
        print("Testing : Prec = %.4f | Rec = %.4f | F1 = %.4f " %
              (precision, recall, f1))
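
Both examples above turn the raw anomaly scores into binary predictions by thresholding at a fixed percentile and scoring against the ground truth. A compact sketch of that step (hypothetical helper name, same logic as the code above):

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

def evaluate_scores(scores, y_true, percentile=80):
    # Flag the highest (100 - percentile)% of scores as anomalous (label 1)
    # and report binary precision, recall and F1.
    scores = np.asarray(scores)
    threshold = np.percentile(scores, percentile)
    y_pred = (scores >= threshold).astype(int)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='binary')
    return precision, recall, f1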
Example #3
def train_and_test(nb_epochs, weight, method, degree, random_seed):
    """ Runs the Bigan on the KDD dataset

    Note:
        Saves summaries on tensorboard. To display them, please use cmd line
        tensorboard --logdir=model.training_logdir() --port=number
    Args:
        nb_epochs (int): number of epochs
        weight (float, optional): weight for the anomaly score composition
        method (str, optional): 'fm' for ``Feature Matching`` or "cross-e"
                                     for ``cross entropy``, "efm" etc.
        anomalous_label (int): int in range 0 to 10, is the class/digit
                                which is considered outlier
    """
    logger = logging.getLogger("BiGAN.train.kdd.{}".format(method))

    # Placeholders
    input_pl = tf.placeholder(tf.float32,
                              shape=data.get_shape_input(),
                              name="input")
    is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
    learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")

    # Data
    trainx, trainy = data.get_train()
    trainx_copy = trainx.copy()
    testx, testy = data.get_test()

    # Parameters
    starting_lr_gen = network.learning_rate_gen
    starting_lr_gen_mine = network.learning_rate_gen_mine
    starting_lr = network.learning_rate
    batch_size = network.batch_size
    latent_dim = network.latent_dim
    ema_decay = 0.9999

    rng = np.random.RandomState(RANDOM_SEED)
    nr_batches_train = int(trainx.shape[0] / batch_size)
    nr_batches_test = int(testx.shape[0] / batch_size)

    logger.info('Building training graph...')

    logger.warning("The BiGAN is training with the following parameters:")
    display_parameters(batch_size, starting_lr, ema_decay, weight, method,
                       degree)

    gen = network.decoder
    enc = network.encoder
    dis = network.discriminator

    with tf.variable_scope('encoder_model'):
        z_gen = enc(input_pl, is_training=is_training_pl)

    with tf.variable_scope('generator_model'):
        z = tf.random_normal([batch_size, latent_dim])
        x_gen = gen(z, is_training=is_training_pl)

    with tf.variable_scope('discriminator_model'):
        l_encoder, inter_layer_inp = dis(z_gen,
                                         input_pl,
                                         is_training=is_training_pl)
        l_generator, inter_layer_rct = dis(z,
                                           x_gen,
                                           is_training=is_training_pl,
                                           reuse=True)

    with tf.name_scope('loss_functions'):
        # discriminator
        loss_dis_enc = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(l_encoder), logits=l_encoder))
        loss_dis_gen = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(l_generator), logits=l_generator))
        loss_discriminator = loss_dis_gen + loss_dis_enc

        # generator
        # loss_generator1 = tf.reduce_mean(tf.keras.losses.MSLE(input_pl,x_gen))
        loss_generator1 = tf.reduce_mean(tf.keras.losses.MSE(input_pl, x_gen))
        # loss_generator1 = tf.reduce_mean(tf.keras.losses.KLD(input_pl,x_gen))
        # loss_generator1 = tf.reduce_mean(-(x_gen.log_prob(input_pl)))  # would this work if the layers were made probabilistic?
        loss_generator = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(l_generator), logits=l_generator))
        # loss_generator = loss_generator1

        # encoder
        loss_encoder = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(l_encoder), logits=l_encoder))

    with tf.name_scope('optimizers'):
        # control op dependencies for batch norm and trainable variables
        tvars = tf.trainable_variables()
        dvars = [var for var in tvars if 'discriminator_model' in var.name]
        gvars = [var for var in tvars if 'generator_model' in var.name]
        evars = [var for var in tvars if 'encoder_model' in var.name]

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_ops_gen = [
            x for x in update_ops if ('generator_model' in x.name)
        ]
        update_ops_enc = [x for x in update_ops if ('encoder_model' in x.name)]
        update_ops_dis = [
            x for x in update_ops if ('discriminator_model' in x.name)
        ]

        optimizer_dis = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='dis_optimizer')
        optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='gen_optimizer')
        optimizer_enc = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               beta1=0.5,
                                               name='enc_optimizer')
        optimizer_gen1 = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                                beta1=0.5,
                                                name='gen_optimizer1')

        with tf.control_dependencies(update_ops_gen):
            gen_op = optimizer_gen.minimize(loss_generator, var_list=gvars)

        with tf.control_dependencies(update_ops_gen):
            gen_op1 = optimizer_gen1.minimize(loss_generator1, var_list=gvars)

        with tf.control_dependencies(update_ops_enc):
            enc_op = optimizer_enc.minimize(loss_encoder, var_list=evars)

        with tf.control_dependencies(update_ops_dis):
            dis_op = optimizer_dis.minimize(loss_discriminator, var_list=dvars)

        # Exponential Moving Average for estimation
        dis_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_dis = dis_ema.apply(dvars)

        with tf.control_dependencies([dis_op]):
            train_dis_op = tf.group(maintain_averages_op_dis)

        gen_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_gen = gen_ema.apply(gvars)

        with tf.control_dependencies([gen_op]):
            train_gen_op = tf.group(maintain_averages_op_gen)

        with tf.control_dependencies([gen_op1]):
            train_gen_op1 = tf.group(maintain_averages_op_gen)

        enc_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
        maintain_averages_op_enc = enc_ema.apply(evars)

        with tf.control_dependencies([enc_op]):
            train_enc_op = tf.group(maintain_averages_op_enc)

    with tf.name_scope('summary'):
        with tf.name_scope('dis_summary'):
            tf.summary.scalar('loss_discriminator', loss_discriminator,
                              ['dis'])
            tf.summary.scalar('loss_dis_encoder', loss_dis_enc, ['dis'])
            tf.summary.scalar('loss_dis_gen', loss_dis_gen, ['dis'])

        with tf.name_scope('gen_summary'):
            tf.summary.scalar('loss_generator', loss_generator, ['gen'])
            tf.summary.scalar('loss_generator1', loss_generator1, ['gen1'])
            tf.summary.scalar('loss_encoder', loss_encoder, ['gen'])

        sum_op_dis = tf.summary.merge_all('dis')
        sum_op_gen = tf.summary.merge_all('gen')
        sum_op_gen1 = tf.summary.merge_all('gen1')

    logger.info('Building testing graph...')

    with tf.variable_scope('encoder_model'):
        z_gen_ema = enc(input_pl,
                        is_training=is_training_pl,
                        getter=get_getter(enc_ema),
                        reuse=True)

    with tf.variable_scope('generator_model'):
        reconstruct_ema = gen(z_gen_ema,
                              is_training=is_training_pl,
                              getter=get_getter(gen_ema),
                              reuse=True)

    with tf.variable_scope('discriminator_model'):
        l_encoder_ema, inter_layer_inp_ema = dis(z_gen_ema,
                                                 input_pl,
                                                 is_training=is_training_pl,
                                                 getter=get_getter(dis_ema),
                                                 reuse=True)
        l_generator_ema, inter_layer_rct_ema = dis(z_gen_ema,
                                                   reconstruct_ema,
                                                   is_training=is_training_pl,
                                                   getter=get_getter(dis_ema),
                                                   reuse=True)
    with tf.name_scope('Testing'):
        with tf.variable_scope('Reconstruction_loss'):
            delta = input_pl - reconstruct_ema
            delta_flat = tf.contrib.layers.flatten(delta)
            gen_score = tf.norm(delta_flat,
                                ord=degree,
                                axis=1,
                                keep_dims=False,
                                name='epsilon')

        with tf.variable_scope('Discriminator_loss'):
            if method == "cross-e":
                dis_score = tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=tf.ones_like(l_generator_ema),
                    logits=l_generator_ema)

            elif method == "fm":
                fm = inter_layer_inp_ema - inter_layer_rct_ema
                fm = tf.contrib.layers.flatten(fm)
                dis_score = tf.norm(fm,
                                    ord=degree,
                                    axis=1,
                                    keep_dims=False,
                                    name='d_loss')

            dis_score = tf.squeeze(dis_score)

        with tf.variable_scope('Score'):
            # remove the leading 0 factor and use `weight` instead of 1 on the dis score
            list_scores = (1 - weight) * gen_score + weight * dis_score

    logdir = create_logdir(weight, method, random_seed)

    sv = tf.train.Supervisor(logdir=logdir,
                             save_summaries_secs=None,
                             save_model_secs=120)

    logger.info('Start training...')
    with sv.managed_session() as sess:

        logger.info('Initialization done')
        writer = tf.summary.FileWriter(logdir, sess.graph)
        train_batch = 0
        epoch = 0

        lr_gen_mine = starting_lr_gen_mine

        tt = []
        xx = time.time()
        while not sv.should_stop() and epoch < nb_epochs:

            lr = starting_lr
            lr_gen = starting_lr_gen
            lr_gen_mine = starting_lr_gen_mine
            begin = time.time()

            # construct randomly permuted minibatches
            trainx = trainx[rng.permutation(
                trainx.shape[0])]  # shuffling dataset
            trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]
            train_loss_dis, train_loss_gen, train_loss_enc, train_loss_gen1 = [
                0, 0, 0, 0
            ]

            print('newloss running')

            # training

            zz = []
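            # Epoch 0: pretrain the generator for 100 passes over the training
            # data with the MSE reconstruction loss (loss_generator1) before
            # adversarial training starts at epoch 1.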
            if epoch == 0:
                xy = time.time()
                trainx = trainx[rng.permutation(
                    trainx.shape[0])]  # shuffling dataset
                trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]
                train_loss_gen1 = 0
                for k in range(100):
                    kk = time.time()
                    for t in range(nr_batches_train):

                        #display_progression_epoch(t, nr_batches_train)
                        ran_from = t * batch_size
                        ran_to = (t + 1) * batch_size
                        #
                        # pretrain generator
                        feed_dict = {
                            input_pl: trainx_copy[ran_from:ran_to],
                            is_training_pl: True,
                            learning_rate: lr_gen_mine
                        }
                        _, lg1, sm1 = sess.run(
                            [train_gen_op1, loss_generator1, sum_op_gen1],
                            feed_dict=feed_dict)
                    zz.extend([time.time() - kk])
                    print(k)
                print('mean :', np.mean(zz))
                print('std :', np.std(zz))

                print(time.time() - xy)

            if epoch > 0:
                for t in range(nr_batches_train):

                    display_progression_epoch(t, nr_batches_train)
                    ran_from = t * batch_size
                    ran_to = (t + 1) * batch_size

                    # train discriminator
                    feed_dict = {
                        input_pl: trainx[ran_from:ran_to],
                        is_training_pl: True,
                        learning_rate: lr
                    }

                    _, ld, sm = sess.run(
                        [train_dis_op, loss_discriminator, sum_op_dis],
                        feed_dict=feed_dict)

                    train_loss_dis += ld
                    writer.add_summary(sm, train_batch)

                    # train generator and encoder

                    feed_dict = {
                        input_pl: trainx_copy[ran_from:ran_to],
                        is_training_pl: True,
                        learning_rate: lr_gen
                    }

                    _, _, le, lg, sm = sess.run([
                        train_gen_op, train_enc_op, loss_encoder,
                        loss_generator, sum_op_gen
                    ],
                                                feed_dict=feed_dict)

                    # mine

                    feed_dict = {
                        input_pl: trainx_copy[ran_from:ran_to],
                        is_training_pl: True,
                        learning_rate: lr_gen_mine
                    }
                    _, lg1, sm1 = sess.run(
                        [train_gen_op1, loss_generator1, sum_op_gen1],
                        feed_dict=feed_dict)

                    #lg1 = 0

                    train_loss_gen1 += lg1
                    train_loss_gen += lg
                    train_loss_enc += le
                    writer.add_summary(sm, train_batch)

                    train_batch += 1

            train_loss_gen /= nr_batches_train
            train_loss_gen1 /= nr_batches_train
            train_loss_enc /= nr_batches_train
            train_loss_dis /= nr_batches_train

            print(
                "Epoch %d | time = %.4fs | loss gen = %.4f | loss gen1 = %.4f| loss enc = %.4f | loss dis = %.4f "
                % (epoch, time.time() - begin, train_loss_gen, train_loss_gen1,
                   train_loss_enc, train_loss_dis))
            tt.extend([time.time() - begin])
            epoch += 1
        print(time.time() - xx)
        print('mean :', np.mean(tt))
        print('std :', np.std(tt))
        logger.warning('Testing evaluation...')

        inds = rng.permutation(testx.shape[0])
        testx = testx[inds]  # shuffling  dataset
        testy = testy[inds]  # shuffling  dataset
        scores = []
        inference_time = []

        # Create scores
        for t in range(nr_batches_test):

            # construct randomly permuted minibatches
            ran_from = t * batch_size
            ran_to = (t + 1) * batch_size
            begin_val_batch = time.time()

            feed_dict = {
                input_pl: testx[ran_from:ran_to],
                is_training_pl: False
            }

            scores += sess.run(list_scores, feed_dict=feed_dict).tolist()
            inference_time.append(time.time() - begin_val_batch)

        logger.info('Testing : mean inference time is %.4f' %
                    (np.mean(inference_time)))

        ran_from = nr_batches_test * batch_size
        ran_to = (nr_batches_test + 1) * batch_size
        size = testx[ran_from:ran_to].shape[0]
        fill = np.ones([batch_size - size, 39])

        batch = np.concatenate([testx[ran_from:ran_to], fill], axis=0)
        feed_dict = {input_pl: batch, is_training_pl: False}

        batch_score = sess.run(list_scores, feed_dict=feed_dict).tolist()

        scores += batch_score[:size]

        # Scores below the median (50th percentile) are labeled anomalous (1)
        per = np.percentile(scores, 50)

        res = pd.concat([pd.Series(scores), pd.Series(testy)], axis=1)
        #res.to_csv('/Users/oguzkaplan/Documents/repo/thesis/notebooks/bigan_kdd_scores_newloss_method1_corr_32_egbad_like.csv')

        y_pred = scores.copy()
        y_pred = np.array(y_pred)

        inds = (y_pred < per)
        inds_comp = (y_pred >= per)

        y_pred[inds] = 1
        y_pred[inds_comp] = 0

        precision, recall, f1, _ = precision_recall_fscore_support(
            testy, y_pred, average='binary')
        from sklearn.metrics import accuracy_score
        acc = accuracy_score(testy, y_pred)

        print("Testing : Prec = %.4f | Rec = %.4f | F1 = %.4f | Acc = %.4f" %
              (precision, recall, f1, acc))
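
Each example scores the leftover test samples by padding the final, incomplete batch with ones and keeping only the first size results. A small sketch of that pattern (hypothetical helper, assuming a 2-D test matrix):

import numpy as np

def pad_last_batch(x_tail, batch_size):
    # Pad the incomplete final batch with ones so a fixed-size placeholder can
    # be fed; the caller keeps only the first `size` returned scores.
    size = x_tail.shape[0]
    fill = np.ones([batch_size - size, x_tail.shape[1]])
    return np.concatenate([x_tail, fill], axis=0), size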
Example #4

args = parse_args()
#################################################
# Create Directories
#################################################
root = Path(args.save_path)
if root.exists():
    os.system("rm -rf %s" % str(root))

root.mkdir()
(root / "models").mkdir()
writer = SummaryWriter(str(root))
#################################################
train_set = get_train()[0]
test_set = get_test()

netG = Generator(args.z_dim).cuda()
netE = EnergyModel().cuda()
netH = StatisticsNetwork(args.z_dim).cuda()

params = {"lr": 1e-4, "betas": (0.5, 0.9)}
optimizerE = torch.optim.Adam(netE.parameters(), **params)
optimizerG = torch.optim.Adam(netG.parameters(), **params)
optimizerH = torch.optim.Adam(netH.parameters(), **params)

##################################################

start_time = time.time()
e_costs = []
g_costs = []
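
The fragment stops before the training loop, where e_costs, g_costs and the SummaryWriter are presumably filled and flushed. A minimal sketch of how they might be consumed (an assumption, not the source's loop), averaging the accumulated costs and writing them as TensorBoard scalars:

import numpy as np

def log_costs(writer, e_costs, g_costs, iteration):
    # Average whatever has accumulated since the last call, log it, and reset.
    if e_costs:
        writer.add_scalar("loss/energy", float(np.mean(e_costs)), iteration)
    if g_costs:
        writer.add_scalar("loss/generator", float(np.mean(g_costs)), iteration)
    e_costs.clear()
    g_costs.clear()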