Example #1
File: test.py Project: davidath/evitrac
def px(pae_dict):
    saver = tf.train.Saver()

    # P(x) Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model
        saver.restore(sess, pxfinestr.split('.meta')[0])

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess,
                                        pae_dict['conv_in'],
                                        XX_full,
                                        pae_dict['conv_z'],
                                        batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess,
                                        pae_dict['sda_in'],
                                        XX_full,
                                        pae_dict['sda_hidden'],
                                        batch_size=batch_size)

        # Print clustering ACC
        utils.log_accuracy(cp, YY_full, px_Z_latent, 'PX - ACC FULL', SEED)

        # Print clustering NMI
        utils.log_NMI(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)

        # Print clustering CHS score
        utils.log_CHS(cp, XX_full, px_Z_latent, 'PX - CHS FULL', SEED)

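The utils.run_OOM helper is not shown in these examples. Judging by its call sites (a session, an input placeholder, the full dataset, an output tensor, and a batch size), it presumably evaluates the output tensor in batches so the whole dataset never has to fit on the GPU at once. A minimal sketch under that assumption:

import numpy as np

def run_OOM(sess, in_tensor, data, out_tensor, batch_size):
    # Hypothetical re-implementation: feed the data in batch_size chunks
    # and concatenate the per-batch outputs on the host.
    outs = [sess.run(out_tensor, feed_dict={in_tensor: data[i:i + batch_size]})
            for i in range(0, len(data), batch_size)]
    return np.concatenate(outs, axis=0)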
Example #2
def evitram(evitram_dict):
    saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model
        saver.restore(sess, evitramfinestr.split('.meta')[0])

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess,
                                        evitram_dict['conv_in'],
                                        XX_full,
                                        evitram_dict['conv_z'],
                                        batch_size=batch_size)
            # Save latent space
            utils.save_OOM(sess,
                           evitram_dict['conv_in'],
                           XX_full,
                           evitram_dict['conv_z'],
                           path='COND_' + cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)

            # Save reconstruction
            utils.save_OOM(sess,
                           evitram_dict['conv_in'],
                           XX_full,
                           evitram_dict['conv_out'],
                           path='COND_' +
                           cp.get('Experiment', 'PX_XREC_TRAIN'),
                           batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess,
                                        evitram_dict['sda_in'],
                                        XX_full,
                                        evitram_dict['sda_hidden'],
                                        batch_size=batch_size)

            utils.save_OOM(sess,
                           evitram_dict['sda_in'],
                           XX_full,
                           evitram_dict['sda_hidden'],
                           path='COND_' + cp.get('Experiment', 'PX_Z_FULL'),
                           batch_size=batch_size)

        # Print clustering ACC
        utils.log_accuracy(cp, YY_full, px_Z_latent, 'COND - ACC FULL', SEED)

        # Print clustering NMI
        utils.log_NMI(cp, YY_full, px_Z_latent, 'COND - NMI FULL', SEED)

        # Print clustering CHS score
        utils.log_CHS(cp, XX_full, px_Z_latent, 'COND - CHS FULL', SEED)

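utils.save_OOM appears to be the persisting twin of run_OOM: the same batched evaluation, but the result is written to the path built from the config instead of being returned. A sketch, assuming NumPy's .npy format (the saved evidence representations are later loaded with np.load in Example #6):

import numpy as np

def save_OOM(sess, in_tensor, data, out_tensor, path, batch_size):
    # Hypothetical: batched evaluation followed by a single np.save.
    outs = [sess.run(out_tensor, feed_dict={in_tensor: data[i:i + batch_size]})
            for i in range(0, len(data), batch_size)]
    np.save(path, np.concatenate(outs, axis=0))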
Example #3
    def train_KT_AT(self):
        # Track test accuracy over training; tqdm provides the progress bar
        accuracy_dict = {}

        for epoch in tqdm(range(self.num_epochs), total=self.num_epochs):
            self.student_model.train()
            self.train()

            if epoch % self.log_num == 0:
                acc = self.test()
                accuracy_dict[epoch] = acc
                utils.log_accuracy("KD_AT.csv", accuracy_dict)
                print(f"\nAccuracy: {acc:05.3f}")
                self.save_model()

            self.scheduler.step()

        utils.plot_accuracy("KD_AT.csv")
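Note that utils.log_accuracy has a different signature here than in the TensorFlow examples: a CSV filename and an epoch-to-accuracy dict rather than a config and label arrays. A plausible minimal implementation of this variant, assuming it simply rewrites the CSV each time:

import csv

def log_accuracy(path, accuracy_dict):
    # Hypothetical: persist the epoch -> accuracy mapping as a two-column CSV.
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['epoch', 'accuracy'])
        for epoch, acc in sorted(accuracy_dict.items()):
            writer.writerow([epoch, acc])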
Example #4
    def train(self):
        best_acc = 0
        accuracy_dict = {}

        for batch in tqdm(range(self.num_epochs)):

            for _ in range(self.ng):
                self.generator_optimizer.zero_grad()
                # sample Gaussian noise for the generator input
                z = torch.randn((128, 100)).to(self.device)

                # get generator output (pseudo data points)
                pseudo_datapoint = self.generator(z)

                # student/teacher forward passes return (logits, attn1, attn2, attn3);
                # only the logits feed the generator objective
                student_logits = self.student_model(pseudo_datapoint)[0]
                teacher_logits = self.teacher_model(pseudo_datapoint)[0]

                generator_loss = -(utils.KL_Loss(student_logits, teacher_logits))
                generator_loss.backward()

                # clip gradients, then update the generator
                torch.nn.utils.clip_grad_norm_(self.generator.parameters(), 5)
                self.generator_optimizer.step()

            # stop gradients flowing back into the generator during student updates
            pseudo_datapoint = pseudo_datapoint.detach()

            with torch.no_grad():
                teacher_outputs = self.teacher_model(pseudo_datapoint)

            for _ in range(self.ns):
                self.student_optimizer.zero_grad()

                # full student outputs (logits + attention maps) for the KD loss
                student_outputs = self.student_model(pseudo_datapoint)

                student_loss = utils.student_loss_zero_shot(student_outputs, teacher_outputs)

                student_loss.backward()
                # clip gradients, then update the student
                torch.nn.utils.clip_grad_norm_(self.student_model.parameters(), 5)
                self.student_optimizer.step()

            if (batch + 1) % self.log_num == 0 or (batch + 1) == self.num_epochs:
                acc = self.test()

                print(f"\nAccuracy: {acc:05.3f}")
                print(f'Student Loss: {student_loss:05.3f}')
                utils.writeMetrics({"accuracy": acc}, self.acc_counter)
                accuracy_dict[batch] = acc
                utils.log_accuracy("zero_shot.csv", accuracy_dict)
                self.acc_counter += 1
                self.save_model()

                if acc > best_acc:
                    best_acc = acc

            utils.writeMetrics({"Student Loss": student_loss, "Generator Loss": generator_loss}, self.counter)
            self.counter += 1
            self.cosine_annealing_generator.step()
            self.cosine_annealing_student.step()
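utils.KL_Loss is not shown. In zero-shot knowledge distillation the generator is trained to maximize teacher/student disagreement, which matches the negated KL term above. A sketch of the usual temperature-scaled KL objective (the temperature parameter is an assumption):

import torch.nn.functional as F

def KL_Loss(student_logits, teacher_logits, temperature=1.0):
    # KL(teacher || student) on softened distributions, as in standard KD.
    p_teacher = F.softmax(teacher_logits / temperature, dim=1)
    log_p_student = F.log_softmax(student_logits / temperature, dim=1)
    return F.kl_div(log_p_student, p_teacher, reduction='batchmean') * temperature ** 2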
Example #5
def px(pae_dict):
    # Initialize model save string
    pxmodelstr = pxfinestr.split('.meta')[0]

    # Variable initialization and saving
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    if cp.get('Experiment', 'PREFIX') == 'MNIST':
        # Tensorboard (comment / uncomment)
        ######################################################################

        from datetime import datetime

        now = datetime.utcnow().strftime("%m-%d_%H-%M:%S")
        root_logdir = cp.get('Experiment', 'ModelOutputPath')
        logdir = "{}/{}{}-{}/".format(
            root_logdir,
            cp.get('Experiment', 'PREFIX') + '_' +
            cp.get('Experiment', 'Enumber') + '_px', sys.argv[2], now)

        tf.summary.scalar(name='xrecon loss', tensor=pae_dict['px_mse'])
        summary = tf.summary.merge_all()
        file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
        ######################################################################

    # Start Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Initialize graph variables
        init.run()

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            train_dict = {
                'cp': cp,
                'sess': sess,
                'data': XX_full,
                'sumr': summary,
                'savestr': pxmodelstr,
                'saver': saver,
                'fw': file_writer
            }
        else:
            train_dict = {
                'cp': cp,
                'sess': sess,
                'data': XX_full,
                'savestr': pxmodelstr,
                'saver': saver,
                'ae_ids': ae_ids,
                'out_': out_
            }

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            ConvAE.train(train_dict, pae_dict)
        else:
            SAE.train(train_dict, pae_dict)

        # Get batch size for batch output save
        batch_size = cp.getint('Hyperparameters', 'BatchSize')

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess,
                                        pae_dict['conv_in'],
                                        XX_full,
                                        pae_dict['conv_z'],
                                        batch_size=batch_size)
        else:
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess,
                                        pae_dict['sda_in'],
                                        XX_full,
                                        pae_dict['sda_hidden'],
                                        batch_size=batch_size)
        #  utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
        #  pae_dict['conv_out'],
        #  path=cp.get('Experiment', 'PX_XREC_TRAIN'),
        #  batch_size=batch_size)

    # Print clustering ACC
    utils.log_accuracy(cp, YY_full, px_Z_latent, 'PX - ACC FULL', SEED)

    # Print clustering NMI
    utils.log_NMI(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)

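The recurring pxfinestr.split('.meta')[0] idiom strips the .meta suffix because a TF1 checkpoint is a family of files sharing one prefix, and Saver.restore/save expect that prefix, not a file name:

# A TF1 checkpoint 'model.ckpt' is stored as several files:
#   model.ckpt.meta, model.ckpt.index, model.ckpt.data-00000-of-00001
# Saver works with the common prefix, hence the '.meta' strip:
ckpt_prefix = 'out/model.ckpt.meta'.split('.meta')[0]  # -> 'out/model.ckpt'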
Example #6
def evitram():
    # Restore pretrained model
    restorestr = pxfinestr.split('.meta')[0]

    # Save model str
    evitramstr = evitramfinestr.split('.meta')[0]

    # Load pretrained evidence representations for all sources
    K = []
    for e in sys.argv[3:]:
        cp2 = utils.load_config(e)
        K.append(cp2.get('Experiment', 'PX_Z_TRAIN'))

    sect = 'Experiment'
    ev_paths = [cp.get(sect, i) for i in cp.options(sect) if 'evidence' in i]

    if cp.get('Experiment', 'PREFIX') == 'MNIST':
        evitram_dict = ConvAE.build_EviTRAM(cp, SEED)
    else:
        # Layerwise autoencoder number
        ae_ids = [str(i) for i in range(cp.getint('Experiment', 'AENUM'))]
        evitram_dict = SAE.build_EviTRAM(cp, ae_ids, SEED)

    # Get variables to restore from pretrained model P(x) Encoder
    var_list = tf.trainable_variables()

    # Exclude the evidence-specific layers from the restore list. Filtering
    # with a comprehension avoids removing items from a list while iterating
    # over it, which silently skips elements.
    for ev_path_id, ev_path in enumerate(ev_paths):
        excluded = ['Pre_Q' + str(ev_path_id) + '/kernel:0',
                    'Pre_Q' + str(ev_path_id) + '/bias:0']
        if cp.get('Experiment', 'PREFIX') != 'MNIST':
            excluded += ['Pre_Comp_Q' + str(ev_path_id) + '/kernel:0',
                         'Pre_Comp_Q' + str(ev_path_id) + '/bias:0']
        var_list = [v for v in var_list if v.name not in excluded]

    ##########################################################
    # Tensorboard (comment / uncomment)
    ##########################################################

    from datetime import datetime

    now = datetime.utcnow().strftime("%m-%d_%H-%M:%S")
    root_logdir = cp.get('Experiment', 'ModelOutputPath')
    logdir = "{}/{}{}-{}/".format(
        root_logdir,
        cp.get('Experiment', 'PREFIX') + '_' +
        cp.get('Experiment', 'Enumber') + '_cond', sys.argv[2], now)
    tf.summary.scalar(name='cond loss', tensor=evitram_dict['evitram_loss'])
    tf.summary.scalar(name='recon loss', tensor=evitram_dict['px_mse'])
    summary = tf.summary.merge_all()
    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

    ##########################################################

    # Initialize & restore P(x) AE weights
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(var_list)
    saverCOND = tf.train.Saver()

    # Task outcomes
    EV = [np.load(i) for i in K]

    # Start Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Init values
        init.run()
        # Restore finetuned model
        saver.restore(sess, restorestr)

        train_dict = {
            'cp': cp,
            'sess': sess,
            'data': XX_full,
            'sumr': summary,
            'savestr': evitramstr,
            'saver': saverCOND,
            'fw': file_writer,
            'EV': EV,
            'ev_paths': ev_paths
        }

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            ConvAE.evitram_train(train_dict, evitram_dict)
        else:
            SAE.evitram_train(train_dict, evitram_dict)

        # Get batch size for batch output save
        batch_size = train_dict['cp'].getint('Hyperparameters', 'BatchSize')

        if cp.get('Experiment', 'PREFIX') == 'MNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess,
                                        evitram_dict['conv_in'],
                                        XX_full,
                                        evitram_dict['conv_z'],
                                        batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess,
                                        evitram_dict['sda_in'],
                                        XX_full,
                                        evitram_dict['sda_hidden'],
                                        batch_size=batch_size)
        #  utils.save_OOM(sess, pae_dict['conv_in'], XX_full,
        #  pae_dict['conv_out'],
        #  path=cp.get('Experiment', 'PX_XREC_TRAIN'),
        #  batch_size=batch_size)

    # Print clustering ACC
    utils.log_accuracy(cp, YY_full, px_Z_latent, 'COND - ACC FULL', SEED)

    # Print clustering NMI
    utils.log_NMI(cp, YY_full, px_Z_latent, 'COND - NMI FULL', SEED)

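Passing a filtered var_list to tf.train.Saver is what makes the partial restore work: the first saver restores only the shared P(x) encoder weights, while saverCOND (built with no var_list, i.e. all variables) checkpoints the full conditional model. For instance, restoring just the variables under a hypothetical 'encoder/' scope would look like:

# Restore only variables under a hypothetical 'encoder/' scope;
# everything else keeps its fresh initialization.
encoder_vars = [v for v in tf.trainable_variables()
                if v.name.startswith('encoder/')]
encoder_saver = tf.train.Saver(var_list=encoder_vars)
# encoder_saver.restore(sess, ckpt_prefix)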
Example #7
def main_func(device, out_dir_results, random_seed, in_dir_data, batch_size,
              workers, num_epochs, num_classes, size_image):
    # create the output folder
    os.makedirs(out_dir_results, exist_ok=True)

    # fill padded area with ImageNet's mean pixel value converted to range [0, 255]
    max_padding = tv.transforms.Lambda(utils.pad_function)

    # transform images
    transforms_train = tv.transforms.Compose([
        max_padding,
        tv.transforms.RandomOrder([
            tv.transforms.RandomCrop((size_image, size_image)),
            tv.transforms.RandomHorizontalFlip()
        ]),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    transforms_eval = tv.transforms.Compose([
        max_padding,
        tv.transforms.CenterCrop((size_image, size_image)),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # instantiate dataset objects according to the pre-defined splits
    ds_train = utils.DatasetBirds(in_dir_data,
                                  transform=transforms_train,
                                  train=True)
    ds_val = utils.DatasetBirds(in_dir_data,
                                transform=transforms_eval,
                                train=True)
    ds_test = utils.DatasetBirds(in_dir_data,
                                 transform=transforms_eval,
                                 train=False)

    splits = skms.StratifiedShuffleSplit(n_splits=1,
                                         test_size=0.1,
                                         random_state=random_seed)
    idx_train, idx_val = next(
        splits.split(np.zeros(len(ds_train)), ds_train.targets))

    # set hyper-parameters
    params = {'batch_size': batch_size, 'num_workers': workers}

    # instantiate data loaders
    train_loader = td.DataLoader(dataset=ds_train,
                                 sampler=td.SubsetRandomSampler(idx_train),
                                 **params)
    val_loader = td.DataLoader(dataset=ds_val,
                               sampler=td.SubsetRandomSampler(idx_val),
                               **params)
    test_loader = td.DataLoader(dataset=ds_test, **params)

    model_desc = utils.get_model_desc(num_classes=num_classes, pretrained=True)

    # instantiate the model
    model = tv.models.resnet50(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    model.to(device)

    # instantiate optimizer and scheduler
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)

    # train and validate the model
    best_snapshot_path = None
    val_acc_avg = list()
    best_val_acc = -1.0

    for epoch in range(num_epochs):

        # train the model
        model.train()
        train_loss = list()
        for batch in train_loader:
            x, y = batch

            x = x.to(device)
            y = y.to(device)

            optimizer.zero_grad()

            # compute predictions
            y_pred = model(x)

            # calculate the loss
            loss = F.cross_entropy(y_pred, y)

            # backprop & update weights
            loss.backward()
            optimizer.step()

            train_loss.append(loss.item())

        # validate the model
        model.eval()
        val_loss = list()
        val_acc = list()
        with torch.no_grad():
            for batch in val_loader:
                x, y = batch

                x = x.to(device)
                y = y.to(device)

                # predict bird species
                y_pred = model(x)

                # calculate the loss
                loss = F.cross_entropy(y_pred, y)

                # calculate the accuracy
                acc = skm.accuracy_score(
                    [val.item() for val in y],
                    [val.item() for val in y_pred.argmax(dim=-1)])

                val_loss.append(loss.item())
                val_acc.append(acc)

            val_acc_avg.append(np.mean(val_acc))

            # save the best model snapshot
            current_val_acc = val_acc_avg[-1]
            if current_val_acc > best_val_acc:
                if best_snapshot_path is not None:
                    os.remove(best_snapshot_path)

                best_val_acc = current_val_acc
                best_snapshot_path = os.path.join(
                    out_dir_results,
                    f'model_{model_desc}_ep={epoch}_acc={best_val_acc}.pt')

                torch.save(model.state_dict(), best_snapshot_path)

        # adjust the learning rate
        scheduler.step()

        # print performance metrics
        print('Epoch {} |> Train. loss: {:.4f} | Val. loss: {:.4f}'.format(
            epoch + 1, np.mean(train_loss), np.mean(val_loss)))

    # use the best model snapshot
    model.load_state_dict(torch.load(best_snapshot_path, map_location=device))

    # test the model
    true = list()
    pred = list()
    with torch.no_grad():
        for batch in test_loader:
            x, y = batch

            x = x.to(device)
            y = y.to(device)

            y_pred = model(x)

            true.extend([val.item() for val in y])
            pred.extend([val.item() for val in y_pred.argmax(dim=-1)])

    # calculate the accuracy
    test_accuracy = skm.accuracy_score(true, pred)

    # save the accuracy
    path_to_logs = f'{out_dir_results}/logs.csv'
    utils.log_accuracy(path_to_logs, model_desc, test_accuracy)

    print('Test accuracy: {:.3f}'.format(test_accuracy))
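utils.pad_function is only described by the comment above ("fill padded area with ImageNet's mean pixel value converted to range [0, 255]"). A sketch consistent with that comment, assuming it squares each PIL image to its longer side:

import torchvision.transforms.functional as TF

def pad_function(img):
    # Hypothetical: pad a PIL image to a square, filling with the
    # ImageNet mean pixel scaled to [0, 255].
    fill = tuple(int(round(m * 255)) for m in (0.485, 0.456, 0.406))
    w, h = img.size
    side = max(w, h)
    left = (side - w) // 2
    top = (side - h) // 2
    return TF.pad(img, (left, top, side - w - left, side - h - top), fill=fill)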
Example #8
def evitram(evitram_dict):
    saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model
        saver.restore(sess, evitramfinestr.split('.meta')[0])

        if cp.get('Experiment', 'PREFIX') == 'MNIST' or \
                cp.get('Experiment', 'PREFIX') == 'AMNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess, evitram_dict['conv_in'], XX_full,
                                        evitram_dict['conv_z'],
                                        batch_size=batch_size)
        elif cp.get('Experiment', 'PREFIX') == 'WEATHER':
            # Save hidden/output layer results for pipeline training
            px_Z_latent_tr = utils.run_OOM(sess, evitram_dict['conv_in'], XX_full,
                                           evitram_dict['conv_z'],
                                           batch_size=batch_size)
            px_Z_latent_te = utils.run_OOM(sess, evitram_dict['conv_in'], XX_test,
                                           evitram_dict['conv_z'],
                                           batch_size=batch_size)
        else:
            px_Z_latent_tr = utils.run_OOM(sess, evitram_dict['sda_in'], XX_full,
                                           evitram_dict['sda_hidden'],
                                           batch_size=batch_size)
            # Alias for the non-WEATHER logging branch below, which reads
            # px_Z_latent and would otherwise hit a NameError
            px_Z_latent = px_Z_latent_tr
            if not np.array_equal(XX_test, np.zeros(shape=(1, 1))):
                px_Z_latent_te = utils.run_OOM(sess, evitram_dict['sda_in'], XX_test,
                                               evitram_dict['sda_hidden'],
                                               batch_size=batch_size)

        if 'WEATHER' in cp.get('Experiment', 'PREFIX'):
            # Print clustering ACC
            utils.log_accuracy_isof(cp, YY_full, px_Z_latent_tr,
                                    'COND - ACC FULL (Train)', SEED)
            if not(np.array_equal(XX_test, np.zeros(shape=(1,1)))):
                utils.log_accuracy_isof(cp, YY_test, px_Z_latent_te,
                                   'COND - ACC FULL (Test)', SEED)

            utils.log_anomalyPRF_isof(cp, YY_full, px_Z_latent_tr,
                                      'COND - PRF FULL (Train)', SEED)
            if not(np.array_equal(XX_test, np.zeros(shape=(1,1)))):
                utils.log_anomalyPRF_isof(cp, YY_test, px_Z_latent_te,
                                   'COND - PRF FULL (Test)', SEED)

        else:
            # Print clustering ACC
            utils.log_accuracy(cp, YY_full, px_Z_latent,
                               'COND - ACC FULL', SEED)

            # Print clustering NMI
            utils.log_NMI(cp, YY_full, px_Z_latent,
                          'COND - NMI FULL', SEED)

            # Print clustering CHS score
            utils.log_CHS(cp, XX_full, px_Z_latent,
                          'COND - CHS FULL', SEED)

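The _isof suffix suggests the WEATHER metrics are computed with an isolation forest over the latent space rather than k-means, which fits the anomaly-detection framing. A sketch of what log_accuracy_isof might compute (the 0/1 anomaly-label convention is an assumption):

import numpy as np
from sklearn.ensemble import IsolationForest
from sklearn.metrics import accuracy_score

def isof_accuracy(z_latent, y_true, seed):
    # Fit on the latent codes; IsolationForest.predict returns -1 for
    # anomalies and 1 for inliers, mapped here to 1/0 labels.
    isof = IsolationForest(random_state=seed).fit(z_latent)
    y_pred = (isof.predict(z_latent) == -1).astype(int)
    return accuracy_score(y_true, y_pred)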
Example #9
def px(pae_dict):
    saver = tf.train.Saver()

    # P(x) Session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # Restore pretrained model
        saver.restore(sess, pxfinestr.split('.meta')[0])

        if cp.get('Experiment', 'PREFIX') == 'MNIST' or \
                cp.get('Experiment', 'PREFIX') == 'AMNIST':
            # Save hidden/output layer results for pipeline training
            px_Z_latent = utils.run_OOM(sess,
                                        pae_dict['conv_in'],
                                        XX_full,
                                        pae_dict['conv_z'],
                                        batch_size=batch_size)
            # Also embed the test split when one exists, so the guarded
            # test-set logging below never reads an undefined px_Z_latent_te
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                px_Z_latent_te = utils.run_OOM(sess,
                                               pae_dict['conv_in'],
                                               XX_test,
                                               pae_dict['conv_z'],
                                               batch_size=batch_size)
        else:
            px_Z_latent = utils.run_OOM(sess,
                                        pae_dict['sda_in'],
                                        XX_full,
                                        pae_dict['sda_hidden'],
                                        batch_size=batch_size)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                px_Z_latent_te = utils.run_OOM(sess,
                                               pae_dict['sda_in'],
                                               XX_test,
                                               pae_dict['sda_hidden'],
                                               batch_size=batch_size)

        if cp.get('Experiment', 'Enumber') == 'windT':
            # Print clustering ACC
            utils.log_accuracy_AC(cp, YY_full, px_Z_latent, 'PX - ACC FULL',
                                  SEED)
            #  utils.log_accuracy(cp, YY_test, px_Z_latent_te,
            #  'PX - ACC TEST', SEED)

            # Print clustering NMI
            utils.log_NMI_AC(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)
            #  utils.log_NMI(cp, YY_test, px_Z_latent_te,
            #  'PX - NMI TEST', SEED)

            # Print anomaly precision/recall/F1
            utils.log_anomalyPRF_AC(cp, YY_full, px_Z_latent,
                                    'PX - PRF FULL (FULL)', SEED)
            #  utils.log_anomalyPRF(cp, YY_test, px_Z_latent_te,
            #  'PX - PRF FULL (Test)', SEED)
        else:
            # Print clustering ACC
            utils.log_accuracy(cp, YY_full, px_Z_latent, 'PX - ACC FULL', SEED)

            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_accuracy(cp, YY_test, px_Z_latent_te,
                                   'PX - ACC TEST', SEED)

            # Print clustering NMI
            utils.log_NMI(cp, YY_full, px_Z_latent, 'PX - NMI FULL', SEED)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_NMI(cp, YY_test, px_Z_latent_te, 'PX - NMI TEST',
                              SEED)

            # Print anomaly precision/recall/F1
            utils.log_anomalyPRF(cp, YY_full, px_Z_latent,
                                 'PX - PRF FULL (FULL)', SEED)
            if not (np.array_equal(XX_test, np.zeros(shape=(1, 1)))):
                utils.log_anomalyPRF(cp, YY_test, px_Z_latent_te,
                                     'PX - PRF FULL (Test)', SEED)

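In these clustering examples, log_accuracy presumably reports unsupervised clustering accuracy (ACC): cluster the latent codes, then find the best one-to-one mapping between cluster IDs and ground-truth labels. The standard Hungarian-matching computation looks like this (the k-means step is an assumption):

import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.cluster import KMeans

def clustering_accuracy(y_true, z_latent, n_clusters, seed):
    # Cluster the latent space, then match clusters to labels so that
    # the total count of agreeing samples is maximized.
    y_pred = KMeans(n_clusters=n_clusters, random_state=seed).fit_predict(z_latent)
    D = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((D, D), dtype=np.int64)
    for yp, yt in zip(y_pred, y_true):
        w[yp, yt] += 1
    rows, cols = linear_sum_assignment(-w)  # maximize matched counts
    return w[rows, cols].sum() / y_pred.size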