Code Example #1
        def __init__(self, name = None, model = ALOCC_Model()):
            if name is None:
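                # Default architecture; several settings below are read from the module-level cfg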
                # Layers
                self.n_conv_modules = 5 # number of conv. modules (assumed default: one per entry in self.channels below)
                self.n_conv_layers_per_module = 1 # number of conv. layers in each module (between each pool layer/dim reduction)
                self.n_dense_layers = 2 # number of dense layers
                self.n_dense_units = model.z_dim
                # Expand scalar settings into per-layer lists (calling len() on an int would raise TypeError)
                if not isinstance(self.n_conv_layers_per_module, list):
                    self.n_conv_layers_per_module = [self.n_conv_layers_per_module]*self.n_conv_modules
                if not isinstance(self.n_dense_units, list):
                    self.n_dense_units = [self.n_dense_units]*self.n_dense_layers

                # Filters
                self.filter_size = 4
                self.stride = 2
                self.channels = [8,16,32,64,128] # num output channels/filters in each conv. module

                self.channel_factor = 2 # increase/decrease factor of num channels after each conv. module (assumed scalar default; scalar or list with n_conv_modules-1 elements)
                if not isinstance(self.channel_factor, list):
                    self.channel_factor = [self.channel_factor] * (self.n_conv_modules-1)

                # Other layers
                self.max_pool = cfg.max_pool
                self.pool_size = 2
                if not isinstance(self.pool_size, list):
                    self.pool_size = [self.pool_size]*self.n_conv_modules
                self.use_batch_norm = cfg.use_batch_norm
                self.use_dropout = cfg.use_dropout
                self.dropout_rate = cfg.dropout_rate

                if self.max_pool:
                    self.dim_red_stride = self.pool_size # downsampling handled by pooling layers
                else:
                    self.dim_red_stride = [self.stride]*self.n_conv_modules # downsampling handled by strided convs

            if name == 'VGG16':
                self.n_conv_modules = 5 # number of conv. modules
                self.n_conv_layers_per_module = [2,2,3,3,3] # number of conv. layers in each module (between each pool layer/dim reduction)
                self.n_dense_layers = 3 # number of dense layers
                self.n_dense_units = [4096, 4096, 1000]
                
                # Filters
                self.filter_size = 4
                self.stride = 2
                self.channel_factor = [2,2,2,1] # increase/decrease factor of num channels after each conv. module. Scalar or list with n_conv_modules-1 elements.
                self.init_channels = 64 # num channels in the first conv. module (closest to input/reconstruction layer)
                
                # Other layers
                self.max_pool = False
                self.use_batch_norm = True
                self.use_dropout = False
Code Example #2
File: train.py Project: wakamezake/ALOCC-CVPR2018
def main(_):
    """
    The main function for training steps     
    """
    pp.pprint(flags.FLAGS.__flags)
    n_per_itr_print_results = 100
    kb_work_on_patch = True

    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # Manual Switches -----------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # DATASET PARAMETER : UCSD
    # FLAGS.dataset = 'UCSD'
    # FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train'

    nd_input_frame_size = (240, 360)
    nd_slice_size = (45, 45)
    n_stride = 25
    n_fetch_data = 600
    # ---------------------------------------------------------------------------------------------
    # # DATASET PARAMETER : MNIST
    # FLAGS.dataset = 'mnist'
    # FLAGS.dataset_address = './dataset/mnist'
    # nd_input_frame_size = (28, 28)
    # nd_slice_size = (28, 28)

    FLAGS.train = True

    FLAGS.input_width = nd_slice_size[0]
    FLAGS.input_height = nd_slice_size[1]
    FLAGS.output_width = nd_slice_size[0]
    FLAGS.output_height = nd_slice_size[1]

    FLAGS.sample_dir = 'export/' + FLAGS.dataset + '_%d.%d' % (
        nd_slice_size[0], nd_slice_size[1])
    FLAGS.input_fname_pattern = '*'

    check_some_assertions()

    # manual handling of GPU
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
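        # Build the ALOCC model with the frame/patch geometry configured above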
        tmp_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            is_training=FLAGS.train,
            log_dir=FLAGS.log_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_slice_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        # show_all_variables()

        if FLAGS.train:
            print('Program is on Train Mode')
            tmp_model.train(FLAGS)
        else:
            print('Program is on Test Mode')
            if not tmp_model.load(FLAGS.checkpoint_dir)[0]:
                raise Exception(
                    "[!] Train a model first, then run test mode from file test.py"
                )
Code Example #3
def main(_):
    print('Program is started at', time.clock())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 180
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    nd_patch_step = (n_stride, n_stride)
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 1

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

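            # Mix inlier digits (label 6) with a small sample of other digits as anomalies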
            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])

        # else: UCSD (depends on infrastructure)
        tmp_lst_image_paths = []
        for s_image_dirs in sorted(
                glob(os.path.join(FLAGS.dataset_address,
                                  'Test[0-9][0-9][0-9]'))):

            if os.path.basename(s_image_dirs) not in ['Test004']:
                print('Skip ', os.path.basename(s_image_dirs))
                continue
            for s_image_dir_files in sorted(
                    glob(os.path.join(s_image_dirs + '/*'))):
                if os.path.basename(s_image_dir_files) not in ['068.tif']:
                    print('Skip ', os.path.basename(s_image_dirs),
                          os.path.basename(s_image_dir_files))
                    continue
                tmp_lst_image_paths.append(s_image_dir_files)

        #random
        #lst_image_paths = [tmp_lst_image_paths[x] for x in random.sample(range(0, len(tmp_lst_image_paths)), n_fetch_data)]
        lst_image_paths = tmp_lst_image_paths

        #images =read_lst_images(lst_image_paths,nd_patch_size,nd_patch_step,b_work_on_patch=False)
        images = read_lst_images_without_noise2(lst_image_paths, nd_patch_size,
                                                nd_patch_step)

        lst_prob = process_frame(images, tmp_ALOCC_model)

        print('pseudocode test is finished')
Code Example #4
File: test.py Project: KratzErik/ALOCC
def main(_):
    print('Program is started at', time.clock())
    #    pp.pprint(flags.FLAGS.__flags) # print all flags; suppressed to unclutter the output

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    n_stride = 10
    if FLAGS.dataset == 'UCSD':
        nd_input_frame_size = (240, 360)
        nd_patch_size = (45, 45)
        FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"
        FLAGS.dataset = 'UCSD'
        FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'

    #DATASET PARAMETER : MNIST
    if FLAGS.dataset == 'mnist':
        FLAGS.dataset_address = './dataset/mnist'
        nd_input_frame_size = (28, 28)
        nd_patch_size = (28, 28)
        FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"
        FLAGS.input_width = nd_patch_size[0]
        FLAGS.input_height = nd_patch_size[1]
        FLAGS.output_width = nd_patch_size[0]
        FLAGS.output_height = nd_patch_size[1]

    if FLAGS.dataset == 'bdd100k':
        nd_input_frame_size = (FLAGS.input_height, FLAGS.input_width)
        nd_patch_size = nd_input_frame_size
        FLAGS.checkpoint_dir = "checkpoint/{}_{}_{}_{}".format(
            FLAGS.dataset, FLAGS.batch_size, FLAGS.output_height,
            FLAGS.output_width)

    log_dir = "./log/" + cfg.dataset + "/" + cfg.architecture + "/"
    FLAGS.sample_dir = log_dir

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        #        show_all_variables()

        print('--------------------------------------------------')
        print('Loading pretrained model from ', tmp_ALOCC_model.checkpoint_dir,
              '...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            #mnist = input_data.read_data_sets(FLAGS.dataset_address)
            mnist = tf.keras.datasets.mnist
            (x_train, y_train), (x_test, y_test) = mnist.load_data()

            inlier_idx = tmp_ALOCC_model.attention_label
            specific_idx = np.where(y_test == inlier_idx)[0]
            inlier_data = x_test[specific_idx].reshape(-1, 28, 28, 1)

            anomaly_frac = 0.5
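            # Sample enough anomalies that they make up anomaly_frac of the mixed test set:
            # n_anomaly = anomaly_frac * n_inlier / (1 - anomaly_frac)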
            potential_idx_anomaly = np.where(y_test != inlier_idx)[0]
            specific_idx_anomaly = [
                potential_idx_anomaly[x] for x in random.sample(
                    range(0, len(potential_idx_anomaly)),
                    math.ceil(anomaly_frac * len(specific_idx) /
                              (1 - anomaly_frac)))
            ]

            anomaly_data = x_test[specific_idx_anomaly].reshape(-1, 28, 28, 1)
            data = np.append(inlier_data, anomaly_data).reshape(-1, 28, 28, 1)

            # True labels are 1 for inliers and 0 for anomalies, since discriminator outputs higher values for inliers
            labels = np.append(np.ones(len(inlier_data)),
                               np.zeros(len(anomaly_data)))

            # Shuffle data so not only anomaly points are removed if data is shortened below
            tmp_perm = np.random.permutation(len(data))
            data = data[tmp_perm]
            labels = labels[tmp_perm]

            # Only whole batches
            n_batches = len(data) // tmp_ALOCC_model.batch_size
            #            print("Batch size: ", tmp_ALOCC_model.batch_size, "n batches: ", n_batches)
            data = data[:n_batches * tmp_ALOCC_model.batch_size]
            labels = labels[:len(data)]

            # Get test results from discriminator
            results_d = tmp_ALOCC_model.f_test_frozen_model(data)

            # Compute performance metrics
            roc_auc = roc_auc_score(labels, results_d)
            print('AUROC: ', roc_auc)

            roc_prc = average_precision_score(labels, results_d)
            print("AUPRC: ", roc_prc)

            print('Test completed')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])
        elif FLAGS.dataset == 'UCSD':
            # UCSD branch (depends on infrastructure)
            for s_image_dirs in sorted(
                    glob(
                        os.path.join(FLAGS.dataset_address,
                                     'Test[0-9][0-9][0-9]'))):
                tmp_lst_image_paths = []
                if os.path.basename(s_image_dirs) not in ['Test004']:
                    print('Skip ', os.path.basename(s_image_dirs))
                    continue
                for s_image_dir_files in sorted(
                        glob(os.path.join(s_image_dirs + '/*'))):
                    if os.path.basename(s_image_dir_files) not in ['068.tif']:
                        print('Skip ', os.path.basename(s_image_dir_files))
                        continue
                    tmp_lst_image_paths.append(s_image_dir_files)

                #random
                #lst_image_paths = [tmp_lst_image_paths[x] for x in random.sample(range(0, len(tmp_lst_image_paths)), n_fetch_data)]
                lst_image_paths = tmp_lst_image_paths
                #images =read_lst_images(lst_image_paths,nd_patch_size,nd_patch_step,b_work_on_patch=False)
                images = read_lst_images_w_noise2(lst_image_paths,
                                                  nd_patch_size, nd_patch_step)

                lst_prob = process_frame(os.path.basename(s_image_dirs),
                                         images, tmp_ALOCC_model)

                print('pseudocode test is finished')

                # This code just checks the output for readers
                # ...
            # The UCSD branch never defines the data/labels used below, so stop here
            return

        elif FLAGS.dataset in ('prosivic', 'dreyeve', 'bdd100k'):
            data = tmp_ALOCC_model.data
            labels = tmp_ALOCC_model.test_labels

        # Below is done for the remaining datasets (prosivic, dreyeve, bdd100k)
        test_dir = log_dir + "test/"
        check_dir(test_dir)

        # Shuffle data so not only anomaly points are removed if data is shortened below
        tmp_perm = np.random.permutation(len(data))
        data = data[tmp_perm]
        labels = labels[tmp_perm]

        # Only whole batches
        n_batches = len(data) // tmp_ALOCC_model.batch_size
        #            print("Batch size: ", tmp_ALOCC_model.batch_size, "n batches: ", n_batches)
        data = data[:n_batches * tmp_ALOCC_model.batch_size]
        labels = labels[:len(data)]

        # Get test results from discriminator
        results_d = tmp_ALOCC_model.f_test_frozen_model(data)

        # Compute performance metrics
        roc_auc = roc_auc_score(labels, results_d)
        print('AUROC: ', roc_auc)

        roc_prc = average_precision_score(labels, results_d)
        print("AUPRC: ", roc_prc)

        # Pickle results
        results = [labels, results_d]
        results_file = test_dir + "results.pkl"
        with open(results_file, 'wb') as f:
            pickle.dump(results, f)

        print('Test completed')
        exit()
Code Example #5
File: test-cv.py Project: zacbaum/ALOCC-CVPR2018
def main(_):
    n_per_itr_print_results = 100
    kb_work_on_patch = False
    nd_input_frame_size = (180, 270)
    nd_slice_size = (180, 270)
    n_stride = 1

    FLAGS.input_width = nd_slice_size[0]
    FLAGS.input_height = nd_slice_size[1]
    FLAGS.output_width = nd_slice_size[0]
    FLAGS.output_height = nd_slice_size[1]

    FLAGS.dataset = "data-alocc"
    FLAGS.dataset_address = "./dataset/data-alocc/test/"
    FLAGS.checkpoint_dir = "./checkpoint/" + "{}_{}_{}_{}_{}".format(
        FLAGS.dataset,
        FLAGS.batch_size,
        FLAGS.output_height,
        FLAGS.output_width,
        FLAGS.r_alpha
    )
    FLAGS.sample_dir = os.path.join("./samples/check_QA", (str(FLAGS.use_ckpt) + "_" + str(FLAGS.r_alpha)))

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    nd_patch_step = (n_stride, n_stride)

    FLAGS.nStride = n_stride
    # FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1

    pp.pprint(flags.FLAGS.__flags)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
        )

        # show_all_variables()

        print("--------------------------------------------------")
        print("Load Pretrained Model...")
        tmp_ALOCC_model.f_check_checkpoint(checkpoint_number=FLAGS.use_ckpt)

        t = time.time()

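        # Read all test images from the two class folders (0 and 1) and stack them into one array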
        data_folder = "/home/zbaum/Baum/COVID-Lung-Classifier/data-alocc/test/0/"
        lst_image_paths = glob(os.path.join(data_folder, FLAGS.input_fname_pattern))
        images_0 = read_lst_images(lst_image_paths, None, None, b_work_on_patch=False)

        data_folder = "/home/zbaum/Baum/COVID-Lung-Classifier/data-alocc/test/1/"
        lst_image_paths = glob(os.path.join(data_folder, FLAGS.input_fname_pattern))
        images_1 = read_lst_images(lst_image_paths, None, None, b_work_on_patch=False)

        images = np.vstack((images_0, images_1))

        t = time.time() - t
        print(" [*] Loaded Data in {:3f}s".format(t))

        print(images.shape)
        tmp_ALOCC_model.f_test_frozen_model(images)
Code Example #6
from models import ALOCC_Model
from keras.datasets import mnist

from keras.losses import binary_crossentropy
from keras import backend as K

import numpy as np

import matplotlib.pyplot as plt

import cv2

from landsat_data_loader import LandsatDataLoader

model = ALOCC_Model(dataset_name='mnist', input_height=28, input_width=28)
model.adversarial_model.load_weights('./checkpoint/ALOCC_Model_9.h5')

root = "/QCOLT/QCOLT_DEV_OPS/"
path = root + '/TDS_NOVELTY_DETECTION/EXP_02/nominal_chips/'

loader = LandsatDataLoader(path)

X_train = loader.load_data()
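# Scale pixel intensities to [0, 1]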
X_train = X_train / 255

print("Number of samples loaded =", X_train.shape[0])
print("Dimensions (H,W) = ({},{})".format(X_train.shape[1], X_train.shape[2]))


def resize_img(img, target_edge=500):
    # Minimal sketch of the truncated body (assumption): rescale so the longest
    # edge equals target_edge, preserving aspect ratio.
    scale = target_edge / max(img.shape[:2])
    return cv2.resize(img, None, fx=scale, fy=scale)
Code Example #7
def main(_):
    print('Program is started at', time.clock())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 504

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])
        else:
            data = read_data.test_data(1)
            np.random.shuffle(data)
            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
Code Example #8
def main(_):
    """
    The main function for training steps     
    """
    n_per_itr_print_results = 100
    kb_work_on_patch = True

    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # Manual Switches -----------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # DATASET PARAMETER : data-alocc
    FLAGS.dataset = "data-alocc"
    FLAGS.dataset_address = "./dataset/data-alocc/train"
    kb_work_on_patch = False
    nd_input_frame_size = (180, 270)
    nd_slice_size = (180, 270)
    n_stride = 1
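    # data-alocc trains on full 180x270 frames, so patch extraction is disabled (kb_work_on_patch = False)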

    FLAGS.train = True

    FLAGS.input_width = nd_slice_size[0]
    FLAGS.input_height = nd_slice_size[1]
    FLAGS.output_width = nd_slice_size[0]
    FLAGS.output_height = nd_slice_size[1]

    FLAGS.sample_dir = ("export/" + FLAGS.dataset + "_%d.%d.%f_" %
                        (nd_slice_size[0], nd_slice_size[1], FLAGS.r_alpha))
    FLAGS.input_fname_pattern = "*"

    pp.pprint(flags.FLAGS.__flags)

    check_some_assertions()

    # manual handling of GPU
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        tmp_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            is_training=FLAGS.train,
            log_dir=FLAGS.log_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_slice_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
        )

        # show_all_variables()

        if FLAGS.train:
            print("Program is on Train Mode")
            tmp_model.train(FLAGS)
        else:
            print("Program is on Test Mode")
            if not tmp_model.load(FLAGS.checkpoint_dir)[0]:
                raise Exception(
                    "[!] Train a model first, then run test mode from file test.py"
                )
Code Example #9
File: test.py Project: KratzErik/ALOCC_Keras_SMILE
    cfg = Configuration(dataset, exp_name)

    if args.out_name is None:
        outlier_dir = cfg.test_out_folder
    else:
        outlier_dir = cfg.test_folder + "out/" + args.out_name + "/"

    log = ["################################################################"]
    test_time = datetime.datetime.now()
    log.append("# Test started at: %s" % test_time)
    log.append("# Dataset: %s, outliers: %s" %
               (dataset, cfg.test_out_folder.replace(cfg.test_folder, "")))

    model = ALOCC_Model(dataset_name=dataset,
                        input_height=cfg.image_height,
                        input_width=cfg.image_width,
                        is_training=False,
                        outlier_dir=outlier_dir,
                        cfg=cfg)
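    # Either resume from the last saved checkpoint or load a specific epoch's weights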
    if load_epoch == 'final':
        model.load_last_checkpoint()
        load_epoch = str(model.start_epoch - 1)
    else:
        trained_adv_path = model_dir + 'ALOCC_Model_%s_adv.h5' % load_epoch
        trained_d_path = model_dir + 'ALOCC_Model_%s_d.h5' % load_epoch
        model.adversarial_model.load_weights(trained_adv_path)
        model.discriminator.load_weights(trained_d_path)

    print("Loading trained model from epoch %s" % load_epoch)

    data = model.data
    batch_size = model.cfg.test_batch_size
Code Example #10
File: test.py Project: wqwangsdu/ALOCC-CVPR2018
def main(_):
    print('Program is started at', time.clock())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    nd_sliced_size = (224, 224)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 1

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            seq_len=FLAGS.seq_len,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        flag = tmp_ALOCC_model.f_check_checkpoint()
        if flag == -1:
            print('[!] Load checkpoint failed')
            import sys
            sys.exit()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])

        elif FLAGS.dataset == 'ped1_seq':
            from scipy.stats import logistic
            from matplotlib import pyplot as plt
            import shutil
            result_path = './test_result'

            shutil.rmtree(result_path, ignore_errors=True)

            os.mkdir(result_path)

            root = '/home/ltj/codes/split_dataset/share/data/videos/avenue/avenue_test_t8_splited'
            lst = os.listdir(root)
            for fn in lst:
                tmp = []
                path = os.path.join(root, fn)
                h5_lst = os.listdir(path)
                h5_len = len(h5_lst)
                for i in range(h5_len):
                    h5_path = os.path.join(path, str(i) + '.h5')
                    with h5py.File(h5_path, 'r') as f:
                        # Read the full 'data' dataset into an ndarray
                        tmp.append(f['data'][()])
                lst_prob, generated = tmp_ALOCC_model.f_test_frozen_model(tmp)
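                # Map raw discriminator scores to (0, 1) with the logistic CDF for plotting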
                probs = logistic.cdf(np.concatenate(lst_prob))

                T = np.arange(len(probs))
                plt.plot(T, probs)
                plt.savefig(
                    os.path.join(result_path,
                                 fn.split('.')[0] + '.jpg'))
                plt.clf()
                plt.cla()
                plt.close()

        # UCSD branch (depends on infrastructure)
        elif FLAGS.dataset == 'UCSD':
            for s_image_dirs in sorted(
                    glob.glob(
                        os.path.join(FLAGS.dataset_address,
                                     'Test[0-9][0-9][0-9]'))):
                print(s_image_dirs)
                tmp_lst_image_paths = []
                if os.path.basename(s_image_dirs) not in ['Test004']:
                    print('Skip ', os.path.basename(s_image_dirs))
                    continue
                for s_image_dir_files in sorted(
                        glob.glob(os.path.join(s_image_dirs + '/*'))):
                    if os.path.basename(s_image_dir_files) not in ['068.tif']:
                        print('Skip ', os.path.basename(s_image_dir_files))
                        continue
                    tmp_lst_image_paths.append(s_image_dir_files)

                # random
                # lst_image_paths = [tmp_lst_image_paths[x] for x in random.sample(range(0, len(tmp_lst_image_paths)), n_fetch_data)]
                lst_image_paths = tmp_lst_image_paths
                # images =read_lst_images(lst_image_paths,nd_patch_size,nd_patch_step,b_work_on_patch=False)
                images = read_lst_images_w_noise2(lst_image_paths,
                                                  nd_patch_size, nd_patch_step)

                lst_prob = process_frame(os.path.basename(s_image_dirs),
                                         images, tmp_ALOCC_model)

                print('pseudocode test is finished')

                # This code just checks the output for readers
                # ...
        """
def main(_):
    print('Program is started at', time.clock())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 180
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    nd_patch_step = (n_stride, n_stride)
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"
    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 56

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        tmp_ALOCC_model.f_check_checkpoint()

        #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])

        # UCSD branch (depends on infrastructure)
        tmp_lst_image_paths = []
        tmp_gt = []

        # append the directories in the list you want to test
        for s_image_dirs in sorted(
                glob(os.path.join(FLAGS.dataset_address,
                                  'Test[0-9][0-9][0-9]'))):

            if os.path.basename(s_image_dirs) not in ['Test004']:
                print('Skip ', os.path.basename(s_image_dirs))
                continue
            for s_image_dir_files in sorted(
                    glob(os.path.join(s_image_dirs + '/*'))):
                tmp_lst_image_paths.append(s_image_dir_files)

        # append the ground truth directories
        for s_image_dirs in sorted(
                glob(
                    os.path.join(FLAGS.dataset_address,
                                 'Test[0-9][0-9][0-9]_gt'))):

            if os.path.basename(s_image_dirs) not in ['Test004_gt']:
                print('Skip ', os.path.basename(s_image_dirs))
                continue
            for s_image_dir_files in sorted(
                    glob(os.path.join(s_image_dirs + '/*'))):
                tmp_gt.append(s_image_dir_files)

        lst_image_paths = tmp_lst_image_paths

        images = read_lst_images_without_noise2(lst_image_paths, nd_patch_size,
                                                nd_patch_step)

        lst_prob = process_frame(images, tmp_gt, tmp_ALOCC_model)

        print('Test is finished')
        """
Code Example #12
def main(_):
    print('Program is started at', time.clock())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 100

    message = []
    total = []
    acc = 0
    pres = 0
    recall = 0
    f1 = 0

    for i in range(10):
        FLAGS.checkpoint_dir = "result/checkpoint_step2/cifar-10_128_32_32_vae0/"
        FLAGS.attention_label = i
        FLAGS.sample_dir = 'samples/'
        FLAGS.checkpoint_dir = FLAGS.checkpoint_dir.replace(
            "vae0", "vae{}".format(FLAGS.attention_label))

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
        run_config = tf.ConfigProto(gpu_options=gpu_options)
        run_config.gpu_options.allow_growth = True
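        # Reset the default graph so each class's model is built fresh in this loop iteration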
        tf.reset_default_graph()

        with tf.Session(config=run_config) as sess:
            tmp_ALOCC_model = ALOCC_Model(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                attention_label=FLAGS.attention_label,
                r_alpha=FLAGS.r_alpha,
                r_beta=FLAGS.r_beta,
                is_training=FLAGS.train,
                pre=FLAGS.pretrain,
                pre_dir=FLAGS.pre_dir,
                dataset_name=FLAGS.dataset,
                dataset_address=FLAGS.dataset_address,
                input_fname_pattern=FLAGS.input_fname_pattern,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                nd_patch_size=nd_patch_size,
                n_stride=n_stride,
                n_per_itr_print_results=n_per_itr_print_results,
                kb_work_on_patch=kb_work_on_patch,
                nd_input_frame_size=nd_input_frame_size,
                n_fetch_data=n_fetch_data)

            show_all_variables()

            print('--------------------------------------------------')
            print('Load Pretrained Model...')
            tmp_ALOCC_model.f_check_checkpoint()

            if FLAGS.dataset == 'mnist':
                mnist = input_data.read_data_sets(FLAGS.dataset_address)

                specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
                specific_idx = np.where(mnist.train.labels == 6)[0]
                ten_percent_anomaly = [
                    specific_idx_anomaly[x]
                    for x in random.sample(range(0, len(specific_idx_anomaly)),
                                           len(specific_idx) // 40)
                ]

                data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
                tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                    -1, 28, 28, 1)
                data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

                lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                    data[0:FLAGS.batch_size])
                print('check is ok')
                exit()
                #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])
            else:
                data, labels = read_data.test_data(FLAGS.attention_label)
                # np.random.shuffle(data)
                lst_prob = tmp_ALOCC_model.f_test_frozen_model(data)
                # maxi = max(lst_prob)
                # mini = min(lst_prob)
                # average = (maxi+mini) / 2.0
                # print(average)
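                # Threshold scores at their mean to binarize predictions for the confusion matrix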
                best_th = np.mean(lst_prob)
                for x in range(len(lst_prob)):
                    if lst_prob[x] >= best_th:
                        lst_prob[x] = 1
                    else:
                        lst_prob[x] = 0
                C = confusion_matrix(labels, lst_prob)
                print(C)
                msg = "class_id: {}, ".format(FLAGS.attention_label) + "threshold: {:.3f}\n".format(best_th) + \
                    'accuracy: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1 score: {:.3f}\n'.format(
                        # average,
                        accuracy_score(labels, lst_prob),
                        precision_score(labels, lst_prob, average='binary'),
                        recall_score(labels, lst_prob, average='binary'),
                        f1_score(labels, lst_prob, average='binary')) + str(C)
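                # Accumulate running means of each metric over the 10 classes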
                acc += accuracy_score(labels, lst_prob) / 10.
                pres += precision_score(labels, lst_prob,
                                        average='binary') / 10.
                recall += recall_score(labels, lst_prob,
                                       average='binary') / 10.
                f1 += f1_score(labels, lst_prob, average='binary') / 10.

                print(msg)
                message.append(msg)
                print("\n")
                # logging.info(msg)
                # print('check is ok')
                # exit()

    with open("print.txt", "w+") as f:
        for msg in message:
            f.write(msg)
            f.write("\n")

        result = 'accuracy: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1 score: {:.3f}'.format(
            acc, pres, recall, f1)
        f.write(result)