Example #1
def main(_):
    print('Program started at', time.time())  # time.clock() was removed in Python 3.8
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 180
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    nd_patch_step = (n_stride, n_stride)
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 1

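    # Cap this process at ~10% of GPU memory; allow_growth makes TF allocate lazily instead of grabbing the whole fraction up front.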
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

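            # Digit 6 is the inlier class; despite the variable's name, only len(specific_idx) // 40 of the other digits are sampled as anomalies.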
            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])

        # else: UCSD (depends on infrastructure)
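        # Collect frame paths; this demo keeps only Test004/068.tif and skips everything else.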
        tmp_lst_image_paths = []
        for s_image_dirs in sorted(
                glob(os.path.join(FLAGS.dataset_address,
                                  'Test[0-9][0-9][0-9]'))):

            if os.path.basename(s_image_dirs) not in ['Test004']:
                print('Skip ', os.path.basename(s_image_dirs))
                continue
            for s_image_dir_files in sorted(
                    glob(os.path.join(s_image_dirs, '*'))):
                if os.path.basename(s_image_dir_files) not in ['068.tif']:
                    print('Skip ', os.path.basename(s_image_dirs),
                          os.path.basename(s_image_dir_files))
                    continue
                tmp_lst_image_paths.append(s_image_dir_files)

        #random
        #lst_image_paths = [tmp_lst_image_paths[x] for x in random.sample(range(0, len(tmp_lst_image_paths)), n_fetch_data)]
        lst_image_paths = tmp_lst_image_paths

        #images =read_lst_images(lst_image_paths,nd_patch_size,nd_patch_step,b_work_on_patch=False)
        images = read_lst_images_without_noise2(lst_image_paths, nd_patch_size,
                                                nd_patch_step)

        lst_prob = process_frame(images, tmp_ALOCC_model)

        print('pseudocode test is finished')
Example #2
def main(_):
    print('Program started at', time.time())
    #    pp.pprint(flags.FLAGS.__flags)  # print all flags; suppressed to unclutter the output

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    n_stride = 10
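    # Per-dataset frame sizes, patch sizes, and checkpoint/dataset paths.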
    if FLAGS.dataset == 'UCSD':
        nd_input_frame_size = (240, 360)
        nd_patch_size = (45, 45)
        FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"
        FLAGS.dataset = 'UCSD'
        FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'

    #DATASET PARAMETER : MNIST
    if FLAGS.dataset == 'mnist':
        FLAGS.dataset_address = './dataset/mnist'
        nd_input_frame_size = (28, 28)
        nd_patch_size = (28, 28)
        FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"
        FLAGS.input_width = nd_patch_size[0]
        FLAGS.input_height = nd_patch_size[1]
        FLAGS.output_width = nd_patch_size[0]
        FLAGS.output_height = nd_patch_size[1]

    if FLAGS.dataset == 'bdd100k':
        nd_input_frame_size = (FLAGS.input_height, FLAGS.input_width)
        nd_patch_size = nd_input_frame_size
        FLAGS.checkpoint_dir = "checkpoint/{}_{}_{}_{}".format(
            FLAGS.dataset, FLAGS.batch_size, FLAGS.output_height,
            FLAGS.output_width)

    log_dir = "./log/" + cfg.dataset + "/" + cfg.architecture + "/"
    FLAGS.sample_dir = log_dir

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        #        show_all_variables()

        print('--------------------------------------------------')
        print('Loading pretrained model from ', tmp_ALOCC_model.checkpoint_dir,
              '...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            #mnist = input_data.read_data_sets(FLAGS.dataset_address)
            mnist = tf.keras.datasets.mnist
            (x_train, y_train), (x_test, y_test) = mnist.load_data()

            inlier_idx = tmp_ALOCC_model.attention_label
            specific_idx = np.where(y_test == inlier_idx)[0]
            inlier_data = x_test[specific_idx].reshape(-1, 28, 28, 1)

            anomaly_frac = 0.5
            potential_idx_anomaly = np.where(y_test != inlier_idx)[0]
            specific_idx_anomaly = [
                potential_idx_anomaly[x] for x in random.sample(
                    range(0, len(potential_idx_anomaly)),
                    math.ceil(anomaly_frac * len(specific_idx) /
                              (1 - anomaly_frac)))
            ]

            anomaly_data = x_test[specific_idx_anomaly].reshape(-1, 28, 28, 1)
            data = np.append(inlier_data, anomaly_data).reshape(-1, 28, 28, 1)

            # True labels are 1 for inliers and 0 for anomalies, since discriminator outputs higher values for inliers
            labels = np.append(np.ones(len(inlier_data)),
                               np.zeros(len(anomaly_data)))

            # Shuffle data so not only anomaly points are removed if data is shortened below
            tmp_perm = np.random.permutation(len(data))
            data = data[tmp_perm]
            labels = labels[tmp_perm]

            # Only whole batches
            n_batches = len(data) // tmp_ALOCC_model.batch_size
            #            print("Batch size: ", tmp_ALOCC_model.batch_size, "n batches: ", n_batches)
            data = data[:n_batches * tmp_ALOCC_model.batch_size]
            labels = labels[:len(data)]

            # Get test results from discriminator
            results_d = tmp_ALOCC_model.f_test_frozen_model(data)

            # Compute performance metrics
            roc_auc = roc_auc_score(labels, results_d)
            print('AUROC: ', roc_auc)

            roc_prc = average_precision_score(labels, results_d)
            print("AUPRC: ", roc_prc)

            print('Test completed')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])
        elif FLAGS.dataset == 'UCSD':
            # else: UCSD (depends on infrastructure)
            for s_image_dirs in sorted(
                    glob(
                        os.path.join(FLAGS.dataset_address,
                                     'Test[0-9][0-9][0-9]'))):
                tmp_lst_image_paths = []
                if os.path.basename(s_image_dirs) not in ['Test004']:
                    print('Skip ', os.path.basename(s_image_dirs))
                    continue
                for s_image_dir_files in sorted(
                        glob(os.path.join(s_image_dirs, '*'))):
                    if os.path.basename(s_image_dir_files) not in ['068.tif']:
                        print('Skip ', os.path.basename(s_image_dir_files))
                        continue
                    tmp_lst_image_paths.append(s_image_dir_files)

                #random
                #lst_image_paths = [tmp_lst_image_paths[x] for x in random.sample(range(0, len(tmp_lst_image_paths)), n_fetch_data)]
                lst_image_paths = tmp_lst_image_paths
                #images =read_lst_images(lst_image_paths,nd_patch_size,nd_patch_step,b_work_on_patch=False)
                images = read_lst_images_w_noise2(lst_image_paths,
                                                  nd_patch_size, nd_patch_step)

                lst_prob = process_frame(os.path.basename(s_image_dirs),
                                         images, tmp_ALOCC_model)

                print('pseudocode test is finished')

                # This code just checks the output for readers
                # ...

        elif FLAGS.dataset in ('prosivic', 'dreyeve', 'bdd100k'):
            data = tmp_ALOCC_model.data
            labels = tmp_ALOCC_model.test_labels

        # Below runs for every dataset that set `data` and `labels` above (the UCSD branch does not, so it would raise NameError here)
        test_dir = log_dir + "test/"
        check_dir(test_dir)

        # Shuffle data so not only anomaly points are removed if data is shortened below
        tmp_perm = np.random.permutation(len(data))
        data = data[tmp_perm]
        labels = labels[tmp_perm]

        # Only whole batches
        n_batches = len(data) // tmp_ALOCC_model.batch_size
        #            print("Batch size: ", tmp_ALOCC_model.batch_size, "n batches: ", n_batches)
        data = data[:n_batches * tmp_ALOCC_model.batch_size]
        labels = labels[:len(data)]

        # Get test results from discriminator
        results_d = tmp_ALOCC_model.f_test_frozen_model(data)

        # Compute performance metrics
        roc_auc = roc_auc_score(labels, results_d)
        print('AUROC: ', roc_auc)

        roc_prc = average_precision_score(labels, results_d)
        print("AUPRC: ", roc_prc)

        # Pickle results
        results = [labels, results_d]
        results_file = test_dir + "results.pkl"
        with open(results_file, 'wb') as f:
            pickle.dump(results, f)
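        # The results can later be reloaded with pickle.load() for further analysis or plotting.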

        print('Test completed')
        exit()
Example #3
def main(_):
    n_per_itr_print_results = 100
    kb_work_on_patch = False
    nd_input_frame_size = (180, 270)
    nd_slice_size = (180, 270)
    n_stride = 1

    FLAGS.input_width = nd_slice_size[0]
    FLAGS.input_height = nd_slice_size[1]
    FLAGS.output_width = nd_slice_size[0]
    FLAGS.output_height = nd_slice_size[1]

    FLAGS.dataset = "data-alocc"
    FLAGS.dataset_address = "./dataset/data-alocc/test/"
    FLAGS.checkpoint_dir = "./checkpoint/" + "{}_{}_{}_{}_{}".format(
        FLAGS.dataset,
        FLAGS.batch_size,
        FLAGS.output_height,
        FLAGS.output_width,
        FLAGS.r_alpha
    )
    FLAGS.sample_dir = os.path.join("./samples/check_QA", (str(FLAGS.use_ckpt) + "_" + str(FLAGS.r_alpha)))

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    nd_patch_step = (n_stride, n_stride)

    FLAGS.nStride = n_stride
    # FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1

    pp.pprint(flags.FLAGS.__flags)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
        )

        # show_all_variables()

        print("--------------------------------------------------")
        print("Load Pretrained Model...")
        tmp_ALOCC_model.f_check_checkpoint(checkpoint_number=FLAGS.use_ckpt)

        t = time.time()

        data_folder = "/home/zbaum/Baum/COVID-Lung-Classifier/data-alocc/test/0/"
        lst_image_paths = glob(os.path.join(data_folder, FLAGS.input_fname_pattern))
        images_0 = read_lst_images(lst_image_paths, None, None, b_work_on_patch=False)

        data_folder = "/home/zbaum/Baum/COVID-Lung-Classifier/data-alocc/test/1/"
        lst_image_paths = glob(os.path.join(data_folder, FLAGS.input_fname_pattern))
        images_1 = read_lst_images(lst_image_paths, None, None, b_work_on_patch=False)

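        # Stack both test classes into a single array for the frozen model.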
        images = np.vstack((images_0, images_1))

        t = time.time() - t
        print(" [*] Loaded Data in {:3f}s".format(t))

        print(images.shape)
        tmp_ALOCC_model.f_test_frozen_model(images)
Example #4
def main(_):
    print('Program started at', time.time())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 504

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])
        else:
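            # Non-MNIST path: load the test split, shuffle it, and score one batch.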
            data = read_data.test_data(1)
            np.random.shuffle(data)
            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
Example #5
def main(_):
    print('Program started at', time.time())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    nd_sliced_size = (224, 224)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 1

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            seq_len=FLAGS.seq_len,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        flag = tmp_ALOCC_model.f_check_checkpoint()
        if flag == -1:
            print('[!] Load checkpoint failed')
            import sys
            sys.exit()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])

        elif FLAGS.dataset == 'ped1_seq':
            from scipy.stats import logistic
            from matplotlib import pyplot as plt
            import shutil
            result_path = './test_result'

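            # Recreate a clean result directory for the per-video probability plots.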
            try:
                shutil.rmtree(result_path)
            except:
                pass

            os.mkdir(result_path)

            root = '/home/ltj/codes/split_dataset/share/data/videos/avenue/avenue_test_t8_splited'
            lst = os.listdir(root)
            for fn in lst:
                tmp = []
                path = os.path.join(root, fn)
                h5_lst = os.listdir(path)
                h5_len = len(h5_lst)
                for i in range(h5_len):
                    h5_path = os.path.join(path, str(i) + '.h5')
                    with h5py.File(h5_path, 'r') as f:
                        tmp.append(f['data'][()])  # f['data'].value was removed in h5py 3.0
                lst_prob, generated = tmp_ALOCC_model.f_test_frozen_model(tmp)
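                # Squash raw discriminator scores into (0, 1) with the logistic CDF before plotting.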
                probs = logistic.cdf(np.concatenate(lst_prob))

                T = np.array(range(len(probs)))
                plt.plot(T, probs)
                plt.savefig(
                    os.path.join(result_path,
                                 fn.split('.')[0] + '.jpg'))
                plt.clf()
                plt.cla()
                plt.close()

        # else: UCSD (depends on infrastructure)
        elif FLAGS.dataset == 'UCSD':
            for s_image_dirs in sorted(
                    glob.glob(
                        os.path.join(FLAGS.dataset_address,
                                     'Test[0-9][0-9][0-9]'))):
                print(s_image_dirs)
                tmp_lst_image_paths = []
                if os.path.basename(s_image_dirs) not in ['Test004']:
                    print('Skip ', os.path.basename(s_image_dirs))
                    continue
                for s_image_dir_files in sorted(
                        glob.glob(os.path.join(s_image_dirs, '*'))):
                    if os.path.basename(s_image_dir_files) not in ['068.tif']:
                        print('Skip ', os.path.basename(s_image_dir_files))
                        continue
                    tmp_lst_image_paths.append(s_image_dir_files)

                # random
                # lst_image_paths = [tmp_lst_image_paths[x] for x in random.sample(range(0, len(tmp_lst_image_paths)), n_fetch_data)]
                lst_image_paths = tmp_lst_image_paths
                # images =read_lst_images(lst_image_paths,nd_patch_size,nd_patch_step,b_work_on_patch=False)
                images = read_lst_images_w_noise2(lst_image_paths,
                                                  nd_patch_size, nd_patch_step)

                lst_prob = process_frame(os.path.basename(s_image_dirs),
                                         images, tmp_ALOCC_model)

                print('pseudocode test is finished')

                # This code just checks the output for readers
                # ...
        """
Example #6
def main(_):
    print('Program started at', time.time())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 100

    message = []
    total = []
    acc = 0
    pres = 0
    recall = 0
    f1 = 0

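    # Evaluate one checkpoint per attention_label (one per CIFAR-10 class) and average the metrics over the 10 runs.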
    for i in range(10):
        FLAGS.checkpoint_dir = "result/checkpoint_step2/cifar-10_128_32_32_vae0/"
        FLAGS.attention_label = i
        FLAGS.sample_dir = 'samples/'
        FLAGS.checkpoint_dir = FLAGS.checkpoint_dir.replace(
            "vae0", "vae{}".format(FLAGS.attention_label))

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
        run_config = tf.ConfigProto(gpu_options=gpu_options)
        run_config.gpu_options.allow_growth = True
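        # Reset the default graph so each per-class model builds fresh variables.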
        tf.reset_default_graph()

        with tf.Session(config=run_config) as sess:
            tmp_ALOCC_model = ALOCC_Model(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                attention_label=FLAGS.attention_label,
                r_alpha=FLAGS.r_alpha,
                r_beta=FLAGS.r_beta,
                is_training=FLAGS.train,
                pre=FLAGS.pretrain,
                pre_dir=FLAGS.pre_dir,
                dataset_name=FLAGS.dataset,
                dataset_address=FLAGS.dataset_address,
                input_fname_pattern=FLAGS.input_fname_pattern,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                nd_patch_size=nd_patch_size,
                n_stride=n_stride,
                n_per_itr_print_results=n_per_itr_print_results,
                kb_work_on_patch=kb_work_on_patch,
                nd_input_frame_size=nd_input_frame_size,
                n_fetch_data=n_fetch_data)

            show_all_variables()

            print('--------------------------------------------------')
            print('Load Pretrained Model...')
            tmp_ALOCC_model.f_check_checkpoint()

            if FLAGS.dataset == 'mnist':
                mnist = input_data.read_data_sets(FLAGS.dataset_address)

                specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
                specific_idx = np.where(mnist.train.labels == 6)[0]
                ten_percent_anomaly = [
                    specific_idx_anomaly[x]
                    for x in random.sample(range(0, len(specific_idx_anomaly)),
                                           len(specific_idx) // 40)
                ]

                data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
                tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                    -1, 28, 28, 1)
                data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

                lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                    data[0:FLAGS.batch_size])
                print('check is ok')
                exit()
                #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])
            else:
                data, labels = read_data.test_data(FLAGS.attention_label)
                # np.random.shuffle(data)
                lst_prob = tmp_ALOCC_model.f_test_frozen_model(data)
                # maxi = max(lst_prob)
                # mini = min(lst_prob)
                # average = (maxi+mini) / 2.0
                # print(average)
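                # Threshold the discriminator scores at their mean to binarize predictions.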
                best_th = np.mean(lst_prob)
                for x in range(len(lst_prob)):
                    if lst_prob[x] >= best_th:
                        lst_prob[x] = 1
                    else:
                        lst_prob[x] = 0
                C = confusion_matrix(labels, lst_prob)
                print(C)
                msg = "class_id: {}, ".format(FLAGS.attention_label) + "threshold: {:.3f}\n".format(best_th) + \
                    'accuracy: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1 score: {:.3f}\n'.format(
                        # average,
                        accuracy_score(labels, lst_prob),
                        precision_score(labels, lst_prob, average='binary'),
                        recall_score(labels, lst_prob, average='binary'),
                        f1_score(labels, lst_prob, average='binary')) + str(C)
                acc += accuracy_score(labels, lst_prob) / 10.
                pres += precision_score(labels, lst_prob,
                                        average='binary') / 10.
                recall += recall_score(labels, lst_prob,
                                       average='binary') / 10.
                f1 += f1_score(labels, lst_prob, average='binary') / 10.

                print(msg)
                message.append(msg)
                print("\n")
                # logging.info(msg)
                # print('check is ok')
                # exit()

    with open("print.txt", "w+") as f:
        for msg in message:
            f.write(msg)
            f.write("\n")

        result = 'accuracy: {:.3f}, precision: {:.3f}, recall: {:.3f}, f1 score: {:.3f}'.format(
            acc, pres, recall, f1)
        f.write(result)