Example #1
def main(argv=None):

    model = create_model(pretrained=False,
                         architecture=FLAGS.archi,
                         is_train=False)
    model = load_pth(model, FLAGS.model)

    dataset_manager = DatasetManager(FLAGS.input)
    _, valid_dataset_container = dataset_manager.gen_train_and_valid(
        idx_kfold=FLAGS.idx_kfold,
        kfold=FLAGS.kfold,
        # Arguments passed to the Dataset class below
        shape=(FLAGS.img_height, FLAGS.img_width, NUM_CHANNELS),
        mode='train',
        draw_first=FLAGS.draw_first,
        thickness=FLAGS.thickness,
        white_background=FLAGS.white_background,
        draw_contour=FLAGS.draw_contour,
        draw_contour_version=FLAGS.draw_contour_version)

    total_correct = 0
    num_sample = 0
    arr_score_base = np.asarray([1.0, 1.0 / 2.0, 1.0 / 3.0])
    try:
        for i, dataset in enumerate(valid_dataset_container):
            print("Validating {}/{}".format(
                i + 1, len(valid_dataset_container.csv_files)))
            num_sample += len(dataset)
            valid_loader = DataLoader(dataset,
                                      batch_size=FLAGS.batch_size,
                                      shuffle=False,
                                      num_workers=4)
            with torch.no_grad():
                for batch_idx, sample in enumerate(
                        tqdm(valid_loader, ascii=True)):
                    images, labels = sample['image'].to(
                        DEVICE), sample['y'].to(DEVICE)
                    logits = model(images)
                    val_logits_top_k, idx_logits_top_k = torch.topk(logits, 3)
                    idx_logits_top_k = idx_logits_top_k.t()
                    correct = idx_logits_top_k.eq(
                        labels.view(1, -1).expand_as(idx_logits_top_k))
                    cur_pred = torch.sum(correct, dim=1).cpu().numpy()
                    cur_score = np.sum(np.multiply(cur_pred, arr_score_base))
                    total_correct += cur_score
                print("Validation score in 1~{} is {}".format(
                    i + 1, total_correct / num_sample))
    except StopIteration:
        pass

    val_score = total_correct / num_sample

    print("Validation score is {}".format(val_score))
Example #2
def eval(model_name, file_prefix):
    transformer = Transformer.load_model(model_name, is_training=False)

    cfg = transformer.config
    batch_size = cfg['train_params']['batch_size']
    seq_len = cfg['train_params']['seq_len'] + 1
    print(f'batch_size:{batch_size} seq_len:{seq_len}')

    dm = DatasetManager(cfg['dataset'])
    dm.maybe_download_data_files()
    data_iter = dm.data_generator(batch_size,
                                  seq_len,
                                  data_type='test',
                                  file_prefix=file_prefix,
                                  epoch=1)

    refs = []
    hypos = []
    for source_ids, target_ids in data_iter:
        valid_size = len(source_ids)

        if valid_size < batch_size:
            source_ids = np.array(
                list(source_ids) + [[PAD_ID] * seq_len] *
                (batch_size - source_ids.shape[0]))
            target_ids = np.array(
                list(target_ids) + [[PAD_ID] * seq_len] *
                (batch_size - target_ids.shape[0]))

        pred_ids = transformer.predict(source_ids)

        refs += [[recover_sentence(sent_ids, dm.target_id2word)]
                 for sent_ids in target_ids[:valid_size]]
        hypos += [
            recover_sentence(sent_ids, dm.target_id2word)
            for sent_ids in pred_ids[:valid_size]
        ]
        print(f"Num. sentences processed: {len(hypos)}", end='\r', flush=True)

    print()

    bleu_score = corpus_bleu(refs, hypos)
    results = dict(
        num_sentences=len(hypos),
        bleu_score=bleu_score * 100.,
    )

    # Sample a few translated sentences.
    indices = np.random.choice(list(range(len(hypos))), size=10, replace=False)
    for i in indices:
        print(f"Source: '{refs[i][0]}' ==> Target: '{hypos[i]}'.")

    print(results)
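Because the graph is built for a fixed batch_size, the loop above pads the final short batch with PAD_ID rows and then keeps only the first valid_size predictions. A standalone sketch of that pattern with invented toy sizes:

# Standalone sketch (toy values, not from the original code) of the
# final-batch padding pattern: pad a short batch up to batch_size with
# PAD_ID rows, run the model, then keep only the first valid_size rows.
import numpy as np

PAD_ID, batch_size, seq_len = 0, 4, 6
source_ids = np.random.randint(1, 100, size=(3, seq_len))  # last batch: only 3 rows

valid_size = len(source_ids)
if valid_size < batch_size:
    filler = np.full((batch_size - valid_size, seq_len), PAD_ID)
    source_ids = np.concatenate([source_ids, filler], axis=0)

pred_ids = source_ids            # stand-in for transformer.predict(source_ids)
pred_ids = pred_ids[:valid_size]  # drop the padded rows again
print(pred_ids.shape)             # (3, 6)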
Example #3
    def describe(self, image, test_image: bool):
        """
        Perform MRELBP Description for an Image
        :param image: Image object or float32 ndarray image with zero mean and unit variance. (Use ImageUtils.scale_img first)
        :param test_image: Boolean to determine if we are evaluating the test image
        :return: MRELBP descriptor histogram
        """
        if isinstance(image, DatasetManager.Image):
            if test_image:
                image_data = image.test_data
            else:
                image_data = image.data
        elif isinstance(image, np.ndarray):
            image_data = image
            if self.save_img:
                raise ValueError(
                    'save_img set but passed as ndarray instead of DatasetManager.Image'
                )
        else:
            raise ValueError('Invalid image type')

        # Zero-pad the image with a border of self.padding pixels.
        image_padded = pad(array=image_data,
                           pad_width=self.padding,
                           mode='constant',
                           constant_values=0)
        # Allocate memory for the filtered output image
        image_filtered = np.zeros(image_padded.shape, dtype=np.float32)
        # Apply a median filter to the padded image
        SharedFunctions.median_filter(image_padded, self.w_c, self.padding,
                                      image_filtered)
        # Make a new Image instance to avoid overwriting the input image's data
        if self.save_img:
            GenerateExamples.write_image(
                ImageUtils.convert_float32_image_uint8(image_padded), 'MRELBP',
                '{}-padded.png'.format(image.name))
            GenerateExamples.write_image(
                ImageUtils.convert_float32_image_uint8(image_filtered),
                'MRELBP', '{}-median-filtered.png'.format(image.name))
            describe_image = DatasetManager.Image(image_filtered, image.name,
                                                  None)
        else:
            describe_image = DatasetManager.Image(image_filtered, None, None)

        # Return MRELBP descriptor
        return self.calculate_relbp(describe_image)
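A standalone sketch of the zero-padding step used above: np.pad with mode='constant' grows the image by self.padding pixels on every side. The padding width of 4 below is only an illustration, not a value from the original code.

# Minimal sketch of the constant zero-padding step (padding width invented).
import numpy as np

image_data = np.random.randn(8, 8).astype(np.float32)
padding = 4
image_padded = np.pad(image_data, pad_width=padding, mode='constant',
                      constant_values=0)
print(image_padded.shape)  # (16, 16): 8 + 2 * 4 in each dimension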
Example #4
def main(argv=None):
    os.makedirs(FLAGS.model, exist_ok=True)
    os.makedirs(FLAGS.log, exist_ok=True)

    with open(os.path.join(FLAGS.model, 'config.json'), 'w') as f:
        json.dump(FLAGS.flag_values_dict(), f, indent=4, sort_keys=True)

    dataset_manager = DatasetManager(FLAGS.input)
    train_container, valid_container = dataset_manager.gen_train_and_valid(
        idx_kfold=FLAGS.idx_kfold, kfold=FLAGS.kfold,
        shuffle_train=True, shuffle_valid=True, verbose=False,
        # Parameters for DatasetClass
        shape=(FLAGS.img_height, FLAGS.img_width, NUM_CHANNELS), mode='train',
        draw_first=FLAGS.draw_first, thickness=FLAGS.thickness,
        white_background=FLAGS.white_background,
        draw_contour=FLAGS.draw_contour,
        draw_contour_version=FLAGS.draw_contour_version)

    if FLAGS.debug:
        show_images(train_container)
        sys.exit()

    model = create_model(pretrained=FLAGS.pretrained, architecture=FLAGS.archi)
    if FLAGS.restart is not None:
        util.load_pth(model, FLAGS.restart)

    with open(os.path.join(FLAGS.model, "model.txt"), 'w') as f:
        print(model, file=f)

    optimizer = OptimizerManager(
        optimizer=FLAGS.optimizer, lr=FLAGS.lr, lr_decay=FLAGS.lr_decay, milestones=FLAGS.milestones,
        model=model, momentum=FLAGS.momentum)
    criterion = metrics.softmax_cross_entropy_with_logits()
    score_fn = metrics.map3()

    writer = SummaryWriter(log_dir=FLAGS.log)

    if FLAGS.step is not None:
        total_step = FLAGS.step
        total_epoch = None
    elif FLAGS.epoch is not None:
        total_step = np.iinfo(np.int32).max
        total_epoch = FLAGS.epoch
    else:
        raise AssertionError("step or epoch must be specified.")

    _ = train(model, optimizer, criterion, score_fn, train_container, valid_container,
              total_step=total_step, total_epoch=total_epoch, save_interval=FLAGS.save_interval, writer=writer)
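The entry points above read every hyper-parameter from a FLAGS object and dump it with FLAGS.flag_values_dict(), which suggests absl-py; treating it as absl is an assumption here. A minimal sketch of how such flags are typically declared and wired to main (the flag names mirror a few used above, the defaults are invented):

# Minimal sketch assuming absl-py; flag names mirror a few used above,
# defaults are invented for illustration.
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('input', None, 'Dataset root directory')
flags.DEFINE_string('model', 'model', 'Directory for checkpoints and config.json')
flags.DEFINE_integer('kfold', 5, 'Number of cross-validation folds')
flags.DEFINE_integer('idx_kfold', 0, 'Which fold to hold out for validation')


def main(argv=None):
    # flag_values_dict() is the same call used above to dump the config to JSON.
    print(FLAGS.flag_values_dict())


if __name__ == '__main__':
    app.run(main)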
Example #5
def transformer(bsize=None):
    import sys
    sys.path.insert(0, './transformer/')
    import tensorflow as tf
    import transformer as transf
    from data import DatasetManager
    dm = DatasetManager("wmt14")
    dm.maybe_download_data_files()
    dm.load_vocab()
    transformer = transf.Transformer(
        num_heads=8,
        d_model=512,
        d_ff=2048,
        model_name="transformer",
        tf_sess_config=dict(allow_soft_placement=True)
    )
    train_params = dict(
        learning_rate=1e-4,
        batch_size=bsize,
        seq_len=10,
        max_steps=300000,
    )
    transformer.build_model("wmt14", dm.source_id2word, dm.target_id2word, 0,**train_params)
    loss = transformer._loss

    optimizer = tf.train.AdamOptimizer(learning_rate=0.2).minimize(tf.reduce_sum(loss))
    return optimizer
Example #6
    def test_build_and_load_model(self):
        dm = DatasetManager('iwslt15')
        dm.load_vocab()

        self.t.build_model('iwslt15', dm.source_id2word, dm.target_id2word,
                           PAD_ID)
        print_trainable_variables()
        self.t.init()
        value_dict = self.t.get_variable_values()

        tf.reset_default_graph()
        model = Transformer.load_model('test')
        out = model.predict(np.zeros(model.raw_input_ph.shape))
        assert out.shape == model.raw_target_ph.shape

        value_dict2 = model.get_variable_values()
        for k in value_dict2:
            print("\n*************************************")
            print(k)
            print(value_dict[k])
            print(value_dict2[k])
            assert np.allclose(value_dict[k], value_dict2[k])
Example #7
    def __init__(self, path):
        """
        Generate Example images for dissertation write-up
        :param path: Image to produce example images with
        """
        self.image_path = path
        image_name = path.split(os.sep)[-1].partition('.')[0]
        #image_uint8 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

        image_uint8 = cv2.resize(cv2.imread(path, cv2.IMREAD_GRAYSCALE), (0, 0),
                                fx=0.5,
                                fy=0.5)
        # Convert from uint8 to float32 without normalizing to zero mean
        image_unscaled = ImageUtils.convert_uint8_image_float32(image_uint8)
        # Convert from uint8 to float32 while normalizing to zero mean
        image_scaled = ImageUtils.scale_uint8_image_float32(image_uint8)
        image_gauss_10 = ImageUtils.add_gaussian_noise_skimage(image_scaled, 10)
        image_gauss_25 = ImageUtils.add_gaussian_noise_skimage(image_scaled, 25)
        image_speckle_002 = ImageUtils.add_speckle_noise_skimage(image_scaled, 0.02)
        image_speckle_004 = ImageUtils.add_speckle_noise_skimage(image_scaled, 0.04)
        image_salt_pepper_002 = ImageUtils.add_salt_pepper_noise_skimage(image_scaled, 0.02)
        image_salt_pepper_004 = ImageUtils.add_salt_pepper_noise_skimage(image_scaled, 0.04)
        image_label = path.split(os.sep)[-1].partition('-')[0]
        # Generate different permutations of this sample image
        self.image_uint8 = DatasetManager.Image(image_uint8, image_name, image_label)
        self.image_unscaled = DatasetManager.Image(image_unscaled, image_name, image_label)
        self.image_scaled = DatasetManager.Image(image_scaled, image_name, image_label)
        self.image_gauss_10 = DatasetManager.Image(image_gauss_10, image_name, image_label)
        self.image_gauss_10.test_noise = 'gaussian'
        self.image_gauss_10.test_noise_val = 10
        self.image_gauss_25 = DatasetManager.Image(image_gauss_25, image_name, image_label)
        self.image_gauss_25.test_noise = 'gaussian'
        self.image_gauss_25.test_noise_val = 25
        self.image_speckle_002 = DatasetManager.Image(image_speckle_002, image_name, image_label)
        self.image_speckle_002.test_noise = 'speckle'
        self.image_speckle_002.test_noise_val = 0.02
        self.image_speckle_004 = DatasetManager.Image(image_speckle_004, image_name, image_label)
        self.image_speckle_004.test_noise = 'speckle'
        self.image_speckle_004.test_noise_val = 0.04
        self.image_salt_pepper_002 = DatasetManager.Image(image_salt_pepper_002, image_name, image_label)
        self.image_salt_pepper_002.test_noise = 'salt-pepper'
        self.image_salt_pepper_002.test_noise_val = 0.02
        self.image_salt_pepper_004 = DatasetManager.Image(image_salt_pepper_004, image_name, image_label)
        self.image_salt_pepper_004.test_noise = 'salt-pepper'
        self.image_salt_pepper_004.test_noise_val = 0.04
        self.path = os.path.join(GlobalConfig.get('CWD'), 'example')

        write_image(ImageUtils.convert_float32_image_uint8(self.image_unscaled.data), None, image_name + '-unedited.png')
        write_image(ImageUtils.convert_float32_image_uint8(self.image_scaled.data), None, image_name + '-scaled.png')
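The ImageUtils noise helpers used above are project-specific, and how their intensity arguments (10, 25, 0.02, 0.04) map onto noise parameters is not shown here. As a rough standalone illustration only, skimage.util.random_noise can produce the same three noise families on a float image in [0, 1]; the parameter mapping below is an assumption, not the project's:

# Standalone illustration with skimage (not the project's ImageUtils API);
# the variance/amount values are assumptions for demonstration only.
import numpy as np
from skimage.util import random_noise

img = np.random.rand(64, 64).astype(np.float32)

gauss = random_noise(img, mode='gaussian', var=(10 / 255) ** 2)
speckle = random_noise(img, mode='speckle', var=0.02)
salt_pepper = random_noise(img, mode='s&p', amount=0.02)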
Example #8
def main():
    # Parse Args.
    # 'scale' allows the image scale to be set, e.g. 0.25, 0.5, 1.0
    argList = sys.argv[1:]
    shortArg = 'a:d:t:s:S:k:rn:i:me'
    longArg = [
        'algorithm=', 'dataset=', 'train-ratio=', 'scale=', 'test-scale=',
        'folds=', 'rotations', 'noise=', 'noise-intensity=', 'multiprocess',
        'example', 'data-ratio=', 'mrlbp-classifier=', 'noise-train', 'ecs',
        'debug'
    ]

    valid_algorithms = [
        'RLBP', 'MRLBP', 'MRELBP', 'BM3DELBP', 'NoiseClassifier'
    ]
    valid_datasets = ['kylberg']
    valid_noise = ['gaussian', 'speckle', 'salt-pepper']
    valid_mrlbp_classifiers = ['svm', 'knn']

    try:
        args, vals = getopt.getopt(argList, shortArg, longArg)

        for arg, val in args:
            if arg in ('-a', '--algorithm'):
                if val in valid_algorithms:
                    print('Using algorithm:', val)
                    GlobalConfig.set("algorithm", val)
                else:
                    raise ValueError(
                        'Invalid algorithm configured, choose one of the following:',
                        valid_algorithms)
            elif arg in ('-d', '--dataset'):
                if val in valid_datasets:
                    print("Using dataset:", val)
                    GlobalConfig.set("dataset", val)
                else:
                    raise ValueError(
                        'Invalid dataset configured, choose one of the following:',
                        valid_datasets)
            elif arg in ('-t', '--train-ratio'):
                if 0 < float(val) <= 1.0:
                    print('Using train ratio of', val)
                    GlobalConfig.set('train_ratio', float(val))
                else:
                    raise ValueError(
                        'Train ratio must be 0 < train-ratio <= 1.0')
            elif arg in ('-s', '--scale'):
                if 0 < float(val) <= 1.0:
                    print('Using training image scale:', val)
                    GlobalConfig.set('scale', float(val))
                else:
                    raise ValueError('Scale must be 0 < scale <= 1.0')
            elif arg in ('-S', '--test-scale'):
                if 0 < float(val) <= 1.0:
                    print('Using testing image scale:', val)
                    GlobalConfig.set('test_scale', float(val))
                else:
                    raise ValueError('Test scale must be 0 < scale <= 1.0')
            elif arg in ('-k', '--folds'):
                print('Doing {} folds'.format(val))
                GlobalConfig.set("folds", int(val))
            elif arg in ('-r', '--rotations'):
                print('Using rotated image sources')
                GlobalConfig.set("rotate", True)
            elif arg in ('-n', '--noise'):
                if val in valid_noise:
                    print('Applying noise:', val)
                    GlobalConfig.set("noise", val)
                else:
                    raise ValueError(
                        'Invalid noise type, choose one of the following:',
                        valid_noise)
            elif arg in ('-i', '--noise-intensity'):
                print('Using noise intensity (sigma / ratio) of:', val)
                GlobalConfig.set("noise_val", float(val))
            elif arg in ('-m', '--multiprocess'):
                cores = psutil.cpu_count()
                print('Using {} processor cores for computing featurevectors'.
                      format(cores))
                GlobalConfig.set('multiprocess', True)
                GlobalConfig.set('cpu_count', cores)
            elif arg in ('-e', '--example'):
                print('Generating algorithm example images')
                GlobalConfig.set('examples', True)
            elif arg == '--data-ratio':
                if 0 < float(val) <= 1.0:
                    print('Using dataset ratio:', val)
                    GlobalConfig.set('data_ratio', float(val))
                else:
                    raise ValueError('Data ratio must be 0 < ratio <= 1.0')
            elif arg == '--mrlbp-classifier':
                if val in valid_mrlbp_classifiers:
                    print(
                        "MRLBP algorithm (if configured) will use {} classifier"
                        .format(val))
                    GlobalConfig.set('mrlbp_classifier', val)
                else:
                    raise ValueError(
                        'Invalid classifier chosen for mrlbp, choose one of the following:',
                        valid_mrlbp_classifiers)
            elif arg == '--noise-train':
                print(
                    "Applying noise to the training dataset as well as the test dataset"
                )
                GlobalConfig.set('train_noise', True)
            elif arg == '--ecs':
                print("Loading dataset from C:\Local")
                GlobalConfig.set('ECS', True)
            elif arg == '--debug':
                print("Running in debug mode")
                GlobalConfig.set('debug', True)
            else:
                raise ValueError('Unhandled argument provided:', arg)
    except getopt.error as err:
        print(str(err))

    if GlobalConfig.get('ECS'):
        GlobalConfig.set(
            'CWD',
            r'\\filestore.soton.ac.uk\users\ojvl1g17\mydocuments\COMP3200-Texture-Classification'
        )
    else:
        GlobalConfig.set('CWD', os.getcwd())

    if GlobalConfig.get('examples'):
        write_examples()

    # Load configured Dataset
    if GlobalConfig.get('dataset') == 'kylberg':
        if GlobalConfig.get('debug'):
            # To save time in debug mode, load only two classes and the configured data_ratio of samples
            kylberg = DatasetManager.KylbergTextures(
                num_classes=2, data_ratio=GlobalConfig.get('data_ratio'))
        else:
            kylberg = DatasetManager.KylbergTextures(
                num_classes=28, data_ratio=GlobalConfig.get('data_ratio'))
        # Load Dataset & Cross Validator
        dataset = kylberg.load_data()
        cross_validator = kylberg.get_cross_validator()

        print("Dataset loaded")
    elif GlobalConfig.get('dataset') is None:
        raise ValueError('No Dataset configured')
    else:
        raise ValueError('Invalid dataset')

    if GlobalConfig.get('rotate'):
        dataset_folder = GlobalConfig.get('dataset') + '-rotated'
    else:
        dataset_folder = GlobalConfig.get('dataset')

    out_folder = os.path.join(GlobalConfig.get('CWD'), 'out',
                              GlobalConfig.get('algorithm'), dataset_folder)
    # Initialise algorithm
    if GlobalConfig.get('algorithm') == 'RLBP':
        print("Applying RLBP algorithm")
        algorithm = RLBP.RobustLBP()
    elif GlobalConfig.get('algorithm') == 'MRLBP':
        print("Applying MRLBP algorithm")
        algorithm = RLBP.MultiresolutionLBP(p=[8, 16, 24], r=[1, 2, 3])
    elif GlobalConfig.get('algorithm') == 'MRELBP':
        print("Applying MRELBP algorithm")
        algorithm = MRELBP.MedianRobustExtendedLBP(r1=[2, 4, 6, 8],
                                                   p=8,
                                                   w_center=3,
                                                   w_r1=[3, 5, 7, 9])
    elif GlobalConfig.get('algorithm') == 'BM3DELBP':
        print("Applying BM3DELBP algorithm")
        algorithm = BM3DELBP.BM3DELBP()
    elif GlobalConfig.get('algorithm') == 'NoiseClassifier':
        # The noise classifier is normally used inside the BM3DELBP algorithm;
        # this allows benchmarking the classifier on its own.
        algorithm = NoiseClassifier.NoiseClassifier()
    else:
        raise ValueError('Invalid algorithm choice')

    # Get the Training out directory (i.e. Images without scaling/rotation/noise)
    train_out_dir = os.path.join(
        out_folder, algorithm.get_outdir(noisy_image=False,
                                         scaled_image=False))
    # Get the Testing out directory (i.e. Images with scaling/rotation/noise)
    noisy_image = GlobalConfig.get('noise') is not None
    scaled_image = GlobalConfig.get('test_scale') is not None
    test_out_dir = os.path.join(
        out_folder, algorithm.get_outdir(noisy_image, scaled_image))

    # Out path for noise classifier
    noise_out_dir = os.path.join(
        GlobalConfig.get('CWD'), 'out', 'NoiseClassifier', dataset_folder,
        "scale-{}".format(int(GlobalConfig.get('scale') * 100)))
    test_noise_out_dir = os.path.join(
        GlobalConfig.get('CWD'), 'out', 'NoiseClassifier', dataset_folder,
        algorithm.get_outdir(noisy_image, scaled_image))

    print("Replacing DatasetManager.Image with BM3DELBPImages")
    # Convert DatasetManager.Image into BM3DELBP.BM3DELBPImage
    if GlobalConfig.get('algorithm') in ('NoiseClassifier', 'BM3DELBP'):
        for index, img in enumerate(dataset):
            dataset[index] = BM3DELBP.BM3DELBPImage(img)
            # Also convert rotated images if necessary
            if img.test_rotations is not None:
                for rot_index, rotated_img in enumerate(img.test_rotations):
                    img.test_rotations[rot_index] = BM3DELBP.BM3DELBPImage(
                        rotated_img)

    if GlobalConfig.get('multiprocess'):
        for index, img in enumerate(dataset):
            dataset[index] = (index, img)

        if GlobalConfig.get('rotate'):
            maxtasks = 50
        else:
            maxtasks = None

        if GlobalConfig.get('algorithm') in ('NoiseClassifier', 'BM3DELBP'):
            with Pool(processes=GlobalConfig.get('cpu_count'),
                      maxtasksperchild=maxtasks) as pool:
                # Generate image noise featurevectors
                for index, image in tqdm.tqdm(pool.istarmap(
                        describe_noise_pool,
                        zip(dataset, repeat(noise_out_dir),
                            repeat(test_noise_out_dir))),
                                              total=len(dataset),
                                              desc='Noise Featurevectors'):
                    dataset[index] = image
        else:
            with Pool(processes=GlobalConfig.get('cpu_count'),
                      maxtasksperchild=maxtasks) as pool:
                # Generate featurevectors
                for index, image in tqdm.tqdm(pool.istarmap(
                        describe_image_pool,
                        zip(repeat(algorithm), dataset, repeat(train_out_dir),
                            repeat(test_out_dir))),
                                              total=len(dataset),
                                              desc='Texture Featurevectors'):
                    dataset[index] = image
    else:
        # Process the images without using multiprocessing Pools
        if GlobalConfig.get('algorithm') in ('NoiseClassifier', 'BM3DELBP'):
            for index, img in enumerate(dataset):
                # Generate image noise featurevectors
                describe_noise(img, noise_out_dir, test_noise_out_dir)
        else:
            print("BEGINNING TIMER:")
            start = timer()
            for index, img in enumerate(dataset):
                # Generate featurevectors
                describe_image(algorithm, img, train_out_dir, test_out_dir)
            end = timer()
            print("TIME TAKEN:", end - start)

    # Train models and perform predictions
    if GlobalConfig.get('algorithm') == 'RLBP':
        predictor = RLBP.RobustLBPPredictor(dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'MRLBP':
        print("Performing MRLBP Classification")
        predictor = RLBP.MultiresolutionLBPPredictor(dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'MRELBP':
        print("Performing MRELBP Classification")
        predictor = MRELBP.MedianRobustExtendedLBPPredictor(
            dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'BM3DELBP':
        print("Performing BM3DELBP Classification")
        predictor = BM3DELBP.BM3DELBPPredictor(dataset, cross_validator)
    elif GlobalConfig.get('algorithm') == 'NoiseClassifier':
        print("Applying noise classifier")
        predictor = BM3DELBP.NoiseTypePredictor(dataset, cross_validator)
    else:
        raise ValueError('Invalid algorithm choice')

    # Get the test label & test prediction for every fold of cross validation
    y_test, y_predicted = predictor.begin_cross_validation()
    if GlobalConfig.get('algorithm') == 'NoiseClassifier':
        if GlobalConfig.get('noise') is None:
            classes = ['no-noise', 'gaussian', 'speckle', 'salt-pepper']
        else:
            classes = ['gaussian', 'speckle', 'salt-pepper']
    else:
        classes = kylberg.classes

    # Display confusion matrix
    ClassificationUtils.pretty_print_conf_matrix(
        y_test,
        y_predicted,
        classes,
        title='{} Confusion Matrix'.format(GlobalConfig.get('algorithm')),
        out_dir=test_out_dir)

    # Display classification report
    ClassificationUtils.make_classification_report(y_test, y_predicted,
                                                   classes, test_out_dir)
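The option handling at the top of this example uses getopt with a short-option string and a list of long options. A small standalone sketch of how that spec turns a hypothetical command line into (arg, val) pairs (the option values are invented, and only a subset of the long options is repeated):

# Standalone sketch of the getopt spec used above; the command line is invented.
import getopt

argv = ['-a', 'MRELBP', '-d', 'kylberg', '-s', '0.5', '-k', '10', '-m']
args, vals = getopt.getopt(argv, 'a:d:t:s:S:k:rn:i:me',
                           ['algorithm=', 'dataset=', 'scale=', 'folds=',
                            'multiprocess'])
print(args)  # [('-a', 'MRELBP'), ('-d', 'kylberg'), ('-s', '0.5'), ('-k', '10'), ('-m', '')]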
Example #9
def manager(config):
    manager = DatasetManager(config)
    return manager
Example #10
def tune_noise_classifier():
    GlobalConfig.set('dataset', 'kylberg')
    GlobalConfig.set('ECS', True)
    GlobalConfig.set('algorithm', 'NoiseClassifier')
    GlobalConfig.set('scale', 0.5)
    GlobalConfig.set(
        'CWD',
        r'\\filestore.soton.ac.uk\users\ojvl1g17\mydocuments\COMP3200-Texture-Classification'
    )
    #GlobalConfig.set('CWD', os.getcwd())
    GlobalConfig.set('folds', 10)
    cores = psutil.cpu_count()
    GlobalConfig.set('cpu_count', cores)

    dataset = DatasetManager.KylbergTextures(num_classes=28, data_ratio=0.5)
    images = dataset.load_data()
    gc.collect()

    bm3d_images = []
    # Convert to BM3D images
    for image in images:
        new_image = BM3DELBP.BM3DELBPImage(image)
        bm3d_images.append(new_image)

    print("Image dataset loaded, loaded {} images".format(len(images)))

    noise_classifier = NoiseClassifier()
    cross_validator = dataset.get_cross_validator()

    bm3d_sigma = [10, 30, 40, 50]
    homomorphic_cutoff = [0.1, 0.5, 5, 10]
    homomorphic_a = [0.5, 0.75, 1.0]
    homomorphic_b = [0.1, 0.5, 1.0, 1.25]

    settings_jobs = []  # List of all configuration tuples

    for sigma_val in bm3d_sigma:
        for cutoff in homomorphic_cutoff:
            for a in homomorphic_a:
                for b in homomorphic_b:
                    settings_jobs.append((sigma_val, cutoff, a, b))

    results = []  # List of tuples (F1, sigma_val, cutoff, a, b)

    out_csv = os.path.join(GlobalConfig.get('CWD'), 'NoiseClassifierTuning',
                           'Results.txt')

    with Pool(processes=GlobalConfig.get('cpu_count'),
              maxtasksperchild=50) as pool:
        # Run the noise classifier cross-validation for each parameter configuration
        for result in tqdm.tqdm(pool.istarmap(
                do_classification,
                zip(settings_jobs, repeat(noise_classifier),
                    repeat(cross_validator), repeat(bm3d_images))),
                                total=len(settings_jobs),
                                desc='NoiseClassifier tuning'):
            f1, sigma, cutoff, a, b = result
            # Log to CSV file
            if os.path.isfile(out_csv):
                # CSV exists, append to end of file
                with open(out_csv, 'a', encoding="utf-8",
                          newline='') as resultsfile:
                    writer = csv.writer(resultsfile)
                    writer.writerow([f1, sigma, cutoff, a, b])

            else:
                # CSV does not exist. Write the headings
                with open(out_csv, 'w', encoding="utf-8",
                          newline='') as resultsfile:
                    writer = csv.writer(resultsfile)
                    writer.writerow(['f1', 'sigma_psd', 'cutoff', 'a', 'b'])
                    writer.writerow([f1, sigma, cutoff, a, b])

            results.append(result)

    # Sort largest to smallest, by F1 score
    results.sort(key=lambda tup: tup[0], reverse=True)

    print("Finished tuning parameters.")
    print("The top 3 results were:")
    for f1, sigma, cutoff, a, b in results[:3]:
        print("F1: {}, sigma_val: {}, cutoff_freq: {}, a: {}, b: {}".format(
            f1, sigma, cutoff, a, b))
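The four nested loops earlier in this example build settings_jobs as a full parameter grid; an equivalent construction with itertools.product, using the same parameter lists, is sketched below:

# Equivalent construction of settings_jobs with itertools.product
# (same parameter grids as in the example above).
from itertools import product

bm3d_sigma = [10, 30, 40, 50]
homomorphic_cutoff = [0.1, 0.5, 5, 10]
homomorphic_a = [0.5, 0.75, 1.0]
homomorphic_b = [0.1, 0.5, 1.0, 1.25]

settings_jobs = list(product(bm3d_sigma, homomorphic_cutoff,
                             homomorphic_a, homomorphic_b))
print(len(settings_jobs))  # 4 * 4 * 3 * 4 = 192 configurations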