Esempio n. 1
0
def get_object_confusion(class1, class2, similarity_model, config):
    """Score how well `similarity_model` distinguishes `class1` from `class2`.

    Builds a validation generator restricted to the two classes, runs batched
    predictions, optionally visualizes a handful of evenly spaced batches,
    and returns the accuracy ("distinguishability score").

    Args:
        class1: name of the first object class.
        class2: name of the second object class.
        similarity_model: model exposing `predict_on_batch`.
        config: dict with 'model' and 'benchmark' sections.

    Returns:
        float: accuracy of the model on the two-class test set.
    """
    model_config = config['model']
    benchmark_config = config['benchmark']
    dataset_path = benchmark_config['dataset_path']

    params = {
        'dim': model_config['input_shape'],
        'batch_size': benchmark_config['batch_size'],
        'shuffle': False
    }

    test_dataset = ImageDataset(dataset_path, 'validation')
    test_dataset.prepare_specific(benchmark_config['test_cases'] // 2, class1,
                                  class2)
    test_generator = DataGenerator(test_dataset, **params)

    # Visualize roughly 5 evenly spaced batches over the whole run; clamp to
    # 1 so small test sets never produce a zero interval.
    vis_interval = max(
        1,
        benchmark_config['test_cases'] // (5 * benchmark_config['batch_size']))

    # Accumulate per-batch chunks; np.append inside the loop is O(n^2).
    pred_chunks = []
    gt_chunks = []
    for i in tqdm(range(len(test_generator))):
        batch = test_generator[i]
        pred = similarity_model.predict_on_batch(batch[0])
        pred_chunks.append(np.asarray(pred).flatten())
        gt_chunks.append(np.asarray(batch[1]).flatten())
        # BUG FIX: the original `not i % a // b` parsed as `not ((i % a) // b)`
        # by operator precedence; the intent is "every vis_interval batches".
        if benchmark_config['vis_output'] and i % vis_interval == 0:
            show_output(batch[0][0], batch[0][1], pred, batch[1])

    preds = np.concatenate(pred_chunks) if pred_chunks else np.array([])
    gts = np.concatenate(gt_chunks) if gt_chunks else np.array([])
    te_acc = compute_accuracy(preds, gts)
    print("Class 1: " + class1 + ", Class2: " + class2 +
          ", Distinguishability Score: " + str(te_acc))

    return te_acc
Esempio n. 2
0
    def train(self, config):
        """Train the siamese model, then mine candidate pairs for test objects.

        Phase 1: fit the Keras model on pairs from the 'seen' split, validating
        on the 'test' split, with TensorBoard logging and weight checkpoints,
        then save the full model to `self.config['model_filename']`.
        Phase 2: index every 'seen' view's feature vector in a nearest-neighbour
        engine, query it with each 'test' view, score each (test, neighbour)
        pair with the trained model, and append the labelled results to
        'ground.txt'.

        Args:
            config: dict of run settings (dataset_path, batch_size, epochs,
                learning_rate, num_train_pairs, num_val_pairs, ...).
        """
        train_dataset = ImageDataset(config['dataset_path'], 'seen',
                                     config['data_augmentation_suffixes'],
                                     config['allow_different_views'])
        train_dataset.prepare(config['num_train_pairs'])

        val_dataset = ImageDataset(config['dataset_path'], 'test')
        val_dataset.prepare(config['num_val_pairs'])

        train_generator = DataGenerator(
            train_dataset,
            batch_size=config['batch_size'],
            dim=self.config['input_shape'],
            shuffle=config['shuffle_training_inputs'],
            dataset_type=config['dataset_type'])
        val_generator = DataGenerator(
            val_dataset,
            batch_size=config['batch_size'],
            dim=self.config['input_shape'],
            shuffle=config['shuffle_training_inputs'],
            dataset_type=config['dataset_type'])

        callbacks = [
            keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                        histogram_freq=0,
                                        write_graph=True,
                                        write_images=False),
            keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                            verbose=0,
                                            save_weights_only=True)
        ]

        self.keras_model.compile(
            loss=utils.contrastive_loss,
            optimizer=Adam(lr=config['learning_rate']),
            metrics=[utils.accuracy, utils.auc_roc, 'acc'])

        self.keras_model.fit_generator(
            generator=train_generator,
            validation_data=val_generator,
            epochs=config['epochs'],
            use_multiprocessing=True,
            callbacks=callbacks,
            workers=multiprocessing.cpu_count())

        self.keras_model.save(self.config['model_filename'])

        # ---- Phase 2: nearest-neighbour pair mining ----------------------

        def list_views(split):
            # Return '<object>/view_00000<i>' relative paths, views 0-4, for
            # every object directory in the given dataset split.
            split_dir = os.path.join(config['dataset_path'], split)
            return [
                os.path.join(obj, 'view_00000{}'.format(i))
                for i in range(5)
                for obj in os.listdir(split_dir)
            ]

        new = list_views('test')
        seen = list_views('seen')

        seen_root = os.path.join(config['dataset_path'], 'seen')
        test_root = os.path.join(config['dataset_path'], 'test')

        # Approximate-NN engine over raw feature vectors; NearestFilter keeps
        # the 5 closest stored vectors per query.
        dimension = 9984  # feature-vector length; must match the stored arrays
        engine = Engine(dimension, vector_filters=[NearestFilter(5)])

        # Index every stored feature vector of every 'seen' view.
        for view in seen:
            view_dir = os.path.join(seen_root, view)
            for fname in os.listdir(view_dir):
                arrs = np.load(os.path.join(view_dir, fname))
                engine.store_vector(arrs['arr_0'], view)

        pred_arr = []
        for view in new:
            view_dir = os.path.join(test_root, view)
            # NOTE(review): os.listdir order is arbitrary — the "first" sample
            # is effectively random; confirm whether a deterministic or random
            # choice is intended.
            sample = os.path.join(view_dir, os.listdir(view_dir)[0])
            image = np.load(sample)['arr_0']
            for neighbor in engine.neighbours(image):
                n_dir = os.path.join(seen_root, neighbor[1])
                n_path = os.path.join(n_dir, os.listdir(n_dir)[0])
                n_image = np.load(n_path)['arr_0']
                prediction = self.predict(
                    [np.array([image]), np.array([n_image])], 1)
                # Strip the trailing '/view_00000i' suffix (12 chars) to
                # recover the bare object/class names.
                pred_arr.append([view[:-12], neighbor[1][:-12], prediction])

        # BUG FIX: the original reopened ground.txt on every iteration and
        # leaked the handle on an exception; open once with a context manager.
        # The bytes written are unchanged.
        with open("ground.txt", "a") as f:
            for cls_a, cls_b, prediction in pred_arr:
                val_dataset.prepare_specific(1, cls_a, cls_b)
                label = '1' if cls_a == cls_b else '0'
                f.write('{} {} {} {} '.format(label, cls_a, cls_b, prediction))