    def test_filter_clustering(self):
        """
        Tests that clustering of age-range filter results works correctly.
        """

        filter1_results = (True, 5, "", AgeRange(2, 5))
        filter2_results = (True, 3, "", AgeRange(3, 6))
        filter3_results = (True, 2, "", AgeRange(5, 7))
        filter4_results = (True, 1, "", AgeRange(6, 7))

        clustering = AgeRangeClustering([
            filter1_results, filter2_results, filter3_results, filter4_results
        ])

        clustering.find_clusters()

        clusters = clustering.get_found_clusters()

        self.assertEqual(len(clusters), 2)

        postprocessed_clusters = {
            str(age_range.get_range()): weight
            for age_range, weight in clusters.items()
        }
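        # Expected: [2, 5], [3, 6] and [5, 7] merge into one [2, 7] cluster whose weight is the sum of the
        # contributing weights (5 + 3 + 2 = 10), while [6, 7] remains a separate cluster with weight 1.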

        self.assertIn("[6, 7]", postprocessed_clusters)
        self.assertIn("[2, 7]", postprocessed_clusters)
        self.assertEqual(postprocessed_clusters["[6, 7]"], 1)
        self.assertEqual(postprocessed_clusters["[2, 7]"], 10)

    def test_non_maximum_suppresion(self):
        """
        Tests that non-maximum suppression works correctly for age ranges.
        """

        age_ranges = [
            AgeRange(5, 8),
            AgeRange(6, 9),
            AgeRange(10, 13),
        ]
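        # The suppression helper is expected to return the envelope of the inputs:
        # [min(5, 6, 10), max(8, 9, 13)] = [5, 13].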

        self.assertEqual(
            AgeRangeClustering._non_maximum_suppresion(age_ranges).get_range(),
            [5, 13])
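
A minimal sketch of the AgeRange interface these tests rely on (constructor, get_range, and hashability so instances can key the clusters dict). This is an illustration of the assumed API, not the project's actual implementation; from_string is omitted because its input format is not shown on this page.

class AgeRange:
    """Inclusive age interval [min_age, max_age] (sketch of the assumed interface)."""

    def __init__(self, min_age, max_age):
        self.min_age = min_age
        self.max_age = max_age

    def get_range(self):
        # Returned as a list so callers can compare it with literals such as [5, 13].
        return [self.min_age, self.max_age]

    def __eq__(self, other):
        return isinstance(other, AgeRange) and self.get_range() == other.get_range()

    def __hash__(self):
        # Hashable so AgeRange instances can be used as dictionary keys (see get_found_clusters()).
        return hash((self.min_age, self.max_age))

    def __repr__(self):
        return "AgeRange({}, {})".format(self.min_age, self.max_age)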

    def import_from_lmdb(self, lmdb_foldername):
        """
        Imports the dataset from LMDB format into the root_folder.
        :param lmdb_foldername: path to the LMDB folder to import.
        """

        lmdb_env = lmdb.open(lmdb_foldername)
        lmdb_txn = lmdb_env.begin()
        lmdb_cursor = lmdb_txn.cursor()

        datum = caffe_pb2.Datum()

        for key, value in lmdb_cursor:
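            # LMDB keys may carry a build prefix ("<prefix>_dbuild_<original key>"); keep only the original key part.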
            key = str(key, encoding="UTF-8").split("_dbuild_", 1)[-1]

            datum.ParseFromString(value)

            label = datum.label
            data = caffe.io.datum_to_array(datum)

            # CxHxW to HxWxC in cv2
            image_blob = np.asarray(np.transpose(data, (1, 2, 0)), order='C')

            image = Image(uri=key, image_id=key, metadata=[AgeRange(label, label)], blob_content=image_blob)
            self.put_image(image, autoencode_uri=False)

        lmdb_env.close()
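
A hedged usage sketch, assuming import_from_lmdb is a method of the GenericImageAgeDataset class used later on this page; both paths are hypothetical.

from main.dataset.generic_image_age_dataset import GenericImageAgeDataset

dataset = GenericImageAgeDataset("/tmp/age_dataset/")  # hypothetical destination root folder
dataset.import_from_lmdb("/tmp/age_lmdb/")             # hypothetical LMDB folder exported from Caffe
dataset.save_dataset()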
Example 4
    def apply_to(self, text):
        """
        Applies this filter to the specified text.
        :param text: Text resource to evaluate.
        :return: True if the filter passes, False otherwise.
        """
        translate_dict = self.translate_dict
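        # translate_dict is assumed to map spelled-out ages (e.g. "five") to integers; other matches are tried as plain ints below.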

        content = text.get_content()

        ages_strings = []

        matches = self.compiled_regex.findall(content)

        possible_ages = []
        for match in matches:
            age = match

            if age.lower() in translate_dict:
                possible_ages.append(translate_dict[age.lower()])
            else:
                try:
                    possible_ages.append(int(age))
                except ValueError:
                    ages_strings.append(age)

        """
        if len(possible_ages) > 1:
            # We give more confidence to the attached text instead of the search query string.
            # The search query string is always the latest age of the list.
            # For this reason, we duplicate by 2 the number of possible ages below the last index.

            new_possible_ages = []

            for age in possible_ages:
                if age == possible_ages[-1]:
                    new_possible_ages += [age]
                else:
                    new_possible_ages += [age, age]

            possible_ages = new_possible_ages
        """

        # We have a list of possible ages for this text inside possible_ages.
        reduced_ages = self.ivan_algorithm(possible_ages)

        if reduced_ages:
            min_age = int(min(reduced_ages))
            max_age = int(max(reduced_ages))
        else:
            min_age = -1
            max_age = -1

        return self._age_range_check_filter2(AgeRange(min_age, max_age), ages_strings)
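
A hedged usage sketch for the text-based filter; the Text constructor and the filter's no-argument construction are assumptions (only get_content() and apply_to() appear on this page).

from main.filter.advanced.age_estimation_text_inference_filter import AgeEstimationTextInferenceFilter
from main.resource.text import Text

text = Text(content="My daughter just turned 3, she will be four in spring.")  # hypothetical constructor
age_filter = AgeEstimationTextInferenceFilter()                                # hypothetical default construction
if age_filter.apply_to(text):
    print("Text is consistent with the filter's expected age range.")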
Example 5
    @staticmethod
    def _non_maximum_suppresion(age_ranges):
        """
        Computes the non-maximum suppression of a list of age ranges, i.e. the single range that envelopes all of them.
        :param age_ranges: list of AgeRange objects to merge.
        :return: AgeRange spanning from the smallest lower bound to the largest upper bound.
        """
        minimum_x = min([age_range.get_range()[0] for age_range in age_ranges])
        maximum_y = max([age_range.get_range()[1] for age_range in age_ranges])

        return AgeRange(minimum_x, maximum_y)

    @staticmethod
    def _preprocess_metadata(raw_metadata):
        """
        Processes the metadata content to make it framework-compliant (AgeRange instances for the ages).
        :param raw_metadata: raw metadata content to be processed.
        :return: metadata dict in {filename: AgeRange} format.
        """
        metadata_content = json.loads("".join(raw_metadata))
        preprocessed_metadata = {}

        for key, value in metadata_content.items():
            preprocessed_metadata[key] = AgeRange.from_string(value)

        return preprocessed_metadata
Example 7
    def apply_to(self, image):
        """
        Applies this filter to the specified image.
        :param image: Image resource to evaluate.
        :return: True if the filter passes, False otherwise.
        """

        response = requests.put(self.api_url, data=image.get_jpeg())

        if response.status_code != 200:
            raise Exception(
                "Backend ({}) for filtering with {} is returning a bad response!"
                .format(self.api_url, AgeEstimationFilter.__name__))

        response_json = json.loads(response.text)
        if 'Age_range' not in response_json:
            raise Exception(
                "This filter does not understand the backend's language. It may be a different version."
            )

        age_range = AgeRange.from_string(response_json['Age_range'])

        return self._age_range_check_filter(age_range)
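
A hedged usage sketch for the backend-based filter; the constructor parameter name is an assumption derived from the self.api_url attribute above, and the endpoint is hypothetical.

from main.filter.advanced.age_estimation_filter import AgeEstimationFilter
from main.resource.image import Image
from main.tools.age_range import AgeRange

age_filter = AgeEstimationFilter(api_url="http://localhost:9095/age-estimation")  # hypothetical backend endpoint
image = Image(uri="/home/ivan/edurne.jpg", metadata=[AgeRange(24, 26)])
if age_filter.apply_to(image):
    print("Image passes the age-range filter.")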
Example 8
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from main.dataset.generic_image_age_dataset import GenericImageAgeDataset
from main.normalizer.image.histogram_normalizer import HistogramNormalizer
from main.normalizer.image.size_normalizer import SizeNormalizer
from main.resource.image import Image
from main.tools.age_range import AgeRange

__author__ = 'Iván de Paz Centeno'

example = Image(uri="/home/ivan/edurne.jpg", metadata=[AgeRange(24, 26)])
example2 = Image(uri="/home/ivan/edurne.jpg", metadata=[AgeRange(24, 26)])
example3 = Image(uri="/home/ivan/edurne.jpg", metadata=[AgeRange(1, 2)])
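# example and example2 share the same URI and metadata, presumably to exercise how the dataset handles duplicate images.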

histogram_normalizer = HistogramNormalizer()
size_normalizer = SizeNormalizer()

generic_dataset = GenericImageAgeDataset(
    "/home/ivan/generic_dataset_test3/",
    dataset_normalizers=[histogram_normalizer, size_normalizer])

generic_dataset.put_image(example)
generic_dataset.put_image(example2)
generic_dataset.put_image(example3)

generic_dataset.save_dataset()
Example 9
import os
import sys

from main.dataset.generic_image_age_dataset import GenericImageAgeDataset
from main.dataset.raw_crawled_dataset import RawCrawledDataset
from main.filter.advanced.age_estimation_filter import AgeEstimationFilter
from main.filter.advanced.age_estimation_text_inference_filter import AgeEstimationTextInferenceFilter
from main.filter.advanced.face_detection_filter import FaceDetectionFilter
from main.filter.multifilter import Multifilter
from main.resource.image import Image
from main.resource.text import Text
from main.tools.age_range import AgeRange

__author__ = "Ivan de Paz Centeno"

SAVE_BATCH_AMMOUNT = 200
MAX_IMAGE_SIZE = (1200, 1200)
AGE_GROUPING_SIZE = 2
EXPECTED_AGE_RANGE = AgeRange(0, 4)

if len(sys.argv) != 2:
    print("A folder/zip location of the dataset to process must be passed as a parameter.")
    sys.exit(-1)

source = sys.argv[1]

if not os.path.exists(source):
    print("The given folder/filename does not exist.")
    sys.exit(-1)

source_type = "DIR" if os.path.isdir(source) else "FILE"