Example #1
def demograp(subjects_queryset: QuerySet):
    """Aggregate predicted age/sex statistics for a queryset of Subject rows."""
    men_ages = []
    women_ages = []
    men_count = 0
    women_count = 0

    for subject in subjects_queryset.iterator():
        age = subject.pred_age
        sex = subject.pred_sex
        if age and sex:
            if sex == Subject.SEX_MAN:
                men_ages.append(age)
                men_count += 1
            elif sex == Subject.SEX_WOMAN:
                women_ages.append(age)
                women_count += 1

    # Age bucket labels: 18, 24, ..., 72 in 6-year steps.
    lower = 18
    upper = 74
    step = 6
    labels = list(range(lower, upper, step))

    return {
        'age_labels': labels,
        'men_ages': _age_stats(men_ages, labels),
        'women_ages': _age_stats(women_ages, labels),
        'men_count': men_count,
        'women_count': women_count,
        'hourly_count': _hourly_count(subjects_queryset),
        'daily_count': _daily_count(subjects_queryset),
    }
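The helpers _age_stats, _hourly_count, and _daily_count are not shown in this example. Below is a minimal sketch of what _age_stats could look like, assuming it simply counts how many ages fall into each 6-year bucket; the real helper in the source project may return richer statistics.

def _age_stats(ages, labels):
    # Hypothetical helper: count ages per [label, label + step) bucket,
    # with the bucket width inferred from the label spacing.
    step = labels[1] - labels[0] if len(labels) > 1 else 1
    return [sum(1 for age in ages if label <= age < label + step)
            for label in labels]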
Example #2
    def queryset_train_data(
            queryset: QuerySet) -> Tuple[np.ndarray, np.ndarray]:
        """Flatten face embeddings and their subject ids into numpy training arrays."""
        embeddings = []
        subjects = []
        # Exclude subjects that have faces without stored embedding bytes.
        queryset = queryset.exclude(faces__embeddings_bytes__isnull=True)
        for subject in queryset.iterator():
            for face in subject.faces.all():
                embeddings.append(face.embeddings)
                subjects.append(subject.id)

        embeddings = np.array(embeddings, np.float32)
        subjects = np.array(subjects, np.int32)

        return embeddings, subjects
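A possible way to consume the returned arrays, assuming queryset_train_data is reachable as a plain function or static method and that a scikit-learn classifier is trained on the embeddings; the classifier choice below is illustrative, not the project's actual model.

from sklearn.neighbors import KNeighborsClassifier

embeddings, subject_ids = queryset_train_data(Subject.objects.all())
classifier = KNeighborsClassifier(n_neighbors=1)
classifier.fit(embeddings, subject_ids)              # one row per face embedding
predicted_ids = classifier.predict(embeddings[:1])   # identify the subject of one face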
Example #3
    def split_images(self,
                     images: QuerySet = None,
                     test_fraction: float = 0.2):
        """Build stratified train/test image arrays for the species classes of this CNN."""
        if images is None:
            images = GroundTruthImage.objects.all()
        if self.specialized_organ:
            images = images.filter(plant_organ=self.specialized_organ)
        if self.specialized_background:
            images = images.filter(background_type=self.specialized_background)
        # Keep only species that have at least 50 ground-truth images.
        species = images.values('specie__name').annotate(
            nb_image=Count('specie')).filter(nb_image__gte=50)

        for specie in species.iterator():
            print(specie['specie__name'], specie['nb_image'])

        specie_to_pos = {}
        self.save()  # save first so the Class rows below can reference this CNN
        for i in range(species.count()):
            specie = Specie.objects.get(latin_name=species[i]['specie__name'])
            try:
                class_m = Class.objects.get(cnn=self, specie=specie)
            except Class.DoesNotExist:
                class_m = Class(cnn=self, specie=specie)
            class_m.pos = i
            class_m.save()
            specie_to_pos[specie] = i

        data_images, data_labels = [], []

        for image in images.iterator():
            if image.specie in specie_to_pos:
                data_images.append(image.preprocess())
                data_labels.append(specie_to_pos[image.specie])

        data_images_np = np.array(data_images)
        data_labels_np = np.array(data_labels)
        shufflesplit = StratifiedShuffleSplit(n_splits=2, test_size=test_fraction)
        train_index, test_index = list(
            shufflesplit.split(data_images_np, data_labels_np))[0]
        self.train_images, self.test_images = data_images_np[
            train_index], data_images_np[test_index]
        self.train_labels, self.test_labels = to_categorical(
            data_labels_np[train_index]), to_categorical(
                data_labels_np[test_index])
        print(self.train_images.shape)
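For reference, StratifiedShuffleSplit yields (train_index, test_index) pairs that preserve the per-class proportions; only the first of the n_splits=2 pairs is used above. A standalone illustration with invented toy labels:

import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit

toy_labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])       # two balanced classes
toy_data = np.arange(len(toy_labels)).reshape(-1, 1)  # toy samples
splitter = StratifiedShuffleSplit(n_splits=2, test_size=0.25)
train_idx, test_idx = next(splitter.split(toy_data, toy_labels))
# test_idx holds 2 samples, one per class, keeping the 50/50 balance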
Example #4
    def split_images(self, images: QuerySet = None, test_fraction: float = 0.2):
        """Build batched train/test tf.data pipelines for the species classes of this CNN."""
        if images is None:
            images = GroundTruthImage.objects.all()
        if self.specialized_organ:
            images = images.filter(plant_organ=self.specialized_organ)
        if self.specialized_background:
            images = images.filter(background_type=self.specialized_background)
        # Keep only species that have at least nb_image_by_class ground-truth images.
        species = images.values('specie__name').annotate(nb_image=Count('specie')).filter(
            nb_image__gte=self.nb_image_by_class)

        self.classes.all().delete()
        for specie in species.iterator():
            print(specie['specie__name'], specie['nb_image'])

        specie_to_pos = {}
        specie_to_nb = {}
        specie_to_counter = {}
        self.save()  # save first so the Class rows below can reference this CNN
        nb_class = species.count()
        for i in range(nb_class):
            specie = Specie.objects.get(latin_name=species[i]['specie__name'])
            try:
                class_m = Class.objects.get(cnn=self, specie=specie)
            except Class.DoesNotExist:
                class_m = Class(cnn=self, specie=specie)
            class_m.pos = i
            class_m.save()
            specie_to_pos[specie] = i
            specie_to_nb[specie] = species[i]['nb_image']
            specie_to_counter[specie] = 0

        train_images = []
        test_images = []

        for image in images.iterator():
            specie = image.specie
            if specie in specie_to_pos:
                # The first (1 - test_fraction) share of each species' images goes to train.
                if specie_to_counter[specie] / specie_to_nb[specie] < 1 - test_fraction:
                    train_images.append(image)
                else:
                    test_images.append(image)
                specie_to_counter[specie] += 1

        batch_size = 32

        def train_generator():
            # Fill fixed-size batches in order; a trailing partial batch is dropped.
            i = 0
            xs = np.zeros((batch_size, 224, 224, 3), dtype=np.float64)
            ys = np.zeros((batch_size, nb_class), dtype=np.float64)
            for image in train_images:
                xs[i] = image.preprocess()
                ys[i, specie_to_pos[image.specie]] = 1
                i += 1
                if i == batch_size:
                    yield xs, ys
                    xs = np.zeros((batch_size, 224, 224, 3), dtype=np.float64)
                    ys = np.zeros((batch_size, nb_class), dtype=np.float64)
                    i = 0

        def test_generator():
            # Same fixed-size batching as train_generator, with one-hot labels.
            i = 0
            xs = np.zeros((batch_size, 224, 224, 3), dtype=np.float64)
            ys = np.zeros((batch_size, nb_class), dtype=np.float64)
            for image in test_images:
                xs[i] = image.preprocess()
                ys[i] = tf.keras.utils.to_categorical(specie_to_pos[image.specie], nb_class)
                i += 1
                if i == batch_size:
                    yield xs, ys
                    xs = np.zeros((batch_size, 224, 224, 3), dtype=np.float64)
                    ys = np.zeros((batch_size, nb_class), dtype=np.float64)
                    i = 0

        self.train_ds = tf.data.Dataset.from_generator(train_generator, (tf.float64, tf.float64),
                                                       ((batch_size, 224, 224, 3), (batch_size, nb_class)))
        self.test_ds = tf.data.Dataset.from_generator(test_generator, (tf.float64, tf.float64),
                                                      ((batch_size, 224, 224, 3), (batch_size, nb_class)))
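Because from_generator receives the generator functions themselves, each epoch re-invokes them and iterates until the image lists are exhausted, so the datasets can be passed straight to Keras. A hedged usage sketch, assuming cnn is an instance of this model class and cnn.model is a compiled Keras model; the model attribute name is an assumption, not taken from the source.

cnn.split_images(test_fraction=0.2)
cnn.model.fit(cnn.train_ds, validation_data=cnn.test_ds, epochs=5)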