Example No. 1
def init_app():
    try:
        app.logger.debug("Adding users")
        load_users()

        app.logger.debug("Adding voters")
        for box in BOXES:
            app.logger.debug(f"Processing box: {box}")

            box_obj = Box.objects(number=box).first()
            if not box_obj:
                box_obj = Box(number=box)
                load_votes_matrix(box_obj)
            for i in range(1, 350 + 1):
                voter_obj = Voter.objects(order=i, box=box_obj).first()
                if not voter_obj:
                    circuit_obj = Circuit.objects(name=CIRCUIT).first()
                    if not circuit_obj:
                        circuit_obj = Circuit(name=CIRCUIT).save()
                    try:
                        Voter(
                            order=i,
                            box=box_obj,
                            circuit=circuit_obj,
                        ).save()
                    except mongoengine.errors.NotUniqueError:
                        continue
    except Exception:
        app.logger.exception("There was something wrong with init.")

    app.logger.debug("Done init")
Example No. 2
def save_state(order):
    app.logger.debug("Saving order: %s" % order)
    box = Box.objects(number=request.form.get("box")).first()
    voter = Voter.objects(order=order, box=box).first()
    voter.update(status=request.form.get("intencion"),
                 last_updated=datetime.now())
    return Response(status=200)
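save_state reads the box number and vote intention from a POST form and updates the matching Voter. A minimal client-side sketch of how such a view might be exercised, assuming it is routed at /save_state/<order> on a local dev server; the host and route are assumptions, only the "box" and "intencion" field names come from the snippet.

# Hypothetical client call; host and URL pattern are assumptions, the form
# field names "box" and "intencion" are taken from the view above.
import requests

resp = requests.post(
    "http://localhost:5000/save_state/42",
    data={"box": "1001", "intencion": "3"},
)
assert resp.status_code == 200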
Example No. 3
    def detect_face_boxes(self, img: np.ndarray) -> List[Box]:
        img_300x300: np.ndarray = cv.resize(src=img,
                                            dsize=DETECTION_NETWORK_INPUT_SIZE)
        img_caffe_blob: np.ndarray = cv.dnn.blobFromImage(
            image=img_300x300,
            scalefactor=1.0,
            size=DETECTION_NETWORK_INPUT_SIZE,
            mean=BLOB_MEAN_SUBTRACTION.to_bgr(),
        )
        self.face_detection_network.setInput(img_caffe_blob)
        inferences: np.ndarray = self.face_detection_network.forward()

        # Parsing inferences informed by:
        # https://answers.opencv.org/question/208419/can-someone-explain-the-output-of-forward-in-dnn-module/
        confidences: Sequence[float] = inferences[0, 0, :, 2]
        y_scale, x_scale, _ = img.shape
        # 1. Extract box coordinates (all between 0 and 1)
        #    (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
        # 2. Scale them up to original image size
        # 3. Round values and convert to int
        # 4. Collect to Box
        boxes: List[Box] = [
            Box.from_values(*values)
            for values in ((inferences[0, 0, :, 3:7] *
                            np.array([x_scale, y_scale, x_scale, y_scale])
                            ).round().astype(int))
        ]

        return [
            box for confidence, box in zip(confidences, boxes)
            if confidence >= self.detection_threshold
        ]
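detect_face_boxes and the face examples below (gender_faces, track_faces, recognize_face) lean on a small geometry helper called Box with from_values, from_dlib_rect, to_dlib_rect, center and distance_to. The project's own implementation is not included here; the following is a minimal sketch reconstructed only from how the helper is used, not the real code.

# Hypothetical Box helper, reconstructed from usage in these examples.
import math
from dataclasses import dataclass

import dlib


@dataclass
class Box:
    top_left_x: int
    top_left_y: int
    bottom_right_x: int
    bottom_right_y: int

    @classmethod
    def from_values(cls, top_left_x, top_left_y,
                    bottom_right_x, bottom_right_y) -> "Box":
        # Round and coerce to int so scaled float coordinates are accepted.
        return cls(int(round(top_left_x)), int(round(top_left_y)),
                   int(round(bottom_right_x)), int(round(bottom_right_y)))

    @classmethod
    def from_dlib_rect(cls, rect) -> "Box":
        # Works for both dlib.rectangle and the float dlib.drectangle
        # returned by correlation_tracker.get_position().
        return cls(int(rect.left()), int(rect.top()),
                   int(rect.right()), int(rect.bottom()))

    def to_dlib_rect(self) -> dlib.rectangle:
        return dlib.rectangle(self.top_left_x, self.top_left_y,
                              self.bottom_right_x, self.bottom_right_y)

    @property
    def center(self):
        return ((self.top_left_x + self.bottom_right_x) // 2,
                (self.top_left_y + self.bottom_right_y) // 2)

    def distance_to(self, other: "Box") -> float:
        (x1, y1), (x2, y2) = self.center, other.center
        return math.hypot(x2 - x1, y2 - y1)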
Example No. 4
def gender_faces(img: np.ndarray, faces: List[Face]) -> List[GenderedFace]:
    """
    Classify the genders of the faces in an image

    :param img: The image
    :param faces: The bounding boxes of the faces in img
    :return: List of inferred genders and the confidence value parallel with face_boxes
    """

    ret = []
    pad_width = max(img.shape) + 100  # TODO: Make this smarter
    img = cv2.copyMakeBorder(img, pad_width, pad_width, pad_width, pad_width,
                             cv2.BORDER_REFLECT_101)
    for face in faces:
        # Extract each face, and resize to (H,L)
        center_x, center_y = face.box.center
        h = face.box.bottom_right_y - face.box.top_left_y
        w = face.box.bottom_right_x - face.box.top_left_x
        side_len = max(w, h)
        radius = side_len / 2
        box = Box.from_values(
            center_x - radius + pad_width,
            center_y - radius + pad_width,
            center_x + radius + pad_width,
            center_y + radius + pad_width,
        )
        face_crop = img[box.top_left_y:box.bottom_right_y,
                        box.top_left_x:box.bottom_right_x, :]
        face_crop = cv2.resize(face_crop, (H, L))

        # Extract features. If features are not found, use defaults.
        feat = face_utils.shape_to_np(face.shape)
        landmarks = np.zeros(8)
        x = box.top_left_x
        y = box.top_left_y
        landmarks[0] = feat[3][0] - feat[2][0] - x
        landmarks[1] = feat[1][0] - feat[0][0] - x
        landmarks[2] = feat[4][0] - x
        landmarks[3] = feat[4][0] - x
        landmarks[4] = feat[3][1] - feat[2][1] - y
        landmarks[5] = feat[1][1] - feat[0][1] - y
        landmarks[6] = feat[4][1] - y - 5
        landmarks[7] = feat[4][1] - y + 5

        # Classify
        X = np.concatenate(
            (face_crop.reshape(L * H * 3), landmarks)).reshape(1, -1)
        y_prob = GENDER_CLASSIFIER.predict_proba(X)[0]
        y_pred = GENDER_CLASSIFIER.predict(X).astype(int)[0]

        # Create GenderedFace object
        g_face = GenderedFace.from_face(face=face,
                                        gender=Gender(y_pred),
                                        gender_confidence=y_prob[y_pred])
        ret.append(g_face)

    return ret
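A hedged end-to-end sketch for a single frame, combining detect_face_boxes, recognize_face and gender_faces from these examples. The detector object and the frame path are placeholders; only the function and attribute names come from the snippets.

# Hypothetical usage for one frame; "detector" stands in for whatever object
# exposes detect_face_boxes/recognize_face in these examples.
import cv2

img = cv2.imread("frame_0001.png")
boxes = detector.detect_face_boxes(img)
faces = [detector.recognize_face(img, box) for box in boxes]
for gendered in gender_faces(img, faces):
    print(gendered.gender, gendered.gender_confidence)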
Example No. 5
def telegram():
    app.logger.debug("Entered telegram")
    _filter = get_current_user_roles(current_user)

    box = Box.objects(number__in=_filter).first()
    if box:
        return redirect(url_for('telegram_box', box_number=box.number))
    else:
        return redirect(url_for('padron'))
Example No. 6
def load_boxes(user_id, label_name):
    """Load sample boxes."""

    print "Box"

    box = Box(user_id=user_id, label_name=label_name)

    db.session.add(box)
    db.session.commit()
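In this example and in Examples No. 8 and No. 13, Box is a different thing entirely: a Flask-SQLAlchemy model for a recipe box. A minimal sketch of the models those snippets appear to assume, reconstructed from the columns and the __repr__ they rely on; the real schema is not shown in these examples.

# Hypothetical Flask-SQLAlchemy models; columns are inferred from usage
# (box_id, user_id, label_name, recipe_id, recipe_notes).
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


class Box(db.Model):
    __tablename__ = "boxes"
    box_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey("users.user_id"))
    label_name = db.Column(db.String(64))

    def __repr__(self):
        return f"<Box box_id={self.box_id} user_id={self.user_id}>"


class RecipeBox(db.Model):
    __tablename__ = "recipe_boxes"
    recipebox_id = db.Column(db.Integer, primary_key=True)
    box_id = db.Column(db.Integer, db.ForeignKey("boxes.box_id"))
    recipe_id = db.Column(db.Integer)
    recipe_notes = db.Column(db.Text)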
Example No. 7
def telegram_box(box_number):
    box_permission = Permission(RoleNeed(box_number))
    app.logger.debug("Entered telegram %s" % box_number)
    _roles = get_current_user_roles(current_user)

    boxes = Box.objects(number__in=_roles).order_by("number")
    pager = Pager(int(box_number), [int(box.number) for box in boxes])
    pages = pager.get_pages()
    if box_permission.can():
        box = Box.objects(number=box_number).first()
        if box:
            return render_template('telegram.html',
                                   parties=box.parties,
                                   other_votes=box.other_votes,
                                   pages=pages,
                                   box_number=box.number)
        else:
            flash("Mesa inexistente")
            return redirect(url_for('padron'))
    else:
        abort(403)
Example No. 8
    def test_Box(self):
        """Tests Box model instantiation."""

        add_user = User(username="******", password="******")
        db.session.add(add_user)

        add_box = Box(user_id=1, label_name="To Try")
        db.session.add(add_box)

        box = Box.query.filter(Box.label_name == "To Try").first()

        self.assertEqual(box.user_id, 1)
        self.assertEqual(box.__repr__(), "<Box box_id=1 user_id=1>")
Example No. 9
def export():
    boxes = Box.objects()
    padron = []
    for box in boxes:
        voters = Voter.objects(box=box)
        for voter in voters:
            padron.append([
                voter.order,
                voter.status,
                box.number,
            ])
    return render_template(
        'export.html',
        padron=padron,
    )
Example No. 10
def summary():
    _filter = get_current_user_roles(current_user)

    boxes = Box.objects(number__in=_filter).order_by("-number")
    custom_boxes = []
    for box in boxes:
        custom_box = {
            'number': box.number,
            "recurrido": Voter.objects(box=box, status=2).count(),
            "voto": Voter.objects(box=box, status=3).count(),
            "ausentes": Voter.objects(box=box, status=4).count()
        }

        custom_boxes.append(custom_box)
    votos_recurrido = Voter.objects(status=2, box__in=boxes).count()
    votos_ns_nc = Voter.objects(status=3, box__in=boxes).count()
    votos_ausentes = Voter.objects(status=4, box__in=boxes).count()
    data = [
        {
            "intention": "Recurrido",
            "count": votos_recurrido
        },
        {
            "intention": "Voto",
            "count": votos_ns_nc
        },
        {
            "intention": "Ausentes",
            "count": votos_ausentes
        },
    ]
    values = [votos_recurrido, votos_ns_nc, votos_ausentes]
    labels = ["Recurrido", "Voto", "Ausentes"]

    return render_template(
        'summary.html',
        values=values,
        labels=labels,
        colors=COLORS,
        boxes=custom_boxes,
        data=data,
    )
Example No. 11
def padron_box(box_number):
    box_permission = Permission(RoleNeed(str(box_number)))
    app.logger.info("Entered Padron for box: %s" % box_number)
    _roles = get_current_user_roles(current_user)
    app.logger.debug("Roles: %s" % _roles)
    app.logger.debug("box_permission: %s" % box_permission)

    pager = Pager(int(box_number), [int(role) for role in _roles])
    pages = pager.get_pages()
    box = Box.objects(number=box_number).first()
    _padron = Voter.objects(box=box)
    if box_permission.can():
        return render_template(
            'padron.html',
            padron=_padron,
            pages=pages,
            box_number=box_number,
        )
    else:
        abort(403)
Example No. 12
def totales():
    candidates = [
        "president", "gobernor", "diputado", "senador", "intendente", "general"
    ]
    other_votes = ["blank", "nulled", "recurrent", "refuted"]
    app.logger.debug("Entered totales")
    _filter = get_current_user_roles(current_user)

    boxes = Box.objects(number__in=_filter)
    results = {}
    other_results = {}
    for box in boxes:
        for party in box.parties:
            if not results.get(party.name):
                results[party.name] = {}
            for candidate in candidates:
                if not results[party.name].get(candidate):
                    results[party.name][candidate] = {}
                results[party.name][candidate]["count"] = \
                    results[party.name][candidate].get("count", 0) + party.votes[candidate].count
                results[party.name][candidate]["enabled"] = party.votes[
                    candidate].enabled
        for vote_type in other_votes:
            if not other_results.get(vote_type):
                other_results[vote_type] = {}
            for candidate in candidates:
                if not other_results[vote_type].get(candidate):
                    other_results[vote_type][candidate] = {}
                other_results[vote_type][candidate]["count"] = \
                    other_results[vote_type][candidate].get("count", 0) + box.other_votes[vote_type][candidate].count
                other_results[vote_type][candidate][
                    "enabled"] = box.other_votes[vote_type][candidate].enabled

    if results:
        return render_template('totales.html',
                               results=results,
                               other_results=other_results)
    else:
        return redirect(url_for('padron'))
Example No. 13
def save_recipe_to_box():
    """Save the recipe to a user recipe box."""

    if session.get("user_id"):
        label_name = request.form.get("box-label")
        new_label_name = request.form.get("new-label")
        notes = request.form.get("notes")
        recipe_id = request.form.get("recipe-id")
        user_id = session['user_id']

        if label_name and new_label_name:
            flash("Choose only one field.")
            return redirect("/save_recipe/" + str(recipe_id))
        elif label_name:
            box_id = Box.query.filter_by(user_id=user_id,
                                         label_name=label_name).first().box_id
        elif new_label_name:
            box = Box(user_id=user_id, label_name=new_label_name)
            db.session.add(box)
            db.session.commit()

            box_id = Box.query.filter_by(
                user_id=user_id, label_name=new_label_name).first().box_id
        else:
            flash("Choose an existing label or create a new label.")
            return redirect("/save_recipe/" + str(recipe_id))

        if RecipeBox.query.filter_by(box_id=box_id, recipe_id=recipe_id).all():
            flash("This recipe already exists in the selected label category.")
            return redirect('/save_recipe/' + str(recipe_id))
        else:
            recipebox = RecipeBox(recipe_id=recipe_id,
                                  box_id=box_id,
                                  recipe_notes=notes)
            db.session.add(recipebox)
            db.session.commit()

            return redirect("/my_recipes")
Example No. 14
    def track_faces(
        self,
        clip_dir: str,
        out_base_dir: str,
        draw_on_dir: str = None,
        detect_only: bool = False,
    ):
        # Setup
        # load image paths
        frames: List[os.DirEntry] = load_and_sort_dir(clip_dir)
        draw_on_frames: List[os.DirEntry] = load_and_sort_dir(draw_on_dir)
        assert len(draw_on_frames) in (0, len(frames))

        # create output directory
        out_dir: str = create_output_dir(out_base_dir)

        # initialize variables required for object tracking
        new_face_id: Iterator[int] = count(start=1)
        tracked_faces: Dict[int, TrackedFace] = {}

        # Iterate Through Video Frames
        for frame, draw_on_frame in zip_longest(frames, draw_on_frames):
            # load new frame
            img = cv.imread(frame.path)

            # load out_img
            out_img: np.ndarray = (img.copy() if draw_on_frame is None else
                                   cv.imread(draw_on_frame.path))

            # ensure out_img is at least as large as img
            assert len(img.shape) == len(out_img.shape) and all(
                out_dim >= in_dim
                for in_dim, out_dim in zip(img.shape, out_img.shape))

            detected_face_boxes: List[Box] = self.detect_face_boxes(img)

            # If tracking is disabled, draw the boxes and move to next frame
            if detect_only:
                write_boxes(
                    out_path=os.path.join(out_dir, frame.name),
                    out_img=out_img,
                    boxes=detected_face_boxes,
                )
                continue

            detected_faces: List[GenderedFace] = gender_faces(
                img=img,
                faces=[
                    self.recognize_face(img, detected_face_box)
                    for detected_face_box in detected_face_boxes
                ],
            )

            current_face_ids: Set[int] = set()
            lost_face_ids: Set[int] = set()

            # Iterate over the known (tracked) faces
            for tracked_face in tracked_faces.values():
                matched_detected_faces: List[GenderedFace] = [
                    detected_face for detected_face in detected_faces
                    if self.faces_match(tracked_face, detected_face)
                ]

                if not matched_detected_faces:
                    # Tracked face was not matched to any detected face
                    # Increment staleness since we didn't detect this face
                    tracked_face.staleness += 1
                    # Update tracker with img and get confidence
                    tracked_confidence: float = tracked_face.tracker.update(
                        img)
                    if (tracked_face.staleness < self.tracking_expiry
                            and tracked_confidence >= self.tracking_threshold):
                        # Assume face is still in frame but we failed to detect
                        # Update box with predicted location box
                        predicted_box: Box = Box.from_dlib_rect(
                            tracked_face.tracker.get_position())
                        tracked_face.box = predicted_box
                        current_face_ids.add(tracked_face.id_)
                    else:
                        # Assume face has left frame because either it is too stale or confidence is too low
                        if self.remember_identities:
                            # Set effectively infinite staleness to force tracker reset if face is found again later
                            tracked_face.staleness = sys.maxsize
                        else:
                            lost_face_ids.add(tracked_face.id_)
                    continue

                # Tracked face was matched to one or more detected faces
                # Multiple matches should rarely happen if faces in frame are distinct. We take closest to prev location
                # TODO: Handle same person multiple times in frame
                matched_detected_face = min(
                    matched_detected_faces,
                    key=lambda face: tracked_face.box.distance_to(face.box),
                )
                # Update tracked_face
                tracked_face.descriptor = matched_detected_face.descriptor
                tracked_face.shape = matched_detected_face.shape
                tracked_face.box = matched_detected_face.box
                if tracked_face.staleness >= self.tracking_expiry:
                    # Face was not present in last frame so reset tracker
                    tracked_face.tracker = dlib.correlation_tracker()
                    tracked_face.tracker.start_track(
                        image=img,
                        bounding_box=tracked_face.box.to_dlib_rect())
                else:
                    # Face was present in last frame so just update guess
                    tracked_face.tracker.update(
                        image=img, guess=tracked_face.box.to_dlib_rect())
                tracked_face.staleness = 0
                tracked_face.gender = matched_detected_face.gender
                tracked_face.gender_confidence = matched_detected_face.gender_confidence
                # Add tracked_face to current_ids to reflect that it is in the frame
                current_face_ids.add(tracked_face.id_)
                # remove matched_detected_face from detected_faces
                detected_faces.remove(matched_detected_face)

            # Delete all faces that were being tracked but are now lost
            # lost_face_ids will always be empty if self.remember_identities is True
            for id_ in lost_face_ids:
                del tracked_faces[id_]

            for new_face in detected_faces:
                # This is a new face (previously unseen)
                id_ = next(new_face_id)
                tracker: dlib.correlation_tracker = dlib.correlation_tracker()
                tracker.start_track(image=img,
                                    bounding_box=new_face.box.to_dlib_rect())
                tracked_faces[id_] = TrackedFace(
                    box=new_face.box,
                    descriptor=new_face.descriptor,
                    shape=new_face.shape,
                    id_=id_,
                    tracker=tracker,
                    gender=new_face.gender,
                    gender_confidence=new_face.gender_confidence,
                )
                current_face_ids.add(id_)

            write_boxes(
                out_path=os.path.join(out_dir, frame.name),
                out_img=out_img,
                boxes=[tracked_faces[id_].box for id_ in current_face_ids],
                labelss=[[
                    (
                        f'Person {id_}',
                        Point(3, 14),
                    ),
                    (
                        f'{tracked_faces[id_].gender.name[0].upper()}: {round(100 * tracked_faces[id_].gender_confidence, 1)}%',
                        Point(3, 30),
                    ),
                ] for id_ in current_face_ids],
                color=Color.yellow(),
            )

            print(
                f"Processed {frame.name}.  Currently tracking {len(tracked_faces)} faces"
            )
        return out_dir
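A minimal driver sketch for track_faces; the FaceTracker class name and the directory paths are assumptions, only the method signature and return value come from the example above.

# Hypothetical driver; FaceTracker and both paths are placeholders.
tracker = FaceTracker()  # assumed constructor, not shown in these examples
out_dir = tracker.track_faces(clip_dir="frames/", out_base_dir="runs/")
print(f"Annotated frames written to {out_dir}")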
Example No. 15
def track_faces(
    self,
    clip_dir: str,
    out_base_dir: str,
    draw_on_dir: str = None,
    detect_only: bool = False,
):
    """
    This is NOT recognition
    Tracking should be based on smooth object motion, not face recognition

    Steps Every frame:

    detect faces
    for old-face in tracked-faces:
        for new-face in detected-faces:
            if new-face in old-face-region and old-face in new-face-region:
                match new-face and old-face
                break
        if old-face not in matches and tracker.update(img) > thresh:
            match to tracked location

    for new-face not in matches:
        create new tracked-face
    """
    # Setup
    # load image paths
    frames: List[os.DirEntry] = load_and_sort_dir(clip_dir)
    draw_on_frames: List[os.DirEntry] = load_and_sort_dir(draw_on_dir)
    assert len(draw_on_frames) in (0, len(frames))

    # create output directory
    out_dir: str = create_output_dir(out_base_dir)

    # initialize variables required for object tracking
    new_face_id: Iterator[int] = count(start=1)
    tracked_faces: Dict[int, TrackedFace] = {}

    prev_img = None

    # Iterate Through Video Frames
    for frame, draw_on_frame in zip_longest(frames, draw_on_frames):
        # Read Images
        # read image to process
        img: np.ndarray = cv.imread(frame.path)
        # read image to draw on (if different)
        out_img = img.copy() if draw_on_frame is None else cv.imread(
            draw_on_frame.path)
        # ensure out_img is at least as large as img
        assert len(img.shape) == len(out_img.shape) and all(
            out_dim >= in_dim
            for in_dim, out_dim in zip(img.shape, out_img.shape))

        detected_face_boxes: List[Box] = self.detect_faces(img)

        # If tracking is disabled, draw the boxes and move to next frame
        if detect_only:
            write_boxes(
                out_path=os.path.join(out_dir, frame.name),
                out_img=out_img,
                boxes=detected_face_boxes,
            )
            continue

        current_ids_to_detection_idx: Dict[int, Optional[int]] = {}
        lost_tracked_face_ids: List[int] = []

        # Iterate over the known (tracked) faces
        for tracked_face in tracked_faces.values():
            # Update the tracker with the new image
            # Tracker generates new predicted_rect from previous predicted_rect
            # Tracker returns its confidence that the face is inside new predicted_rect
            predicted_rect_confidence: float = tracked_face.tracker.update(img)
            if predicted_rect_confidence < self.tracking_threshold:
                # We've lost the object, maybe due to a cut. Can't simply look for the closest faces.
                # We assume the face is no longer present in img and stop tracking it
                print(
                    f"Too low: id={tracked_face.id_}, conf={predicted_rect_confidence}, frame={frame.name}"
                )
                lost_tracked_face_ids.append(tracked_face.id_)
                # TODO: In this case, maybe matchTemplate with found faces to see if one is above thresh
                continue
            predicted_rect: dlib.rectangle = tracked_face.tracker.get_position(
            )
            tracked_last_rect: dlib.rectangle = tracked_face.box.to_dlib_rect()

            # Iterate over newly detected faces
            for detected_i, detected_face_box in enumerate(
                    detected_face_boxes):

                # TODO Maybe just do distance based
                #  add confidence here?
                #  I think track motion and distance
                detected_rect = detected_face_box.to_dlib_rect()

                if (
                        # TODO: verify these are good checks. Maybe check that the l2 dist is minimal instead
                        #  need to make sure not modifying tracked faces as we go if we start computing minimums
                        #  THEY ARENT
                        # sanity check: face hasn't moved too much
                        tracked_last_rect.contains(detected_rect.center())
                        and detected_rect.contains(tracked_last_rect.center())
                        # sanity check: tracker prediction isn't too far from detection
                        and detected_rect.contains(predicted_rect.center()) and
                        predicted_rect.contains(detected_rect.center())):

                    # detected_face_box and tracked_face are the same face
                    # tracker was already updated to this location
                    if tracked_face.id_ in current_ids_to_detection_idx:
                        print(
                            f'[ERROR]  {tracked_face.id_} found multiple times. Keeping first match'
                        )
                    else:
                        tracked_face.box = detected_face_box
                        current_ids_to_detection_idx[
                            tracked_face.id_] = detected_i
                        new_tracker = dlib.correlation_tracker()
                        new_tracker.start_track(image=img,
                                                bounding_box=detected_rect)
                        tracked_face.tracker = new_tracker

            if tracked_face.id_ not in current_ids_to_detection_idx:
                assert predicted_rect_confidence >= self.tracking_threshold
                # Didn't detect this face, but tracker is confident it is at the predicted location.
                # We assume detector gave false negative
                tracked_face.box = Box.from_dlib_rect(predicted_rect)
                # tracker was updated to predicted_rect in update() call in condition
                current_ids_to_detection_idx[tracked_face.id_] = None

        # Remove lost face ids
        for lost_tracked_face_id in lost_tracked_face_ids:
            del tracked_faces[lost_tracked_face_id]

        tracked_detection_idxs = current_ids_to_detection_idx.values()

        # Track new faces
        for detected_i, detected_face_box in enumerate(detected_face_boxes):
            if detected_i not in tracked_detection_idxs:
                # Assume new face has entered frame and start tracking it
                id_ = next(new_face_id)
                tracker: dlib.correlation_tracker = dlib.correlation_tracker()
                tracker.start_track(
                    image=img, bounding_box=detected_face_box.to_dlib_rect())
                tracked_faces[id_] = TrackedFace(id_=id_,
                                                 box=detected_face_box,
                                                 tracker=tracker)
                current_ids_to_detection_idx[id_] = detected_i

        tracked_detection_idxs = current_ids_to_detection_idx.values()
        assert all(i in tracked_detection_idxs
                   for i in range(len(detected_face_boxes)))
        assert len(current_ids_to_detection_idx) == len(tracked_faces)

        write_boxes(
            out_path=os.path.join(out_dir, frame.name),
            out_img=out_img,
            boxes=[face.box for face in tracked_faces.values()],
            labelss=[[(f'Person {face.id_}', Point(1, -9))]
                     for face in tracked_faces.values()],
        )
Example No. 16
def boxes():
    app.logger.info("Entered Mesas")
    boxes = Box.objects()
    return render_template('boxes.html', boxes=boxes)
Example No. 17
    def recognize_face(self, img: np.ndarray, face_box: Box) -> Face:
        shape: dlib.full_object_detection = self.face_shape_predictor(
            img, face_box.to_dlib_rect())
        descriptor: np.ndarray = np.asarray(
            self.face_recognition_model.compute_face_descriptor(img, shape))
        return Face(box=face_box, shape=shape, descriptor=descriptor)
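recognize_face assumes that self.face_shape_predictor and self.face_recognition_model were loaded beforehand. A minimal setup sketch, assuming dlib's publicly distributed pretrained models; the file names are the usual dlib downloads, not paths taken from this project.

# Hypothetical model loading; the file names refer to dlib's standard
# pretrained model downloads and are an assumption here.
import dlib

face_shape_predictor = dlib.shape_predictor(
    "shape_predictor_68_face_landmarks.dat")
face_recognition_model = dlib.face_recognition_model_v1(
    "dlib_face_recognition_resnet_model_v1.dat")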