def add_subject(self, dto):
    """
    Validate the DTO, convert it to a Subject, validate the Subject and add it to the repository.
    dto should contain "name,teacher_name". Missing fields are initialised with None.
    Validation and repository exceptions are propagated.
    """
    dto.validate(["name", "teacher_name"])
    subject = Subject(None, *dto.split())
    subject.validate()
    self._subject_repository.add_entity(subject)

def get_subject_by_teacher(self, dto):
    """
    Validate the DTO, convert it to a Subject and return the matching subjects as a list of ordered dicts.
    dto should contain "teacher_name". Missing fields are initialised with None.
    """
    dto.validate(["teacher_name"])
    subject = Subject(None, None, *dto.split())
    return self._subject_repository.get_entity(subject)

def del_subject(self, dto):
    """
    Validate the DTO, convert it to a Subject and delete it from the repository.
    dto should contain "id". Missing fields are initialised with None.
    Repository exceptions are propagated.
    """
    dto.validate(["id"])
    subject = Subject(*dto.split(), None, None)
    self._subject_repository.del_entity(subject)
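
The three methods above assume a DTO object that exposes validate(fields) and split(). A minimal sketch of such a DTO, under the assumption that split() simply breaks a comma-separated payload apart (the class and attribute names here are hypothetical, not from the source):

class SubjectDTO:
    def __init__(self, payload):
        self._payload = payload

    def validate(self, expected_fields):
        # Assumed behaviour: require exactly one value per expected field,
        # e.g. "Maths,Smith" for ["name", "teacher_name"].
        if len(self._payload.split(",")) != len(expected_fields):
            raise ValueError("expected fields: %s" % expected_fields)

    def split(self):
        # Return the comma-separated values in declaration order.
        return self._payload.split(",")

# Usage sketch: service.add_subject(SubjectDTO("Maths,Smith"))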
Example #4
def createSubject():
    """
    post endpoint
    ---
    tags:
      - subjectController
    parameters:
      - name: body
        in: body
        required: true
        schema:
          required:
            - subjectName
          properties:
            subjectName:
              type: string
              description: The subject's name.
            subjectId:
              type: string
              description: The subject's id.
              default: ""
    responses:
      200:
        description: The created subject, as returned by the subject controller.
        schema:
    """
    subjectName = request.json['subjectName']
    # subjectId defaults to "" (see schema); an empty id is generated below.
    subjectId = request.json.get('subjectId', "")
    adminId = request.json['adminId']

    if userRep.findById(adminId) is None:
        return "No such user", status.HTTP_404_NOT_FOUND
    if subjectId == "":
        subjectId = ig.generateId('subject')
    else:
        subject = subjectRep.findById(subjectId)
        if subject is not None:
            # A subject with this id already exists.
            return '', 226

    subject = Subject(id=subjectId,
                      name=subjectName,
                      createTime=tg.getNowAsMilli(),
                      updateTime=tg.getNowAsMilli())
    subject = subjectRep.save(subject)
    subjectUser = SubjectUser(id=ig.generateId('subjectUser'),
                              userId=adminId,
                              subjectId=subjectId,
                              role=1,
                              createTime=tg.getNowAsMilli(),
                              updateTime=tg.getNowAsMilli())
    subjectUserRep.save(subjectUser)
    return json_util.dumps({'subject': subject.__dict__}), status.HTTP_200_OK, ContentType.json
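
Assuming the function is registered under a route such as /subject and the Flask application object is available as app (neither is shown in the snippet), the endpoint could be exercised with Flask's test client; the ids below are made up:

with app.test_client() as client:
    resp = client.post('/subject', json={
        'subjectName': 'Mathematics',
        'subjectId': '',        # empty id -> a fresh one is generated
        'adminId': 'user-123',  # must exist in userRep, otherwise 404
    })
    print(resp.status_code, resp.get_data(as_text=True))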
Example #5
def save(subject: Subject):
    subject._id = mongo.db.subject.insert_one(subject.__dict__).inserted_id
    return subject
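
insert_one() returns an InsertOneResult whose inserted_id is the generated key, so storing it back on the object lets callers reference the new document. A possible read-side counterpart, assuming the repository queries on the application-level 'id' field (this helper is hypothetical, not shown in the source):

def find_by_id(subject_id):
    # find_one() returns the matching document dict, or None if absent.
    return mongo.db.subject.find_one({'id': subject_id})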
Example #6
def main(argv):
    # parameters for loading data and images
    detection_model_path = '../trained_models/detection_models/haarcascade_frontalface_default.xml'
    emotion_model_path = '../trained_models/emotion_models/fer2013_mini_XCEPTION.102-0.66.hdf5'
    gender_model_path = '../trained_models/gender_models/simple_CNN.81-0.96.hdf5'
    emotion_labels = get_labels('fer2013')
    gender_labels = get_labels('imdb')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # hyper-parameters for bounding boxes shape
    frame_window = 10
    gender_offsets = (30, 60)
    emotion_offsets = (20, 40)

    # loading models
    face_detection = load_detection_model(detection_model_path)
    emotion_classifier = load_model(emotion_model_path, compile=False)
    gender_classifier = load_model(gender_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    gender_target_size = gender_classifier.input_shape[1:3]

    # starting lists for calculating modes
    gender_window = []
    emotion_window = []

    # starting video streaming
    cv2.namedWindow('window_frame')
    video_capture = cv2.VideoCapture(0)

    try:
        opts, args = getopt.getopt(argv, "hs:", ["subject="])
    except getopt.GetoptError:
        print("emotion.py -s <subject>")
        sys.exit(2)

    subject = Subject()
    for opt, arg in opts:
        if opt == '-h':
            print("emotion.py - s <subject>")
            sys.exit()
        elif opt in ("-s", "--subject"):
            subject.name = arg

    print(subject.name)

    subject.start()

    while True:

        ret, bgr_image = video_capture.read()
        if not ret:
            break
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
        faces = detect_faces(face_detection, gray_image)

        for face_coordinates in faces:

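            # Crop the face twice: wider margins for the gender model,
            # tighter margins for the emotion model.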
            x1, x2, y1, y2 = apply_offsets(face_coordinates, gender_offsets)
            rgb_face = rgb_image[y1:y2, x1:x2]

            x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
            gray_face = gray_image[y1:y2, x1:x2]
            try:
                rgb_face = cv2.resize(rgb_face, gender_target_size)
                gray_face = cv2.resize(gray_face, emotion_target_size)
            except cv2.error:
                # The crop was empty or out of frame; skip this face.
                continue
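            # Normalise the grayscale crop and add the batch and channel axes
            # expected by the emotion CNN.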
            gray_face = preprocess_input(gray_face, False)
            gray_face = np.expand_dims(gray_face, 0)
            gray_face = np.expand_dims(gray_face, -1)
            emotion_label_arg = np.argmax(
                emotion_classifier.predict(gray_face))
            emotion_text = emotion_labels[emotion_label_arg]
            emotion_window.append(emotion_text)

            rgb_face = np.expand_dims(rgb_face, 0)
            rgb_face = preprocess_input(rgb_face, False)
            gender_prediction = gender_classifier.predict(rgb_face)
            gender_label_arg = np.argmax(gender_prediction)
            gender_text = gender_labels[gender_label_arg]
            gender_window.append(gender_text)

            subject.addMood(emotion_text, gender_text)

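            # Keep the rolling windows at no more than frame_window entries
            # so the mode reflects only recent frames.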
            if len(gender_window) > frame_window:
                emotion_window.pop(0)
                gender_window.pop(0)
            try:
                emotion_mode = mode(emotion_window)
                gender_mode = mode(gender_window)
            except StatisticsError:
                # No unique mode yet (assumes statistics.mode); wait for more frames.
                continue

            if gender_text == gender_labels[0]:
                color = (0, 0, 255)
            else:
                color = (255, 0, 0)

            draw_bounding_box(face_coordinates, rgb_image, color)
            draw_text(face_coordinates, rgb_image, gender_mode, color, 0, -20,
                      1, 1)
            draw_text(face_coordinates, rgb_image, emotion_mode, color, 0, -45,
                      1, 1)

        bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
        cv2.imshow('window_frame', bgr_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    subject.stop()
    # Release the camera and close the preview window on exit.
    video_capture.release()
    cv2.destroyAllWindows()
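
The loop drives a Subject object through start(), addMood() and stop(), but that class is not shown in the snippet. A minimal hypothetical stub that would satisfy the interface used above (the real implementation presumably persists or streams the samples):

class Subject:
    def __init__(self, name=None):
        self.name = name
        self._moods = []

    def start(self):
        self._moods = []

    def addMood(self, emotion, gender):
        self._moods.append((emotion, gender))

    def stop(self):
        print("%s: %d mood samples recorded" % (self.name, len(self._moods)))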
def get_all(self):
    """
    Return a list of all subjects as ordered dicts.
    """
    return self._subject_repository.get_entity(Subject(None, None, None))