Example #1
        break

    frame = imutils.resize(frame, width=800)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clone = frame.copy()

    # detect faces in grayscale image
    rects = detector(gray, 1)

    # loop over face detections
    for rect in rects:
        # align faces
        face = fa.align(frame, gray, rect)

        # resize the face, subtract the channel means, and extract crops
        age_crops = cp.preprocess(age_mp.preprocess(sp.preprocess(face)))
        age_crops = np.array([iap.preprocess(c) for c in age_crops])

        gender_crops = cp.preprocess(gender_mp.preprocess(sp.preprocess(face)))
        gender_crops = np.array([iap.preprocess(c) for c in gender_crops])

        # predict age and gender by averaging predictions over the crops
        age_pred = age_model.predict(age_crops).mean(axis=0)
        gender_pred = gender_model.predict(gender_crops).mean(axis=0)

        # draw bounding box around face
        x, y, w, h = face_utils.rect_to_bb(rect)
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)

        clone = agh.visualize_video(age_pred, gender_pred, age_le, gender_le,
                                    clone, (x, y))
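
The snippet above relies on objects created earlier in the (truncated) script: a dlib face detector, an imutils FaceAligner, the pyimagesearch-style preprocessors that the later examples also instantiate, the two trained models, and their label encoders. A minimal setup sketch for the preprocessing side, assuming those preprocessor classes are importable from the project; the predictor path, crop sizes, and channel means below are placeholders rather than the original values:

# hypothetical setup for the names used above (path, sizes, and means are assumptions)
import dlib
from imutils.face_utils import FaceAligner

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
fa = FaceAligner(predictor, desiredFaceWidth=256)

sp = SimplePreprocessor(256, 256)                      # resize the aligned face
cp = CropPreprocessor(227, 227)                        # 10-crop oversampling
age_mp = MeanPreprocessor(123.68, 116.78, 103.94)      # placeholder channel means
gender_mp = MeanPreprocessor(123.68, 116.78, 103.94)   # placeholder channel means
iap = ImageToArrayPreprocessor()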
Example #2
    rects = detector(gray, 1)

    if len(rects) == 0:
        show_local_img(advertise, montage_copy)
    # otherwise, process the first detected face
    else:
            rect = rects[0]
            # determine the facial landmarks for the face region, then
            # align the face
            shape = predictor(gray, rect)
            face = fa.align(image, gray, rect)

            # resize the face to a fixed size, then extract 10-crop
            # patches from it
            face = sp.preprocess(face)
            patches = cp.preprocess(face)

            # allocate memory for the age and gender patches
            agePatches = np.zeros((patches.shape[0], 3, 227, 227),
                                  dtype="float")
            genderPatches = np.zeros((patches.shape[0], 3, 227, 227),
                                     dtype="float")

            # loop over the patches
            for j in np.arange(0, patches.shape[0]):
                # perform mean subtraction on the patch
                agePatch = ageMP.preprocess(patches[j])
                genderPatch = genderMP.preprocess(patches[j])
                agePatch = iap.preprocess(agePatch)
                genderPatch = iap.preprocess(genderPatch)
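
The excerpt stops inside the patch loop. A sketch of how it would typically continue: store each pre-processed patch, then average each model's predictions across the crops, mirroring how Example #1 averages over its crops (ageModel and genderModel follow the naming used in Example #3 and are assumptions here):

                # store the pre-processed patches (continuation sketch)
                agePatches[j] = agePatch
                genderPatches[j] = genderPatch

            # average each model's predictions across the 10 crops
            agePreds = ageModel.predict(agePatches).mean(axis=0)
            genderPreds = genderModel.predict(genderPatches).mean(axis=0)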
Example #3
iap = ImageToArrayPreprocessor()

# load a sample of testing images
rows = open(config.TEST_MX_LIST).read().strip().split("\n")
rows = np.random.choice(rows, size=args["sample_size"])

# loop over the rows
for row in rows:
	# unpack the row
	(_, gtLabel, imagePath) = row.strip().split("\t")
	image = cv2.imread(imagePath)

	# pre-process the image, one for the age model and another for
	# the gender model
	ageImage = iap.preprocess(ageMP.preprocess(
		sp.preprocess(image)))
	genderImage = iap.preprocess(genderMP.preprocess(
		sp.preprocess(image)))
	ageImage = np.expand_dims(ageImage, axis=0)
	genderImage = np.expand_dims(genderImage, axis=0)

	# pass the ROIs through their respective models
	agePreds = ageModel.predict(ageImage)[0]
	genderPreds = genderModel.predict(genderImage)[0]

	# sort the predictions according to their probability
	ageIdxs = np.argsort(agePreds)[::-1]
	genderIdxs = np.argsort(genderPreds)[::-1]

	# visualize the age and gender predictions
	ageCanvas = AgeGenderHelper.visualizeAge(agePreds, ageLE)
Example #4
means = json.loads(open(config.DATASET_MEAN).read())
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means['R'], means['G'], means['B'])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# load the pre-trained network
model = load_model(config.MODEL_PATH)

# In[5]:

predictions = []
for image in tqdm(images[:100]):
    face = face_get(image)
    crop = sp.preprocess(face)
    crop = mp.preprocess(crop)
    crop = np.expand_dims(crop, axis=0)
    #crop = np.array([iap.preprocess(c) for c in crop], dtype = "float32")
    pred = model.predict(crop)
    predictions.append([image, pred.mean(axis=0)])

# In[6]:

# split the averaged predictions into per-emotion probability lists
neu = [pred[1][0] for pred in predictions]
hap = [pred[1][1] for pred in predictions]
sad = [pred[1][2] for pred in predictions]
ang = [pred[1][3] for pred in predictions]
imgs = [pred[0] for pred in predictions]

# In[7]:
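
face_get() in the last snippet is never defined in the excerpt. A minimal sketch of what such a helper could look like, using OpenCV's bundled Haar cascade and assuming the images list holds file paths; this is an illustration, not the original implementation:

import cv2

def face_get(image_path):
    # hypothetical helper: read the image, detect the largest face, return its crop
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) == 0:
        # fall back to the full image when no face is found
        return img
    (x, y, w, h) = max(faces, key=lambda r: r[2] * r[3])
    return img[y:y + h, x:x + w]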