Example #1
        break

    frame = imutils.resize(frame, width=800)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    clone = frame.copy()

    # detect faces in grayscale image
    rects = detector(gray, 1)

    # loop over face detections
    for rect in rects:
        # align faces
        face = fa.align(frame, gray, rect)

        # resize and crop image
        age_crops = cp.preprocess(age_mp.preprocess(sp.preprocess(face)))
        age_crops = np.array([iap.preprocess(c) for c in age_crops])

        gender_crops = cp.preprocess(gender_mp.preprocess(sp.preprocess(face)))
        gender_crops = np.array([iap.preprocess(c) for c in gender_crops])

        # predict age and gender from the extracted crops
        age_pred = age_model.predict(age_crops).mean(axis=0)
        gender_pred = gender_model.predict(gender_crops).mean(axis=0)

        # draw bounding box around face
        x, y, w, h = face_utils.rect_to_bb(rect)
        cv2.rectangle(clone, (x, y), (x + w, y + h), (0, 255, 0), 2)

        clone = agh.visualize_video(age_pred, gender_pred, age_le, gender_le,
                                    clone, (x, y))
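    # NOTE: sketch only, not part of the original example -- a typical way to
    # close the frame loop: show the annotated frame and quit on the `q` key
    # (assumes an interactive GUI display is available)
    cv2.imshow("Frame", clone)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break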
Example #2
	genderMeans["B"])
iap = ImageToArrayPreprocessor()

# load a sample of testing images
rows = open(config.TEST_MX_LIST).read().strip().split("\n")
rows = np.random.choice(rows, size=args["sample_size"])

# loop over the rows
for row in rows:
	# unpack the row
	(_, gtLabel, imagePath) = row.strip().split("\t")
	image = cv2.imread(imagePath)

	# pre-process the image twice: once for the age model and once
	# for the gender model
	ageImage = iap.preprocess(ageMP.preprocess(
		sp.preprocess(image)))
	genderImage = iap.preprocess(genderMP.preprocess(
		sp.preprocess(image)))
	ageImage = np.expand_dims(ageImage, axis=0)
	genderImage = np.expand_dims(genderImage, axis=0)

	# pass the ROIs through their respective models
	agePreds = ageModel.predict(ageImage)[0]
	genderPreds = genderModel.predict(genderImage)[0]

	# sort the predictions according to their probability
	ageIdxs = np.argsort(agePreds)[::-1]
	genderIdxs = np.argsort(genderPreds)[::-1]

	# visualize the age and gender predictions
	ageCanvas = AgeGenderHelper.visualizeAge(agePreds, ageLE)
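	# NOTE: sketch only, not part of the original example -- report the most
	# likely age bucket; assumes ageLE is a fitted scikit-learn LabelEncoder
	print("[INFO] predicted age: {} ({:.2f}%)".format(
		ageLE.classes_[ageIdxs[0]], agePreds[ageIdxs[0]] * 100))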
Example #3
sp = AspectAwarePreprocessor(width=224, height=224)
mp = MeanPreprocessor(config.R_MEAN, config.G_MEAN, config.B_MEAN)
iap = ImageToArrayPreprocessor(dataFormat="channels_first")

# loop over the testing images
for row in rows:
    # grab the target class label and the image path from the row
    (target, imagePath) = row.split("\t")[1:]
    target = int(target)

    # load the image from disk and pre-process it by resizing the
    # image and applying the pre-processors
    image = cv2.imread(imagePath)
    orig = image.copy()
    orig = imutils.resize(orig, width=min(500, orig.shape[1]))
    image = iap.preprocess(mp.preprocess(sp.preprocess(image)))
    image = np.expand_dims(image, axis=0)

    # classify the image and grab the indexes of the top-5 predictions
    preds = model.predict(image)[0]
    idxs = np.argsort(preds)[::-1][:5]

    # show the true class label
    print("[INFO] actual={}".format(le.inverse_transform(target)))

    # format and display the top predicted class label
    label = le.inverse_transform(idxs[0])
    label = label.replace(":", " ")
    label = "{}: {:.2f}%".format(label, preds[idxs[0]] * 100)
    cv2.putText(orig, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                (0, 255, 0), 2)
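    # NOTE: sketch only, not part of the original example -- print the full
    # top-5 ranking and display the annotated image; assumes le is a fitted
    # scikit-learn LabelEncoder and a GUI display is available
    labels = le.inverse_transform(idxs)
    for (i, idx) in enumerate(idxs):
        print("[INFO] rank {}: {}, {:.2f}%".format(i + 1, labels[i],
            preds[idx] * 100))
    cv2.imshow("Image", orig)
    cv2.waitKey(0)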
Example #4
            # resize the face to a fixed size, then extract 10-crop
            # patches from it
            face = sp.preprocess(face)
            patches = cp.preprocess(face)

            # allocate memory for the age and gender patches
            agePatches = np.zeros((patches.shape[0], 3, 227, 227),
                                  dtype="float")
            genderPatches = np.zeros((patches.shape[0], 3, 227, 227),
                                     dtype="float")

            # loop over the patches
            for j in np.arange(0, patches.shape[0]):
                # perform mean subtraction on the patch
                agePatch = ageMP.preprocess(patches[j])
                genderPatch = genderMP.preprocess(patches[j])
                agePatch = iap.preprocess(agePatch)
                genderPatch = iap.preprocess(genderPatch)

                # update the respective patches lists
                agePatches[j] = agePatch
                genderPatches[j] = genderPatch

            # make predictions on age and gender based on the extracted
            # patches
            agePreds = ageModel.predict(agePatches)
            genderPreds = genderModel.predict(genderPatches)

            # compute the average for each class label based on the
            # predictions for the patches
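            # NOTE: sketch only, not part of the original example -- one way
            # to collapse the per-patch predictions into a single probability
            # vector per model (mirrors the .mean(axis=0) used in Example #1)
            agePreds = agePreds.mean(axis=0)
            genderPreds = genderPreds.mean(axis=0)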
Example #5
means = json.loads(open(config.DATASET_MEAN).read())
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means['R'], means['G'], means['B'])
cp = CropPreprocessor(227, 227)
iap = ImageToArrayPreprocessor()

# load the pre-trained network
model = load_model(config.MODEL_PATH)

# In[5]:

predictions = []
for image in tqdm(images[:100]):
    face = face_get(image)
    crop = sp.preprocess(face)
    crop = mp.preprocess(crop)
    crop = np.expand_dims(crop, axis=0)
    #crop = np.array([iap.preprocess(c) for c in crop], dtype = "float32")
    pred = model.predict(crop)
    predictions.append([image, pred.mean(axis=0)])

# In[6]:

neu = [pred[1][0] for pred in predictions]
hap = [pred[1][1] for pred in predictions]
sad = [pred[1][2] for pred in predictions]
ang = [pred[1][3] for pred in predictions]
imgs = [pred[0] for pred in predictions]

# In[7]:
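# NOTE: sketch only, not part of the original notebook -- one plausible body
# for this cell: plot each emotion probability across the sampled images.
# The neutral/happy/sad/angry index mapping is inferred from the variable
# names above and is an assumption.
import matplotlib.pyplot as plt

plt.figure()
for (name, values) in [("neutral", neu), ("happy", hap),
                       ("sad", sad), ("angry", ang)]:
    plt.plot(values, label=name)
plt.xlabel("image index")
plt.ylabel("probability")
plt.legend()
plt.show()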