Example #1
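Grabs the newest frame from the camera stream's deque; if the corresponding checkboxes are enabled, runs motion and face detection and records an anomaly on a hit; then resizes the frame and paints it onto the video label as a QPixmap.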
    def set_frame(self):
        """Sets pixmap image to video frame"""

        if not self.online:
            self.spin(1)
            return

        if self.deque and self.online:
            # Grab latest frame
            frame = self.deque[-1]
            if window.cb_move_dec.isChecked():
                flag_d, frame = self.move_det.detectV1(frame, (253, 54, 78))
                if flag_d:
                    # "Движение" = "Motion"
                    CreateAnomaly(frame, "Движение", self.camera_stream_link)
            if window.cb_face_dec.isChecked():
                flag_d, frame = face_detector.detect(frame)
                if flag_d:
                    # "Лицо" = "Face"
                    CreateAnomaly(frame, "Лицо", self.camera_stream_link)
            # Keep frame aspect ratio
            if self.maintain_aspect_ratio:
                self.frame = QImage.resize(frame, width=self.screen_width)
            # Force resize
            else:
                self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))

            # Convert to pixmap and set to video frame
            self.img = QImage(self.frame, self.frame.shape[1], self.frame.shape[0],
                                   QImage.Format_RGB888).rgbSwapped()
            self.pix = QPixmap.fromImage(self.img)
            self.video_frame.setPixmap(self.pix)
Example #2
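A Flask view: save the uploaded image, run face detection on it, and re-render the page with the number of faces found.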
def home():
    if request.method == 'POST':
        img1 = app.root_path + '/static/before.png'
        img2 = app.root_path + '/static/after.png'
        f = request.files['image']
        f.save(img1)
        faces = detect(img1, img2)
        rand = randint(0, 10000)
        return render_template('home.html', faces=faces, rand=rand)
    return render_template('home.html', faces=-1)
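Example #3
An Ice/RoboComp-style worker: grab a camera frame over the CameraSimple proxy, detect faces, align, crop, and normalize each one, then classify its emotion with a TensorFlow session.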
    def compute(self):
        print('SpecificWorker.compute...')
        try:
            data = self.camerasimple_proxy.getImage()
            # np.fromstring is deprecated for binary data; use frombuffer
            arr = np.frombuffer(data.image, np.uint8)
            frame = np.reshape(arr, (data.height, data.width, data.depth))
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

            # Detect faces
            faces = face_detector.detect(frame)

            emotions_temp = list()
            for (x1, y1, x2, y2) in faces:

                # Align the face
                fa = FaceAligner(predictor, desiredFaceWidth=IMAGE_SIZE * 2)
                faceAligned = fa.align(frame, gray,
                                       dlib.rectangle(x1, y1, x2, y2))

                # Convert to grayscale
                faceAligned = cv2.cvtColor(faceAligned, cv2.COLOR_RGB2GRAY)

                # Closely crop out the face
                faces2 = face_cascade.detectMultiScale(faceAligned)
                if len(faces2) == 0:
                    continue
                (x, y, w, h) = faces2[0]
                cropped_frame = faceAligned[y:y + h, x:x + w]

                # Apply adaptive histogram equalization
                clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(2, 2))
                cropped_frame = clahe.apply(cropped_frame)

                # Resize the image
                cropped_frame = cv2.resize(cropped_frame,
                                           (IMAGE_SIZE, IMAGE_SIZE))

                # Do necessary preprocessing
                cropped_frame = cropped_frame.reshape(
                    (1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
                cropped_frame = (cropped_frame - np.mean(cropped_frame)
                                 ) / np.std(cropped_frame)

                # Feed the cropped and preprocessed frame to classifier
                result = self.sess.run(self.output,
                                       {self.x_input: cropped_frame})

                # Get the emotion
                emotion = EMOTIONS[np.argmax(result)]

                # Store emotion data
                emotionData = SEmotion()
                emotionData.x = x1
                emotionData.y = y1
                emotionData.w = abs(x2 - x1)
                emotionData.h = abs(y2 - y1)
                emotionData.emotion = emotion
                emotions_temp.append(emotionData)

                # For testing purposes
                cv2.imshow("Image Fed to Classifier",
                           cropped_frame.reshape((IMAGE_SIZE, IMAGE_SIZE)))

            self.emotionList = emotions_temp

        except Ice.Exception as e:
            traceback.print_exc()
            print(e)
Example #4
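The tail of a bokeh-detection method: it scores an image by averaging triangular regions in the four corners of a scaled power spectrum; the __main__ block runs the detector on a single file.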
        # Scale the power spectrum and clip to the 8-bit range
        Pow = Pow * 30
        Pow[Pow > 255] = 255

        nr = min(nw, nh)

        # Average the triangular regions in the four corners of the spectrum
        ltarr = np.tril(Pow[0:nr, 0:nr][::-1])[::-1]
        rtarr = np.tril(Pow[-nr:, 0:nr])
        lbarr = np.triu(Pow[0:nr, -nr:])
        rbarr = np.triu(Pow[-nr:, -nr:][::-1])[::-1]
        num = nr * (nr + 1) / 2  # number of elements in one triangle
        lt = np.sum(ltarr) / num
        rt = np.sum(rtarr) / num
        lb = np.sum(lbarr) / num
        rb = np.sum(rbarr) / num

        return lt + rt + lb + rb


if __name__ == "__main__":
    fname = "C:\\Users\\120350181\\Desktop\\one_image\\Keishi_Ueda\\img\\e139.jpg"
    img = cv2.imread(fname)
    dets = detect(img)
    bd = BokehDetector()
    for j, d in enumerate(dets):
        start = time.time()
        result = bd.getValue(img, d)
        end = time.time()
        # DataFrame.append was removed in pandas 2.0; use concat instead
        # (result3 is assumed to be a DataFrame defined earlier)
        result3 = pd.concat([result3,
                             pd.DataFrame({"num": result}, index=[fname])])
        break
Example #5
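Video face swapping: dlib landmarks are tracked between frames with Lucas-Kanade optical flow, fused with fresh detections, warped onto the target face, and blended into each frame with seamless cloning.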
def swap_faces(source, target):
    # Get the frame from the target image
    target_image, target_points = detect(target)
    old_gray = cv2.cvtColor(target_image, cv2.COLOR_BGR2GRAY)
    face_det = dlib.get_frontal_face_detector()
    face_pred = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    video = cv2.VideoCapture(source)
    retval, frame = video.read()

    # Output video parameters
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    height, width = frame.shape[:2]
    fps = video.get(cv2.CAP_PROP_FPS)
    output = cv2.VideoWriter('output5.avi', fourcc, fps, (width, height))

    lk_params = dict(winSize=(15, 15),
                     maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    # Getting the first frame for optical flow
    gray_previous = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    dets = face_det(gray_previous, 1)
    for (i, det) in enumerate(dets):
        points_previous = face_utils.shape_to_np(face_pred(gray_previous, det))

    count = 0
    while True:
        print(count)  # frame counter, for progress logging
        count += 1
        retval, frame_new = video.read()
        if retval:
            gray_frame_new = cv2.cvtColor(frame_new, cv2.COLOR_BGR2GRAY)
            points_previous = points_previous.reshape(68, 2).astype(np.float32)
            of_points_new, status, err = cv2.calcOpticalFlowPyrLK(
                gray_previous, gray_frame_new, points_previous, None,
                **lk_params)
            dets_new = face_det(gray_frame_new, 1)
            for (i, det) in enumerate(dets_new):
                points_new = face_utils.shape_to_np(
                    face_pred(gray_frame_new, det))

            # Fuse optical-flow estimates with fresh dlib detections
            new_points = np.zeros((68, 2))
            for i in range(68):
                # dlib failed (e.g. side view, bad lighting) but flow tracked
                if len(points_new) != 68 and status[i] == 1:
                    new_points[i, :] = of_points_new[i, :]
                # Both sources available: average them
                elif len(points_new) == 68 and status[i] == 1:
                    new_points[i, :] = (0.5 * of_points_new[i, :]
                                        + 0.5 * points_new[i, :])
                # Flow lost the point: average the previous and detected ones
                else:
                    new_points[i, :] = (0.5 * points_previous[i, :]
                                        + 0.5 * points_new[i, :])

            swapped = warp(frame_new, target_image, new_points, target_points)

            # Drop landmark points that fall outside the frame bounds
            source_r = np.zeros(new_points.shape)
            count2 = 0
            for i in range(new_points.shape[0]):
                if (0 <= new_points[i][0] < frame_new.shape[1]
                        and 0 <= new_points[i][1] < frame_new.shape[0]):
                    source_r[count2, :] = new_points[i, :]
                    count2 += 1
            new_points = source_r[:count2].astype(int)
            hull = cv2.convexHull(np.array(new_points), False).astype(np.int32)
            mask = np.zeros(frame_new.shape, dtype=np.uint8)
            cv2.fillConvexPoly(mask, hull, (255, 255, 255))

            boundingRect = cv2.boundingRect(np.float32(hull))
            width = boundingRect[2]
            height = boundingRect[3]
            centreX = int(boundingRect[0] + width / 2)
            centreY = int(boundingRect[1] + height / 2)
            center = (centreX, centreY)
            swapped = cv2.seamlessClone(swapped, frame_new, mask, center,
                                        cv2.NORMAL_CLONE)

            output.write(swapped)
            gray_previous = gray_frame_new
            points_previous = points_new
        else:
            break

    video.release()
    output.release()
Example #6
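Another copy of the emotion-recognition worker shown in Example #3.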
    def compute(self):
        print('SpecificWorker.compute...')
        try:
            data = self.camerasimple_proxy.getImage()
            # np.fromstring is deprecated for binary data; use frombuffer
            arr = np.frombuffer(data.image, np.uint8)
            # Image buffers are row-major: (height, width, depth)
            frame = np.reshape(arr, (data.height, data.width, data.depth))
            gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)

            # Detect faces
            faces = face_detector.detect(frame)

            emotions_temp = list()
            for (x1, y1, x2, y2) in faces:

                # Align the face
                fa = FaceAligner(predictor, desiredFaceWidth=IMAGE_SIZE * 2)
                faceAligned = fa.align(frame, gray, dlib.rectangle(x1, y1, x2, y2))

                # Convert to grayscale
                faceAligned = cv2.cvtColor(faceAligned, cv2.COLOR_RGB2GRAY)

                # Closely crop out the face
                faces2 = face_cascade.detectMultiScale(faceAligned)
                if len(faces2) == 0:
                    continue
                (x, y, w, h) = faces2[0]
                cropped_frame = faceAligned[y:y + h, x:x + w]

                # Apply adaptive histogram equalization
                clahe = cv2.createCLAHE(clipLimit=10.0, tileGridSize=(2, 2))
                cropped_frame = clahe.apply(cropped_frame)

                # Resize the image
                cropped_frame = cv2.resize(cropped_frame, (IMAGE_SIZE, IMAGE_SIZE))

                # Do necessary preprocessing
                cropped_frame = cropped_frame.reshape((1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
                cropped_frame = (cropped_frame - np.mean(cropped_frame)) / np.std(cropped_frame)

                # Feed the cropped and preprocessed frame to classifier
                result = self.sess.run(self.output, {self.x_input: cropped_frame})

                # Get the emotion
                emotion = EMOTIONS[np.argmax(result)]

                # Store emotion data
                emotionData = SEmotion()
                emotionData.x = x1
                emotionData.y = y1
                emotionData.w = abs(x2 - x1)
                emotionData.h = abs(y2 - y1)
                emotionData.emotion = emotion
                emotions_temp.append(emotionData)

                # For testing purposes
                cv2.imshow("Image Fed to Classifier", cropped_frame.reshape((IMAGE_SIZE, IMAGE_SIZE)))

            self.emotionList = emotions_temp

        except Ice.Exception as e:
            traceback.print_exc()
            print(e)
Example #7
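Reads a video frame by frame, classifies each detected face with a TensorFlow model, and writes an annotated copy to the output video.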
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
output = cv2.VideoWriter('output.avi', fourcc, fps, (int(width), int(height)))

# Red, in (B, G, R) order
COLOR = (0, 0, 255)

i = 1
while True:
    # Read one frame
    ret, frame = input.read()
    # Stop when the end of the file is reached
    if not ret:
        break

    # Crop out each face region and classify it
    for (x, y, w, h) in detect(frame):
        clip = frame[y:y + h, x:x + w]

        img = cv2.resize(clip, (IMAGE_HEIGHT_PX, IMAGE_WIDTH_PX))
        img = img.flatten().astype(np.float32) / 255.0
        pred = sess.run(logits,
                        feed_dict={
                            images_placeholder: [img],
                            keep_prob: 1.0
                        })

        cv2.rectangle(frame, (x, y), (x + w, y + h), COLOR, 2)
        cv2.putText(frame, label[np.argmax(pred)], (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.5, COLOR, 3)
    output.write(frame)
    print(i)  # progress counter
    i += 1
Example #8
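Fetches base64-encoded JPEG frames from a server, decodes them with OpenCV, and scores each one with BokehDetector.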
        shutil.move("{}{}".format(cpath,"demo\\tmp\\tmp.jpg"), "{}\\tmp.jpg".format(dir))
        return None
    else:
        imgdata = base64.b64decode(buffer)
        filename = "{}{}".format(cpath,"demo\\tmp\\tmp.jpg")  # I assume you have a way of picking unique filenames
        with open(filename, 'wb') as f:
            f.write(imgdata)
        print "ok"
        decimg = cv2.imread(filename)
        return decimg
        
def load_dumps(f):
    obj = {}
    while True:
        try:
            obj.update(pickle.load(f))
        except EOFError:  # end of the pickle stream
            break
    return obj
  
while True:
    img = getImageFromServer()
    if img is not None:
        detect(img)
        bokeh = BokehDetector()
        print(bokeh.getValue(img))
        #cv2.imshow('Capture',img)
        #key=cv2.waitKey(100)
        #if(int(key)>27): break
    img = ''
Example #9
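Runs the detector over a video file and draws bounding boxes on each frame; the commented-out block at the top is the single-image variant.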
# start = time.time()
# bbox = detect(image, detector, using_hog)
# print("Processing time for face detection {}".format(time.time() - start))

# for box in bbox:
#     x_min, x_max, y_min, y_max = box
#     cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (0, 255, 0), 3)
# cv2.imshow("Yoona", image)
# cv2.waitKey(0)

cap = cv2.VideoCapture("/home/yoona/Desktop/test/outpy1616144005.0.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    bbox = detect(frame, detector, using_hog)

    for box in bbox:
        x_min, x_max, y_min, y_max = box
        cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), (0, 255, 0), 3)

    # time.sleep(0.01)
    frame = cv2.resize(frame, (640, 480))
    cv2.imshow("Testtttttttttt", frame)
    k = cv2.waitKey(5)
    if k == 32:  # Space bar exits
        break
Example #10
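A minimal webcam loop: capture a frame, run the face detector, and display the result until Q is pressed.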
import cv2

import webcam
import face_detector

while True:
    frame = webcam.capture()

    face_detector.detect(frame)

    cv2.imshow('Video', frame)

    # Press Q to Exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

webcam.exit()
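webcam and face_detector are project-local modules that this page does not show. A minimal sketch of a detect() compatible with the loop above, assuming OpenCV's bundled Haar cascade (a hypothetical stand-in, not the original module), might look like:

import cv2

# Hypothetical stand-in for the face_detector module used above:
# finds faces and draws boxes on the frame in place
_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

def detect(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = _cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return faces

Because the boxes are drawn in place, the caller's cv2.imshow('Video', frame) shows the annotated frame without further work.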
Example #11
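One-hot encodes age labels, then, for each test image, predicts an age bucket with a trained model, maps it to a text label, and displays the annotated output.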
# Imports used by this fragment (data, model, extract and detect
# are assumed to be defined earlier in the script)
from os import listdir

import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder

y = data.iloc[:, 0].values.reshape(-1, 1)
ohe = OneHotEncoder(handle_unknown='ignore')
y = ohe.fit_transform(y)
print(y)

folder = '/home/clown/test_images/'

for file in listdir(folder):
    path = folder + file
    face_array = extract(path)
    face_array = np.expand_dims(face_array, axis=0)
    age = model.predict(face_array)
    out = ohe.inverse_transform(age)
    print(out)

    # inverse_transform returns a 2-D array; pull out the scalar age value
    age_value = out[0][0]
    label = ''
    if age_value == 30:
        label = 'Middle_aged'
    elif age_value == 15:
        label = 'Young'
    elif age_value == 1:
        label = 'Child'
    elif age_value == 51:
        label = 'Old'

    output_array = detect(path, label)

    plt.imshow(output_array)
    plt.show()