Example #1
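# Assumed imports and context (not part of the original snippet): moviepy 1.x,
# with an uploadVid(title, script, path, publishTime) helper defined elsewhere
# in the project.
from moviepy.editor import (AudioFileClip, CompositeAudioClip, VideoFileClip,
                            concatenate_videoclips)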
def createVideo(title, script, publishTime):
    intro = VideoFileClip("clips/intro.mp4")
    body = VideoFileClip("clips/body.mp4")
    loop = VideoFileClip("clips/loop.mp4")
    outro = VideoFileClip("clips/outro.mp4")

    # Build the narration file name from the title, keeping alphanumerics only
    titleMP3 = ''.join(e for e in title if e.isalnum()) + ".mp3"

    audioclip = AudioFileClip(titleMP3)

    scriptLen = audioclip.duration
    loopLen = loop.duration

    # Number of loop-clip repetitions needed to cover the narration
    multiplier = scriptLen / loopLen

    # Mix the narration over the body clip's existing audio
    new_audioclip = CompositeAudioClip([body.audio, audioclip])
    body.audio = new_audioclip

    # Pad with enough copies of the loop clip to cover the rest of the narration
    clips = [intro, body]
    multiplier = multiplier - 1
    while multiplier > 0:
        clips.append(loop)
        multiplier = multiplier - 1

    clips.append(outro)
    final_clip = concatenate_videoclips(clips)

    titleMP4 = ''.join(e for e in title if e.isalnum()) + ".mp4"
    final_clip.write_videofile(titleMP4)
    uploadVid(title, script, titleMP4, publishTime)
Example #2
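# Assumed context (not part of the original snippet): this is a method of a
# class that sets self.__path_to_save_video / self.__path_to_save_audio and
# defines the _take_video / _take_audio helpers; os and moviepy's VideoFileClip
# and AudioFileClip are imported at module level.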
def take(self, options: dict):
    if options["with_video"]:
        self._take_video(options["video_url"])
    if options["with_audio"]:
        self._take_audio(options["audio_url"])
        if options["with_video"]:
            # Mux the separately downloaded audio and video tracks
            video = VideoFileClip(self.__path_to_save_video)
            audio = AudioFileClip(self.__path_to_save_audio)
            video.audio = audio.set_duration(video.duration)
            # Write to a temporary file first: VideoFileClip reads its source
            # lazily, so the original must not be deleted before the write ends.
            tmp_path = self.__path_to_save_video + ".muxed.mp4"
            video.write_videofile(tmp_path)
            video.close()
            audio.close()
            os.remove(self.__path_to_save_video)
            os.remove(self.__path_to_save_audio)
            os.rename(tmp_path, self.__path_to_save_video)
Example #3
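# Assumed imports (not part of the original snippet); facenet and
# src.align.detect_face come from this project's repository, and the
# tf.Session/tf.GPUOptions calls below assume TensorFlow 1.x.
import collections
import os
import pickle

import cv2
import numpy as np
import tensorflow as tf
from moviepy.editor import VideoFileClip

import facenet
import src.align.detect_face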
def main(filename, tmp_result):
    args = lambda: None  # simple attribute container
    args.video = True
    args.youtube_video_url = ''
    args.video_speedup = 2
    args.webcam = False

    cluster_root = 'C:/Users/mmlab/PycharmProjects/UI_pyqt/cluster_people'
    num_clusters = len(os.listdir(cluster_root))

    # Total number of face crops across all "<i>human" cluster folders
    a = 0
    for i in range(num_clusters):
        a += len(os.listdir('{}/{}human'.format(cluster_root, i)))

    print(num_clusters)
    folder = []
    folder_name = []
    folder_in_file = []
    under_folder = []
    for i in range(num_clusters):
        b = len(os.listdir('{}/{}human'.format(cluster_root, i)))
        d = int(b / a * 100)  # this cluster's share of all face crops, in percent
        print(d)
        folder.append('{}/{}human'.format(cluster_root, i))
        folder_name.append(i)
        if d < 30:
            under_folder.append(str(i) + 'human')
    for name in under_folder:
        print(name)
    for i in range(len(folder)):
        print(folder[i])
        print(folder_name[i])
        if int(folder_name[i]) > 0:
            file = os.path.join('{}/{}human'.format(cluster_root, int(folder_name[i])),
                                str(folder_name[i]) + 'human1.png')
            folder_in_file.append(file)

    for i in range(len(folder_in_file) - 1):
        cv = cv2.imread(folder_in_file[i], cv2.IMREAD_COLOR)
        #cv2.imwrite('model{}.png'.format(i), cv)
    minsize = 20
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709
    image_size = 182
    input_image_size = 160

    # comment out these lines if you do not want video recording
    # USE FOR RECORDING VIDEO
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')

    # Get the path of the classifier and load it
    project_root_folder = 'C:/Users/mmlab/PycharmProjects/UI_pyqt/'
    classifier_path = project_root_folder + 'trained_classifier/video_new_name_test4.pkl'
    print(classifier_path)
    with open(classifier_path, 'rb') as f:
        (model, class_names) = pickle.load(f)
        print("Loaded classifier file")

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            # Bounding box
            pnet, rnet, onet = src.align.detect_face.create_mtcnn(
                sess, project_root_folder + "src/align")
            # Get the path of the facenet model and load it
            facenet_model_path = project_root_folder + "20180402-114759/20180402-114759.pb"
            facenet.load_model(facenet_model_path)

            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Start video capture
            people_detected = set()

            person_detected = collections.Counter()

            video_path = project_root_folder
            video_name = filename  # the video to process
            full_original_video_path_name = filename
            print(filename)
            video_capture_path = full_original_video_path_name
            if not os.path.isfile(full_original_video_path_name):
                print('Video not found at path ' +
                      full_original_video_path_name +
                      '. Commencing download from YouTube')
                # Note if the video ever gets removed this will cause issues
                #YouTube(args.youtube_video_url).streams.first().download(output_path =video_path, filename=video_name)
            video_capture = cv2.VideoCapture(full_original_video_path_name)
            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float

            videoclip = VideoFileClip(full_original_video_path_name)
            audioclip = videoclip.audio
            # output file name
            video_recording = cv2.VideoWriter(
                project_root_folder + 'final_video_mosaic.avi', fourcc, 15,
                (int(width), int(height)))
            output_video_name = project_root_folder + 'final_video_mosaic.avi'

            total_frames_passed = 0

            # Main loop: detect faces with MTCNN, identify them with the FaceNet
            # classifier, and mosaic the identities listed in known_name
            while True:
                try:
                    ret, frame = video_capture.read()
                except Exception as e:
                    break
                if ret:
                    # Skip frames if video is to be sped up
                    if args.video_speedup:
                        total_frames_passed += 1
                        if total_frames_passed % args.video_speedup != 0:
                            continue

                    bounding_boxes, _ = src.align.detect_face.detect_face(
                        frame, minsize, pnet, rnet, onet, threshold, factor)
                    frame_track = []
                    faces_found = bounding_boxes.shape[0]
                    known_name = ['4human']  # identities to blur
                    if faces_found > 0:
                        det = bounding_boxes[:, 0:4]

                        bb = np.zeros((faces_found, 4), dtype=np.int32)
                        for i in range(faces_found):
                            bb[i][0] = det[i][0]
                            bb[i][1] = det[i][1]
                            bb[i][2] = det[i][2]
                            bb[i][3] = det[i][3]

                            # Skip boxes that fall partly outside the frame
                            if (bb[i][0] <= 0 or bb[i][1] <= 0
                                    or bb[i][2] >= frame.shape[1]
                                    or bb[i][3] >= frame.shape[0]):
                                print('face is out of range!')
                                continue

                            cropped = frame[bb[i][1]:bb[i][3],
                                            bb[i][0]:bb[i][2], :]
                            scaled = cv2.resize(
                                cropped, (input_image_size, input_image_size),
                                interpolation=cv2.INTER_CUBIC)
                            # cv2.imshow("Cropped and scaled", scaled)
                            # cv2.waitKey(1)
                            scaled = facenet.prewhiten(scaled)
                            # cv2.imshow("\"Whitened\"", scaled)
                            # cv2.waitKey(1)

                            scaled_reshape = scaled.reshape(
                                -1, input_image_size, input_image_size, 3)
                            feed_dict = {
                                images_placeholder: scaled_reshape,
                                phase_train_placeholder: False
                            }
                            emb_array = sess.run(embeddings,
                                                 feed_dict=feed_dict)
                            predictions = model.predict_proba(emb_array)
                            best_class_indices = np.argmax(predictions, axis=1)
                            best_class_probabilities = predictions[
                                np.arange(len(best_class_indices)),
                                best_class_indices]
                            best_name = class_names[best_class_indices[0]]

                            if best_class_probabilities[0] > 0.09:

                                for y in range(len(known_name)):
                                    if class_names[best_class_indices[0]] == known_name[y]:
                                        # Mosaic: blur the matched face region in place
                                        frame[bb[i][1] + 5:bb[i][3] - 5,
                                              bb[i][0] + 2:bb[i][2] - 2] = cv2.blur(
                                                  frame[bb[i][1] + 5:bb[i][3] - 5,
                                                        bb[i][0] + 2:bb[i][2] - 2],
                                                  (50, 50))

                                person_detected[best_name] += 1

                            else:
                                cv2.rectangle(frame, (bb[i][0], bb[i][1]),
                                              (bb[i][2], bb[i][3]),
                                              (0, 255, 0), 2)

                        # total_frames_passed += 1
                        # if total_frames_passed == 2:

                        for person, count in person_detected.items():
                            if count > 4:
                                print("Person Detected: {}, Count: {}".format(
                                    person, count))
                                people_detected.add(person)

                        # person_detected.clear()
                        # total_frames_passed = 0

                    #cv2.putText(frame, "People detected so far:", (20, 20), cv2.FONT_HERSHEY_PLAIN,
                    #1, (255, 0, 0), thickness=1, lineType=2)
                    '''
                    currentYIndex = 40
                    for idx, name in enumerate(people_detected):
                        cv2.putText(frame, name, (20, currentYIndex + 20 * idx), cv2.FONT_HERSHEY_PLAIN,
                                    1, (0, 0, 255), thickness=1, lineType=2)
                    '''
                    cv2.imshow("Face Detection and Identification", frame)
                    video_recording.write(frame)
                    frame_track.append(frame)
                    #if cv2.waitKey(1) & 0xFF == ord('q'):
                    #    break
                else:
                    break
    print("mosaiced")
    video_recording.release()
    video_capture.release()
    cv2.destroyAllWindows()
    videoclip2 = VideoFileClip(output_video_name)
    videoclip2.audio = audioclip
    # output file name
    videoclip2.write_videofile("tmp_result1.mp4")
    print("done")
Example #4
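# Apparently moviepy's own regression test for issue #334. Assumed context
# (not part of the snippet): TMP_DIR and the media/ files come from the test
# suite, and VideoFileClip, ImageClip, CompositeVideoClip and
# concatenate_videoclips are imported from moviepy.editor.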
def test_issue_334():
    # NOTE: this is horrible. Any simpler version ?
    last_move = None
    last_move1 = None

    lis = [
        (0.0, 113, 167, 47),
        (0.32, 138, 159, 47),
        (0.44, 152, 144, 47),
        (0.48, 193, 148, 47),
        (0.6, 193, 148, 47),
        (0.76, 205, 138, 55),
        (0.88, 204, 121, 63),
        (0.92, 190, 31, 127),
        (1.2, 183, 59, 127),
        (1.4, 137, 22, 127),
        (1.52, 137, 22, 127),
        (1.72, 129, 67, 127),
        (1.88, 123, 69, 127),
        (2.04, 131, 123, 63),
        (2.24, 130, 148, 63),
        (2.48, 130, 148, 63),
        (2.8, 138, 180, 63),
        (3.0, 138, 180, 63),
        (3.2, 146, 192, 63),
        (3.28, 105, 91, 151),
        (3.44, 105, 91, 151),
        (3.72, 11, 48, 151),
        (3.96, 5, 78, 151),
        (4.32, 4, 134, 1),
        (4.6, 149, 184, 48),
        (4.8, 145, 188, 48),
        (5.0, 154, 217, 48),
        (5.08, 163, 199, 48),
        (5.2, 163, 199, 48),
        (5.32, 164, 187, 48),
        (5.48, 163, 200, 48),
        (5.76, 163, 200, 48),
        (5.96, 173, 199, 48),
        (6.0, 133, 172, 48),
        (6.04, 128, 165, 48),
        (6.28, 128, 165, 48),
        (6.4, 129, 180, 48),
        (6.52, 133, 166, 48),
        (6.64, 133, 166, 48),
        (6.88, 144, 183, 48),
        (7.0, 153, 174, 48),
        (7.16, 153, 174, 48),
        (7.24, 153, 174, 48),
        (7.28, 253, 65, 104),
        (7.64, 253, 65, 104),
        (7.8, 279, 116, 80),
        (8.0, 290, 105, 80),
        (8.24, 288, 124, 80),
        (8.44, 243, 102, 80),
        (8.56, 243, 102, 80),
        (8.8, 202, 107, 80),
        (8.84, 164, 27, 104),
        (9.0, 164, 27, 104),
        (9.12, 121, 9, 104),
        (9.28, 77, 33, 104),
        (9.32, 52, 23, 104),
        (9.48, 52, 23, 104),
        (9.64, 33, 46, 104),
        (9.8, 93, 49, 104),
        (9.92, 93, 49, 104),
        (10.16, 173, 19, 104),
        (10.2, 226, 173, 48),
        (10.36, 226, 173, 48),
        (10.48, 211, 172, 48),
        (10.64, 208, 162, 48),
        (10.92, 220, 171, 48),
    ]

    # lis1 duplicated the same keyframe list verbatim; posi() and size() each
    # consume their own list, so a shallow copy is enough.
    lis1 = list(lis)

    def posi(t):
        # Piecewise-linear interpolation of the (x, y) position between
        # keyframes; consumes `lis` as time advances.
        nonlocal last_move
        if len(lis) == 0:
            return (last_move[1], last_move[2])
        if t >= lis[0][0]:
            last_move = item = lis.pop(0)
            return (item[1], item[2])
        else:
            dura = lis[0][0] - last_move[0]
            now = t - last_move[0]
            w = (lis[0][1] - last_move[1]) * (now / dura)
            h = (lis[0][2] - last_move[2]) * (now / dura)
            return (last_move[1] + w, last_move[2] + h)

    def size(t):
        # Same interpolation for the clip size; column 3 is the width and
        # the height is width * 1.33.
        nonlocal last_move1
        if len(lis1) == 0:
            return (last_move1[3], last_move1[3] * 1.33)
        if t >= lis1[0][0]:
            last_move1 = item = lis1.pop(0)
            return (item[3], item[3] * 1.33)
        else:
            # The original checked len(lis) here; lis1 is the list in use.
            dura = lis1[0][0] - last_move1[0]
            now = t - last_move1[0]
            s = (lis1[0][3] - last_move1[3]) * (now / dura)
            nsw = last_move1[3] + s
            nsh = nsw * 1.33
            return (nsw, nsh)

    avatar = VideoFileClip("media/big_buck_bunny_432_433.webm", has_mask=True)
    avatar.audio = None
    maskclip = ImageClip("media/afterimage.png", ismask=True, transparent=True)
    avatar = avatar.set_mask(maskclip)  # set_mask returns a copy; assign it back
    concatenated = concatenate_videoclips([avatar] * 3)

    tt = VideoFileClip("media/big_buck_bunny_0_30.webm").subclip(0, 3)
    # TODO: Setting mask here does not work:
    # .set_mask(maskclip).resize(size)])
    final = CompositeVideoClip(
        [tt, concatenated.set_position(posi).resize(size)])
    final.duration = tt.duration
    final.write_videofile(os.path.join(TMP_DIR, "issue_334.mp4"), fps=10)
Example #5
File: swap.py Project: SEJUNHONG/Capstone
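# Assumed imports (not part of the original snippet); extract_index_nparray,
# facenet and src.align.detect_face come from this project's repository, and
# shape_predictor_68_face_landmarks.dat is dlib's standard 68-landmark model.
# (TensorFlow 1.x API.)
import collections
import os
import pickle

import cv2
import dlib
import numpy as np
import tensorflow as tf
from moviepy.editor import VideoFileClip

import facenet
import src.align.detect_face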
def main(filename, tmp_result, known_name):
    args = lambda: None
    args.video = True
    args.youtube_video_url = ''
    args.video_speedup = 2
    args.webcam = False

    minsize = 20
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709
    image_size = 182
    input_image_size = 160

    img = cv2.imread("test7.jpg")
    img = cv2.resize(img, (int(img.shape[1] * 0.6), int(img.shape[0] * 0.6)))
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    mask = np.zeros_like(img_gray)
    indexes_triangles = []

    # Use dlib's frontal face detector to find the faces in the source photo
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # Face 1
    faces = detector(img_gray)
    for face in faces:
        # print(face)
        landmarks = predictor(img_gray, face)
        landmarks_points = []
        for n in range(0, 68):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            landmarks_points.append((x, y))
    points = np.array(landmarks_points, np.int32)
    convexhull = cv2.convexHull(points)
    # cv2.polylines(img, [convexhull], True, (255, 0, 0), 3)
    cv2.fillConvexPoly(mask, convexhull, 255)

    face_image_1 = cv2.bitwise_and(img, img, mask=mask)

    # Delaunay triangulation
    rect = cv2.boundingRect(convexhull)
    subdiv = cv2.Subdiv2D(rect)
    subdiv.insert(landmarks_points)
    triangles = subdiv.getTriangleList()  # Delaunay triangles as flat vertex-coordinate rows
    triangles = np.array(triangles, dtype=np.int32)
    for t in triangles:
        pt1 = (t[0], t[1])  # the triangle's three vertex coordinates
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])
        # Map each vertex back to its landmark index (0-67)
        index_pt1 = np.where((points == pt1).all(axis=1))
        index_pt1 = extract_index_nparray(index_pt1)

        index_pt2 = np.where((points == pt2).all(axis=1))
        index_pt2 = extract_index_nparray(index_pt2)

        index_pt3 = np.where((points == pt3).all(axis=1))
        index_pt3 = extract_index_nparray(index_pt3)

        if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:
            triangle = [index_pt1, index_pt2, index_pt3]  # landmark indices of this triangle
            indexes_triangles.append(triangle)



    # comment out these lines if you do not want video recording
    # USE FOR RECORDING VIDEO

    fourcc = cv2.VideoWriter_fourcc(*'FMP4')

    # Get the path of the classifier and load it
    project_root_folder = "C:/Users/mmlab/PycharmProjects/UI_pyqt/"
    classifier_path = project_root_folder + 'trained_classifier/video_new_name_test4.pkl'
    print(classifier_path)
    with open(classifier_path, 'rb') as f:
        (model, class_names) = pickle.load(f)
        print("Loaded classifier file")

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            # Bounding box
            pnet, rnet, onet = src.align.detect_face.create_mtcnn(sess, project_root_folder + "src/align")
            # Get the path of the facenet model and load it
            facenet_model_path = project_root_folder + "20180402-114759/20180402-114759.pb"
            facenet.load_model(facenet_model_path)

            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            embedding_size = embeddings.get_shape()[1]

            # Start video capture
            people_detected = set()

            person_detected = collections.Counter()

            if args.webcam:
                video_capture = cv2.VideoCapture(0)
            else:
                video_path = project_root_folder
                video_name = "vlog"
                full_original_video_path_name = filename
                video_capture_path = full_original_video_path_name
                if not os.path.isfile(full_original_video_path_name):
                    print('Video not found at path ' + full_original_video_path_name + '. Commencing download from YouTube')
                    # Note if the video ever gets removed this will cause issues
                    #YouTube(args.youtube_video_url).streams.first().download(output_path =video_path, filename=video_name)
                video_capture = cv2.VideoCapture(full_original_video_path_name)
            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float

            videoclip = VideoFileClip(full_original_video_path_name)
            audioclip = videoclip.audio
            video_recording = cv2.VideoWriter(project_root_folder + 'final_video_swap.avi', fourcc, 13,(int(width), int(height)))
            output_video_name = project_root_folder + 'final_video_swap.avi'

            total_frames_passed = 0

            while True:
                try:
                    ret, frame = video_capture.read()
                except Exception as e:
                    break
                if ret:
                    # Skip frames if video is to be sped up
                    if args.video_speedup:
                        total_frames_passed += 1
                        if total_frames_passed % args.video_speedup != 0:
                            continue

                    bounding_boxes, _ = src.align.detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
                    if bounding_boxes is not None:
                        print('maps:' + str(bounding_boxes))
                        faces_found = bounding_boxes.shape[0]

                        if faces_found > 0:
                            det = bounding_boxes[:, 0:4]

                            bb = np.zeros((faces_found, 4), dtype=np.int32)
                            for i in range(faces_found):
                                bb[i][0] = det[i][0]
                                bb[i][1] = det[i][1]
                                bb[i][2] = det[i][2]
                                bb[i][3] = det[i][3]

                                # Skip boxes that fall partly outside the frame
                                if bb[i][0] <= 0 or bb[i][1] <= 0 or bb[i][2] >= len(frame[0]) or bb[i][3] >= len(frame):
                                    print('face is out of range!')
                                    continue

                                cropped = frame[bb[i][1]:bb[i][3], bb[i][0]:bb[i][2], :]
                                scaled = cv2.resize(cropped, (input_image_size, input_image_size), interpolation=cv2.INTER_CUBIC)
                                # cv2.imshow("Cropped and scaled", scaled)
                                # cv2.waitKey(1)
                                scaled = facenet.prewhiten(scaled)
                                # cv2.imshow("\"Whitened\"", scaled)
                                # cv2.waitKey(1)

                                scaled_reshape = scaled.reshape(-1, input_image_size, input_image_size, 3)
                                feed_dict = {images_placeholder: scaled_reshape, phase_train_placeholder: False}
                                emb_array = sess.run(embeddings, feed_dict=feed_dict)
                                predictions = model.predict_proba(emb_array)
                                best_class_indices = np.argmax(predictions, axis=1)
                                best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
                                best_name = class_names[best_class_indices[0]]
                                print("Name: {}, Probability: {}".format(best_name, best_class_probabilities))
                                if best_class_probabilities[0] > 0.09:
                                    #cv2.rectangle(frame, (bb[i][0], bb[i][1]), (bb[i][2], bb[i][3]), (0, 255, 0), 2)    #boxing face
                                    text_x = bb[i][0]
                                    text_y = bb[i][3] + 20
                                    for j in range(len(known_name)):
                                        if class_names[best_class_indices[0]] == known_name[j]:
                                            img2=frame[bb[i][1]-10 : bb[i][3]+20, bb[i][0]-10: bb[i][2]+20]
                                            try:
                                                img2_gray=cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
                                                img2_new_face = np.zeros_like(img2)
                                                faces2 = detector(img2_gray)
                                                print(len(faces2))

                                                if len(faces2)>0:
                                                    face1=faces2[0]
                                                    landmarks = predictor(img2_gray, face1)
                                                    landmarks_points2 = []
                                                    for n in range(0, 68):
                                                        x = landmarks.part(n).x
                                                        y = landmarks.part(n).y
                                                        landmarks_points2.append((x, y))
                                                    # Convex hull of the target face's landmarks
                                                    # (computed once, outside the landmark loop)
                                                    points2 = np.array(landmarks_points2, np.int32)
                                                    convexhull2 = cv2.convexHull(points2)

                                                    lines_space_mask = np.zeros_like(img_gray)
                                                    lines_space_new_face = np.zeros_like(img2)

                                                    # Triangulation of both faces
                                                    for triangle_index in indexes_triangles:
                                                        # Triangulation of the first face
                                                        tr1_pt1 = landmarks_points[triangle_index[0]]
                                                        tr1_pt2 = landmarks_points[triangle_index[1]]
                                                        tr1_pt3 = landmarks_points[triangle_index[2]]
                                                        triangle1 = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32)

                                                        rect1 = cv2.boundingRect(triangle1)
                                                        (x, y, w, h) = rect1

                                                        cropped_triangle = img[y: y + h, x: x + w]
                                                        cropped_tr1_mask = np.zeros((h, w), np.uint8)

                                                        points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y],[tr1_pt2[0] - x, tr1_pt2[1] - y],[tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32)

                                                        cv2.fillConvexPoly(cropped_tr1_mask, points, 255)

                                                        # Triangulation of second face
                                                        tr2_pt1 = landmarks_points2[triangle_index[0]]
                                                        tr2_pt2 = landmarks_points2[triangle_index[1]]
                                                        tr2_pt3 = landmarks_points2[triangle_index[2]]
                                                        triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32)

                                                        rect2 = cv2.boundingRect(triangle2)
                                                        (x, y, w, h) = rect2

                                                        # Clamp the triangle's bounding rect to non-negative values
                                                        x, y, w, h = max(x, 0), max(y, 0), max(w, 0), max(h, 0)
                                                        rect2 = (x, y, w, h)
                                                        print(rect2)
                                                        cropped_tr2_mask = np.zeros((h, w), np.uint8)

                                                        points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y],[tr2_pt2[0] - x, tr2_pt2[1] - y],[tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32)

                                                        cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)

                                                        # Warp triangles
                                                        points = np.float32(points)
                                                        points2 = np.float32(points2)
                                                        M = cv2.getAffineTransform(points, points2)
                                                        warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h))
                                                        warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle,mask=cropped_tr2_mask)

                                                        # Reconstructing destination face
                                                        img2_new_face_rect_area = img2_new_face[y: y + h, x: x + w]
                                                        img2_new_face_rect_area_gray = cv2.cvtColor(img2_new_face_rect_area,cv2.COLOR_BGR2GRAY)
                                                        _, mask_triangles_designed = cv2.threshold(img2_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)
                                                        print(len(warped_triangle))
                                                        print(len(mask_triangles_designed))

                                                        if len(warped_triangle) == len(mask_triangles_designed):
                                                            warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=mask_triangles_designed)

                                                        img2_new_face_rect_area = cv2.add(img2_new_face_rect_area, warped_triangle)
                                                        img2_new_face[y: y + h, x: x + w] = img2_new_face_rect_area

                                                    img2_face_mask = np.zeros_like(img2_gray)
                                                    img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255)
                                                    img2_face_mask = cv2.bitwise_not(img2_head_mask)

                                                    img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask)
                                                    result = cv2.add(img2_head_noface, img2_new_face)

                                                    (x, y, w, h) = cv2.boundingRect(convexhull2)
                                                    center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2))

                                                    seamlessclone = cv2.seamlessClone(result, img2, img2_head_mask, center_face2, cv2.MIXED_CLONE)
                                                    frame[bb[i][1]-10 : bb[i][3]+20, bb[i][0]-10: bb[i][2]+20]=seamlessclone
                                                    cv2.imshow("result", result)
                                            except Exception as e:
                                                print(e)

                                    # cv2.putText(frame, class_names[best_class_indices[0]], (text_x, text_y),cv2.FONT_HERSHEY_COMPLEX_SMALL,1, (0, 0, 255), thickness=1, lineType=2)
                                    # person_detected[best_name] += 1

                                # total_frames_passed += 1
                                # if total_frames_passed == 2:
                            for person, count in person_detected.items():
                                if count > 4:
                                    print("Person Detected: {}, Count: {}".format(person, count))
                                    people_detected.add(person)
                                # person_detected.clear()
                                total_frames_passed = 0

                            # cv2.putText(frame, "People detected so far:", (20, 20), cv2.FONT_HERSHEY_PLAIN,
                            #             1, (255, 0, 0), thickness=1, lineType=2)
                            currentYIndex = 40
                            for idx, name in enumerate(people_detected):
                                cv2.putText(frame, name, (20, currentYIndex + 20 * idx), cv2.FONT_HERSHEY_PLAIN,
                                            1, (0, 0, 255), thickness=1, lineType=2)
                            cv2.imshow("Face Detection and Identification", frame)
                            video_recording.write(frame)
                            if cv2.waitKey(1) & 0xFF == ord('q'):
                                break
                else:
                    break
    video_recording.release()
    video_capture.release()
    cv2.destroyAllWindows()
    videoclip2 = VideoFileClip(output_video_name)
    videoclip2.audio = audioclip
    videoclip2.write_videofile(tmp_result)
Example #6
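# Assumed imports (not part of the original snippet); facenet and
# src.align.detect_face come from this project's repository (TensorFlow 1.x API).
import os
import pickle

import cv2
import numpy as np
import tensorflow as tf
from moviepy.editor import VideoFileClip

import facenet
import src.align.detect_face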
def main(filename, tmp_result):
    args = lambda: None  # simple attribute container
    args.video = True
    args.youtube_video_url = ''
    args.video_speedup = 2
    args.webcam = False

    cluster_root = 'C:/Users/mmlab/PycharmProjects/UI_pyqt/cluster_people'
    num_clusters = len(os.listdir(cluster_root))

    # Total number of face crops across all "<i>human" cluster folders
    a = 0
    for i in range(num_clusters):
        a += len(os.listdir('{}/{}human'.format(cluster_root, i)))

    print(num_clusters)
    folder = []
    folder_name = []
    folder_in_file = []
    under_folder = []
    for i in range(num_clusters):
        b = len(os.listdir('{}/{}human'.format(cluster_root, i)))
        d = int(b / a * 100)  # this cluster's share of all face crops, in percent
        print(d)
        folder.append('{}/{}human'.format(cluster_root, i))
        folder_name.append(i)
        if d < 30:
            under_folder.append(str(i) + 'human')
    for name in under_folder:
        print(name)
    for i in range(len(folder)):
        print(folder[i])
        print(folder_name[i])
        if int(folder_name[i]) > 0:
            file = os.path.join('{}/{}human'.format(cluster_root, int(folder_name[i])),
                                str(folder_name[i]) + 'human1.png')
            folder_in_file.append(file)

    minsize = 20
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709
    input_image_size = 160

    # comment out these lines if you do not want video recording
    # USE FOR RECORDING VIDEO
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')

    # Get the path of the classifier and load it
    project_root_folder = 'C:/Users/mmlab/PycharmProjects/UI_pyqt/'
    classifier_path = project_root_folder + 'trained_classifier/video_new_name_test4.pkl'

    with open(classifier_path, 'rb') as f:
        (model, class_names) = pickle.load(f)

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            # Bounding box
            pnet, rnet, onet = src.align.detect_face.create_mtcnn(
                sess, project_root_folder + "src/align")
            # Get the path of the facenet model and load it
            facenet_model_path = project_root_folder + "20180402-114759/20180402-114759.pb"
            facenet.load_model(facenet_model_path)

            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            # Start video capture

            video_capture = cv2.VideoCapture(filename)
            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)  # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float

            videoclip = VideoFileClip(filename)
            audioclip = videoclip.audio
            # output file name
            video_recording = cv2.VideoWriter(
                project_root_folder + 'final_video_mosaic.avi', fourcc, 15,
                (int(width), int(height)))
            output_video_name = project_root_folder + 'final_video_mosaic.avi'

            total_frames_passed = 0

            # Main loop: blur every recognized face whose identity is not in known_name
            while True:
                try:
                    ret, frame = video_capture.read()
                except Exception as e:
                    break
                if ret:
                    # Skip frames if video is to be sped up
                    if args.video_speedup:
                        total_frames_passed += 1
                        if total_frames_passed % args.video_speedup != 0:
                            continue

                    bounding_boxes, _ = src.align.detect_face.detect_face(
                        frame, minsize, pnet, rnet, onet, threshold, factor)
                    frame_track = []
                    faces_found = bounding_boxes.shape[0]
                    known_name = ['1human']  # identities to leave unblurred
                    if faces_found > 0:
                        det = bounding_boxes[:, 0:4]

                        bb = np.zeros((faces_found, 4), dtype=np.int32)
                        for i in range(faces_found):
                            bb[i][0] = det[i][0]
                            bb[i][1] = det[i][1]
                            bb[i][2] = det[i][2]
                            bb[i][3] = det[i][3]

                            # Skip boxes that fall partly outside the frame
                            if (bb[i][0] <= 0 or bb[i][1] <= 0
                                    or bb[i][2] >= frame.shape[1]
                                    or bb[i][3] >= frame.shape[0]):
                                print('face is out of range!')
                                continue
                                continue

                            cropped = frame[bb[i][1]:bb[i][3],
                                            bb[i][0]:bb[i][2], :]
                            scaled = cv2.resize(
                                cropped, (input_image_size, input_image_size),
                                interpolation=cv2.INTER_CUBIC)

                            scaled = facenet.prewhiten(scaled)

                            scaled_reshape = scaled.reshape(
                                -1, input_image_size, input_image_size, 3)
                            feed_dict = {
                                images_placeholder: scaled_reshape,
                                phase_train_placeholder: False
                            }
                            emb_array = sess.run(embeddings,
                                                 feed_dict=feed_dict)
                            predictions = model.predict_proba(emb_array)
                            best_class_indices = np.argmax(predictions, axis=1)
                            best_class_probabilities = predictions[
                                np.arange(len(best_class_indices)),
                                best_class_indices]

                            if best_class_probabilities[0] > 0.09:
                                for y in range(len(known_name)):
                                    if class_names[best_class_indices[0]] != known_name[y]:
                                        # Blur any face that is not a known identity
                                        frame[bb[i][1] + 5:bb[i][3] - 5,
                                              bb[i][0] + 2:bb[i][2] - 2] = cv2.blur(
                                                  frame[bb[i][1] + 5:bb[i][3] - 5,
                                                        bb[i][0] + 2:bb[i][2] - 2],
                                                  (50, 50))

                    video_recording.write(frame)
                    frame_track.append(frame)
                else:
                    break
    print("mosaiced")
    video_recording.release()
    video_capture.release()
    cv2.destroyAllWindows()
    videoclip2 = VideoFileClip(output_video_name)
    videoclip2.audio = audioclip
    videoclip2.write_videofile("tmp_result1.mp4")
    print("done")
Example #7
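            # Fragment from a larger submission-processing loop. Assumed context
            # (not part of the snippet): filename, download_dir, unique_id,
            # video_title, submission and processed_submissions come from the
            # enclosing loop, and slow_and_reverb() / download_gif() are helpers
            # defined elsewhere in the project; AudioFileClip and VideoFileClip
            # are moviepy classes.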
            audio_output_path = filename.replace('.mp3', '-manipulated.wav')
            print("SLOWING AND REVERBING")
            slow_and_reverb(input_path=filename, output_path=audio_output_path)
            os.remove(filename)
            print("DONE SLOWING AND REVERBING")

            print("MAKING VIDEO")
            video_output_path = filename.replace('.mp3', '.gif')
            download_gif(video_output_path)
            soundtrack = AudioFileClip(audio_output_path)
            videoclip = VideoFileClip(video_output_path).loop(
                duration=soundtrack.duration)

            final_path = download_dir + '/' + unique_id + '/' + video_title + ' - Slowed And Reverbed.mp4'
            videoclip.audio = soundtrack
            videoclip.write_videofile(final_path,
                                      codec='mpeg4',
                                      audio_bitrate="320k")
            print("DONE MAKING VIDEO")
            youtube_title = video_title + ' - Slowed And Reverbed'

            os.remove(audio_output_path)
            os.remove(video_output_path)
            processed_submissions.append(submission)

            # list.extend() returns None, so concatenate with + instead
            keywords = youtube_title.split(' ') + ['slowed', 'reverbed']
            options = {
                'snippet': {
                    'title': youtube_title,
                    'description': '😈',
Example #8
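# Assumed imports and context (not part of the original snippet): `reddit` is a
# praw.Reddit instance and dl_clip(url, name) is a Twitch-clip downloader
# defined elsewhere in the project.
import os
import re

import requests
from moviepy.editor import AudioFileClip, CompositeAudioClip, VideoFileClip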
def downloadRedditVideos(subreddit, time=1000, filter="month", output="output.mp4"):
    # `time` is the target total duration in seconds; clear out old downloads first
    for filename in os.listdir("videos/"):
        file_path = os.path.join("videos/", filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
        except Exception as e:
            print("Failed to delete %s. Reason: %s" % (file_path, e))

    addTime = 0
    i = 0
    # `is` tests identity, not equality; use == for the numeric menu values
    if subreddit == 1:
        subreddit = "perfectlycutscreams"
    elif subreddit == 2:
        subreddit = "watchpeopledieinside"
    elif subreddit == 3:
        subreddit = "contagiouslaughter"
    elif subreddit == 4:
        subreddit = "livestreamfail"
    elif subreddit == 5:
        subreddit = "whatcouldgowrong"

    for submission in reddit.subreddit(subreddit).hot(limit=500):
        if submission.media is not None:
            if (
                "https://clips.twitch.tv/" in submission.url
                and "tt_" not in submission.url
            ):
                if addTime < time:
                    dl_clip(submission.url, str(i).rjust(2, "0"))
                    videoD = VideoFileClip(
                        "videos/" + str(i).rjust(2, "0") + ".mp4"
                    ).duration
                    addTime += videoD
                    i += 1
            elif "reddit_video" in submission.media:
                if (
                    addTime < time
                    and submission.media["reddit_video"]["duration"] < 200
                ):
                    video = submission.media["reddit_video"]["fallback_url"]
                    v = requests.get(video)

                    with open("tmp/video.mp4", "wb") as f:
                        f.write(v.content)
                    # Reddit hosts audio separately; try both known audio URL forms
                    a = requests.get(re.sub("[^/]*$", "audio", video, 1))
                    if a.status_code != 200:
                        a = requests.get(re.sub("[^/]*$", "DASH_audio.mp4", video, 1))
                    if a.status_code != 200:
                        # No audio track available: keep the silent video as-is
                        with open("videos/" + str(i).rjust(2, "0") + ".mp4", "wb") as f:
                            f.write(v.content)
                    else:
                        with open("tmp/audio.mp4", "wb") as f:
                            f.write(a.content)
                        combined = VideoFileClip("tmp/video.mp4")
                        combined.audio = CompositeAudioClip([AudioFileClip("tmp/audio.mp4")])
                        combined.write_videofile(
                            "videos/" + str(i).rjust(2, "0") + ".mp4",
                            temp_audiofile="tmp/tmp_audio.mp3",
                        )
                        # Only remove the audio temp file when it was actually written
                        os.unlink("tmp/audio.mp4")

                    os.unlink("tmp/video.mp4")

                    addTime += submission.media["reddit_video"]["duration"]
                    print("Video seconds: " + str(addTime))
                    i += 1
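
The `output` parameter above is never used in the snippet, so the downloaded clips are presumably stitched together in a later step. A minimal sketch of that step, assuming moviepy 1.x and the videos/ layout used above (concatVideos is a hypothetical helper, not from the original project):

import os

from moviepy.editor import VideoFileClip, concatenate_videoclips

def concatVideos(output="output.mp4"):
    # Concatenate the numbered clips downloaded into videos/, in order
    paths = sorted(os.path.join("videos", f)
                   for f in os.listdir("videos") if f.endswith(".mp4"))
    clips = [VideoFileClip(p) for p in paths]
    final = concatenate_videoclips(clips, method="compose")
    final.write_videofile(output)
    for clip in clips:
        clip.close()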