Example #1
def video_to_images(input_video, output_dir, skip=1):
    import os

    import cv2
    from imutils.video import count_frames
    from tqdm import tqdm

    skip = int(skip)
    # Open the video at the specified path
    cam = cv2.VideoCapture(input_video)
    total_frames = count_frames(input_video)
    os.makedirs(output_dir, exist_ok=True)

    # Read every frame sequentially; write only every `skip`-th frame
    for current_frame in tqdm(range(total_frames)):
        ret, frame = cam.read()
        if not ret:
            # stop once the video runs out of frames
            break
        if current_frame % skip == 0:
            name = os.path.join(output_dir, f'{current_frame:05d}.jpg')
            cv2.imwrite(name, frame)

    # Release the capture and close any OpenCV windows once done
    cam.release()
    cv2.destroyAllWindows()
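
As a point of comparison, when imutils is unavailable the frame count can usually be read straight from the container metadata with plain OpenCV. A minimal sketch, assuming the helper name fast_frame_count (ours, not from the examples); note that some containers report this property inaccurately, which is exactly what count_frames' override flag exists for:

import cv2

def fast_frame_count(path):
    # CAP_PROP_FRAME_COUNT comes from container metadata, avoiding the
    # frame-by-frame walk that count_frames falls back to
    cap = cv2.VideoCapture(path)
    try:
        return int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    finally:
        cap.release()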
Example #2
def assert_video_frames_appropriate_for_benchmark(config):
    """Asserts the number of frames in the video are appropriate for the benchmark"""
    print("counting frames in input")
    frame_count = count_frames(config["video_path"])
    print("frames counted: ", frame_count)
    assert (config["n_frames"] * config[
        "downsample"]) <= frame_count, "The provided video must have at least n_frames"
    return frame_count
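
A minimal usage sketch, assuming a config dict with the three keys the function reads (the values here are illustrative):

# hypothetical configuration for the assertion above
config = {
    "video_path": "input.mp4",  # illustrative path
    "n_frames": 100,            # frames the benchmark consumes
    "downsample": 2,            # temporal downsampling factor
}
# passes only if input.mp4 has at least 100 * 2 = 200 frames
frame_count = assert_video_frames_appropriate_for_benchmark(config)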
Example #3
    def run_app(self, filename):
        print('FINAL FILENAME:', filename)
        if os.path.isfile(filename):
            filename = filename + '1'
        #self.unzip()
        print(self.FILES)

        #FOLDER = './Desktop/VMET/test/'
        DIM = (1920, 1080)

        # Find the number of videos
        num_vid = len(self.FILES)

        # Find number of frames in videos
        first = self.FILES[0]

        if self.cancelled:
            return
        num_frames = count_frames(first, override=False)
        # Calculate total progress bar size
        total = num_frames + num_vid + num_frames

        if self.cancelled:
            return
        self.clear_extra_files()
        if self.cancelled:
            return
        num_vid, height, width = self.resize_all()
        if self.cancelled:
            return
        self.tile_video(num_vid, height, width, 'video_temp.mp4', num_frames,
                        total)
        if self.cancelled:
            return
        self.mix_audio('audio_temp.mp3', total)
        if self.cancelled:
            return
        self.combine('video_temp.mp4', 'audio_temp.mp3', filename,
                     total)  #FINAL WRITE
        if self.cancelled:
            return
        self.remove_temp_files('video_temp.mp4', 'audio_temp.mp3')
Example #4
    def insert_database_encode_from_video(self, name, videopath, num_sample=3):
        """
        Create detected, aligned and encoding face, 
        write into database folder,
        inserted into encodings.pickle
        """
        from imutils.video import count_frames
        random_number = 20  # how many random frames to probe for faces
        max_faces = num_sample

        nframe = count_frames(videopath)
        # sample random frame indices across the whole video
        random_frame = [random.randint(1, nframe) for _ in range(random_number)]

        dface = 0
        cap = cv2.VideoCapture(videopath)
        for frame_no in random_frame:
            # seek to the sampled frame, then read it
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
            print('Get face on frame: ', int(cap.get(cv2.CAP_PROP_POS_FRAMES)))
            _, frame = cap.read()

            self.set_image_to_be_processed(frame)
            self.detect_faces()
            self.align_faces(target_face_percent=0.25)
            if self.faces_aligned:
                e = self.generate_encoding(self.faces_aligned[0])
                self.insert_encoding(e, name)
                self.save_faces_detected(filename_suffix=name)
                self.save_faces_normalized(filename_suffix=name)
                dface += 1

            if dface >= max_faces:
                break

        cap.release()
        print('Done')
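
A hedged usage sketch; the class name FaceEncoder and the paths are illustrative, since the example only shows the method itself:

fe = FaceEncoder()  # hypothetical class that owns this method
fe.insert_database_encode_from_video('alice', 'videos/alice.mp4', num_sample=3)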
Example #5
def perform_analysis(media_file_path, colors_df, is_video, match_colors,
                     show_progress):
    msg = """
            This may take awhile, but it will run in the background.
            A pop-up will appear when the analysis is done.
            Click OK to begin the analysis.
            """
    showinfo('Analysis Started', msg)
    if is_video:
        cap = cv2.VideoCapture(media_file_path)
        num_frames = count_frames(media_file_path)
        frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        scale = 1
        if frame_width > 100 or frame_height > 100:
            scale = get_scale(100, frame_width, frame_height)
        new_width = int(scale * frame_width)
        new_height = int(scale * frame_height)
        num_pixels = num_frames * new_width * new_height

        if show_progress:
            with alive_bar(num_pixels) as bar:
                for y in analyze_video_with_progress(cap, colors_df,
                                                     match_colors):
                    bar()
        else:
            analyze_video(cap, colors_df, match_colors)

        cap.release()
    else:
        img = cv2.imread(media_file_path)
        img = resize_image(img, is_video)
        if show_progress:
            with alive_bar(img.shape[0] * img.shape[1]) as bar:
                for y in analyze_image_with_progress(img, colors_df,
                                                     match_colors):
                    bar()
        else:
            analyze_image(img, colors_df, match_colors)
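
get_scale is not shown in this example; a plausible sketch consistent with how it is called above (an assumption on our part, not the author's code) would return the factor that caps the larger dimension:

def get_scale(max_dim, width, height):
    # hypothetical implementation: shrink factor that caps the larger
    # dimension at max_dim while preserving the aspect ratio
    return max_dim / max(width, height)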
Example #6
    def find_scenes(self):
        # Instantiate scenedetect objects to detect scenes using ContentDetector.
        # input:  string - video path
        # return: scene_list in FrameTimecode format; see the detailed sample/explanation below.
        video_manager = VideoManager([self.video_path])
        fps = video_manager.get_framerate()
        nFrames = count_frames(self.video_path)

        stats_manager = StatsManager()
        scene_manager = SceneManager(stats_manager)

        # Select ContentDetector to detect scenes.
        # Threshold = 30 by default; set it lower (say, 27) if the footage is darker.
        # It can be tuned by checking the output scene timecodes or generated images against the video.
        scene_manager.add_detector(ContentDetector())
        base_timecode = video_manager.get_base_timecode()

        scene_list = []

        try:
            # set downscale factor according to resolution ratio to improve speed
            video_manager.set_downscale_factor()
            video_manager.start()
            # scene detection on video_manager(video_path)
            scene_manager.detect_scenes(frame_source=video_manager)
            # scene_list = scene_manager.get_cut_list(base_timecode)
            scene_list = scene_manager.get_scene_list(base_timecode)
        finally:
            video_manager.release()

        if not scene_list:
            scene_list = [(FrameTimecode(0, fps), FrameTimecode(nFrames, fps))]

        # return a list of tuple to indicate each scene start & end frame number in FrameTimecode
        # looks like:       [(FrameTimecode(frame=0, fps=4.358900), FrameTimecode(frame=68, fps=4.358900))]
        # another examples: [(FrameTimecode(frame=0, fps=23.976024), FrameTimecode(frame=90, fps=23.976024)), ...,
        #                    (FrameTimecode(frame=1966, fps=23.976024), FrameTimecode(frame=1980, fps=23.976024))
        #                   ]
        return scene_list
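
A short consumption sketch: each tuple bounds one scene, and FrameTimecode exposes the frame number via get_frames() (the instance name detector is illustrative):

for start, end in detector.find_scenes():
    # print each scene as a [start, end) frame range
    print('scene: frames {} - {}'.format(start.get_frames(), end.get_frames()))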
Example #7
    def create_database_from_video(self, videopath, num_sample=3):
        """
        Create aligned & detected face, write into database folder
        """
        from imutils.video import count_frames
        random_number = 20  # how many random frames to probe for faces
        max_faces = num_sample

        nframe = count_frames(videopath)
        # sample random frame indices across the whole video
        random_frame = [random.randint(1, nframe) for _ in range(random_number)]

        dface = 0
        cap = cv2.VideoCapture(videopath)
        for frame_no in random_frame:
            # seek to the sampled frame, then read it
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
            print('Get face(s) on frame: ',
                  int(cap.get(cv2.CAP_PROP_POS_FRAMES)))
            _, frame = cap.read()

            self.set_image_to_be_processed(frame)
            self.detect_faces()
            self.align_faces(target_face_percent=0.28)
            if self.faces_aligned:
                self.save_faces_detected(filename_suffix='FROM_VIDEO')
                self.save_faces_normalized(filename_suffix='FROM_VIDEO')
                dface += 1

            if dface >= max_faces:
                break

        cap.release()
        print('Done')
Example #8

def countFrames(vidFile):
    # count the total number of frames in the video file
    from imutils.video import count_frames
    total = count_frames(vidFile, override=False)
    return total
"""
Created on Sat Sep 19 18:06:48 2020
@author: ISH KAPOOR
"""
# import the necessary packages
from imutils.video import count_frames
import argparse
import os
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v",
                "--video",
                required=True,
                help="path to input video file")
ap.add_argument("-o",
                "--override",
                type=int,
                default=-1,
                help="whether to force manual frame count")
args = vars(ap.parse_args())
# count the total number of frames in the video file
override = False if args["override"] < 0 else True
total = count_frames(args["video"], override=override)
# display the frame count to the terminal
print("[INFO] {:,} total frames read from {}".format(
    total, args["video"][args["video"].rfind(os.path.sep) + 1:]))
'''
python count_frames.py --video Platformer_Demo_L_1.webm
'''
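
For context on the override flag used above: with override=False, count_frames trusts the frame count reported by the container, while override=True makes it decode the video and count frames one by one, which is slower but robust to bad metadata:

from imutils.video import count_frames

total_fast = count_frames("clip.mp4")                 # trust container metadata
total_slow = count_frames("clip.mp4", override=True)  # manual frame-by-frame count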
Example #10
                                                                         keras.metrics.Precision(),
                                                                         keras.metrics.Recall(),
                                                                         keras.metrics.AUC()
])
precision_list = []
recall_list = []
avrg_fps_all_videos = []
f1_list = []
for i, video in enumerate(args["video"].split(',')):
  print('#######################')
  print("starting video " + str(i + 1))
  vs = cv2.VideoCapture(video)
  path = Path(video)
  gt_file = path.parent.parent
  gt_file = str(gt_file) + '/GroundTruth/face_bb.txt'
  last_frame = count_frames(video) - 1
  #print("Last frame of video = "+str(last_frame))
  dict_faces, positive_windows = lib.parse_ground_truth(gt_file,last_frame)
  firstFrame = None
  subtractor = cv2.createBackgroundSubtractorMOG2(history=50, varThreshold=200, detectShadows=True)
  width_avg_list = []
  fps_video = []
  frame_nb = -1
  prev_frame_time = 0
  new_frame_time = 0
  while True:
      # grab the next frame from the video stream
      frame = vs.read()
      frame = frame[1]
      if frame is None:
Example #11

'''
Gesture classes:
  0 : Background
  1 : Next
  2 : Prev
  3 : Stop
'''

import sys

from imutils.video import count_frames

def countFrames(vidFile):
    # count the total number of frames in the video file
    total = count_frames(vidFile, override=False)
    return total

dataset = sys.argv[1] 
typeVid = sys.argv[2] # example handDetected(train_bg.mp4)  or raw(train.mp4)

classes = ["/background/","/next/","/prev/","/stop/"]
classes = [dataset +c for c in classes]
for clss in classes:
  print(clss+"train.mp4",count_frames(clss+typeVid))

labels = [0,1,2,3]

# PyTorch code starting from here
import torch

from network import *


# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

def save_models(epoch):
    torch.save(net.state_dict(), "./Models/SimpleMusicModel_{}.model".format(epoch))
    print("Checkpoint saved:", epoch)

Example #12
def generateVideo(boxes, statusWindow):
    path = 'videos/p2.mp4'
    vs = cv2.VideoCapture(path)
    fps = 12
    capSize = (640, 360)

    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')

    out = cv2.VideoWriter()
    success = out.open('./teste5.mp4', fourcc, fps, capSize, True)

    num_frames = count_frames(path)
    #print(num_frames)

    get_point = 0
    i = 0
    # s latches True once the availability caption has been shown for over
    # 100 frames; initialize it so the first pass cannot hit an undefined name
    s = False

    try:
        while i < num_frames:
            ret, frame = vs.read()

            frame = imutils.resize(frame, width=450)

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #cv2.imshow("gray ", gray )

            gau = cv2.GaussianBlur(gray, (7, 7), 0)

            img_resize = image_utils.getRotateRect(gau, boxes)
            feature = image_utils().extract_features(img_resize)

            timestamp = datetime.datetime.now()

            score = SVM().predict(feature)

            available_slots = []

            for index, scr in enumerate(score):
                if scr == 0:
                    cv2.polylines(frame, np.int32([boxes[index]]), True,
                                  (0, 0, 255), 2)
                    s = False
                    i = 0
                else:
                    # if the parking slot is available

                    slot_first_x_cord = float(boxes[index][1][0])

                    if (checkMinimum(available_slots, slot_first_x_cord)):
                        cv2.polylines(frame, np.int32([boxes[index]]), True,
                                      (255, 0, 0), 2)

                        if not s:
                            cv2.putText(
                                frame,
                                timestamp.strftime(
                                    " Available: %d %m %Y %I:%M:%S"),
                                (10, frame.shape[0] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 0, 0), 2)

                            i += 1
                            if i > 100:
                                s = True

                    else:
                        cv2.polylines(frame, np.int32([boxes[index]]), True,
                                      (0, 255, 0), 2)

                        if not s:
                            cv2.putText(
                                frame,
                                timestamp.strftime(
                                    " Available: %d %m %Y %I:%M:%S"),
                                (10, frame.shape[0] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
                            i += 1
                            if i > 100:
                                s = True

                    msg = "Parking slots are available "
                    clr = 'green'
                    updateStatusWindow(msg, clr)

                    available_slots.append(slot_first_x_cord)

            if not available_slots:
                msg = "Parking slots are not available "
                clr = 'red'
                updateStatusWindow(msg, clr)

            cv2.imshow("frame", frame)

            key = cv2.waitKey(10) & 0xFF

            if key == ord("q"):
                break

            if key == ord('p'):
                # plot a histogram of the current grayscale frame
                hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
                plt.plot(hist)
                plt.show()

            i += 1
    except KeyboardInterrupt:
        pass
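
One caveat worth noting in this example: the writer is opened with capSize (640, 360), but each frame is resized to width 450 and the loop never actually calls out.write, so teste5.mp4 stays empty. OpenCV's VideoWriter typically fails silently when a frame's size differs from the size it was opened with, so a hedged sketch of the missing write step would be:

# resize back to the size the writer was opened with before writing
# (illustrative; this call does not appear in the original loop)
out.write(cv2.resize(frame, capSize))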
Example #13
import datetime

import cv2
import imutils
from imutils.video import count_frames

from ferramentas import image_utils
from svm import SVM

path = 'videos/p2.mp4'
vs = cv2.VideoCapture(path)
fps = 12
capSize = (640, 360)
#capSize = (1920,1080)
#fourcc = cv2.VideoWriter_fourcc(*'DIVX')
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
#fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter()
success = out.open('./teste5.mp4', fourcc, fps, capSize, True)

num_frames = count_frames(path)
print(num_frames)

get_point = 0
i = 0

while i < num_frames:
    ret, frame = vs.read()

    frame = imutils.resize(frame, width=450)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #cv2.imshow("gray ", gray )

    gau = cv2.GaussianBlur(gray, (7, 7), 0)
    #cv2.imshow("gau ", gau )
Example #14

def flm_detector(video_path,
                 output_path,
                 output_as_video=False,
                 output_flm_video=False,
                 output_flm_npy=False,
                 dim=2):
    cv_plot = False

    # face mesh settings
    mpDraw = mp.solutions.drawing_utils
    mpFaceMesh = mp.solutions.face_mesh
    faceMesh = mpFaceMesh.FaceMesh(max_num_faces=1)
    # drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
    drawSpec = get_face_landmark_style()
    black_drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=1)
    connection_drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)

    frame_id = 0
    pTime = 0

    cap = cv2.VideoCapture(video_path)
    total_frame = count_frames(video_path)
    # total_FLms, camera_fps, width, and height are module-level globals
    # defined elsewhere in this source file
    all_FLms = np.zeros((total_frame, total_FLms, dim))
    if output_as_video or output_flm_video:
        # video_output = os.path.join(output_path,
        #                             "{}_landmark_flm.avi".format(os.path.basename(os.path.normpath(output_path))))
        video_output = os.path.join(
            output_path, "{}_landmark.avi".format(
                os.path.basename(os.path.normpath(output_path))))
        fourcc = cv2.VideoWriter_fourcc(*'PIM1')
        video_writer = cv2.VideoWriter(str(video_output), fourcc, camera_fps,
                                       (width, height))

    while True:
        success, img = cap.read()
        # imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        if not success:
            # print("Finish")
            break
        imgRGB = img
        results = faceMesh.process(imgRGB)
        black_img = np.zeros(img.shape)

        if results.multi_face_landmarks:
            for faceLms in results.multi_face_landmarks:
                # mpDraw.draw_landmarks(img, faceLms, mpFaceMesh.FACE_CONNECTIONS, drawSpec, connection_drawSpec)
                mpDraw.draw_landmarks(img, faceLms, None, drawSpec,
                                      connection_drawSpec)
                mpDraw.draw_landmarks(black_img, faceLms, None, black_drawSpec,
                                      None)

            # max_num_faces=1 guarantees a single face, so `faceLms` from the
            # loop above refers to that face's landmarks
            np_faceLms = np.zeros((total_FLms, 2))
            for id, lm in enumerate(faceLms.landmark):
                ih, iw, ic = img.shape
                x, y = _normalized_to_pixel_coordinates(lm.x, lm.y, iw, ih)
                # x,y = int(lm.x*iw), int(lm.y*ih)
                # print(id,x,y)
                np_faceLms[id] = [x, y]

            all_FLms[frame_id] = np_faceLms

        # cv ploting
        if cv_plot:
            cTime = time.time()
            fps = 1 / (cTime - pTime)
            pTime = cTime
            cv2.putText(black_img, f'FPS: {int(fps)}', (20, 70),
                        cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
            WindowName = "Image"
            cv2.imshow("Image", black_img)
            cv2.setWindowProperty(WindowName, cv2.WND_PROP_TOPMOST, 1)
            cv2.waitKey(10)

        if output_flm_video:
            # b_img = np.copy(black_img)
            # rgb_img = cv2.cvtColor(black_img, cv2.COLOR_BGR2RGB)
            video_writer.write(black_img.astype('uint8'))
        elif output_as_video:
            # rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            video_writer.write(img)

        frame_id += 1

    cap.release()

    if output_as_video or output_flm_video:
        # Close the video writer
        video_writer.release()

    if output_flm_npy:
        flm_npy_path = os.path.join(
            output_path,
            "{}_flm".format(os.path.basename(os.path.normpath(output_path))))
        np.save(flm_npy_path, all_FLms)

    return all_FLms
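
A hedged usage sketch (the paths are illustrative; for MediaPipe's face mesh, total_FLms would be 468):

# run landmark detection and keep the per-frame landmark array
landmarks = flm_detector('input.mp4', 'out_dir', output_flm_npy=True)
print(landmarks.shape)  # (total_frame, total_FLms, dim)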
Example #15
    for face in faces:
        # fa.align's arguments are: the image to extract from, the grayscale
        # image used for detection, and the face to align
        faceAligned = fa.align(gray, gray, face)
        global face_name
        cv2.imwrite('./face/{0}.jpg'.format(face_name), faceAligned)
        face_name += 1

    print('Working with {0} frames. Completed: {1:.2f}%'.format(
        idx, idx / float(total) * 100))


detect_video = 'onionman.mp4'
videoCapture = cv2.VideoCapture(detect_video)
success, frame = videoCapture.read()
frame_counter = 1
frame_total = count_frames(detect_video)

path = 'face'
if not os.path.isdir(path):
    os.mkdir(path)
else:
    shutil.rmtree(path)
    os.mkdir(path)

while success:

    detect(frame, frame_counter, frame_total)
    success, frame = videoCapture.read()
    frame_counter += 1

print('Done!')