Example #1
def getTimeAxis(video_file_path, video_frame, srt_file_path, x, y, w, h, threshold=0.7):
    """
    (x, y, w, h) is the crop box of the subtitle region in the video.
    video_frame is the video frame rate (fps).
    :return: writes an SRT subtitle file containing the detected time axis.
    """
    myclip = VideoFileClip(video_file_path)
    dt = 1. / video_frame
    frames = int(myclip.duration / dt)
    keyFrames = [0]

    im1 = Image.fromarray(myclip.get_frame(0)).crop((x, y, x + w, y + h))
    for i in range(frames):
        im2 = Image.fromarray(myclip.get_frame(i * dt)).crop((x, y, x + w, y + h))
        print("\nimg: %d\timg: %d" % (i - 1, i), end='\t')
        ratio = calc_similar(im2, im1)
        print(ratio)
        if ratio < threshold:  # the similarity can exceed 1, so compare against a fixed threshold
            keyFrames.append(i)
        im1 = im2

    print(keyFrames)

    with open(srt_file_path, 'w') as f:
        for i in range(len(keyFrames) - 1):
            f.writelines(('%d' % i, '\n', getTime(keyFrames[i] * dt), ' --> ',
                          getTime(keyFrames[i + 1] * dt), '\n\n'))
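
A minimal call sketch, assuming calc_similar (an image-similarity helper) and getTime (a seconds-to-SRT-timestamp formatter) are defined alongside the function; the crop box values are illustrative:

# Scan a 1280x80 subtitle strip near the bottom of a 24 fps video and
# write the detected cue boundaries to out.srt.
getTimeAxis('input.mp4', 24, 'out.srt', x=0, y=640, w=1280, h=80)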
Example #2
class MoviePyReader(FramesSequence):
    class_priority = 4

    @classmethod
    def class_exts(cls):
        return {'mov', 'mp4', 'avi', 'mpeg', 'wmv', 'mkv'}

    def __init__(self, filename):
        if VideoFileClip is None:
            raise ImportError('The MoviePyReader requires moviepy to work.')
        self.clip = VideoFileClip(filename)
        self.filename = filename
        self._fps = self.clip.fps
        self._len = int(self.clip.fps * self.clip.end)

        first_frame = self.clip.get_frame(0)
        self._shape = first_frame.shape
        self._dtype = first_frame.dtype

    def get_frame(self, i):
        return Frame(self.clip.get_frame(i / self._fps), frame_no=i)

    def __len__(self):
        return self._len

    @property
    def frame_shape(self):
        return self._shape

    @property
    def frame_rate(self):
        return self._fps

    @property
    def pixel_type(self):
        return self._dtype  
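
Because the reader subclasses pims' FramesSequence, it behaves like an indexable sequence of frames; a short usage sketch (the file name is hypothetical):

reader = MoviePyReader('movie.mp4')
print(len(reader), reader.frame_shape, reader.frame_rate)
first = reader.get_frame(0)      # a pims Frame carrying frame_no metadata
last = reader[len(reader) - 1]   # FramesSequence supports integer indexing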
Example #4
def SelectFrames(videopath, filename, x1, x2, y1, y2, cropping, videotype,
                 start, stop, Task, selectionalgorithm):
    ''' Selecting frames from videos for labeling.'''
    if start > 1.0 or stop > 1.0 or start < 0 or stop < 0 or start >= stop:
        print(
            "Please change start & stop, they should form a normalized interval with 0<= start < stop<=1."
        )
    else:
        basefolder = 'data-' + Task + '/'
        auxiliaryfunctions.attempttomakefolder(basefolder)
        videos = auxiliaryfunctions.GetVideoList(filename, videopath,
                                                 videotype)
        for vindex, video in enumerate(videos):
            print("Loading ", video)
            clip = VideoFileClip(os.path.join(videopath, video))
            print("Duration of video [s], ", clip.duration, "fps, ", clip.fps,
                  "Cropped frame dimensions: ", clip.size)

            ####################################################
            # Creating folder with name of experiment and extract random frames
            ####################################################
            folder = video.split('.')[0]
            auxiliaryfunctions.attempttomakefolder(
                os.path.join(basefolder, folder))
            indexlength = int(np.ceil(np.log10(clip.duration * clip.fps)))
            # Extract the first frame (not cropped!) - useful for data augmentation
            index = 0
            image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
            io.imsave(
                os.path.join(basefolder, folder,
                             "img" + str(index).zfill(indexlength) + ".png"),
                image)

            if cropping == True:
                # Select ROI of interest by adjusting values in myconfig.py
                clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)
            print("Extracting frames ...")
            if selectionalgorithm == 'uniform':
                # numframes2pick is a module-level setting (e.g. from myconfig.py)
                frames2pick = frameselectiontools.UniformFrames(
                    clip, numframes2pick, start, stop)
            elif selectionalgorithm == 'kmeans':
                frames2pick = frameselectiontools.KmeansbasedFrameselection(
                    clip, numframes2pick, start, stop)
            else:
                print(
                    "Please implement this method yourself and send us a pull request!"
                )
                frames2pick = []

            indexlength = int(np.ceil(np.log10(clip.duration * clip.fps)))
            for index in frames2pick:
                try:
                    image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                    io.imsave(
                        os.path.join(
                            basefolder, folder,
                            "img" + str(index).zfill(indexlength) + ".png"),
                        image)
                except FileNotFoundError:
                    print("Frame # ", index, " does not exist.")
Example #5
class MoviePy(Loader):
    def __init__(self, video_path, parent_dir=''):
        super().__init__(video_path, parent_dir)
        self.video = VideoFileClip(self.path, audio=False)
        self.fps = self.video.fps

    def read_iter(self, batch_size=1, limit=None, stride=1, start=0):
        # start and limit should be in original frame indices
        images, image_ids = [], []
        # get_frame takes a time in seconds, so positions must stop at the
        # clip duration (not duration * fps).
        positions = np.arange(0, self.video.duration, stride / self.fps)
        start = start // stride
        end = start + limit // stride if limit is not None else None
        # Step through the skipped positions so the reader advances sequentially.
        for pos in positions[:start]:
            self.video.get_frame(pos)
        for image_id, pos in enumerate(positions[start:end]):
            image = self.video.get_frame(pos)
            image = np.ascontiguousarray(image[:, :, ::-1])
            image = torch.as_tensor(image)
            images.append(image)
            image_ids.append(image_id)
            if len(images) == batch_size:
                yield images, image_ids
                images, image_ids = [], []
        if len(images) > 0:
            yield images, image_ids
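
The iterator yields batches of BGR uint8 tensors with their frame ids; a usage sketch, assuming the Loader base class resolves self.path from the video path:

loader = MoviePy('movie.mp4')
for images, image_ids in loader.read_iter(batch_size=8, limit=64, stride=2):
    batch = torch.stack(images)  # (N, H, W, 3) uint8 tensor, BGR channel order
    print(image_ids, batch.shape)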
Example #6
def speed_test_moviepy(video_dir, video_list=VIDEO_LIST):
    from moviepy.editor import VideoFileClip
    for video_name in video_list:
        print('\t', video_name, flush=True)
        bar = ProgressBar().start()
        clip = VideoFileClip(osp.join(video_dir, video_name))
        for i in bar(range(int(clip.duration * clip.fps))):
            clip.get_frame(i / clip.fps)
Example #7
def speed_test_moviepy(video_dir, video_list=VIDEO_LIST):
    from moviepy.editor import VideoFileClip
    for video_name in video_list:
        print('\t', video_name, flush=True)
        start = time.time()
        clip = VideoFileClip(osp.join(video_dir, video_name))
        frame = 0
        for i in tqdm(range(int(round(clip.duration * clip.fps)))):
            clip.get_frame(i / clip.fps)
            frame += 1
        total = time.time() - start
        print('\t frame:', frame)
        print('\t', total, flush=True)
Example #8
def main():
    """Start here..."""

    # project_video.mp4
    project_video = VideoFileClip('./project_video.mp4')
    sample_img = project_video.get_frame(0)
    zone = LaneBoundaryZone(sample_img.shape[0], sample_img.shape[1])
    project_video_lane = project_video.fl_image(zone.locate_lane_bounds)
    project_video_lane.write_videofile(
        './output_videos/project_video_lane.mp4', audio=False)

    # challenge_video.mp4
    challenge_video = VideoFileClip('./challenge_video.mp4')
    sample_img = challenge_video.get_frame(0)
    zone = LaneBoundaryZone(sample_img.shape[0], sample_img.shape[1])
    zone.hough_threshold = 39
    zone.v_thresh_min = 165
    zone.s_thresh_min = 51
    zone.min_lane_slope = 0.6
    zone.y_thresh_min = 12
    zone.x_thresh_min = 8
    zone.min_windows_per_lane = 2
    zone.combine_pipeline_method = 'or_related_and_groups'
    challenge_video_lane = challenge_video.fl_image(zone.locate_lane_bounds)
    challenge_video_lane.write_videofile(
        './output_videos/challenge_video_lane.mp4', audio=False)

    # harder_challenge_video.mp4
    harder_challenge_video = VideoFileClip('./harder_challenge_video.mp4')
    sample_img = harder_challenge_video.get_frame(0)
    zone = LaneBoundaryZone(sample_img.shape[0], sample_img.shape[1])
    zone.hough_threshold = 39
    zone.v_thresh_min = 246
    zone.s_thresh_min = 73
    zone.min_lane_slope = 0.3
    zone.y_thresh_min = 21
    zone.x_thresh_min = 24
    zone.min_windows_per_lane = 2
    zone.min_lane_line_cluster_size = 7
    zone.horizon_frame_smooth_size = 6
    zone.lane_find_window_height = 10
    zone.lane_find_margin = 10
    zone.proportion_image_height_levels = 0.33
    zone.lane_curve_radius_min = 0
    zone.lane_diverge_max = 0.15
    zone.min_inverse_noise_factor = 5000
    zone.combine_pipeline_method = 'and_related_or_groups'
    harder_challenge_video_lane = harder_challenge_video.fl_image(
        zone.locate_lane_bounds)
    harder_challenge_video_lane.write_videofile(
        './output_videos/harder_challenge_video_lane.mp4', audio=False)
Example #9
        def process_clip():
            clip = VideoFileClip(file_path, target_resolution=[720, 1280])
            # Originally meant to take the last 10 seconds; this keeps the
            # first 10 instead (a negative end in subclip counts from the end).
            if clip.duration > 10:
                clip = clip.subclip(0, 10)

            safe_duration = max(0, clip.duration - 0.1)

            # Freeze-frame stuff
            freeze_frame_sound = AudioFileClip(
                "assets/wellberightback/sound.mp3")
            freeze_frame = ImageClip(clip.get_frame(safe_duration))\
                .fx(vfx.painting, black=0.001)\
                .fx(vfx.colorx, factor=0.8).set_duration(freeze_frame_sound.duration)
            text = ImageClip("assets/wellberightback/text.png")\
                .set_pos( lambda t: (50, 50) )
            freeze_compos = CompositeVideoClip([freeze_frame, text])\
                .set_duration(freeze_frame_sound.duration).set_audio(freeze_frame_sound)

            # Final clip
            final_clip = concatenate_videoclips([clip, freeze_compos])

            return final_clip, [
                clip, freeze_frame_sound, freeze_frame, text, freeze_compos
            ]
Example #10
class UI_Main(object):
    def setup_UI(self, MainWindow):
        MainWindow.setObjectName("MainWIndow")
        MainWindow.resize(800, 600)
        self.video = VideoFileClip(
            r'C:\Users\jklew\Videos\Music\Fractalia.MP4')
        self.centralWidget = QWidget(MainWindow)
        self.centralWidget.setObjectName("centralWidget")

        self.pixmap_label = QLabel(self.centralWidget)
        self.pixmap_label.setGeometry(QRect(0, 0, 841, 511))
        self.pixmap_label.setText("")
        im_np = self.video.get_frame(0)
        print(im_np.dtype)
        # Pass the row stride so QImage handles widths that are not 4-byte aligned.
        qimage = QImage(im_np, im_np.shape[1], im_np.shape[0],
                        im_np.strides[0], QImage.Format_RGB888)
        pixmap = QPixmap(qimage)
        # pixmap = pixmap.scaled(640,400, Qt.KeepAspectRatio)
        self.pixmap_label.setPixmap(pixmap)
        self.pixmap_label.setScaledContents(True)
        self.pixmap_label.setObjectName("Photo")
        MainWindow.setCentralWidget(self.centralWidget)
        self.retranslateUI(MainWindow)
        QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUI(self, MainWindow):
        _translate = QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
Example #11
def get_samples(subject_id):
    arousal_label_path = root_dir / 'Ratings_affective_behaviour_CCC_centred/arousal/{}.csv'.format(
        subject_id)
    valence_label_path = root_dir / 'Ratings_affective_behaviour_CCC_centred/valence/{}.csv'.format(
        subject_id)

    clip = VideoFileClip(
        str(root_dir / "Video_recordings_MP4/{}.mp4".format(subject_id)))

    subsampled_audio = clip.audio.set_fps(16000)
    video_frames = []
    audio_frames = []

    for i in range(1, 7501):
        time = 0.04 * i

        video = clip.get_frame(time)
        audio = np.array(
            list(subsampled_audio.subclip(time - 0.04,
                                          time).iter_frames())).mean(1)[:640]

        video_frames.append(video)
        audio_frames.append(audio.astype(np.float32))

    arousal = np.loadtxt(str(arousal_label_path), delimiter=',')[:, 1][1:]
    valence = np.loadtxt(str(valence_label_path), delimiter=',')[:, 1][1:]

    return video_frames, audio_frames, np.dstack([arousal, valence
                                                  ])[0].astype(np.float32)
Example #12
def _read_video_thumbnail(
    data: bytes,
    video_ext: str = "mp4",
    frame_ext: str = "png",
    max_size: Tuple[int, int] = (1024, 720)
) -> Tuple[bytes, int, int]:
    # We don't have any way to read the video from memory, so save it to disk.
    temp_file = _temp_file_name(video_ext)
    with open(temp_file, "wb") as file:
        file.write(data)

    # Read temp file and get frame
    clip = VideoFileClip(temp_file)
    frame = clip.get_frame(0)

    # Convert to png and save to BytesIO
    image = Image.fromarray(frame).convert("RGBA")
    thumbnail_file = BytesIO()
    if max_size:
        image.thumbnail(max_size, Image.ANTIALIAS)  # Image.LANCZOS in newer Pillow
    image.save(thumbnail_file, frame_ext)

    os.remove(temp_file)

    w, h = image.size
    return thumbnail_file.getvalue(), w, h
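
A quick way to exercise the helper, assuming an mp4 on disk:

with open('movie.mp4', 'rb') as f:
    data = f.read()
thumb_png, w, h = _read_video_thumbnail(data, video_ext='mp4', frame_ext='png')
print('thumbnail: %dx%d, %d bytes' % (w, h, len(thumb_png)))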
Example #13
def extract_faces(path):
    clip = VideoFileClip(path)
    num_frames = len(list(clip.iter_frames()))
    print(num_frames)
    video_fps = clip.fps  # use the clip's real fps; a hard-coded rate desyncs the time index
    for frame_number in range(num_frames):
        print(frame_number)
        pipeline(clip.get_frame(frame_number / video_fps), frame_number)
Example #14
class Demo(QWidget):
    def __init__(self):
        super().__init__()
        self.video = VideoFileClip(
            r'C:\Users\jklew\Videos\Music\Fractalia.MP4')
        im_np = self.video.get_frame(0)
        # Pass the row stride so QImage handles widths that are not 4-byte aligned.
        self.image = QImage(im_np, im_np.shape[1], im_np.shape[0],
                            im_np.strides[0], QImage.Format_RGB888)
Example #15
    def preprocess(self, seq_len=30, target_resolution=(224, 224)):
        """
        extract frames and audio from the video,
        store the cropped frames and audio file in the output folders
        seq_len: how many frames will be extracted from the video.
                 Since all videos in this dataset have a similar duration,
                 video_duration = seq_len / fps
        target_resolution: (desired_height, desired_width) of the facial frame extracted
        """
        video = VideoFileClip(self.video_path,
                              audio=self.extract_audio,
                              target_resolution=target_resolution)
        if self.extract_audio:
            video.audio.write_audiofile(
                os.path.join(self.audios_folder, "audio.wav"))

        times = list(np.arange(0, video.duration, video.duration / seq_len))
        if len(times) < seq_len:
            times.append(video.duration)
        times = times[:seq_len]

        # extract 2D points from csv
        data = np.genfromtxt(self.landmarks_path, delimiter=',')[1:]
        lm_times = [
            int(np.ceil(t))
            for t in list(np.arange(0, len(data),
                                    len(data) / seq_len))
        ]
        if len(lm_times) < seq_len:
            lm_times.append(len(data) - 1)
        lm_times = lm_times[:seq_len]
        index_x = (298, 366)
        index_y = (366, 433)
        landmarks_2d_x = [
            data[t, index_x[0] - 1:index_x[1] - 1] * (1 / 1280)
            for t in lm_times
        ]
        landmarks_2d_y = [
            data[t, index_y[0] - 1:index_y[1]] * (1 / 720) for t in lm_times
        ]

        for i, t in enumerate(times):
            # moviepy returns RGB; convert to BGR for cv2.imwrite
            img = cv2.cvtColor(video.get_frame(t), cv2.COLOR_RGB2BGR)
            # extract roi from landmarks and crop
            xs, ys = landmarks_2d_x[i], landmarks_2d_y[i]
            bottom = int(max(ys) * img.shape[0])
            right = int(max(xs) * img.shape[1])
            top = int(min(ys) * img.shape[0])
            left = int(min(xs) * img.shape[1])

            cropped = cv2.resize(img[top:bottom, left:right, :],
                                 target_resolution)
            cv2.imwrite(
                os.path.join(self.frames_folder,
                             "frame_{0:.2f}.jpg".format(t)), cropped)

        print("Video duration {} seconds. Extracted {} frames".format(
            video.duration, len(times)))
Example #16
    def CheckCropping(self):
        ''' Display frame at time "time" for video to check if cropping is fine.
        Select ROI of interest by adjusting values in myconfig.py

        USAGE for cropping:
        clip.crop(x1=None, y1=None, x2=None, y2=None, width=None, height=None, x_center=None, y_center=None)

        Returns a new clip in which just a rectangular subregion of the
        original clip is conserved. x1,y1 indicates the top left corner and
        x2,y2 is the lower right corner of the cropped region.

        All coordinates are in pixels. Float numbers are accepted.
        '''
        from skimage import io
        videosource = self.video_source
        filename = self.filename
        time = self.start
        crop = self.cfg['video_sets'][videosource]['crop'].split(',')
        self.x1 = int(crop[0])
        self.x2 = int(crop[1])
        self.y1 = int(crop[2])
        self.y2 = int(crop[3])

        clip = VideoFileClip(videosource)

        nx, ny = clip.size  # dimensions of frame (width, height)
        if self.cropping == True:
            # Select ROI of interest by adjusting values in myconfig.py
            clip = clip.crop(y1=self.y1, y2=self.y2, x1=self.x1, x2=self.x2)

        image = clip.get_frame(
            time * clip.duration
        )  # "time" is a fraction of the clip, so this lands at that point in seconds
        fname = Path(filename)
        output_path = Path(
            self.config_path).parents[0] / 'labeled-data' / fname.stem
        if output_path.exists():
            saveimg = str(
                Path(self.config_path).parents[0] / 'labeled-data'
            ) + '/IsCroppingOK_' + str(
                fname
            ) + ".png"  #str(self.currFrame).zfill(int(np.ceil(np.log10(self.numberFrames)))) + '.png'
            io.imsave(saveimg, image)
            print(
                'Image cropped. Check %s to see if the cropping is OK; otherwise change the parameters in the config file.'
                % saveimg)
        else:
            print(
                'The path %s does not exist in the config file. Use add function to add this video in the config file and retry.'
                % output_path)
            self.Close(True)
        return image
Example #17
def extract_frames(movie, times):
    ''' extract frames from video '''
    clip = VideoFileClip(movie)
    img_ls = []
    for t in times:
        img = clip.get_frame(t)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img_ls.append(img)

    return img_ls
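
The frames come back in OpenCV's BGR order, so they can go straight to cv2.imwrite; a minimal sketch with a hypothetical file:

times = [0.0, 1.5, 3.0]
for t, img in zip(times, extract_frames('movie.mp4', times)):
    cv2.imwrite('frame_%.1f.png' % t, img)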
Example #18
def select_frames():
    if start > 1.0 or stop > 1.0 or start < 0 or stop < 0 or start >= stop:
        raise ValueError('Please change start & stop, they should form a '
                         'normalized interval with 0 <= start < stop <= 1.')
    else:
        base_folder = os.path.join(video_path, 'data-' + task + '/')
        auxiliary_functions.attempt_to_make_folder(base_folder)
        videos = auxiliary_functions.get_video_list(filename, video_path,
                                                    video_type)
        for vindex, video in enumerate(videos):
            print("Loading ", video, '# ', str(vindex + 1), ' of ',
                  str(len(videos)))
            clip = VideoFileClip(os.path.join(video_path, video))
            # print("Duration of video [s], ", clip.duration, "fps, ", clip.fps,
            #       "Cropped frame dimensions: ", clip.size)

            # Create folder with experiment name and extract random frames
            folder = 'selected'
            v_name = video.split('.')[0]
            auxiliary_functions.attempt_to_make_folder(
                os.path.join(base_folder, folder))
            index_length = int(np.ceil(np.log10(clip.duration * clip.fps)))

            # extract first frame (uncropped) - useful for data augmentation
            # index = 0
            # image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
            # io.imsave(os.path.join(base_folder, folder, 'img' + v_name + '-'
            #                        + str(index).zfill(index_length) + '.png'), image)

            if cropping is True:
                clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)

            # print("Extracting frames")
            if selection_algorithm == 'uniform':
                frames_to_pick = frame_selection_tools.uniform_frames(
                    clip, num_frames_to_pick, start, stop)
            elif selection_algorithm == 'kmeans':
                frames_to_pick = frame_selection_tools.k_means_based_frame_selection(
                    clip, num_frames_to_pick, start, stop)
            else:
                print('not implemented')
                frames_to_pick = []

            index_length = int(np.ceil(np.log10(clip.duration * clip.fps)))
            for index in frames_to_pick:
                try:
                    image = img_as_ubyte(clip.get_frame(index * 1. / clip.fps))
                    io.imsave(
                        os.path.join(
                            base_folder, folder, 'img' + v_name + '-' +
                            str(index).zfill(index_length) + '.png'), image)
                except FileNotFoundError:
                    print('Frame # ', index, ' does not exist.')

            clip.close()
Example #19
def get_video_array(video_path, label_path, set, video_name):
    clip = VideoFileClip(video_path, target_resolution=(225, None))
    clip_width = clip.get_frame(0).shape[1]
    assert (clip_width > 224)
    offset = int((clip_width - 224) / 2)
    labels_video = loadmat(label_path)
    try:
        label_list = labels_video['IF23'][0]
    except KeyError:
        label_list = labels_video['IF'][0]
    start_frame = 0
    while True:
        try:
            print(start_frame)
            frames = []
            for frame in clip.iter_frames():
                frames.append(frame)
                if len(frames) == _FRAMES:  # a full block of 16 frames
                    np_frames = np.array(frames)
                    np_frames = np_frames[:, :224, offset:224 + offset, :]
                    np_frames = np_frames.transpose((3, 0, 1, 2))
                    labels = label_list[int(start_frame):int(start_frame) +
                                        _FRAMES]
                    label = Counter(labels).most_common(1)[0][0]
                    sample = {"frames": np_frames, 'label': label}
                    file_name = os.path.join(
                        set, video_name + str(start_frame) + ".pkl")
                    pickle.dump(sample, open(file_name, "wb"))
                    frames = []  # empty to start over
                    start_frame += _FRAMES
            break  # all full blocks written; leave the retry loop

        except Exception as e:
            print(e)
            # Collect the trailing (incomplete) block from start_frame onwards.
            frames = []
            for index, frame in enumerate(clip.iter_frames()):
                if index >= start_frame:
                    frames.append(frame)
            labels = label_list[int(start_frame):-1]
            if labels != []:
                label = Counter(labels).most_common(1)[0][0]
                f = np.array(frames)
                print(len(f))
                f = pad_frames(f)
                print(len(f))
                sample = {"frames": f, 'label': label}
                file_name = os.path.join(
                    set, video_name + str(start_frame) + ".pkl")
                pickle.dump(sample, open(file_name, "wb"))
                break
            else:
                break
    print('got video blocks')
Example #20
def videoPipeline(pathToInputVideoClip, pathToOutputVideoClip, linear_svc,
                  orientation, cells_per_block, pixels_per_cell,
                  convert_colorspace):

    print(
        "============================= Training Video ================================"
    )
    video_input = VideoFileClip(pathToInputVideoClip)

    print(video_input.size)  # [1280, 720]

    image = video_input.get_frame(0)

    x_start_stop = [0, video_input.size[0]]
    print(x_start_stop)
    y_start_stop = [
        int(video_input.size[1] / 2),
        int(video_input.size[1] - 80)
    ]
    print(y_start_stop)
    # xy_window_half = (32, 32)
    # xy_overlap_half = (0.20, 0.20)
    # windows_half = slide_window(image, x_start_stop, [400, 480],
    #                             xy_window_half, xy_overlap_half)
    xy_window = (64, 64)
    xy_overlap = (0.15, 0.15)
    windows_normal = sliding_window(image, x_start_stop, y_start_stop,
                                    xy_window, xy_overlap)
    xy_window_1_5 = (96, 96)
    xy_window_1_5_overlap = (0.30, 0.30)
    windows_1_5 = sliding_window(image, x_start_stop, y_start_stop,
                                 xy_window_1_5, xy_window_1_5_overlap)
    xy_window_twice_overlap = (0.50, 0.50)
    xy_window_twice = (128, 128)
    windows_twice = sliding_window(image, x_start_stop, y_start_stop,
                                   xy_window_twice, xy_window_twice_overlap)
    print(windows_normal)
    print(windows_1_5)
    print(windows_twice)

    windows = windows_normal + windows_1_5 + windows_twice
    print("No of Windows are ", len(windows))

    processed_video = video_input.fl_image(lambda image: pipeline(
        image, windows, linear_svc, orientation, cells_per_block,
        pixels_per_cell, convert_colorspace))
    processed_video.write_videofile(pathToOutputVideoClip,
                                    threads=8,
                                    audio=False,
                                    fps=24)
    video_input.reader.close()
    video_input.audio.reader.close_proc()
Example #21
def process_video(video_inp):
    output_video_name = "Video_Out/" + video_inp + "_proc.mp4"
    input_video = VideoFileClip(video_inp + ".mp4")
    print("Processing Video [" + video_inp + ".mp4] and will write to [" +
          output_video_name + "]")
    calibrated = calibrated_camera(input_video.get_frame(0))
    pipeline = Pipeline(calibrated)

    output_video = input_video.fl_image(pipeline.run)

    get_ipython().run_line_magic(
        'time', 'output_video.write_videofile(output_video_name, audio=False)')
Example #22
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(
            vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        delta = f - prev_f
        sum_delta_fs += delta
        sum_fs += f

        ma_sum_fs += f
        if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
            ma_f = ma_sum_fs / 100
            Image.fromarray(ma_f.astype(np.uint8))\
                .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))
            ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)

        n_frames += 1
        prev_f = f

    # average out the values for each frame
    average_delta_f = sum_delta_fs / n_frames
    average_f = sum_fs / n_frames

    # Create images
    delta_img = Image.fromarray(average_delta_f.astype(np.uint8))
    delta_img.save(os.path.join(outpath, 'average_delta.png'))
    final_img = Image.fromarray(average_f.astype(np.uint8))
    final_img.save(os.path.join(outpath, 'average.png'))
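
A usage sketch (frange is the custom float-range helper the sampling branch relies on):

# Average every 5th frame of the first minute; this writes average.png,
# average_delta.png and periodic movavg_*.png files into out/.
average_video('movie.mp4', 'out', start=0, end=60, sample_every=5)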
Example #24
def video2rollscan(videofile, focus, start=0, end=None, savefile=None):
    """
    
    Makes a scan of the roll from the video.
    Requires the Python module MoviePy.
    
    Parameters
    -----------
    
    video
        Any videofile that MoviePy (FFMPEG) can read.
        
    focus
        A function ( f(image)->rectangular image ). For instance
        if the line of interest is defined by y=15 and x=10...230
        
        >>> focus = lambda im : im[ [15], 10:230 ]
        
    start,end
        Where to start and stop, each one either in seconds, or in
        format `(minutes, seconds)`. By default `start=0` and `end`
        is the end of the video.
        
    savefile
        If provided, the scan image will be saved under this name.
    
    Returns
    --------
    
      A W*H*3 RGB picture of the piano roll made by stacking the focus
      lines of the different frames under one another.
    """

    from moviepy.editor import VideoFileClip

    video = VideoFileClip(videofile, audio=False)
    if end is None:
        end = video.duration
    video = video.subclip(start, end)

    tt = np.arange(0, video.duration, 1.0 / video.fps)
    result = np.vstack([focus(video.get_frame(t)) for t in tt])

    if savefile:
        import matplotlib.pyplot as plt

        plt.imsave(savefile, result)

    return result
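
The focus example from the docstring can be used directly; a sketch with hypothetical times and file names:

focus = lambda im: im[[15], 10:230]   # one scanline, columns 10..229
scan = video2rollscan('roll.mp4', focus, start=(0, 5), end=(1, 30),
                      savefile='scan.png')
print(scan.shape)                     # (n_frames, 220, 3)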
Example #25
def VideoInfo(inpath, outpath, sec=10):
    inputv = VideoFileClip(inpath)
    fps = inputv.fps
    width, height = inputv.size
    print("fps={}".format(fps))
    print("width={}, height={}".format(width, height))
    # get_frame returns RGB; reorder the channels to BGR for OpenCV
    preframe = inputv.get_frame(sec)[:, :, [2, 1, 0]]
    cv2.namedWindow("info")

    def onMouse(evt, x, y, flags, param):
        if evt == cv2.EVENT_LBUTTONDOWN:
            param["drawing"] = True
            param["start_pos"] = (x, y)
            param["curr_pos"] = (x, y)
        elif evt == cv2.EVENT_MOUSEMOVE and flags & cv2.EVENT_FLAG_LBUTTON:
            param["curr_pos"] = (x, y)
            if param["drawing"]:
                frame = cv2.rectangle(preframe.copy(), param["start_pos"],
                                      (x, y), (0, 255, 0), 0)
                cv2.imshow("info", frame)
        elif evt == cv2.EVENT_LBUTTONUP:
            (x1, y1) = param["start_pos"]
            (x2, y2) = param["curr_pos"]
            X = min(x1, x2)
            Y = min(y1, y2)
            W = max(x1 - x2, x2 - x1)
            H = max(y1 - y2, y2 - y1)
            param["pos"] = (X, Y, W, H)
            frame = cv2.rectangle(preframe.copy(), param["start_pos"],
                                  param["curr_pos"], (0, 255, 0), 0)
            cv2.imshow("info", frame)
            kvalue = cv2.waitKey(0)
            if kvalue & 0xFF == ord('q'):
                cv2.destroyWindow("info")
            elif kvalue & 0xFF == ord('s'):
                cv2.imwrite(outpath + "_jpg.jpg", frame)
                cv2.destroyWindow("info")

    param = {}
    param["pos"] = None
    cv2.setMouseCallback("info", onMouse, param)
    cv2.imshow("info", preframe)
    cv2.waitKey(0)
    print(param["pos"])
    if param["pos"] is not None:
        return param["pos"]
    else:
        return None
Example #26
def create_video():
    ir = ImageReader(read_mode='RGB')
    cc = Camera(ir, None)
    cc.calibrate()
    ta = ThresholdApplier()

    output_video_name = '../output_videos/project_video_result.mp4'
    input_video = VideoFileClip("../project_video.mp4")

    image = input_video.get_frame(0)
    undistorted = cc.undistort(image)
    llf = LaneLineFinder(ta, cc, (cc.get_region_of_interest(image)),
                         undistorted)
    output_video = input_video.fl_image(llf.run)
    output_video.write_videofile(output_video_name, audio=False)
Example #28
    def is_ingame_clip(self, clip: VideoFileClip) -> bool:
        if self.model:
            frames = []
            for t in range(1, int(clip.duration)):
                frame = clip.get_frame(t)
                resized_frame = tf.keras.preprocessing.image.smart_resize(
                    frame, (224, 224), interpolation="nearest")
                frames.append(resized_frame)
            # predictions[:, 0] = game, predictions[:, 1] = nogame
            predictions = self.model.predict(np.array(frames))
            percentage = np.average(predictions, axis=0)
            clip.close()
            return percentage[0] > 0.8
        else:
            logging.warning("No model for prediction available -> every clip "
                            "is chosen as valid (ingame)")
            return True
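
The method expects its owner to hold an optional Keras classifier in self.model, and it closes the clip itself; a driving sketch (clip_filter is a hypothetical instance of the surrounding class):

clip = VideoFileClip('candidate.mp4')
keep = clip_filter.is_ingame_clip(clip)
print('ingame' if keep else 'skip')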
Example #29
def main():
    """Start here..."""
    # Create a frame iterator of the project video to run our pipeline on each frame.
    project_video = VideoFileClip('./project_video.mp4')

    # Determine dimensions of the video.
    sample_img_shape = project_video.get_frame(0).shape

    # Create the pipeline function.
    vehicle_zone = VehicleCollisionZone()
    lane_zone = LaneBoundaryZone(sample_img_shape[0], sample_img_shape[1])
    pipeline_fn = lane_car_locate_pipeline(vehicle_zone, lane_zone)

    # Pass the pipeline function to the iterator and fire it up saving the resulting stream to a video file.
    project_video_pipeline = project_video.fl_image(pipeline_fn)
    project_video_pipeline.write_videofile('./output_videos/project_video_pipeline.mp4', audio=False)
Example #30
def visualize_errors(detected_shots, video_id='25010'):
    # Get the errors
    false_positives, false_negatives = get_f1(detected_shots,
                                              video_id,
                                              return_incorrect_shots=True)

    v = VideoFileClip('%s/%s.mp4' % (DATASET_DIR, video_id))

    for name, errors in zip(('FN', 'FP'), (false_negatives, false_positives)):
        for count, trans in enumerate(errors):
            plt.figure(figsize=(25, 80))
            trans[0] = np.floor(trans[0])
            trans[-1] = np.ceil(trans[-1])
            for idx, frame_idx in enumerate(np.arange(trans[0], trans[1] + 1)):
                plt.subplot(1, int(trans[1] - trans[0] + 1), idx + 1)
                plt.imshow(v.get_frame(frame_idx / v.fps))
                plt.title('%s; %.1f' % (name, frame_idx))
Example #31
def extract_frames(csv_dir, video_dir, output_dir, save_format):
    """
  Extracts frames from video using csv data. It then saves those frames in separate directories
  inside `output_dir` with the names of the directories being the name of the videos and the 
  frames named time_x where x is time in seconds.

  Args:
  - csv_dir: str, path to directory containing the formatted csv files with columns Time and Labels.
  - video_dir: str, path to directory containing the labeled flash videos.
  - output_dir: str, path to directory where sub-directories containing frames will be saved. This will be created if it does not exist.
  - save_format: str, file format for the frames.

  Returns:
  Saves images to directory, does not return a value.
  """
    if not os.path.isdir(output_dir):
        print(
            "Destination directory {} does not exist, creating one now...".format(
                output_dir
            )
        )
        os.makedirs(output_dir)

    csv_files = [file for file in os.listdir(csv_dir) if file.endswith(".csv")]
    saveCount = 0
    print("Found {} csv files in {}".format(len(csv_files), csv_dir))
    print("Starting extraction of frames ...\n")
    for csv in csv_files:
        df = pd.read_csv(os.path.join(csv_dir, csv))
        timelist = df["Time"].to_numpy(dtype="float")
        # Name of sub-directory given by common characters in filenames shared by csv and video files.
        subdir_name = os.path.splitext(csv)[0][:-6]
        # create a directory inside output_dir if it doesn't already exist
        if not os.path.isdir(os.path.join(output_dir, subdir_name)):
            os.makedirs(os.path.join(output_dir, subdir_name))
        # filename of the video has to correspond to the csv file.
        video_filename = subdir_name + ".flv"
        clip = VideoFileClip(os.path.join(video_dir, video_filename))
        for timestep in timelist:
            # extract a frame at the specified timestep
            frame = clip.get_frame(timestep)
            # save to file
            save_frame(frame, output_dir, subdir_name, timestep, save_format)
            saveCount += 1
    print("Saved a total of {} images".format(saveCount))
Example #32
def CheckCropping(videopath,
                  filename,
                  x1,
                  x2,
                  y1,
                  y2,
                  cropping,
                  videotype,
                  time=start):  # `start` is expected as a module-level setting
    ''' Display frame at time "time" for video to check if cropping is fine. 
    Select ROI of interest by adjusting values in myconfig.py
    
    USAGE for cropping:
    clip.crop(x1=None, y1=None, x2=None, y2=None, width=None, height=None, x_center=None, y_center=None)
    
    Returns a new clip in which just a rectangular subregion of the
    original clip is conserved. x1,y1 indicates the top left corner and
    x2,y2 is the lower right corner of the cropped region.
    
    All coordinates are in pixels. Float numbers are accepted.
    '''
    videos = auxiliaryfunctions.GetVideoList(filename, videopath, videotype)
    if filename != 'all':
        videotype = filename.split('.')[1]

    for vindex, video in enumerate(videos):
        clip = VideoFileClip(os.path.join(videopath, video))
        print("Extracting ", video)

        nx, ny = clip.size  # dimensions of frame (width, height)
        if cropping == True:
            # Select ROI of interest by adjusting values in myconfig.py
            clip = clip.crop(y1=y1, y2=y2, x1=x1, x2=x2)

        image = clip.get_frame(
            time * clip.duration
        )  # "time" is a fraction of the clip, so this lands at that point in seconds
        io.imsave("IsCroppingOK" + video.split('.')[0] + ".png", image)

        if vindex == len(videos) - 1:
            print(
                "--> Open the CroppingOK-videofilename-.png file(s) to set the output range! <---"
            )
            print("--> Adjust shiftx, shifty, fx and fy accordingly! <---")
    return image
Example #33
def video_to_shortcuts(infile: str, outfile: str):
    video_cap = VideoFileClip(infile)
    frame_size = video_cap.size
    video_basic_info = {
        "frame_rate": video_cap.fps,
        "size": {
            "width": frame_size[0],
            "height": frame_size[1],
        },
        "time": video_cap.duration
    }
    with open(outfile + ".json", "w") as fp:
        json.dump(video_basic_info, fp)
    # `img_count` and `duration` are module-level settings: the number of
    # snapshot frames in the gif and the seconds per snapshot.
    vc = VideoClip(make_frame=lambda t: video_cap.get_frame(
        (video_cap.duration - 3) * t / (img_count * duration)),
                   duration=img_count * duration)
    vc = vc.set_fps(math.ceil(1 / duration))
    vc.write_gif(outfile + ".gif")
Example #34
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False).resize(width=66)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        #delta = f - prev_f
        #sum_delta_fs += delta
        #sum_fs += f

        #ma_sum_fs += f
        #if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
        #    ma_f = ma_sum_fs / 100
        #    Image.fromarray(ma_f.astype(np.uint8))\
        #        .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))
        #    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)

        #n_frames += 1
        #prev_f = f
        print(len(f))
        time.sleep(1.0 / float(sample_every))
Example #35
def run_moving_crash(args, target, outfile):
    """Runs a moving crash based on moving (gif/mp4) inputs"""
    video = VideoFileClip(target)
    img = video.get_frame(t=0)  # first frame of the video
    bounds = foreground.get_fg_bounds(img.shape[1], args.max_depth)
    max_depth = bounds.max_depth
    crash_params = crash.CrashParams(
        max_depth, args.threshold, args.bg_value, args.rgb_select)
    options = _options(args.reveal_foreground, args.reveal_background,
                       args.crash, args.reveal_quadrants, args.bg_value)
    frames = video.iter_frames(fps=video.fps)

    def make_frame(_):
        frame = next(frames)
        fg, bounds = foreground.find_foreground(frame, crash_params)
        return _process_img(frame, fg, bounds, options)

    output_video = VideoClip(
        make_frame, duration=video.duration - (4 / video.fps))  # trim the last 4 frames
    output_video.write_videofile(
        outfile, preset=args.compression, fps=video.fps,
        threads=args.in_parallel)
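
The args object just needs the attributes read above; a sketch where every value is an illustrative guess, not a documented default:

from types import SimpleNamespace

args = SimpleNamespace(max_depth=100, threshold=30, bg_value=0,
                       rgb_select=None, reveal_foreground=False,
                       reveal_background=False, crash=True,
                       reveal_quadrants=False, compression='medium',
                       in_parallel=2)
run_moving_crash(args, 'input.mp4', 'crashed.mp4')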
Example #36
def average_video(filepath, outpath, start=None, end=None, sample_every=1):
    # sb1..sb66 are module-level byte buffers, one per pixel column; they are
    # updated through globals() below, so no long `global` list is needed.
    """Calculate average of video frames"""

    # Load video
    vid = VideoFileClip(filepath, audio=False).resize(width=66)
    width = vid.w
    height = vid.h

    if start is None and end is None:
        frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)
    else:
        if start is None:
            start = 0
        if end is None:
            end = vid.duration
        # compute time increment for sampling by frames
        sample_inc = sample_every / vid.fps
        frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))

    # create starting matrix of zeros
    sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)
    prev_f = np.zeros(shape=(height, width, 3), dtype=int)
    sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)

    n_frames = 0
    for f in frame_generator:
        #delta = f - prev_f
        #sum_delta_fs += delta
        #sum_fs += f

        #ma_sum_fs += f
        #if divmod(n_frames, 100)[1] == 0 and n_frames > 0:
        #    ma_f = ma_sum_fs / 100
        #    Image.fromarray(ma_f.astype(np.uint8))\
        #        .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))
        #    ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)

        #n_frames += 1
        #prev_f = f
        
        # print(len(f[0]))
        for x in range(1, len(f[0]) + 1):
            for y in range(0, len(f)):
                if y < 41:
                    r = f[y][x - 1][0]
                    g = f[y][x - 1][1]
                    b = f[y][x - 1][2]
                    # Append the packed pixel to the matching column buffer
                    # (sb1..sb66); this replaces a 66-way elif chain.
                    if 1 <= x <= 66:
                        globals()['sb%d' % x] += pack('BBBB', r, g, b, y)

        time.sleep(1.0/float(sample_every))
Example #37
class MyPaintWidget(Widget):
	def __init__(self, **kwargs):
		super(MyPaintWidget, self).__init__(**kwargs)
		self._keyboard = Window.request_keyboard(self._keyboard_closed, self, 'text')
		self._keyboard.bind(on_key_down=self._on_keyboard_down)
		Window.size = (1366, 768)
		self.size = Window.size
		# if smoke true - user can draw rectangle
		self.smoke = False
		# if fire true - user can draw rectangle
		self.fire = False
		# list with points of fire rectangle
		self.rectanglePoints = []
		# number of current frame
		self.numberOfFrame = 0
		# if video open isVideoOpen = True
		self.isVideoOpen = False
		# video
		self.video = None
		# filename of result txt file
		self.txtFilename = ""
		# path to temp directory
		self.dirPath = ""
		# dictionary with points
		self.dictionary = {}
		self.Forest = ""
		self.In = ""
		self.Out = ""
		self.start = 0
		self.finish = 0
		self.type = "None"
		self.move = False
		self.time = time.time()

	def _keyboard_closed(self):
		print('My keyboard has been closed!')
		self._keyboard.unbind(on_key_down=self._on_keyboard_down)
		self._keyboard = None

	def _on_keyboard_down(self, keyboard, keycode, text, modifiers):

		# Keycode is composed of an integer + a string.
		# Debounce: swallow key events that arrive in essentially the same
		# instant. The original compared the float time delta to exactly 0,
		# which never triggers in practice; the 0.05 s threshold here is an
		# assumption.
		if time.time() - self.time < 0.05:
			return True
		self.time = time.time()

		if keycode[1] == 'p':
			print('p')
		if keycode[1] == 'f':
			print('f')
			self.NextFrame()
		if keycode[1] == 'a':
			print('a')
			self.DrawSmoke()
		if keycode[1] == 'd':
			self.DrawFire()
		if keycode[1] == 'z':
			self.PrevFrame()
		# Return True to accept the key. Otherwise, it will be used by
		# the system.
		return True
	def PrevFrame(self):
		# back up two steps; NextFrame() advances by `speed` again, so the
		# net effect is moving one step backwards
		self.numberOfFrame -= 2 * speed
		self.NextFrame()

	def NextFrame(self):
		# iterate over a copy: removing items from canvas.children while
		# iterating over it directly would skip elements
		for line in list(self.canvas.children):
			if type(line) == Line:
				self.canvas.children.remove(line)
		for obj in self.canvas.children:
			if type(obj) == Rectangle:
				self.rect = obj
		filename = self.GetFrameFilename()
		if filename is None:
			self.rect.size = (0, 0)
		else:
			self.rect.source = filename

	def openVideoFile(self):
		# open file explorer
		root = Tkinter.Tk()
		root.withdraw()
		# get video filename
		filename = tkFileDialog.askopenfilename(parent=root, title='Open video file')

		try:
			self.video = VideoFileClip(filename)
			print(self.video.fps)
		except Exception:
			print("Error opening file - " + filename)
			return None
		# create default txt filename
		self.txtFilename = filename[:filename.rfind(".")] + "_label.txt"
		# display txt filename
		self.ids.PathSaveFile.text = self.txtFilename
		# create temp directory for frames from video (the original hard-coded
		# an "asdf" directory, which crashed on re-runs)
		self.dirPath = mkdtemp()
		print(self.video.duration)
		# set number of frame
		self.numberOfFrame = 0
		# change window size
		Window.size = (self.video.size[0], self.video.size[1] + pad)
		self.size = (self.video.size[0], self.video.size[1] + pad)

		self.firstFrame = True
		# find rectangle
		for obj in self.canvas.children:
			if type(obj) == Rectangle:
				self.rect = obj
		# set rectangle size
		self.rect.size = (self.size[0], self.size[1] - pad)
		# set frame to rectangle
		self.rect.source = self.GetFrameFilename()

	# return filename of frame from video
	def GetFrameFilename(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		if (self.firstFrame):
			imsave(self.dirPath + "/temp0.png", self.video.get_frame(self.numberOfFrame / self.video.fps))
			self.firstFrame = False
			self.dictionary[self.numberOfFrame] = {}
			self.dictionary[self.numberOfFrame]["smoke"] = []
			self.dictionary[self.numberOfFrame]["fire"] = []
			return self.dirPath + "/temp0.png"
		else:
			# increase number of frame
			self.numberOfFrame = int(self.numberOfFrame) + speed
			if self.numberOfFrame >= self.video.duration * self.video.fps:
				self.numberOfFrame = int(self.numberOfFrame) - speed
				self.video = None  # was "self.Video", a typo that never cleared the clip
				return None
			self.dictionary[self.numberOfFrame] = {}
			self.dictionary[self.numberOfFrame]["smoke"] = []
			self.dictionary[self.numberOfFrame]["fire"] = []
			# save frame from video (the original wrapped the timestamp in
			# int(), snapping every frame to a whole second)
			imsave(self.dirPath + "/temp" + str(self.numberOfFrame) + ".png",
			       self.video.get_frame(self.numberOfFrame / float(self.video.fps)))
			# progress through the clip, as a percentage
			print(self.numberOfFrame / (self.video.fps * self.video.duration) * 100)
			return self.dirPath + "/temp" + str(self.numberOfFrame) + ".png"

	def DrawSmoke(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		self.smoke = True
		self.fire = False

	def DrawFire(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		self.fire = True
		self.smoke = False

	def CleanSmoke(self):
		points = self.dictionary.get(self.numberOfFrame)
		if points is None:
			return None

		pointsList = points.get("smoke")
		if pointsList is None:  # the original re-checked `points` here, a bug
			return None
		for rect in pointsList:
			# iterate over a copy, since matching lines are removed as we go
			for line in list(self.canvas.children):
				if type(line) == Line:
					for i in range(0, 4):
						if line.points.count(rect[i]) != 2:
							break
					else:
						self.canvas.children.remove(line)
		self.dictionary.get(self.numberOfFrame)["smoke"] = []

	def CleanFire(self):
		points = self.dictionary.get(self.numberOfFrame)
		if points is None:
			return None

		pointsList = points.get("fire")
		if pointsList is None:  # the original re-checked `points` here, a bug
			return None
		for rect in pointsList:
			# iterate over a copy, since matching lines are removed as we go
			for line in list(self.canvas.children):
				if type(line) == Line:
					for i in range(0, 4):
						if line.points.count(rect[i]) != 2:
							break
					else:
						self.canvas.children.remove(line)
		self.dictionary.get(self.numberOfFrame)["fire"] = []

	def FinishFire(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		self.finish = self.numberOfFrame

	def StartFire(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		self.start = self.numberOfFrame

	def In23(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		self.type = "In"
		self.ids.LInfo.text = self.type

	def Out23(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		self.type = "Out"
		self.ids.LInfo.text = self.type

	def Forest23(self):
		# check that a video is open
		if self.video is None:
			print("Error! No video is open!")
			return None
		self.type = "Forest"
		self.ids.LInfo.text = self.type

	def Quit(self):
		print(self.dirPath)
		if (isdir(self.dirPath)):
			rmtree(self.dirPath)
		App.get_running_app().stop()

	def on_touch_down(self, touch):
		super(MyPaintWidget, self).on_touch_down(touch)
		if (0 <= touch.y < self.size[1] - pad and 0 <= touch.x < self.size[0] and (self.smoke or self.fire)):
			self.move = True
			self.rectanglePoints.append(touch.x)
			self.rectanglePoints.append(touch.y)
			x = touch.x
			y = touch.y
			with self.canvas:
				# red for smoke, blue for fire
				if self.smoke:
					Color(1, 0, 0)
				else:
					Color(0, 0, 1)
				touch.ud['line'] = Line(rectangle=(x, y, 0, 0))

	def on_touch_move(self, touch):
		if (0 <= touch.y < self.size[1] - pad and 0 <= touch.x < self.size[0] and (
			self.smoke or self.fire) and self.move):
			x = self.rectanglePoints[0]
			y = self.rectanglePoints[1]
			touch.ud['line'].rectangle = (x, y, touch.x - x, touch.y - y)

	def on_touch_up(self, touch):
		if (0 <= touch.y < self.size[1] - pad and 0 <= touch.x < self.size[0] and (self.smoke or self.fire)):
			self.move = False
			self.rectanglePoints.append(touch.x)
			self.rectanglePoints.append(touch.y)
			if self.smoke:
				self.dictionary[self.numberOfFrame]["smoke"].append(self.rectanglePoints)
			else:
				self.dictionary[self.numberOfFrame]["fire"].append(self.rectanglePoints)
			self.smoke = False
			self.fire = False
			self.rectanglePoints = []

	def SaveResult(self):
		filename = self.ids.PathSaveFile.text
		if ".txt" not in filename:
			print("Error in text filename")
			return None
		if isfile(filename):
			# pick a free name by appending an index before the extension;
			# the original replace()-based loop accumulated digits
			# ("a.txt" -> "a0.txt" -> "a01.txt" -> ...)
			base = filename[:filename.rfind(".txt")]
			i = 0
			while isfile(base + str(i) + ".txt"):
				i += 1
			filename = base + str(i) + ".txt"
		with open(filename, 'w') as f:
			f.write("type " + self.type + "\n")
			f.write("start %s\n" % self.start)
			f.write("finish %s\n" % self.finish)
			self.dictionary = collections.OrderedDict(sorted(self.dictionary.items()))
			for k, v in self.dictionary.items():
				f.write(str(k))
				f.write(" smoke ")
				if v.get("smoke") is not None:
					for item in v["smoke"]:
						f.write("%s " % item)
				else:
					f.write("0 0 0 0 ")
				f.write(" fire ")
				if v.get("fire") is not None:
					for item in v["fire"]:
						f.write("%s " % item)
				else:
					f.write("0 0 0 0")
				f.write("\n")
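For reference, the _label.txt file written by SaveResult has a three-line type/start/finish header followed by one line per labeled frame. A minimal sketch of reading it back, assuming exactly the format produced above and leaving the rectangle coordinates as raw strings (parse_label_file is a hypothetical helper, not part of the original):

def parse_label_file(path):
    header = {}
    frames = {}
    with open(path) as f:
        for line in f:
            key, _, rest = line.strip().partition(" ")
            if key in ("type", "start", "finish"):
                header[key] = rest
            elif key.isdigit():
                # frame lines look like "<frame> smoke <rects>  fire <rects>"
                smoke_str, _, fire_str = rest.partition(" fire ")
                frames[int(key)] = {
                    "smoke": smoke_str.replace("smoke", "", 1).strip(),
                    "fire": fire_str.strip(),
                }
    return header, frames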
Ejemplo n.º 38
0
def split_video():

  movie_title = os.path.split(args.source_path)[-1]
  offset_csv = os.path.join(args.target_folder, 'offsets.csv')
  offsets = []
  video = VideoFileClip(args.source_path, audio=False)
  framerate = video.fps
  # integer width for each half (true division would give a float under Python 3)
  width = (np.size(video.get_frame(0), 1) - args.middle_gap_pixel_size) // 2
  left_video = moviepy.video.fx.all.crop(video, x1=0, width=width)
  right_video = moviepy.video.fx.all.crop(video, x1=width + args.middle_gap_pixel_size, width=width)
  right_frame_iterator = right_video.iter_frames()
  output_ind = args.output_starting_ind

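  # Each 20-frame window yields two output segments: an aligned left/right
  # pair from the window's first 10 frames, and a second pair in which one
  # randomly chosen side is replaced by frames shifted forward by a random
  # 1-10 frame offset. offsets.csv records the true offset per segment id.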
  for ind, left_frame in enumerate(left_video.iter_frames()):
    left_frame = rgb2gray(left_frame)
    right_frame = rgb2gray(next(right_frame_iterator))
    if (ind % 20 == 0): # INITIALIZE
      left_frames = []
      right_frames = []
      offset_frames = []
      first_start = ind
      offset = randint(1,10)
      second_start = first_start + offset
      offset_left = randint(0, 1) == 1
    if (ind >= first_start and ind < first_start + 10): # ADD FRAMES
      right_frames.append(right_frame)
      left_frames.append(left_frame)
    if (ind >= second_start and ind < second_start + 10): # ADD OFFSET FRAMES
      if (offset_left):
        offset_frames.append(left_frame)
      else:
        offset_frames.append(right_frame)
    if (ind % 20 == 19): # SAVE SEGMENT FRAMES TO JPEG
      if args.output_images:
        assert len(left_frames) == 10, 'Only added ' + str(len(left_frames)) + ' left frames on segment ' + str(output_ind) + '. Should have 10.'
        assert len(right_frames) == 10, 'Only added ' + str(len(right_frames)) + ' right frames on segment ' + str(output_ind) + '. Should have 10.'
        assert len(offset_frames) == 10, 'Only added ' + str(len(offset_frames)) + ' offset frames on segment ' + str(output_ind) + '. Should have 10.'
        for frame_ind, left_frame in enumerate(left_frames):
          misc.toimage(left_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
        for frame_ind, right_frame in enumerate(right_frames):
          misc.toimage(right_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
      else:
        # note: the original applied '%' to str.format-style templates
        # ('seg-{:06d}-left.mp4' % output_ind), which raises TypeError
        left_video_out = ImageSequenceClip(left_frames, fps=framerate)
        left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
        right_video_out = ImageSequenceClip(right_frames, fps=framerate)
        right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      offsets.append({ 'id': '{:06d}'.format(output_ind), 'offset_frames': 0 })
      output_ind += 1
      if (offset_left):
        if args.output_images:
          for frame_ind, offset_frame in enumerate(offset_frames):
            misc.toimage(offset_frame, cmin=np.min(offset_frame), cmax=np.max(offset_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
          for frame_ind, right_frame in enumerate(right_frames):
            misc.toimage(right_frame, cmin=np.min(right_frame), cmax=np.max(right_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
        else:
          left_video_out = ImageSequenceClip(offset_frames, fps=framerate)
          left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
          right_video_out = ImageSequenceClip(right_frames, fps=framerate)
          right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      else:
        if args.output_images:
          for frame_ind, left_frame in enumerate(left_frames):
            misc.toimage(left_frame, cmin=np.min(left_frame), cmax=np.max(left_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-left.jpeg').format(output_ind, frame_ind)))
          for frame_ind, offset_frame in enumerate(offset_frames):
            misc.toimage(offset_frame, cmin=np.min(offset_frame), cmax=np.max(offset_frame)).save(os.path.join(args.target_folder, ('seg-{:06d}-frame-{:02d}-right.jpeg').format(output_ind, frame_ind)))
        else:
          left_video_out = ImageSequenceClip(left_frames, fps=framerate)
          left_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-left.mp4'.format(output_ind)), codec='libx264', audio=False)
          right_video_out = ImageSequenceClip(offset_frames, fps=framerate)
          right_video_out.write_videofile(os.path.join(args.target_folder, 'seg-{:06d}-right.mp4'.format(output_ind)), codec='libx264', audio=False)
      offsets.append({ 'id': '{:06d}'.format(output_ind), 'offset_frames': offset })
      output_ind += 1
    if (ind % 1000 == 0):
      print('Finished processing {:d} datapoints.'.format(output_ind))
  if os.path.isfile(offset_csv):
    os.remove(offset_csv)
  with open(offset_csv, 'w') as offset_csv_file:
    w = csv.DictWriter(offset_csv_file, fieldnames=['id', 'offset_frames'])
    w.writeheader()
    w.writerows(offsets)
  return True
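Downstream code can recover each segment's ground-truth offset from offsets.csv; a minimal sketch, assuming the id/offset_frames schema written above (load_offsets is a hypothetical helper, not part of the original):

import csv

def load_offsets(path):
  # map segment id -> offset_frames, matching the DictWriter fieldnames above
  with open(path) as f:
    return {row['id']: int(row['offset_frames']) for row in csv.DictReader(f)}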