Code Example #1
    def read_video(self, path):
        # Return: Numpy.ndarray 5-d tensor with shape (1, <No. of frames>, <height>, <width>, <channels>)
        capt = FFmpegReader(filename=path)
        self.fps = int(capt.inputfps)
        list_of_frames = []

        for index, frame in enumerate(capt.nextFrame()):
            # frame -> (<height>, <width>, 3)
            capture_frame = True
            if self.required_fps is not None:
                # Keep only the first required_fps frames of each second
                is_valid = range(self.required_fps)
                capture_frame = (index % self.fps) in is_valid

            if capture_frame:
                if self.target_size is not None:
                    temp_image = image.array_to_img(frame)
                    frame = image.img_to_array(
                        temp_image.resize(self.target_size,
                                          Image.ANTIALIAS)).astype('uint8')
                list_of_frames.append(frame)
        temp_video = np.stack(list_of_frames)
        capt.close()
        if self.to_gray:
            temp_video = rgb2gray(temp_video)
        if self.max_frames is not None:
            temp_video = self.process_video(video=temp_video)
        return np.expand_dims(temp_video, axis=0)
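
The sampling test above keeps the first required_fps frames out of every fps consecutive frames, which approximates resampling the clip to required_fps frames per second. A standalone sketch of the rule, with illustrative values:

# Illustrative sketch of the (index % fps) in range(required_fps) rule
fps, required_fps = 30, 5
kept = [i for i in range(90) if (i % fps) in range(required_fps)]
print(kept)  # [0, 1, 2, 3, 4, 30, 31, 32, 33, 34, 60, 61, 62, 63, 64]
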
Code Example #2
File: index.py Project: szWingLee/ELD
def raw2gray(bayer_images):
    """RGBG -> linear RGB"""
    # bayer_images: T x H x W x C, channels ordered R, G1, B, G2
    lin_rgb = np.stack([
        bayer_images[..., 0],
        np.mean(bayer_images[..., [1, 3]], axis=3),
        bayer_images[..., 2],
    ], axis=3)

    lin_gray = rgb2gray(lin_rgb)

    return lin_gray
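
A minimal usage sketch for raw2gray, assuming the input is a stack of Bayer planes ordered R, G1, B, G2 (shapes illustrative):

import numpy as np

bayer = np.random.rand(10, 64, 64, 4)  # T x H x W x C
gray = raw2gray(bayer)                 # average the two greens, then rgb2gray
print(gray.shape)                      # (10, 64, 64, 1) with skvideo's rgb2gray
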
Code Example #3
def create_pkl(mp4path):
    tgtpklpath = (mp4path.parent / (mp4path.stem + ".pkl"))
    if not tgtpklpath.exists():
        try:
            # rgb2gray yields (T, H, W, 1); squeeze drops the channel axis
            images = rgb2gray(vread(mp4path)).astype(np.uint8).squeeze()
            face_detector = FaceDetector()
            faces = np.stack([
                face_detector.crop_mouth(image, bounding_box_shape=(220, 150))
                for image in images
            ], 0)
            faces.dump(tgtpklpath.as_posix())
        except Exception as e:
            print("{}: {}".format(tgtpklpath, e))
Code Example #4
    def _read_video(self, path):
        """
        Parameters:
            path (str): Required
                Path of the video to be read

        Returns:
            Numpy.ndarray
                A 5-d tensor with shape (1, <No. of frames>, <height>, <width>, <channels>)
        """

        cap = FFmpegReader(filename=path)
        list_of_frames = []
        self.fps = int(cap.inputfps)                  # Frame Rate

        for index, frame in enumerate(cap.nextFrame()):

            capture_frame = True
            if self.required_fps is not None:
                # Keep only the first required_fps frames of each second
                is_valid = range(self.required_fps)
                capture_frame = (index % self.fps) in is_valid

            if capture_frame:

                if self.target_size is not None:
                    temp_image = image.array_to_img(frame)
                    frame = image.img_to_array(
                        temp_image.resize(
                            self.target_size,
                            Image.ANTIALIAS)).astype('uint8')

                # Shape of each frame -> (<height>, <width>, 3)
                list_of_frames.append(frame)

        temp_video = np.stack(list_of_frames)
        cap.close()

        if self.to_gray:
            temp_video = rgb2gray(temp_video)

        if self.max_frames is not None:
            temp_video = self._process_video(video=temp_video)

        return temp_video
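
A hedged usage sketch; the class name and constructor arguments are assumptions, with the attribute names taken from the method body:

# Hypothetical owner class for _read_video, wiring up only the
# attributes the method references
reader = Videos(target_size=(128, 128), to_gray=True,
                required_fps=5, max_frames=40)
clip = reader._read_video("some_video.mp4")
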
Code Example #5
def main(model_path, data_dir, prediction_path):

    # Grab all the input videos
    video_extensions = ["*.pgm", "*.mp4"]
    videos = []
    for extension in video_extensions:
        video_paths = os.path.join(data_dir, extension)
        videos += glob(video_paths)

    # Load the deep learning model
    model = load_model(
        model_path,
        custom_objects={"dice_coef_multilabel": dice_coef_multilabel})

    # Loop for each video
    for vid in videos:
        # Define video's specific parameters
        vid_name = os.path.split(vid)[1]  # with extension
        vid_ext = os.path.splitext(vid_name)[1]
        output_path = prediction_path + os.path.splitext(vid_name)[0] + ".npy"
        vid_reader = skv.FFmpegReader(vid)

        num_frames = vid_reader.getShape()[0]
        batch_array = np.zeros((num_frames, 240, 320, 4)).astype(np.uint8)
        print("Current video:", vid)
        print("with num_frames = ", num_frames)
        # Loop for each frame
        for idx, frame in enumerate(vid_reader.nextFrame()):

            # Printing progress
            print("\rNow is at %d/%d" % (idx, num_frames), end="", flush=True)

            # Preprocessing before the DL network
            if vid_ext == ".pgm" or vid_name == "4.5mA_0.0ms_180s.mp4" or "max" in vid_name:
                frame = frame[:, 36:356, :]
            # rgb2gray returns a (1, H, W, 1) batch, ready for model.predict
            frame_gray = rgb2gray(frame) / 255
            prediction = model.predict(frame_gray)
            batch_array[idx] = (prediction[0] * 255).astype(np.uint8)

        print("\n")
        print("Saving prediction: ", output_path)
        np.save(output_path, batch_array)
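
One detail worth noting in the loop above: model.predict receives a single frame, but skvideo's rgb2gray reshapes its input to (T, H, W, C), so one frame already comes back as a batch of one:

import numpy as np
from skvideo.utils import rgb2gray

frame = np.zeros((240, 320, 3), dtype=np.uint8)
print(rgb2gray(frame).shape)  # (1, 240, 320, 1)
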
Code Example #6
File: block.py Project: xkp793003821/vbliinds
def blockMotion(videodata, method='DS', mbSize=8, p=2, **plugin_args):
    videodata = vshape(videodata)

    # grayscale
    luminancedata = rgb2gray(videodata)

    numFrames, height, width, channels = luminancedata.shape
    assert numFrames > 1, "Must have more than 1 frame for motion estimation!"

    # luminance is 1 channel, so flatten for computation
    luminancedata = luminancedata.reshape((numFrames, height, width))

    motionData = np.zeros((numFrames - 1, height // mbSize, width // mbSize, 2),
                          np.int8)

    if method == "N3SS":
        for i in range(numFrames - 1):
            motion, comps = _N3SS(luminancedata[i + 1, :, :], luminancedata[i, :, :], mbSize, p)
            motionData[i, :, :, :] = motion
    else:
        # Only the N3SS branch is kept in this excerpt
        raise NotImplementedError

    return motionData
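
A usage sketch on synthetic data; the call signature mirrors skvideo.motion.blockMotion:

import numpy as np

# Synthetic clip: 5 RGB frames of 64x64 noise
clip = (np.random.rand(5, 64, 64, 3) * 255).astype(np.uint8)
vectors = blockMotion(clip, method="N3SS", mbSize=8, p=2)
print(vectors.shape)  # (4, 8, 8, 2): one (dy, dx) pair per 8x8 block per frame pair
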
Code Example #7
File: Cut_LUS.py Project: Triple-BAM/Final-Project
import numpy as np
from skvideo.io import vread, vwrite
from skvideo.utils import rgb2gray
import os

if __name__ == '__main__':
    orig_path = r'C:\Users\dekel\Desktop\MP4_Vids\DCM10'
    out_root = r'C:\Users\dekel\Desktop\MP4_Vids\DCM10_cut'
    for folder in os.listdir(orig_path):
        in_path = os.path.join(orig_path, folder)
        out_path = os.path.join(out_root, folder + '_cut')
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        for file in os.listdir(in_path):
            vid = rgb2gray(vread(os.path.join(in_path, file)))
            # Crop the region of interest and black out the top-left corner
            Video = vid[:, 54:354, 240:540]
            Video[:, :45, :45] = 0
            vwrite(os.path.join(out_path, file), Video)

    print('Done!')
Code Example #8
train_frames = []
tmp = 0
for i in tqdm(range(len(train_files))):

    file = train_files[i]
    vid_path = path + '/' + assign[train_targets[i]] + '/' + file
    ind_frames = []
    frames_count = 5
    while frames_count != 205:
        img = cv2.imread(vid_path + '/frame-' + str(frames_count) + '.png', 1)
        img = cv2.resize(img, (160, 120))
        #     print(vid_path+'/frame-'+str(frames_count)+'.png')
        frames_count = frames_count + 5
        tmp = tmp + 1
        ind_frames.append(img)

    # Note: cv2.imread returns BGR while rgb2gray assumes RGB, so convert
    # first (e.g. cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) if exact luminance
    # weights matter
    ind_frames = rgb2gray(np.asarray(ind_frames))
    train_frames.append(ind_frames)

train_frames = np.array(train_frames)
print(train_frames.shape)

# In[5]:

import numpy as np
from skvideo.utils import rgb2gray
import cv2
from tqdm import tqdm

test_frames = []
tmp = 0
for i in tqdm(range(len(test_files))):
Code Example #9
    def __call__(self, sample):
        # Convert every video-modality channel in the sample to grayscale
        for channel in sample:
            if channel['modality'] == 'video':
                channel['features'] = su.rgb2gray(channel['features'])

        return sample
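
A hedged usage sketch; the transform class name (ToGray here) and the sample layout are assumptions inferred from the method body:

import numpy as np
import skvideo.utils as su

sample = [{'modality': 'video', 'features': np.zeros((4, 32, 32, 3))},
          {'modality': 'audio', 'features': np.zeros((100, 26))}]
sample = ToGray()(sample)  # hypothetical class owning the __call__ above
print(sample[0]['features'].shape)  # (4, 32, 32, 1)
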
Code Example #10
    def plot_video(self,
                   output_video_path,
                   output_record_path,
                   update_template=False):
        # Initialise writer and recorder
        self.vid_writer = skv.FFmpegWriter(output_video_path)
        self.record_fh = open(output_record_path, "w")
        self.record_fh.write("frame,rotation\n")

        # Initialise counter
        idx = 0
        # Loop for each frame
        for pred, pred_masked, frame in zip(self.predictions,
                                            self.predictions_masked,
                                            self.vid_reader.nextFrame()):

            # Print progress
            print("\rNow is at %d/%d" % (idx, self.vid_shape[0]),
                  end="",
                  flush=True)

            # Initialise maps and frames
            frame = img_as_float(frame)  # frame ~ (240, 320, 3)
            frame_gray = rgb2gray(frame)[0, :, :, 0]  # frame_gray ~ (240, 320)
            frame_rgb = np.zeros(frame.shape)  # frame_rgb ~ (240, 320, 3)
            frame_rgb[:, :, :] = frame_gray.reshape(frame_gray.shape[0],
                                                    frame_gray.shape[1], 1)
            useful_map, (pupil_map, _, _, _) = getSegmentation_fromDL(
                pred
            )  # useful_map is used for polar transformation and cross-correlation
            _, (pupil_map_masked, iris_map_masked, glints_map_masked,
                visible_map_masked) = getSegmentation_fromDL(pred_masked)
            rr, _, centre, _, _, _, _, _ = fit_ellipse(pupil_map, 0.5)

            # Cross-correlation
            if idx == 0:
                polar_pattern_template, polar_pattern_template_longer, r_template, theta_template, extra_radian = genPolar(
                    frame_gray,
                    useful_map,
                    center=centre,
                    template=True,
                    filter_sigma=100,
                    adhist_times=2)
                rotated_info = (polar_pattern_template, r_template,
                                theta_template)
                rotation = 0
            elif rr is not None:
                # Find the rotation value and determine whether the template needs updating
                rotation, rotated_info, _ = findTorsion(
                    polar_pattern_template_longer,
                    frame_gray,
                    useful_map,
                    center=centre,
                    filter_sigma=100,
                    adhist_times=2)
                if update_template and rotation == 0:
                    polar_pattern_template, polar_pattern_template_longer, r_template, theta_template, extra_radian = genPolar(
                        frame_gray,
                        useful_map,
                        center=centre,
                        template=True,
                        filter_sigma=100,
                        adhist_times=2)

            else:
                rotation, rotated_info = np.nan, None

            self.rotation_results.append(rotation)
            self.record_fh.write("{},{}\n".format(idx, rotation))

            # Drawing the frames of visualisation video
            rotation_plot_arr = self._plot_rotation_curve(idx)
            segmented_frame = self._draw_segmented_area(
                frame_gray, pupil_map_masked, iris_map_masked,
                glints_map_masked, visible_map_masked)
            polar_transformed_graph_arr = self._plot_polar_transformed_graph(
                (polar_pattern_template, r_template, theta_template),
                rotated_info, extra_radian)
            frames_to_draw = (frame_rgb, rotation_plot_arr, segmented_frame,
                              polar_transformed_graph_arr)
            final_output = self._build_final_output_frame(frames_to_draw)
            self.vid_writer.writeFrame(final_output)
            idx += 1

        self.delete_handles()
        del self.predictions
        del self.predictions_masked