Example No. 1
    def __init__(self, folder, purge_existing_data):
        self.folder = folder
        self.optical_flow_processor = OpticalFlow()
        self.__files_per_folder = self.files_per_folder
        self.__last_saved_frame_number = 0
        self.__options = {}
        self.create_folders(purge_existing_data)
Example No. 2
def vid2frames(vid):
    oOpticalFlow = OpticalFlow(bThirdChannel=False)
    tuRectangle = (224, 224)
    success, frame = vid.read()
    rgbFrames = []
    oflowFrames = []
    # loop until vid.read() fails (end of stream or a corrupted frame)
    while success:

        frame = cv2.flip(frame, 1)
        frame = cv2.resize(frame, tuRectangle, interpolation=cv2.INTER_AREA)
        rgbFrames.append(frame)

        oflow = oOpticalFlow.next(frame)
        oflowFrames.append(oflow)

        success, frame = vid.read()

    rgbFrames = image_normalize(np.array(rgbFrames), 40)
    oflowFrames = frames_downsample(np.array(oflowFrames), 40)

    #print(rgbFrames.shape)
    #print(oflowFrames.shape)

    return rgbFrames, oflowFrames
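For context, a minimal usage sketch (not part of the original snippet): it assumes the helpers used above (OpticalFlow, image_normalize, frames_downsample) are importable and that "video.mp4" is a placeholder path.

# Usage sketch (assumptions: helper functions above are importable; the path is a placeholder)
import cv2

vid = cv2.VideoCapture("video.mp4")        # open the source video
if not vid.isOpened():
    raise IOError("Cannot open video file")

rgbFrames, oflowFrames = vid2frames(vid)   # RGB frames + optical-flow frames
vid.release()

print(rgbFrames.shape, oflowFrames.shape)  # likely (40, 224, 224, 3) and (40, 224, 224, 2)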
Example No. 3
def video_capture(oStream, sColor, sText, tuRectangle = (224, 224), nTimeDuration = 3, bOpticalFlow = False) -> \
 (float, np.array, np.array, int):

    if bOpticalFlow:
        oOpticalFlow = OpticalFlow(bThirdChannel=False)

    liFrames = []
    liFlows = []
    key = -1  # last key pressed (avoids a NameError if the time limit is reached before any key poll)
    fTimeStart = time.time()

    # loop over frames from the video file stream
    while True:
        # grab the frame from the threaded video file stream
        (bGrabbed, arFrame) = oStream.read()
        arFrame = rescale_frame(arFrame, 320, 240)
        arFrame = cv2.flip(arFrame, 1)
        liFrames.append(arFrame)

        fTimeElapsed = time.time() - fTimeStart
        s = sText + str(int(fTimeElapsed) + 1) + " sec"

        # paint rectangle & text, show the frame
        arFrameText = rectangle_text(arFrame, sColor, s, "", tuRectangle)
        cv2.imshow("Video", arFrameText)

        # display optical flow
        if bOpticalFlow:
            arFlow = oOpticalFlow.next(image_crop(arFrame, *tuRectangle))
            liFlows.append(arFlow)
            h, w, _ = arFlow.shape
            arZeros = np.zeros((h, w, 1), dtype=np.float32)
            arFlow = np.concatenate((arFlow, arZeros), axis=2)
            cv2.imshow("Optical flow", flow2colorimage(arFlow))

        # stop after nTimeDuration sec
        if fTimeElapsed >= nTimeDuration: break

        # Press 'q' for early exit
        key = cv2.waitKey(1)  #& 0xFF
        if key == ord('q'): break
        #cv2.waitKey(1)

        if key == ord('r') or key == ord('d'):  # 'r' / 'd' also break the loop; the key is returned to the caller
            break

    return fTimeElapsed, images_normalize(np.array(liFrames), 40, 224, 224,
                                          True), np.array(liFlows), key
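A hedged usage sketch for the function above, assuming a plain cv2.VideoCapture works as oStream and that rectangle_text accepts a color name string; all argument values are placeholders.

# Usage sketch (assumptions: a cv2.VideoCapture is an acceptable oStream and the
# helpers used inside video_capture are importable; "green" is a placeholder color)
import cv2

oStream = cv2.VideoCapture(0)                      # default webcam
fElapsed, arFrames, arFlows, key = video_capture(
    oStream, "green", "Recording ", nTimeDuration=3, bOpticalFlow=True)
oStream.release()
cv2.destroyAllWindows()

print("captured %d frames in %.1f s" % (arFrames.shape[0], fElapsed))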
Example No. 4
class Videoto3D:

    def __init__(self,  width , height , depth = 10):

        self.width = width
        self.height = height
        self.depth = depth
        self.flower = OpticalFlow()
    
    def video3D(self, filename, color=False, skip=True):
        # color True : return the sampled RGB frames
        # color False: return optical-flow frames (the "hard-wired kernel" branch)
        # skip       : sample self.depth (default 10) evenly spaced frames
        
        cap = cv2.VideoCapture(filename)

        nframe = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # total number of frames

        # the same evenly spaced frame indices are used for both modes
        frames = [x * nframe / self.depth for x in range(self.depth)]

        framearray = []
        
        for i in range(self.depth):
            
            cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i])
            ret, prvs = cap.read()
            
            if prvs is None:
                break
            prvs = cv2.resize(prvs, (self.height, self.width))  # note: cv2.resize expects (width, height)
            
            if color:
                framearray.append(prvs)
                
            else:
                if i == self.depth - 1:
                    break
                
                else:
                    cap.set(cv2.CAP_PROP_POS_FRAMES, frames[i + 1])
                    ret, next_frame = cap.read()
                    next_frame = cv2.resize(next_frame, (self.height, self.width))
                    
                    image1 = prvs
                    image2 = next_frame
                   
                    flow = self.flower.predict(image1, image2)
                    # flow = np.asanyarray(flow)[0]
                    framearray.append(flow)
                    
        cap.release()
        framearray = np.asanyarray(framearray)
        return framearray
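A brief usage sketch for Videoto3D, assuming "clip.avi" is a placeholder path and that the OpticalFlow helper exposes the predict() method used above.

# Usage sketch (assumptions: the path is a placeholder; OpticalFlow.predict() exists as used above)
vid3d = Videoto3D(width=112, height=112, depth=10)

rgb_clip = vid3d.video3D("clip.avi", color=True)    # depth sampled RGB frames
flow_clip = vid3d.video3D("clip.avi", color=False)  # depth-1 flow fields between sampled frames

print(rgb_clip.shape, flow_clip.shape)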
Example No. 5
def vid2frames(vid):
	oOpticalFlow = OpticalFlow(bThirdChannel = False)
	tuRectangle = (224, 224)
	success, frame = vid.read()
	rgbFrames = []
	oflowFrames = []
	frames = 0
	fails = 10
	while fails > 0 :

		if success:
			frames += 1

			frame = cv2.flip(frame, 1)
			frame = cv2.resize(frame, tuRectangle, interpolation =cv2.INTER_AREA)

			rgbFrames.append(frame)
			
			oflow = oOpticalFlow.next(frame)
			oflowFrames.append(oflow)

		else:
			fails -= 1

		success, frame = vid.read()

#	success, frame = vid.read()	
#		if not success:
#			print("[warning]: some video frames are corrupted.")
	
	#print(f"duration: {vid.get(cv2.CAP_PROP_POS_MSEC)}")
	rgbFrames = images_rescale(np.array(rgbFrames))
	#oflowFrames = frames_downsample(np.array(oflowFrames), 40)

	#print(rgbFrames.shape)
	#print(oflowFrames.shape)

	print("frames_count:", frames)

	return rgbFrames, oflowFrames, frames
Example No. 6
def video_capture(oStream, sColor, sText, tuRectangle = (224, 224), nTimeDuration = 3, bOpticalFlow = False) -> \
 (float, np.array, np.array):

    if bOpticalFlow:
        oOpticalFlow = OpticalFlow(bThirdChannel=True)

    liFrames = []
    liFlows = []
    fTimeStart = time.time()

    # loop over frames from the video file stream
    while True:
        # grab the frame from the threaded video file stream
        (bGrabbed, arFrame) = oStream.read()
        arFrame = cv2.flip(arFrame, 1)
        liFrames.append(arFrame)

        fTimeElapsed = time.time() - fTimeStart
        s = sText + str(int(fTimeElapsed) + 1) + " sec"

        # paint rectangle & text, show the frame
        arFrameText = rectangle_text(arFrame, sColor, s, "", tuRectangle)
        cv2.imshow("Video", arFrameText)

        # display optical flow
        if bOpticalFlow:
            arFlow = oOpticalFlow.next(image_crop(arFrame, *tuRectangle))
            liFlows.append(arFlow)
            cv2.imshow("Optical flow", flow2colorimage(arFlow))

        # stop after nTimeDuration sec
        if fTimeElapsed >= nTimeDuration: break

        # Press 'q' for early exit
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'): break
        cv2.waitKey(1)

    return fTimeElapsed, np.array(liFrames), np.array(liFlows)
Example No. 7
    def __init__(self, input_element, input_is_video, blur):
        self.input = input_element
        self.input_is_video = input_is_video
        self.optical_flow_processor = OpticalFlow()
        self.__files_per_folder = self.files_per_folder
        self.w = -1
        self.h = -1
        self.c = -1
        self.frames = -1
        self.max_frames = -1
        self.fps = -1
        self.__blur = blur
        self.__req_w = -1
        self.__req_h = -1
        self.__req_fps = -1
        self.__force_gray = False
        self.__video_capture = None
        self.__last_returned_frame = None
        self.__last_returned_frame_number = 0
        self.__last_returned_of = None
        self.__last_returned_time = 0.0
        self.__getinfo()
Example No. 8
def calculate_oflow(rgbFrames, return_dict, indx, prev_f):
    #print(f"proess {indx} running got rgbFrames {rgbFrames.shape}")
    liFlows = []
    #if ind

    oOpticalFlow = OpticalFlow(sAlgorithm="farnback",
                               bThirdChannel=False,
                               fBound=20)

    if indx != 0:
        oOpticalFlow.arPrev = cv2.cvtColor(
            prev_f, cv2.COLOR_BGR2GRAY)  #np.array(prev_f)
        #print(f"oOpticalFlow.arPrev shape {oOpticalFlow.arPrev.shape}")

    for i in range(len(rgbFrames)):
        arFlow = oOpticalFlow.next(rgbFrames[i])

        liFlows.append(arFlow)
        #print("in loop")


#	print(f"proess {i} got oflow = {liFlows.shape}")
    return_dict[indx] = np.array(liFlows)
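The return_dict and indx parameters suggest the function is meant to run in parallel worker processes; the sketch below is one possible way to drive it with multiprocessing, assuming rgbFrames is an (N, H, W, 3) uint8 array of BGR frames with N well above the worker count.

# Parallel usage sketch (assumptions: rgbFrames is a (N, H, W, 3) uint8 array of BGR
# frames; the worker count is illustrative; wrap the call in an
# `if __name__ == "__main__":` guard when the spawn start method is used)
import multiprocessing as mp
import numpy as np

def parallel_oflow(rgbFrames, nWorkers=4):
    manager = mp.Manager()
    return_dict = manager.dict()
    chunks = np.array_split(rgbFrames, nWorkers)

    jobs = []
    prev_f = None
    for indx, chunk in enumerate(chunks):
        # every worker except the first is seeded with the last frame of the
        # previous chunk so the flow stays continuous across chunk boundaries
        p = mp.Process(target=calculate_oflow, args=(chunk, return_dict, indx, prev_f))
        prev_f = chunk[-1]
        jobs.append(p)
        p.start()

    for p in jobs:
        p.join()

    return np.concatenate([return_dict[i] for i in range(len(chunks))], axis=0)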
Example No. 9
    def __init__(self,  width , height , depth = 10):

        self.width = width
        self.height = height
        self.depth = depth
        self.flower = OpticalFlow()
Example No. 10
class OutputStream:
    files_per_folder = InputStream.files_per_folder

    def __init__(self, folder, purge_existing_data):
        self.folder = folder
        self.optical_flow_processor = OpticalFlow()
        self.__files_per_folder = self.files_per_folder
        self.__last_saved_frame_number = 0
        self.__options = {}
        self.create_folders(purge_existing_data)

    def create_folders(self, purge_existing_data):
        if not os.path.exists(self.folder):
            os.makedirs(self.folder)
        elif purge_existing_data:
            shutil.rmtree(self.folder)
            os.makedirs(self.folder)
        if not os.path.exists(self.folder + os.sep + 'frames'):
            os.makedirs(self.folder + os.sep + 'frames')
        if not os.path.exists(self.folder + os.sep + 'motion'):
            os.makedirs(self.folder + os.sep + 'motion')
        if not os.path.exists(self.folder + os.sep + 'features'):
            os.makedirs(self.folder + os.sep + 'features')
        if not os.path.exists(self.folder + os.sep + 'filters'):
            os.makedirs(self.folder + os.sep + 'filters')
        if not os.path.exists(self.folder + os.sep + 'others'):
            os.makedirs(self.folder + os.sep + 'others')

    def save_next(self, img, of, features, filters, others):

        # getting the right folder and file ID
        f = self.__last_saved_frame_number
        n_folder = int(f / self.__files_per_folder) + 1
        n_file = (f + 1) - ((n_folder - 1) * self.__files_per_folder)

        folder_name = format(n_folder, '08d')
        file_name = format(n_file, '03d')

        # creating the internal folders, if needed
        if not os.path.isdir(self.folder + os.sep + "frames" + os.sep +
                             folder_name):
            os.makedirs(self.folder + os.sep + "frames" + os.sep + folder_name)
        if not os.path.isdir(self.folder + os.sep + "motion" + os.sep +
                             folder_name):
            os.makedirs(self.folder + os.sep + "motion" + os.sep + folder_name)
        if not os.path.isdir(self.folder + os.sep + "features" + os.sep +
                             folder_name):
            os.makedirs(self.folder + os.sep + "features" + os.sep +
                        folder_name)
        if not os.path.isdir(self.folder + os.sep + "filters" + os.sep +
                             folder_name):
            os.makedirs(self.folder + os.sep + "filters" + os.sep +
                        folder_name)
        if not os.path.isdir(self.folder + os.sep + "others" + os.sep +
                             folder_name):
            os.makedirs(self.folder + os.sep + "others" + os.sep + folder_name)

        # saving frame
        if not os.path.exists(self.folder + os.sep + "frames" + os.sep +
                              folder_name + os.sep + file_name + ".png"):
            cv2.imwrite(
                self.folder + os.sep + "frames" + os.sep + folder_name +
                os.sep + file_name + ".png", img)

        # saving motion field
        if not os.path.exists(self.folder + os.sep + "motion" + os.sep +
                              folder_name + os.sep + file_name + ".of"):
            self.optical_flow_processor.save_flow(
                self.folder + os.sep + "motion" + os.sep + folder_name +
                os.sep + file_name + ".of", of, img, False)

        # saving features
        with GzipFile(
                self.folder + os.sep + "features" + os.sep + folder_name +
                os.sep + file_name + ".feat", 'wb') as file:
            np.save(file, features)

        # saving filters
        with GzipFile(
                self.folder + os.sep + "filters" + os.sep + folder_name +
                os.sep + file_name + ".fil", 'wb') as file:
            np.save(file, filters)

        # saving others
        with open(
                self.folder + os.sep + "others" + os.sep + folder_name + os.sep +
                file_name + ".txt", 'w') as f:
            json.dump(others, f, indent=4)

        self.__last_saved_frame_number = self.__last_saved_frame_number + 1

    def save_option(self, opt, value):
        self.__options[opt] = value

        with open(self.folder + os.sep + "options.txt", "w") as f:
            json.dump(self.__options, f, indent=4)

    def set_last_frame(self, last_frame_number):
        self.__last_saved_frame_number = last_frame_number - 1
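A short usage sketch for OutputStream with dummy placeholder arrays; the shapes, values, and folder name are illustrative assumptions.

# Usage sketch (assumptions: the arrays below are dummy placeholders with
# illustrative shapes; OpticalFlow.save_flow() accepts the flow/image pair as above)
import numpy as np

out = OutputStream("output_data", purge_existing_data=True)
out.save_option("fps", 25.0)

img = np.zeros((240, 320, 3), dtype=np.uint8)    # dummy BGR frame
of = np.zeros((240, 320, 2), dtype=np.float32)   # dummy optical-flow field
features = np.zeros((1, 128), dtype=np.float32)  # dummy feature vector
filters = np.zeros((8, 3, 3), dtype=np.float32)  # dummy filter bank

out.save_next(img, of, features, filters, {"frame": 1})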
Example No. 11
class InputStream:
    files_per_folder = 100  # global parameter

    def __init__(self, input_element, input_is_video, blur):
        self.input = input_element
        self.input_is_video = input_is_video
        self.optical_flow_processor = OpticalFlow()
        self.__files_per_folder = self.files_per_folder
        self.w = -1
        self.h = -1
        self.c = -1
        self.frames = -1
        self.max_frames = -1
        self.fps = -1
        self.__blur = blur
        self.__req_w = -1
        self.__req_h = -1
        self.__req_fps = -1
        self.__force_gray = False
        self.__video_capture = None
        self.__last_returned_frame = None
        self.__last_returned_frame_number = 0
        self.__last_returned_of = None
        self.__last_returned_time = 0.0
        self.__getinfo()

    def is_video(self):
        return self.input_is_video

    def is_folder(self):
        return not self.input_is_video

    def reset(self):
        w = self.__req_w
        h = self.__req_h
        fps = self.__req_fps
        force_gray = self.__force_gray
        max_frames = self.max_frames
        self.__init__(self.input, self.input_is_video, self.__blur)
        self.set_options(w, h, fps, force_gray, max_frames)

    def set_options(self, w, h, fps, force_gray, max_frames):
        self.__req_w = w
        self.__req_h = h
        self.__req_fps = fps
        self.__force_gray = force_gray
        self.max_frames = max_frames

    def set_last_frame_and_time(self, frame, seconds):
        self.__last_returned_frame_number = frame
        self.__last_returned_time = seconds * 1000
        if self.is_video():
            self.__video_capture.set(cv2.CAP_PROP_POS_MSEC, seconds * 1000.0)

    def get_last_frame_number(self):
        return self.__last_returned_frame_number

    def get_last_frame_time(self):
        return self.__last_returned_time / 1000.0

    def get_next(self, blur_factor=0.0, t=-1, sample_only=False):
        img = None
        next_time = None
        compute_motion = True
        f = None

        # check
        if 0 < self.max_frames <= self.__last_returned_frame_number:
            return None, None

        # opening stream (if not already opened)
        if self.is_video():
            if self.__video_capture is None or not self.__video_capture.isOpened(
            ):
                if self.input != "0":
                    self.__video_capture = cv2.VideoCapture(self.input)
                    self.__video_capture.set(cv2.CAP_PROP_POS_MSEC,
                                             self.__last_returned_time)
                else:
                    self.__video_capture = cv2.VideoCapture(0)
        else:
            f = self.__last_returned_frame_number

        # setting time for the next frame
        if self.input != "0":
            if t > 0.0:
                next_time = t * 1000.0
                if next_time >= self.__last_returned_time:
                    if self.__video_capture is not None:
                        self.__video_capture.set(cv2.CAP_PROP_POS_MSEC,
                                                 next_time)
                    f = Decimal(t * float(self.__req_fps)).quantize(
                        0, ROUND_HALF_UP)
                else:
                    raise IOError("Cannot seek back in time!")
            else:
                if self.__req_fps != self.fps and self.__req_fps > 0.0:
                    next_time = self.__last_returned_time + (
                        1000.0 / float(self.__req_fps))

                    # when the requested FPS is higher than the original one, consecutive
                    # requests may map to the same original frame (no new motion to compute)
                    if self.__req_fps > self.fps and self.__last_returned_frame_number > 0:
                        next_frame_original_fps = (next_time *
                                                   float(self.fps)) / 1000.0
                        cur_frame_original_fps = (self.__last_returned_time *
                                                  float(self.fps)) / 1000.0
                        if Decimal(next_frame_original_fps).quantize(0, ROUND_HALF_UP) == \
                                Decimal(cur_frame_original_fps).quantize(0, ROUND_HALF_UP):
                            compute_motion = False

                    if self.__video_capture is not None:
                        self.__video_capture.set(cv2.CAP_PROP_POS_MSEC,
                                                 next_time)
                    f = Decimal(next_time * float(self.__req_fps)).quantize(
                        0, ROUND_HALF_UP)

        # getting a new frame
        if self.is_video():
            ret_val, img = self.__video_capture.read()

            if not ret_val:
                if self.input != "0" and self.__req_fps > 0 and \
                        self.__req_fps != self.fps and next_time == self.__video_capture.get(cv2.CAP_PROP_POS_MSEC):
                    raise IOError("Unable to capture frames from video!")
                elif self.input != "0" and self.__req_fps <= 0 and \
                        self.__video_capture.get(cv2.CAP_PROP_POS_AVI_RATIO) < 1.0:
                    raise IOError("Unable to capture frames from video!")
                else:
                    self.__video_capture.release()
                    img = None  # reached the end of video

            if img is None:
                return None, None

        elif self.is_folder():
            n_folder = int(f / self.__files_per_folder) + 1
            n_file = (f + 1) - ((n_folder - 1) * self.__files_per_folder)

            folder_name = format(n_folder, '08d')
            file_name = format(n_file, '03d')

            if os.path.exists(self.input + os.sep + "frames" + os.sep +
                              folder_name + os.sep + file_name + ".png"):
                img = cv2.imread(self.input + os.sep + "frames" + os.sep +
                                 folder_name + os.sep + file_name + ".png")
            else:
                return None, None

        # rescaling
        if (self.__req_w > 0
                and self.__req_h > 0) and (self.__req_w != self.w
                                           or self.__req_h != self.h):
            img = cv2.resize(img, (self.__req_w, self.__req_h))

        # blurring
        if blur_factor > 0.0:
            if blur_factor > 1.0:
                raise ValueError(
                    "Invalid blur factor (it must be in [0,1]): " +
                    str(blur_factor))
            ww, hh, dd = img.shape
            kernel_size = int(round(float(min(ww, hh)) * 0.25 * blur_factor))
            if kernel_size % 2 == 0:
                kernel_size = kernel_size + 1
            if self.__blur:
                img = ((1.0 - blur_factor) *
                       cv2.GaussianBlur(img,
                                        (kernel_size, kernel_size), 0)).astype(
                                            np.uint8)
            else:
                img = ((1.0 - blur_factor) * img).astype(np.uint8)
        else:
            if blur_factor < 0.0:
                raise ValueError(
                    "Invalid blur factor (it must be in [0,1]): " +
                    str(blur_factor))

        # computing optical flow
        if compute_motion or self.__last_returned_of is None:
            of = self.optical_flow_processor.compute_flow(
                img,
                pass_by=(self.__last_returned_of is None),
                do_not_update_frame_references=sample_only)
        else:
            of = self.__last_returned_of

        if not sample_only:
            if next_time is None:
                next_time = self.__last_returned_time + (1000.0 /
                                                         float(self.fps))

            self.__last_returned_frame_number = self.__last_returned_frame_number + 1
            self.__last_returned_time = next_time
            self.__last_returned_frame = img
            self.__last_returned_of = of

            # OpenCV frame counts can be unreliable, so keep track of the maximum seen
            if self.__last_returned_frame_number > self.frames:
                self.frames = self.__last_returned_frame_number

        if not self.__force_gray:
            return img, of
        else:
            return np.reshape(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
                              (self.__req_h, self.__req_w, 1)), of

    def __getinfo(self):
        if self.is_video():
            if self.input != "0":
                video = cv2.VideoCapture(self.input)
            else:
                video = cv2.VideoCapture(0)

            if video.isOpened():
                fps = video.get(cv2.CAP_PROP_FPS)  # float
                frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

                if self.input != "0":
                    frames = int(frames)
                else:
                    frames = sys.maxsize  # dummy

                ret_val, img = video.read()

                if ret_val:
                    h, w, c = img.shape
                    w = int(w)
                    h = int(h)
                    c = int(c)
                else:
                    raise IOError("Error while trying to grab a frame from: ",
                                  self.input)
            else:
                raise IOError("Cannot open: ", self.input)

            self.w = w
            self.h = h
            self.c = c
            self.frames = frames
            self.fps = fps

            if self.frames <= 0:
                raise ValueError("Invalid frame count: " + str(self.frames))
            if self.fps <= 0:
                raise ValueError("Invalid FPS count: " + str(self.fps))
            if self.w <= 0 or self.h <= 0:
                raise ValueError("Invalid resolution: " + str(self.w) + "x" +
                                 str(self.h))

        elif self.is_folder():
            first_file = ''
            dirs = glob(self.input + os.sep + "frames" + os.sep + "*" + os.sep)

            if dirs is not None and len(dirs) > 0:
                dirs.sort()

                n = len(dirs) - 2  # discarding '.' and '..'
                i = 1

                for d in dirs:
                    if not os.path.isdir(d):
                        continue
                    d = os.path.basename(os.path.dirname(d))
                    if d == '.' or d == '..':
                        continue

                    folder_name = format(i, '08d')
                    if folder_name != d:
                        raise ValueError("Invalid/unexpected folder: " +
                                         self.input + os.sep + "frames" +
                                         os.sep + d)

                    files = glob(self.input + os.sep + "frames" + os.sep + d +
                                 os.sep + "*.png")
                    files.sort()
                    j = 1

                    if i < n and len(files) != self.__files_per_folder:
                        raise ValueError(
                            "Invalid/unexpected number of files in: " +
                            self.input + os.sep + "frames" + os.sep + d)

                    for f in files:
                        file_name = format(j, '03d')
                        f = os.path.basename(f)
                        if file_name + ".png" != f:
                            raise ValueError("Invalid/unexpected file '" + f +
                                             "' in: " + self.input + os.sep +
                                             "frames" + os.sep + d)
                        j = j + 1

                    if len(first_file) == 0:
                        files.sort()
                        first_file = files[0]
                        self.frames = 0

                    self.frames = self.frames + len(files)

                    i = i + 1

                img = cv2.imread(first_file)
                h, w, c = img.shape

                self.w = int(w)
                self.h = int(h)
                self.c = int(c)
                self.fps = -1

                if self.frames <= 0:
                    raise ValueError("Invalid frame count: " +
                                     str(self.frames))
                if self.w <= 0 or self.h <= 0:
                    raise ValueError("Invalid resolution: " + str(self.w) +
                                     "x" + str(self.h))

                try:
                    opts = json.load(open(self.input + os.sep + "options.txt"))
                    self.fps = float(opts['fps'])
                except (ValueError, IOError):
                    raise IOError(
                        "Options file is missing/unreadable! (needed to read the FPS value): "
                        + self.input + os.sep + "options.txt")
            else:
                raise ValueError("No frames in: " + self.input + os.sep +
                                 "frames" + os.sep)