Example #1
    def _py_func_rgb_test_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT)

        total_rgb_file = []
        # Sample _frame_counts evenly spaced frame indices; fall back to the
        # first frames when the clip is shorter than _frame_counts (step of 0).
        try:
            index_list = np.arange(0, rgb_len,
                                   rgb_len // _frame_counts)[:_frame_counts]
        except (ValueError, ZeroDivisionError):
            index_list = np.arange(0, rgb_len)[:_frame_counts]

        # Decode the sampled frames: BGR -> RGB, resized to 340 x 256.
        image_list = []
        for index in index_list:
            rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            _, image = rgb_cap.read()
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, (340, 256))
                image_list.append(image)

        # Pad by tiling when fewer than _frame_counts frames were decoded.
        if len(image_list) != _frame_counts:
            image_list = np.concatenate([image_list] * _frame_counts,
                                        axis=0)[:_frame_counts]

        image = np.stack(image_list, axis=0)

        img = image
        # Test-time augmentation: center crop + horizontal flip (2 views) or
        # five fixed crops + flips (10 views).
        if _test_crop == 'center':
            rgb_file = DataAugmentation.center_crop(img, 224, 224)
            rgb_file = np.float32(rgb_file)
            rgb_file = normalize(rgb_file)
            rgb_file_flip = rgb_file[:, :, ::-1, :]
            total_rgb_file.append(rgb_file)
            total_rgb_file.append(rgb_file_flip)
        else:
            for k in range(5):
                image = DataAugmentation.random_Crop(img, 1, k)
                image = np.float32(image)
                image = normalize(image)
                image_flip = image[:, :, ::-1, :]
                total_rgb_file.append(image)
                total_rgb_file.append(image_flip)

        rgb_cap.release()
        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_rgb_file, one_hot_label

        return total_rgb_file
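The _py_func_ prefix suggests this routine is meant to be wrapped into a TensorFlow input pipeline with tf.py_func, but the enclosing pipeline is not part of the snippet. Below is a minimal wiring sketch under that assumption (TF 1.x); build_rgb_test_dataset and _numpy_adapter are illustrative names, not part of the original code.

    import numpy as np
    import tensorflow as tf

    def build_rgb_test_dataset(video_paths, labels, batch_size=1):
        # Illustrative wrapper only: stack the Python list of crops returned
        # by _py_func_rgb_test_vp into one float32 array for tf.py_func.
        def _numpy_adapter(path, label):
            crops, one_hot = _py_func_rgb_test_vp(path, label)
            return np.stack(crops, axis=0).astype(np.float32), one_hot

        def _wrapper(path, label):
            crops, one_hot = tf.py_func(_numpy_adapter, [path, label],
                                        [tf.float32, tf.float32])
            return crops, one_hot

        dataset = tf.data.Dataset.from_tensor_slices((video_paths, labels))
        dataset = dataset.map(_wrapper, num_parallel_calls=4)
        return dataset.batch(batch_size).prefetch(1)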
Example #2
    def _py_func_rgb_test_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        _batch_size = 25

        rgb_cap = cv2.VideoCapture(rgb_path)
        rgb_len = rgb_cap.get(cv2.CAP_PROP_FRAME_COUNT) - 10

        total_rgb_file = []
        # Sample _batch_size evenly spaced frame indices, tiling the index
        # range when the clip is shorter than _batch_size.
        if rgb_len <= _batch_size:
            factor = int((_batch_size - 1) // rgb_len + 1)
            index_list = np.concatenate([np.arange(0, rgb_len)] * factor,
                                        axis=-1)[:_batch_size]
        else:
            index_list = np.arange(0, rgb_len,
                                   rgb_len // _batch_size)[:_batch_size]

        for index in index_list:
            rgb_cap.set(cv2.CAP_PROP_POS_FRAMES, index)
            _, image = rgb_cap.read()
            if image is not None:
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = np.float32(image)
                image = cv2.resize(image, (340, 256))

                if test_crop == 'center':
                    image_ = DataAugmentation.center_crop(image, 224, 224)
                    if _preprocess_name == 'pytorch':
                        image_ = normalize(image_)
                    elif _preprocess_name == 'tf':
                        image_ = tf_preprocess(image_)
                    image_flip = np.fliplr(image_)
                    total_rgb_file.append(image_)
                    total_rgb_file.append(image_flip)
                else:
                    for i in range(5):
                        image_ = DataAugmentation.random_Crop(image, 1, i)
                        if _preprocess_name == 'pytorch':
                            image_ = normalize(image_)
                        elif _preprocess_name == 'tf':
                            image_ = tf_preprocess(image_)
                        image_flip = np.fliplr(image_)
                        total_rgb_file.append(image_)
                        total_rgb_file.append(image_flip)

        rgb_cap.release()
        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_rgb_file, one_hot_label

        return total_rgb_file
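Both RGB readers call a normalize helper, and this variant also calls tf_preprocess in the 'tf' branch; neither helper is shown in the snippets. Below is a plausible sketch assuming the 'pytorch' branch applies ImageNet mean/std normalization and the 'tf' branch applies Inception-style [-1, 1] scaling; the exact constants are assumptions, not taken from the original code.

    import numpy as np

    # Assumed constants: standard ImageNet statistics for RGB inputs.
    _IMAGENET_MEAN = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    _IMAGENET_STD = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    def normalize(image):
        # Scale to [0, 1] and normalize each RGB channel
        # (works for H x W x 3 and T x H x W x 3 inputs).
        image = np.float32(image) / 255.0
        return (image - _IMAGENET_MEAN) / _IMAGENET_STD

    def tf_preprocess(image):
        # Scale pixel values from [0, 255] to [-1, 1].
        return np.float32(image) / 127.5 - 1.0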
Example #3
    def _py_func_rgb_test_vp(rgb_path, label):

        rgb_path = rgb_path.decode()

        rgb_file = sorted(os.listdir(rgb_path))

        _batch_size = 25

        # Sample _batch_size evenly spaced frame files, tiling the index
        # range when the clip has fewer frames than _batch_size.
        if len(rgb_file) < _batch_size:
            index_list = np.arange(0, len(rgb_file))
            index_list = np.concatenate(
                [index_list] * (_batch_size // len(rgb_file) + 1),
                axis=0)[:_batch_size]
        else:
            index_list = np.arange(0, len(rgb_file),
                                   len(rgb_file) // _batch_size)[:_batch_size]

        img_list = []
        for i in index_list:
            rgb_file_path = os.path.join(rgb_path, rgb_file[i])

            img = cv2.imread(rgb_file_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (340, 256))
            # Five fixed crops plus horizontal flips (10 views per frame).
            for j in range(5):
                image = DataAugmentation.random_Crop(img, 1, j)
                image = cv2.resize(image, (224, 224))
                image = np.float32(image)
                image = normalize(image)
                image_flip = np.fliplr(image)
                img_list.append(image)
                img_list.append(image_flip)


        if label is not None:
            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return img_list, one_hot_label

        return img_list
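Every example depends on DataAugmentation.center_crop and DataAugmentation.random_Crop(img, 1, k), whose implementation is not included. Judging from the fixed for-loops over range(5) combined with horizontal flips, the third argument most likely selects one of five fixed crop positions (four corners plus center), i.e. the usual ten-crop test-time augmentation. The sketch below is written under that assumption and is not the original implementation.

    import numpy as np

    class DataAugmentation:
        # Assumed crop helpers; the five positions are a guess based on the
        # five-crop + flip pattern above, not the original implementation.

        @staticmethod
        def center_crop(img, crop_h=224, crop_w=224):
            h, w = img.shape[-3], img.shape[-2]   # H x W x C or T x H x W x C
            top, left = (h - crop_h) // 2, (w - crop_w) // 2
            return img[..., top:top + crop_h, left:left + crop_w, :]

        @staticmethod
        def random_Crop(img, scale, position, crop_h=224, crop_w=224):
            # position in 0..4 -> four corners plus the center (assumed order).
            h, w = img.shape[-3], img.shape[-2]
            corners = [(0, 0), (0, w - crop_w), (h - crop_h, 0),
                       (h - crop_h, w - crop_w),
                       ((h - crop_h) // 2, (w - crop_w) // 2)]
            top, left = corners[position]
            return img[..., top:top + crop_h, left:left + crop_w, :]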
Example #4
    def _py_func_flow_test_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)

        flow_len = len(flow_file)
        # Sample _frame_counts evenly spaced start indices, leaving room for
        # _new_length consecutive flow frames; fall back to the first indices
        # when the clip is too short (step of 0).
        try:
            index_list = np.arange(0, flow_len - _new_length,
                                   (flow_len - _new_length) //
                                   _frame_counts)[:_frame_counts]
        except (ValueError, ZeroDivisionError):
            index_list = np.arange(0, flow_len - _new_length)[:_frame_counts]

        total_img_list = []
        video_list = []
        # For each start index, stack _new_length consecutive (u, v) pairs and
        # concatenate them along the channel axis (2 * _new_length channels).
        for index in index_list:
            img_list = []
            for i in range(index, index + _new_length):
                img_u_path = os.path.join(flow_u_path, flow_file[i])
                img_v_path = os.path.join(flow_v_path, flow_file[i])

                img_u = cv2.imread(img_u_path, 0)
                img_v = cv2.imread(img_v_path, 0)

                if img_u is not None and img_v is not None:
                    img = np.stack([img_u, img_v], axis=-1)
                    img = cv2.resize(img, (340, 256))
                    img_list.append(img)

            img = np.concatenate(img_list, axis=-1)
            video_list.append(img)

        # Pad by tiling when fewer than _frame_counts snippets were built.
        if len(video_list) != _frame_counts:
            video_list = np.concatenate([video_list] * _frame_counts,
                                        axis=0)[:_frame_counts]

        img = np.stack(video_list, axis=0)

        # Test-time augmentation: center crop + flip (2 views) or five fixed
        # crops + flips (10 views); flow is normalized with mean 0.5, std 0.226.
        if _test_crop == 'center':
            image = DataAugmentation.center_crop(img, 224, 224)
            image = np.float32(image)
            image = (image / 255 - 0.5) / 0.226
            image_flip = image[:, :, ::-1, :]
            total_img_list.append(image)
            total_img_list.append(image_flip)
        else:
            for k in range(5):
                image = DataAugmentation.random_Crop(img, 1, k)
                image = np.float32(image)
                image = (image / 255 - 0.5) / 0.226
                image_flip = image[:, :, ::-1, :]
                total_img_list.append(image)
                total_img_list.append(image_flip)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_img_list, one_hot_label

        return total_img_list
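In the flow reader above, each sampled start index produces _new_length consecutive (u, v) pairs concatenated along the channel axis, so one snippet carries 2 * _new_length channels before cropping. A small shape walk-through, assuming _new_length = 5 (a common setting for stacked optical flow; the actual value is defined outside the snippet):

    import numpy as np

    _new_length = 5  # assumed value; defined elsewhere in the original code
    frames = [np.zeros((256, 340, 2), dtype=np.float32)
              for _ in range(_new_length)]
    snippet = np.concatenate(frames, axis=-1)
    print(snippet.shape)   # (256, 340, 10): 2 * _new_length flow channels
    # After the 224 x 224 crop, the per-snippet input is (224, 224, 10), and
    # the reader stacks _frame_counts such snippets along a new leading axis.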
Example #5
    def _py_func_flow_test_vp(flow_path, label):

        f_upath, f_vpath = flow_path
        flow_u_path = f_upath.decode()
        flow_v_path = f_vpath.decode()
        flow_file = os.listdir(flow_u_path)
        flow_file = sorted(flow_file)

        _batch_size = 25
        # Sample _batch_size evenly spaced start indices, tiling the index
        # range when there are fewer than _batch_size usable flow frames.
        if len(flow_file) - _new_length < _batch_size:
            index_list = np.arange(0, len(flow_file) - _new_length)
            index_list = np.concatenate([index_list] *
                                        (_batch_size //
                                         (len(flow_file) - _new_length) + 1),
                                        axis=0)[:_batch_size]
        else:
            index_list = np.arange(0,
                                   len(flow_file) - _new_length,
                                   (len(flow_file) - _new_length) //
                                   _batch_size)[:_batch_size]

        total_img_list = []
        for index in index_list:
            img_list = []
            for i in range(index, index + _new_length):
                img_u_path = os.path.join(flow_u_path, flow_file[i])
                img_v_path = os.path.join(flow_v_path, flow_file[i])

                img_u = cv2.imread(img_u_path, 0)
                img_v = cv2.imread(img_v_path, 0)

                # if img_u is not None and img_v is not None:
                img = np.stack([img_u, img_v], axis=-1)
                img_list.append(img)

            img = np.concatenate(img_list, axis=-1)
            img = cv2.resize(img, (340, 256))

            if test_crop == 'center':
                image = DataAugmentation.center_crop(img, 224, 224)
                image = np.float32(image)
                if _preprocess_name == 'pytorch':
                    image = (image / 255 - 0.5) / 0.226
                elif _preprocess_name == 'tf':
                    image = tf_preprocess(image)
                image_flip = np.fliplr(image)
                total_img_list.append(image)
                total_img_list.append(image_flip)
            else:
                for j in range(5):
                    image = DataAugmentation.random_Crop(img, 1, j)
                    image = np.float32(image)
                    if _preprocess_name == 'pytorch':
                        image = (image / 255 - 0.5) / 0.226
                    elif _preprocess_name == 'tf':
                        image = tf_preprocess(image)
                    image_flip = np.fliplr(image)
                    total_img_list.append(image)
                    total_img_list.append(image_flip)

        if label is not None:

            one_hot_label = np.zeros(101, dtype=np.float32)
            one_hot_label[label] = 1

            return total_img_list, one_hot_label

        return total_img_list
Example #6
    def _py_func_test_vp(rgb_path, flow_path, label):

        rgb_path = rgb_path.decode()
        flow_path = flow_path.decode()

        rgb_cap = cv2.VideoCapture(rgb_path)
        flow_cap = cv2.VideoCapture(flow_path)

        # Decode every RGB frame: BGR -> RGB, resized to 340 x 256.
        rgb_file = []
        flow_file = []
        while True:
            flag, img = rgb_cap.read()
            if not flag:
                break
            if img is not None:
                img = np.float32(img)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = cv2.resize(img, (340, 256))
                rgb_file.append(img)

        # Decode every flow frame, keeping only the first two channels (u, v).
        while True:
            flag, img = flow_cap.read()
            if not flag:
                break
            if img is not None:
                img = np.float32(img)
                img = cv2.resize(img, (340, 256))
                flow_file.append(img[..., :2])

        rgb_cap.release()
        flow_cap.release()

        _batch_size = 25
        min_len = min(len(rgb_file), len(flow_file)) - 10

        # Sample _batch_size evenly spaced indices, tiling the index range
        # when the clip is shorter than _batch_size.
        if min_len <= _batch_size:
            factor = (_batch_size - 1) // min_len + 1
            index_list = np.concatenate([np.arange(0, min_len)] * factor,
                                        axis=-1)[:_batch_size]
        else:
            index_list = np.arange(0, min_len,
                                   min_len // _batch_size)[:_batch_size]
        
        # Five fixed crops per sampled frame (flips and center crop are not
        # used in this variant); RGB frames are normalized, flow snippets are
        # scaled to [-1, 1].
        rgb_file_list = []
        flow_file_list = []
        for i in index_list:
            r_img = rgb_file[i]
            for j in range(5):
                image_ = DataAugmentation.random_Crop(r_img, 1, j)
                image_ = normalize(image_)
                rgb_file_list.append(image_)

        for i in index_list:
            # Stack 10 consecutive flow frames (u, v) -> 20 channels.
            f_img = flow_file[i:i + 10]
            f_img = np.concatenate(f_img, axis=-1)
            for j in range(5):
                image_ = DataAugmentation.random_Crop(f_img, 1, j)
                image_ = (image_ / 255) * 2 - 1
                flow_file_list.append(image_)

        r_file = np.stack(rgb_file_list, axis=0)
        f_file = np.stack(flow_file_list, axis=0)

        one_hot_label = np.zeros(101, dtype=np.float32)
        one_hot_label[label] = 1

        return r_file, f_file, one_hot_label
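This variant keeps every crop as an extra leading dimension (25 sampled frames times 5 crops per stream), so a video-level prediction is normally obtained by averaging the per-crop class scores. A minimal sketch of that reduction, assuming the model emits one 101-way score vector per crop; video_score is an illustrative helper, not part of the original code.

    import numpy as np

    def video_score(crop_scores):
        # crop_scores: (num_crops, 101), e.g. the model output for the
        # 25 * 5 = 125 crops produced per stream by _py_func_test_vp above.
        return np.mean(crop_scores, axis=0)

    # Hypothetical usage with stand-in scores for one video.
    scores = np.random.rand(125, 101).astype(np.float32)
    print(int(np.argmax(video_score(scores))))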